Merge branch 'master' into feature/crypto

Conflicts:
	swift/common/middleware/gatekeeper.py
	test/unit/common/middleware/helpers.py
	test/unit/common/middleware/test_gatekeeper.py
	test/unit/obj/test_diskfile.py
	test/unit/proxy/test_server.py

Change-Id: I17376d9e68a14a90fdaab3676f8377e368b46c43
This commit is contained in:
Alistair Coles 2016-03-31 13:53:08 +01:00
commit 77c181161a
118 changed files with 5941 additions and 2193 deletions

1
.gitignore vendored
View File

@ -9,6 +9,7 @@ ChangeLog
.coverage
*.egg
*.egg-info
.eggs/*
.DS_Store
.tox
pycscope.*

View File

@ -93,3 +93,11 @@ Richard Hawkins <richard.hawkins@rackspace.com> <hurricanerix@gmail.com>
Ondrej Novy <ondrej.novy@firma.seznam.cz>
Peter Lisak <peter.lisak@firma.seznam.cz>
Ke Liang <ke.liang@easystack.cn>
Daisuke Morita <morita.daisuke@ntti3.com> <morita.daisuke@lab.ntt.co.jp>
Andreas Jaeger <aj@suse.de> <aj@suse.com>
Hugo Kuo <tonytkdk@gmail.com>
Gage Hugo <gh159m@att.com>
Oshrit Feder <oshritf@il.ibm.com>
Larry Rensing <lr699s@att.com>
Ben Keller <bjkeller@us.ibm.com>
Chaozhe Chen <chaozhe.chen@easystack.cn>

20
AUTHORS
View File

@ -13,7 +13,7 @@ Jay Payne (letterj@gmail.com)
Will Reese (wreese@gmail.com)
Chuck Thier (cthier@gmail.com)
CORE Emeritus
Core Emeritus
-------------
Chmouel Boudjnah (chmouel@enovance.com)
Florian Hines (syn@ronin.io)
@ -33,6 +33,7 @@ Joe Arnold (joe@swiftstack.com)
Ionuț Arțăriși (iartarisi@suse.cz)
Minwoo Bae (minwoob@us.ibm.com)
Bob Ball (bob.ball@citrix.com)
Christopher Bartz (bartz@dkrz.de)
Christian Berendt (berendt@b1-systems.de)
Luis de Bethencourt (luis@debethencourt.com)
Keshava Bharadwaj (kb.sankethi@gmail.com)
@ -54,6 +55,7 @@ Emmanuel Cazenave (contact@emcaz.fr)
Mahati Chamarthy (mahati.chamarthy@gmail.com)
Zap Chang (zapchang@gmail.com)
François Charlier (francois.charlier@enovance.com)
Chaozhe Chen (chaozhe.chen@easystack.cn)
Ray Chen (oldsharp@163.com)
Harshit Chitalia (harshit@acelio.com)
Brian Cline (bcline@softlayer.com)
@ -61,6 +63,7 @@ Alistair Coles (alistair.coles@hpe.com)
Clément Contini (ccontini@cloudops.com)
Brian Curtin (brian.curtin@rackspace.com)
Thiago da Silva (thiago@redhat.com)
dangming (dangming@unitedstack.com)
Julien Danjou (julien@danjou.info)
Paul Dardeau (paul.dardeau@intel.com)
Zack M. Davis (zdavis@swiftstack.com)
@ -86,9 +89,11 @@ Filippo Giunchedi (fgiunchedi@wikimedia.org)
Mark Gius (launchpad@markgius.com)
David Goetz (david.goetz@rackspace.com)
Tushar Gohad (tushar.gohad@intel.com)
Thomas Goirand (thomas@goirand.fr)
Jonathan Gonzalez V (jonathan.abdiel@gmail.com)
Joe Gordon (jogo@cloudscaling.com)
ChangBo Guo(gcb) (eric.guo@easystack.cn)
Ankur Gupta (ankur.gupta@intel.com)
David Hadas (davidh@il.ibm.com)
Andrew Hale (andy@wwwdata.eu)
Soren Hansen (soren@linux2go.dk)
@ -106,6 +111,7 @@ Charles Hsu (charles0126@gmail.com)
Joanna H. Huang (joanna.huitzu.huang@gmail.com)
Kun Huang (gareth@unitedstack.com)
Bill Huber (wbhuber@us.ibm.com)
Gage Hugo (gh159m@att.com)
Matthieu Huin (mhu@enovance.com)
Hodong Hwang (hodong.hwang@kt.com)
Motonobu Ichimura (motonobu@gmail.com)
@ -127,6 +133,7 @@ Ilya Kharin (ikharin@mirantis.com)
Dae S. Kim (dae@velatum.com)
Nathan Kinder (nkinder@redhat.com)
Eugene Kirpichov (ekirpichov@gmail.com)
Ben Keller (bjkeller@us.ibm.com)
Leah Klearman (lklrmn@gmail.com)
Martin Kletzander (mkletzan@redhat.com)
Jaivish Kothari (jaivish.kothari@nectechnologies.in)
@ -134,6 +141,7 @@ Steve Kowalik (steven@wedontsleep.org)
Sergey Kraynev (skraynev@mirantis.com)
Sushil Kumar (sushil.kumar2@globallogic.com)
Madhuri Kumari (madhuri.rai07@gmail.com)
Hugo Kuo (tonytkdk@gmail.com)
Steven Lang (Steven.Lang@hgst.com)
Gonéri Le Bouder (goneri.lebouder@enovance.com)
Romain Le Disez (romain.ledisez@ovh.net)
@ -143,6 +151,8 @@ Thomas Leaman (thomas.leaman@hp.com)
Eohyung Lee (liquidnuker@gmail.com)
Zhao Lei (zhaolei@cn.fujitsu.com)
Jamie Lennox (jlennox@redhat.com)
Cheng Li (shcli@cn.ibm.com)
Mingyu Li (li.mingyu@99cloud.net)
Tong Li (litong01@us.ibm.com)
Ke Liang (ke.liang@easystack.cn)
Peter Lisak (peter.lisak@firma.seznam.cz)
@ -161,6 +171,7 @@ Juan J. Martinez (juan@memset.com)
Marcelo Martins (btorch@gmail.com)
Nakagawa Masaaki (nakagawamsa@nttdata.co.jp)
Dolph Mathews (dolph.mathews@gmail.com)
Tomas Matlocha (tomas.matlocha@firma.seznam.cz)
Kenichiro Matsuda (matsuda_kenichi@jp.fujitsu.com)
Michael Matur (michael.matur@gmail.com)
Donagh McCabe (donagh.mccabe@hpe.com)
@ -171,7 +182,7 @@ Samuel Merritt (sam@swiftstack.com)
Stephen Milton (milton@isomedia.com)
Jola Mirecka (jola.mirecka@hp.com)
Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp)
Daisuke Morita (morita.daisuke@lab.ntt.co.jp)
Daisuke Morita (morita.daisuke@ntti3.com)
Dirk Mueller (dirk@dmllr.de)
Takashi Natsume (natsume.takashi@lab.ntt.co.jp)
Russ Nelson (russ@crynwr.com)
@ -198,11 +209,13 @@ Sivasathurappan Radhakrishnan (siva.radhakrishnan@intel.com)
Sarvesh Ranjan (saranjan@cisco.com)
Falk Reimann (falk.reimann@sap.com)
Brian Reitz (brian.reitz@oracle.com)
Qiaowei Ren (qiaowei.ren@intel.com)
Felipe Reyes (freyes@tty.cl)
Janie Richling (jrichli@us.ibm.com)
Matt Riedemann (mriedem@us.ibm.com)
Li Riqiang (lrqrun@gmail.com)
Rafael Rivero (rafael@cloudscaling.com)
Larry Rensing (lr699s@att.com)
Victor Rodionov (victor.rodionov@nexenta.com)
Eran Rom (eranr@il.ibm.com)
Aaron Rosen (arosen@nicira.com)
@ -211,6 +224,7 @@ Hamdi Roumani (roumani@ca.ibm.com)
Shilla Saebi (shilla.saebi@gmail.com)
Atsushi Sakai (sakaia@jp.fujitsu.com)
Cristian A Sanchez (cristian.a.sanchez@intel.com)
Olga Saprycheva (osapryc@us.ibm.com)
Christian Schwede (cschwede@redhat.com)
Mark Seger (mark.seger@hpe.com)
Azhagu Selvan SP (tamizhgeek@gmail.com)
@ -223,6 +237,7 @@ Michael Shuler (mshuler@gmail.com)
David Moreau Simard (dmsimard@iweb.com)
Scott Simpson (sasimpson@gmail.com)
Pradeep Kumar Singh (pradeep.singh@nectechnologies.in)
Sarafraj Singh (Sarafraj.Singh@intel.com)
Liu Siqi (meizu647@gmail.com)
Adrian Smith (adrian_f_smith@dell.com)
Jon Snitow (otherjon@swiftstack.com)
@ -259,6 +274,7 @@ Yaguang Wang (yaguang.wang@intel.com)
Chris Wedgwood (cw@f00f.org)
Conrad Weidenkeller (conrad.weidenkeller@rackspace.com)
Doug Weimer (dweimer@gmail.com)
Andrew Welleck (awellec@us.ibm.com)
Wu Wenxiang (wu.wenxiang@99cloud.net)
Cory Wright (cory.wright@rackspace.com)
Ye Jia Xu (xyj.asmy@gmail.com)

148
CHANGELOG
View File

@ -1,3 +1,151 @@
swift (2.7.0, OpenStack Mitaka)
* Bump PyECLib requirement to >= 1.2.0
* Update container on fast-POST
"Fast-POST" is the mode where `object_post_as_copy` is set to
`False` in the proxy server config. This mode now allows for
fast, efficient updates of metadata without needing to fully
recopy the contents of the object. While the default still is
`object_post_as_copy` as True, the plan is to change the default
to False and then deprecate post-as-copy functionality in later
releases. Fast-POST now supports container-sync functionality.
* Add concurrent reads option to proxy.
This change adds 2 new parameters to enable and control concurrent
GETs in Swift, these are `concurrent_gets` and `concurrency_timeout`.
`concurrent_gets` allows you to turn on or off concurrent
GETs; when on, it will set the GET/HEAD concurrency to the
replica count. And in the case of EC HEADs it will set it to
ndata. The proxy will then serve only the first valid source to
respond. This applies to all account, container, and replicated
object GETs and HEADs. For EC only HEAD requests are affected.
The default for `concurrent_gets` is off.
`concurrency_timeout` is related to `concurrent_gets` and is
the amount of time to wait before firing the next thread. A
value of 0 will fire at the same time (fully concurrent), but
setting another value will stagger the firing allowing you the
ability to give a node a short chance to respond before firing
the next. This value is a float and should be somewhere between
0 and `node_timeout`. The default is `conn_timeout`, meaning by
default it will stagger the firing.
* Added an operational procedures guide to the docs. It can be
found at http://swift.openstack.org/ops_runbook/index.html and
includes information on detecting and handling day-to-day
operational issues in a Swift cluster.
* Make `handoffs_first` a more useful mode for the object replicator.
The `handoffs_first` replication mode is used during periods of
problematic cluster behavior (e.g. full disks) when replication
needs to quickly drain partitions from a handoff node and move
them to a primary node.
Previously, `handoffs_first` would sort that handoff work before
"normal" replication jobs, but the normal replication work could
take quite some time and result in handoffs not being drained
quickly enough.
In order to focus on getting handoff partitions off the node
`handoffs_first` mode will now abort the current replication
sweep before attempting any primary suffix syncing if any of the
handoff partitions were not removed for any reason - and start
over with replication of handoffs jobs as the highest priority.
Note that `handoffs_first` being enabled will emit a warning on
start up, even if no handoff jobs fail, because of the negative
impact it can have during normal operations by dog-piling on a
node that was temporarily unavailable.
* By default, inbound `X-Timestamp` headers are now disallowed
(except when in an authorized container-sync request). This
header is useful for allowing data migration from other storage
systems to Swift and keeping the original timestamp of the data.
If you have this migration use case (or any other requirement on
allowing the clients to set an object's timestamp), set the
`shunt_inbound_x_timestamp` config variable to False in the
gatekeeper middleware config section of the proxy server config.
* Requesting a SLO manifest file with the query parameters
"?multipart-manifest=get&format=raw" will return the contents of
the manifest in the format as was originally sent by the client.
The "format=raw" is new.
* Static web page listings can now be rendered with a custom
label. By default listings are rendered with a label of:
"Listing of /v1/<account>/<container>/<path>". This change adds
a new custom metadata key/value pair
`X-Container-Meta-Web-Listings-Label: My Label` that when set,
will cause the following: "Listing of My Label/<path>" to be
rendered instead.
* Previously, static large objects (SLOs) had a minimum segment
size (default to 1MiB). This limit has been removed, but small
segments will be ratelimited. The config parameter
`rate_limit_under_size` controls the definition of "small"
segments (1MiB by default), and `rate_limit_segments_per_sec`
controls how many segments per second can be served (default is 1).
With the default values, the effective behavior is identical to the
previous behavior when serving SLOs.
* Container sync has been improved to perform a HEAD on the remote
side of the sync for each object being synced. If the object
exists on the remote side, container-sync will no longer
transfer the object, thus significantly lowering the network
requirements to use the feature.
* The object auditor will now clean up any old, stale rsync temp
files that it finds. These rsync temp files are left if the
rsync process fails without completing a full transfer of an
object. Since these files can be large, the temp files may end
up filling a disk. The new auditor functionality will reap these
rsync temp files if they are old. The new object-auditor config
variable `rsync_tempfile_timeout` is the number of seconds old a
tempfile must be before it is reaped. By default, this variable
is set to "auto" or the rsync_timeout plus 900 seconds (falling
back to a value of 1 day).
* The Erasure Code reconstruction process has been made more
efficient by not syncing data files when only the durable commit
file is missing.
* Fixed a bug where 304 and 416 response may not have the right
Etag and Accept-Ranges headers when the object is stored in an
Erasure Coded policy.
* Versioned writes now correctly stores the date of previous versions
using GMT instead of local time.
* The deprecated Keystone middleware option is_admin has been removed.
* Fixed log format in object auditor.
* The zero-byte mode (ZBF) of the object auditor will now properly
observe the `--once` option.
* Swift keeps track, internally, of "dirty" parts of the partition
keyspace with a "hashes.pkl" file. Operations on this file no
longer require a read-modify-write cycle and use a new
"hashes.invalid" file to track dirty partitions. This change
will improve end-user performance for PUT and DELETE operations.
* The object replicator's succeeded and failed counts are now logged.
* `swift-recon` can now query hosts by storage policy.
* The log_statsd_host value can now be an IPv6 address or a hostname
which only resolves to an IPv6 address.
* Erasure coded fragments now properly call fallocate to reserve disk
space before being written.
* Various other minor bug fixes and improvements.
swift (2.6.0)
* Dependency changes

View File

@ -77,7 +77,7 @@ def main():
# SIGKILL daemon after kill_wait period
parser.add_option('--kill-after-timeout', dest='kill_after_timeout',
action='store_true',
help="Kill daemon and all childs after kill-wait "
help="Kill daemon and all children after kill-wait "
"period.")
options, args = parser.parse_args()

View File

@ -25,7 +25,7 @@ from swift.container.reconciler import add_to_reconciler_queue
"""
This tool is primarily for debugging and development but can be used as an example
of how an operator could enqueue objects manually if a problem is discovered -
might be particularlly useful if you need to hack a fix into the reconciler
might be particularly useful if you need to hack a fix into the reconciler
and re-run it.
"""

View File

@ -207,20 +207,21 @@ set to a True value (e.g. "True" or "1"). To handle only non-replication
verbs, set to "False". Unless you have a separate replication network, you
should not specify any value for "replication_server".
.IP "\fBreplication_concurrency\fR"
Set to restrict the number of concurrent incoming REPLICATION requests
Set to 0 for unlimited (the default is 4). Note that REPLICATION is currently an ssync only item.
Set to restrict the number of concurrent incoming SSYNC requests
Set to 0 for unlimited (the default is 4). Note that SSYNC requests are only used
by the object reconstructor or the object replicator when configured to use ssync.
.IP "\fBreplication_one_per_device\fR"
Restricts incoming REPLICATION requests to one per device,
Restricts incoming SSYNC requests to one per device,
replication_concurrency above allowing. This can help control I/O to each
device, but you may wish to set this to False to allow multiple REPLICATION
device, but you may wish to set this to False to allow multiple SSYNC
requests (up to the above replication_concurrency setting) per device. The default is true.
.IP "\fBreplication_lock_timeout\fR"
Number of seconds to wait for an existing replication device lock before
giving up. The default is 15.
.IP "\fBreplication_failure_threshold\fR"
.IP "\fBreplication_failure_ratio\fR"
These two settings control when the REPLICATION subrequest handler will
abort an incoming REPLICATION attempt. An abort will occur if there are at
These two settings control when the SSYNC subrequest handler will
abort an incoming SSYNC attempt. An abort will occur if there are at
least threshold number of failures and the value of failures / successes
exceeds the ratio. The defaults of 100 and 1.0 means that at least 100
failures have to occur and there have to be more failures than successes for
@ -498,6 +499,9 @@ and ensure that swift has read/write. The default is /var/cache/swift.
Takes a comma separated list of ints. If set, the object auditor will
increment a counter for every object whose size is <= to the given break
points and report the result after a full scan.
.IP \fBrsync_tempfile_timeout\fR
Time elapsed in seconds before rsync tempfiles will be unlinked. Config value of "auto"
will try to use object-replicator's rsync_timeout + 900 or fall-back to 86400 (1 day).
.RE

View File

@ -275,11 +275,14 @@ there you can change it to: authtoken keystoneauth
.PD 0
.RS 10
.IP "paste.filter_factory = keystonemiddleware.auth_token:filter_factory"
.IP "identity_uri = http://keystonehost:35357/"
.IP "auth_uri = http://keystonehost:5000/"
.IP "admin_tenant_name = service"
.IP "admin_user = swift"
.IP "admin_password = password"
.IP "auth_uri = http://keystonehost:5000"
.IP "auth_url = http://keystonehost:35357"
.IP "auth_plugin = password"
.IP "project_domain_id = default"
.IP "user_domain_id = default"
.IP "project_name = service"
.IP "username = swift"
.IP "password = password"
.IP ""
.IP "# delay_auth_decision defaults to False, but leaving it as false will"
.IP "# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from"
@ -330,11 +333,6 @@ This allows middleware higher in the WSGI pipeline to override auth
processing, useful for middleware such as tempurl and formpost. If you know
you're not going to use such middleware and you want a bit of extra security,
you can set this to false.
.IP \fBis_admin\fR
[DEPRECATED] If is_admin is true, a user whose username is the same as the project name
and who has any role on the project will have access rights elevated to be
the same as if the user had an operator role. Note that the condition
compares names rather than UUIDs. This option is deprecated.
.IP \fBservice_roles\fR
If the service_roles parameter is present, an X-Service-Token must be
present in the request that when validated, grants at least one role listed
@ -973,8 +971,7 @@ is false.
.IP \fBobject_post_as_copy\fR
Set object_post_as_copy = false to turn on fast posts where only the metadata changes
are stored as new and the original data file is kept in place. This makes for quicker
posts; but since the container metadata isn't updated in this mode, features like
container sync won't be able to sync posts. The default is True.
posts. The default is True.
.IP \fBaccount_autocreate\fR
If set to 'true' authorized accounts that do not yet exist within the Swift cluster
will be automatically created. The default is set to false.

View File

@ -14,31 +14,31 @@
.\" implied.
.\" See the License for the specific language governing permissions and
.\" limitations under the License.
.\"
.\"
.TH swift-object-expirer 1 "3/15/2012" "Linux" "OpenStack Swift"
.SH NAME
.SH NAME
.LP
.B swift-object-expirer
\- Openstack-swift object expirer
.SH SYNOPSIS
.LP
.B swift-object-expirer
.B swift-object-expirer
[CONFIG] [-h|--help] [-v|--verbose] [-o|--once]
.SH DESCRIPTION
.SH DESCRIPTION
.PP
The swift-object-expirer offers scheduled deletion of objects. The Swift client would
use the X-Delete-At or X-Delete-After headers during an object PUT or POST and the
cluster would automatically quit serving that object at the specified time and would
The swift-object-expirer offers scheduled deletion of objects. The Swift client would
use the X-Delete-At or X-Delete-After headers during an object PUT or POST and the
cluster would automatically quit serving that object at the specified time and would
shortly thereafter remove the object from the system.
The X-Delete-At header takes a Unix Epoch timestamp, in integer form; for example:
The X-Delete-At header takes a Unix Epoch timestamp, in integer form; for example:
1317070737 represents Mon Sep 26 20:58:57 2011 UTC.
The X-Delete-After header takes a integer number of seconds. The proxy server
that receives the request will convert this header into an X-Delete-At header
The X-Delete-After header takes a integer number of seconds. The proxy server
that receives the request will convert this header into an X-Delete-At header
using its current time plus the value given.
The options are as follows:
@ -53,19 +53,19 @@ The options are as follows:
.IP "-o"
.IP "--once"
.RS 4
.IP "only run one pass of daemon"
.IP "only run one pass of daemon"
.RE
.PD
.RE
.SH DOCUMENTATION
.LP
More in depth documentation in regards to
More in depth documentation in regards to
.BI swift-object-expirer
can be foud at
can be found at
.BI http://swift.openstack.org/overview_expiring_objects.html
and also about Openstack-Swift as a whole can be found at
and also about Openstack-Swift as a whole can be found at
.BI http://swift.openstack.org/index.html

View File

@ -234,9 +234,11 @@ using the format `regex_pattern_X = regex_expression`, where `X` is a number.
This script has been tested on Ubuntu 10.04 and Ubuntu 12.04, so if you are
using a different distro or OS, some care should be taken before using in production.
--------------
Cluster Health
--------------
.. _dispersion_report:
-----------------
Dispersion Report
-----------------
There is a swift-dispersion-report tool for measuring overall cluster health.
This is accomplished by checking if a set of deliberately distributed
@ -684,8 +686,7 @@ of async_pendings in real-time, but will not tell you the current number of
async_pending container updates on disk at any point in time.
Note also that the set of metrics collected, their names, and their semantics
are not locked down and will change over time. StatsD logging is currently in
a "beta" stage and will continue to evolve.
are not locked down and will change over time.
Metrics for `account-auditor`:

View File

@ -1,4 +1,17 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2010-2012 OpenStack Foundation.
#
# Swift documentation build configuration file, created by
@ -15,6 +28,7 @@
import datetime
import os
from swift import __version__
import subprocess
import sys
@ -29,7 +43,7 @@ sys.path.extend([os.path.abspath('../swift'), os.path.abspath('..'),
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.ifconfig', 'oslosphinx']
todo_include_todos = True
@ -37,17 +51,17 @@ todo_include_todos = True
# Changing the path so that the Hudson build output contains GA code and the
# source docs do not contain the code so local, offline sphinx builds are
# "clean."
#templates_path = []
#if os.getenv('HUDSON_PUBLISH_DOCS'):
# templates_path = ['_ga', '_templates']
#else:
# templates_path = ['_templates']
# templates_path = []
# if os.getenv('HUDSON_PUBLISH_DOCS'):
# templates_path = ['_ga', '_templates']
# else:
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
@ -61,23 +75,22 @@ copyright = u'%d, OpenStack Foundation' % datetime.datetime.now().year
# built documents.
#
# The short X.Y version.
from swift import __version__
version = __version__.rsplit('.', 1)[0]
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
@ -85,14 +98,14 @@ exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
@ -110,76 +123,76 @@ modindex_common_prefix = ['swift.']
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
#html_theme_path = ["."]
#html_theme = '_theme'
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
"-n1"]
html_last_updated_fmt = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE).communicate()[0]
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'swiftdoc'
@ -188,10 +201,10 @@ htmlhelp_basename = 'swiftdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
@ -203,17 +216,17 @@ latex_documents = [
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# latex_use_modindex = True

View File

@ -569,15 +569,15 @@ replication_server Configure parameter for cr
should not specify any value for
"replication_server".
replication_concurrency 4 Set to restrict the number of
concurrent incoming REPLICATION
concurrent incoming SSYNC
requests; set to 0 for unlimited
replication_one_per_device True Restricts incoming REPLICATION
replication_one_per_device True Restricts incoming SSYNC
requests to one per device,
replication_concurrency above
allowing. This can help control
I/O to each device, but you may
wish to set this to False to
allow multiple REPLICATION
allow multiple SSYNC
requests (up to the above
replication_concurrency setting)
per device.
@ -589,9 +589,9 @@ replication_failure_threshold 100 The number of subrequest f
replication_failure_ratio is
checked
replication_failure_ratio 1.0 If the value of failures /
successes of REPLICATION
successes of SSYNC
subrequests exceeds this ratio,
the overall REPLICATION request
the overall SSYNC request
will be aborted
splice no Use splice() for zero-copy object
GETs. This requires Linux kernel
@ -738,6 +738,11 @@ concurrency 1 The number of parallel processes
zero_byte_files_per_second 50
object_size_stats
recon_cache_path /var/cache/swift Path to recon cache
rsync_tempfile_timeout auto Time elapsed in seconds before rsync
tempfiles will be unlinked. Config value
of "auto" will try to use object-replicator's
rsync_timeout + 900 or fallback to 86400
(1 day).
=========================== =================== ==========================================
------------------------------
@ -1325,11 +1330,7 @@ object_post_as_copy true Set object_post_as_copy = false
the metadata changes are stored
anew and the original data file
is kept in place. This makes for
quicker posts; but since the
container metadata isn't updated
in this mode, features like
container sync won't be able to
sync posts.
quicker posts.
account_autocreate false If set to 'true' authorized
accounts that do not yet exist
within the Swift cluster will
@ -1367,6 +1368,36 @@ swift_owner_headers <see the sample These are the headers whose
headers> up to the auth system in use,
but usually indicates
administrative responsibilities.
sorting_method shuffle Storage nodes can be chosen at
random (shuffle), by using timing
measurements (timing), or by using
an explicit match (affinity).
Using timing measurements may allow
for lower overall latency, while
using affinity allows for finer
control. In both the timing and
affinity cases, equally-sorting nodes
are still randomly chosen to spread
load.
timing_expiry 300 If the "timing" sorting_method is
used, the timings will only be valid
for the number of seconds configured
by timing_expiry.
concurrent_gets off Use replica count number of
threads concurrently during a
GET/HEAD and return with the
first successful response. In
the EC case, this parameter only
effects an EC HEAD as an EC GET
behaves differently.
concurrency_timeout conn_timeout This parameter controls how long
to wait before firing off the
next concurrent_get thread. A
value of 0 would be fully concurrent;
any other number will stagger the
firing of the threads. This number
should be between 0 and node_timeout.
The default is conn_timeout (0.5).
============================ =============== =============================
[tempauth]

View File

@ -9,63 +9,64 @@ Coding Guidelines
For the most part we try to follow PEP 8 guidelines which can be viewed
here: http://www.python.org/dev/peps/pep-0008/
There is a useful pep8 command line tool for checking files for pep8
compliance which can be installed with ``easy_install pep8``.
------------------
Testing Guidelines
------------------
Swift has a comprehensive suite of tests that are run on all submitted code,
and it is recommended that developers execute the tests themselves to
catch regressions early. Developers are also expected to keep the
test suite up-to-date with any submitted code changes.
Swift has a comprehensive suite of tests and pep8 checks that are run on all
submitted code, and it is recommended that developers execute the tests
themselves to catch regressions early. Developers are also expected to keep
the test suite up-to-date with any submitted code changes.
Swift's suite of unit tests can be executed in an isolated environment
Swift's tests and pep8 checks can be executed in an isolated environment
with Tox: http://tox.testrun.org/
To execute the unit tests:
To execute the tests:
* Install Tox:
* Install Tox::
- `pip install tox`
pip install tox
* If you do not have python 2.6 installed (as in 12.04):
- Add `export TOXENV=py27,pep8` to your `~/.bashrc`
* Run Tox from the root of the swift repo::
- `. ~/.bashrc`
* Run Tox from the root of the swift repo:
- `tox`
tox
Remarks:
If you installed using: `cd ~/swift; sudo python setup.py develop`,
you may need to do: `cd ~/swift; sudo chown -R ${USER}:${USER} swift.egg-info`
prior to running tox.
If you installed using ``cd ~/swift; sudo python setup.py develop``, you may
need to do ``cd ~/swift; sudo chown -R ${USER}:${USER} swift.egg-info`` prior
to running tox.
* Optionally, run only specific tox builds:
* By default ``tox`` will run all of the unit test and pep8 checks listed in
the ``tox.ini`` file ``envlist`` option. A subset of the test environments
can be specified on the tox command line or by setting the ``TOXENV``
environment variable. For example, to run only the pep8 checks and python2.7
unit tests use::
- `tox -e pep8,py27`
tox -e pep8,py27
or::
TOXENV=py27,pep8 tox
.. note::
As of tox version 2.0.0, most environment variables are not automatically
passed to the test environment. Swift's `tox.ini` overrides this default
passed to the test environment. Swift's ``tox.ini`` overrides this default
behavior so that variable names matching ``SWIFT_*`` and ``*_proxy`` will be
passed, but you may need to run `tox --recreate` for this to take effect
passed, but you may need to run ``tox --recreate`` for this to take effect
after upgrading from tox<2.0.0.
Conversely, if you do not want those environment variables to be passed to
the test environment then you will need to unset them before calling tox.
the test environment then you will need to unset them before calling ``tox``.
Also, if you ever encounter DistributionNotFound, try to use `tox --recreate`
or remove the `.tox` directory to force tox to recreate the dependency list.
Also, if you ever encounter DistributionNotFound, try to use ``tox
--recreate`` or remove the ``.tox`` directory to force tox to recreate the
dependency list.
The functional tests may be executed against a :doc:`development_saio` or
other running Swift cluster using the command:
Swift's functional tests may be executed against a :doc:`development_saio` or
other running Swift cluster using the command::
- `tox -e func`
tox -e func
The endpoint and authorization credentials to be used by functional tests
should be configured in the ``test.conf`` file as described in the section

View File

@ -2,15 +2,53 @@
Identifying issues and resolutions
==================================
Is the system up?
-----------------
If you have a report that Swift is down, perform the following basic checks:
#. Run swift functional tests.
#. From a server in your data center, use ``curl`` to check ``/healthcheck``
(see below).
#. If you have a monitoring system, check your monitoring system.
#. Check your hardware load balancers infrastructure.
#. Run swift-recon on a proxy node.
Functional tests usage
-----------------------
We would recommend that you set up the functional tests to run against your
production system. If run regularly, this can be a useful tool to validate
that the system is configured correctly. In addition, it can provide
early warning about failures in your system (if the functional tests stop
working, user applications will also probably stop working).
A script for running the functional tests is located in ``swift/.functests``.
External monitoring
-------------------
We use pingdom.com to monitor the external Swift API. We suggest the
following:
- Do a GET on ``/healthcheck``
- Create a container, make it public (x-container-read:
.r*,.rlistings), create a small file in the container; do a GET
on the object
Diagnose: General approach
--------------------------
- Look at service status in your monitoring system.
- In addition to system monitoring tools and issue logging by users,
swift errors will often result in log entries in the ``/var/log/swift``
files: ``proxy.log``, ``server.log`` and ``background.log`` (see:``Swift
logs``).
swift errors will often result in log entries (see :ref:`swift_logs`).
- Look at any logs your deployment tool produces.
@ -33,22 +71,24 @@ Diagnose: Swift-dispersion-report
---------------------------------
The swift-dispersion-report is a useful tool to gauge the general
health of the system. Configure the ``swift-dispersion`` report for
100% coverage. The dispersion report regularly monitors
these and gives a report of the amount of objects/containers are still
available as well as how many copies of them are also there.
health of the system. Configure the ``swift-dispersion`` report to cover at
a minimum every disk drive in your system (usually 1% coverage).
See :ref:`dispersion_report` for details of how to configure and
use the dispersion reporting tool.
The dispersion-report output is logged on the first proxy of the first
AZ of each system (proxy with the monitoring role) under
``/var/log/swift/swift-dispersion-report.log``.
The ``swift-dispersion-report`` tool can take a long time to run, especially
if any servers are down. We suggest you run it regularly
(e.g., in a cron job) and save the results. This makes it easy to refer
to the last report without having to wait for a long-running command
to complete.
Diagnose: Is swift running?
---------------------------
Diagnose: Is system responding to /healthcheck?
-----------------------------------------------
When you want to establish if a swift endpoint is running, run ``curl -k``
against either: https://*[REPLACEABLE]*./healthcheck OR
https:*[REPLACEABLE]*.crossdomain.xml
against https://*[ENDPOINT]*/healthcheck.
.. _swift_logs:
Diagnose: Interpreting messages in ``/var/log/swift/`` files
------------------------------------------------------------
@ -70,25 +110,20 @@ The following table lists known issues:
- **Signature**
- **Issue**
- **Steps to take**
* - /var/log/syslog
- kernel: [] hpsa .... .... .... has check condition: unknown type:
Sense: 0x5, ASC: 0x20, ASC Q: 0x0 ....
- An unsupported command was issued to the storage hardware
- Understood to be a benign monitoring issue, ignore
* - /var/log/syslog
- kernel: [] sd .... [csbu:sd...] Sense Key: Medium Error
- Suggests disk surface issues
- Run swift diagnostics on the target node to check for disk errors,
- Run ``swift-drive-audit`` on the target node to check for disk errors,
repair disk errors
* - /var/log/syslog
- kernel: [] sd .... [csbu:sd...] Sense Key: Hardware Error
- Suggests storage hardware issues
- Run swift diagnostics on the target node to check for disk failures,
- Run diagnostics on the target node to check for disk failures,
replace failed disks
* - /var/log/syslog
- kernel: [] .... I/O error, dev sd.... ,sector ....
-
- Run swift diagnostics on the target node to check for disk errors
- Run diagnostics on the target node to check for disk errors
* - /var/log/syslog
- pound: NULL get_thr_arg
- Multiple threads woke up
@ -96,59 +131,61 @@ The following table lists known issues:
* - /var/log/swift/proxy.log
- .... ERROR .... ConnectionTimeout ....
- A storage node is not responding in a timely fashion
- Run swift diagnostics on the target node to check for node down,
node unconfigured, storage off-line or network issues between the
- Check if node is down, not running Swift,
unconfigured, storage off-line or for network issues between the
proxy and non responding node
* - /var/log/swift/proxy.log
- proxy-server .... HTTP/1.0 500 ....
- A proxy server has reported an internal server error
- Run swift diagnostics on the target node to check for issues
- Examine the logs for any errors at the time the error was reported to
attempt to understand the cause of the error.
* - /var/log/swift/server.log
- .... ERROR .... ConnectionTimeout ....
- A storage server is not responding in a timely fashion
- Run swift diagnostics on the target node to check for a node or
service, down, unconfigured, storage off-line or network issues
between the two nodes
- Check if node is down, not running Swift,
unconfigured, storage off-line or for network issues between the
server and non responding node
* - /var/log/swift/server.log
- .... ERROR .... Remote I/O error: '/srv/node/disk....
- A storage device is not responding as expected
- Run swift diagnostics and check the filesystem named in the error
for corruption (unmount & xfs_repair)
- Run ``swift-drive-audit`` and check the filesystem named in the error
for corruption (unmount & xfs_repair). Check if the filesystem
is mounted and working.
* - /var/log/swift/background.log
- object-server ERROR container update failed .... Connection refused
- Peer node is not responding
- Check status of the network and peer node
- A container server node could not be contacted
- Check if node is down, not running Swift,
unconfigured, storage off-line or for network issues between the
server and non responding node
* - /var/log/swift/background.log
- object-updater ERROR with remote .... ConnectionTimeout
-
- Check status of the network and peer node
- The remote container server is busy
- If the container is very large, some errors updating it can be
expected. However, this error can also occur if there is a networking
issue.
* - /var/log/swift/background.log
- account-reaper STDOUT: .... error: ECONNREFUSED
- Network connectivity issue
- Resolve network issue and re-run diagnostics
- Network connectivity issue or the target server is down.
- Resolve network issue or reboot the target server
* - /var/log/swift/background.log
- .... ERROR .... ConnectionTimeout
- A storage server is not responding in a timely fashion
- Run swift diagnostics on the target node to check for a node
or service, down, unconfigured, storage off-line or network issues
between the two nodes
- The target server may be busy. However, this error can also occur if
there is a networking issue.
* - /var/log/swift/background.log
- .... ERROR syncing .... Timeout
- A storage server is not responding in a timely fashion
- Run swift diagnostics on the target node to check for a node
or service, down, unconfigured, storage off-line or network issues
between the two nodes
- A timeout occurred syncing data to another node.
- The target server may be busy. However, this error can also occur if
there is a networking issue.
* - /var/log/swift/background.log
- .... ERROR Remote drive not mounted ....
- A storage server disk is unavailable
- Run swift diagnostics on the target node to check for a node or
service, failed or unmounted disk on the target, or a network issue
- Repair and remount the file system (on the remote node)
* - /var/log/swift/background.log
- object-replicator .... responded as unmounted
- A storage server disk is unavailable
- Run swift diagnostics on the target node to check for a node or
service, failed or unmounted disk on the target, or a network issue
* - /var/log/swift/\*.log
- Repair and remount the file system (on the remote node)
* - /var/log/swift/*.log
- STDOUT: EXCEPTION IN
- An unexpected error occurred
- Read the Traceback details, if it matches known issues
@ -157,19 +194,14 @@ The following table lists known issues:
* - /var/log/rsyncd.log
- rsync: mkdir "/disk....failed: No such file or directory....
- A local storage server disk is unavailable
- Run swift diagnostics on the node to check for a failed or
- Run diagnostics on the node to check for a failed or
unmounted disk
* - /var/log/swift*
- Exception: Could not bind to 0.0.0.0:600xxx
- Exception: Could not bind to 0.0.0.0:6xxx
- Possible Swift process restart issue. This indicates an old swift
process is still running.
- Run swift diagnostics, if some swift services are reported down,
- Restart Swift services. If some swift services are reported down,
check if they left residual process behind.
* - /var/log/rsyncd.log
- rsync: recv_generator: failed to stat "/disk....." (in object)
failed: Not a directory (20)
- Swift directory structure issues
- Run swift diagnostics on the node to check for issues
Diagnose: Parted reports the backup GPT table is corrupt
--------------------------------------------------------
@ -188,7 +220,7 @@ Diagnose: Parted reports the backup GPT table is corrupt
OK/Cancel?
To fix, go to: Fix broken GPT table (broken disk partition)
To fix, go to :ref:`fix_broken_gpt_table`
Diagnose: Drives diagnostic reports a FS label is not acceptable
@ -240,9 +272,10 @@ Diagnose: Failed LUNs
.. note::
The HPE Helion Public Cloud uses direct attach SmartArry
The HPE Helion Public Cloud uses direct attach SmartArray
controllers/drives. The information here is specific to that
environment.
environment. The hpacucli utility mentioned here may be called
hpssacli in your environment.
The ``swift_diagnostics`` mount checks may return a warning that a LUN has
failed, typically accompanied by DriveAudit check failures and device
@ -254,7 +287,7 @@ the procedure to replace the disk.
Otherwise the lun can be re-enabled as follows:
#. Generate a hpssacli diagnostic report. This report allows the swift
#. Generate a hpssacli diagnostic report. This report allows the DC
team to troubleshoot potential cabling or hardware issues so it is
imperative that you run it immediately when troubleshooting a failed
LUN. You will come back later and grep this file for more details, but
@ -262,8 +295,7 @@ Otherwise the lun can be re-enabled as follows:
.. code::
sudo hpssacli controller all diag file=/tmp/hpacu.diag ris=on \
xml=off zip=off
sudo hpssacli controller all diag file=/tmp/hpacu.diag ris=on xml=off zip=off
Export the following variables using the below instructions before
proceeding further.
@ -317,8 +349,7 @@ proceeding further.
.. code::
sudo hpssacli controller slot=1 ld ${LDRIVE} show detail \
grep -i "Disk Name"
sudo hpssacli controller slot=1 ld ${LDRIVE} show detail | grep -i "Disk Name"
#. Export the device name variable from the preceding command (example:
/dev/sdk):
@ -396,6 +427,8 @@ proceeding further.
should be checked. For example, log a DC ticket to check the sas cables
between the drive and the expander.
.. _diagnose_slow_disk_drives:
Diagnose: Slow disk devices
---------------------------
@ -404,7 +437,8 @@ Diagnose: Slow disk devices
collectl is an open-source performance gathering/analysis tool.
If the diagnostics report a message such as ``sda: drive is slow``, you
should log onto the node and run the following comand:
should log onto the node and run the following command (remove ``-c 1`` option to continuously monitor
the data):
.. code::
@ -431,13 +465,12 @@ should log onto the node and run the following comand:
dm-3 0 0 0 0 0 0 0 0 0 0 0 0 0
dm-4 0 0 0 0 0 0 0 0 0 0 0 0 0
dm-5 0 0 0 0 0 0 0 0 0 0 0 0 0
...
(repeats -- type Ctrl/C to stop)
Look at the ``Wait`` and ``SvcTime`` values. It is not normal for
these values to exceed 50msec. This is known to impact customer
performance (upload/download. For a controller problem, many/all drives
will show how wait and service times. A reboot may correct the prblem;
performance (upload/download). For a controller problem, many/all drives
will show long wait and service times. A reboot may correct the problem;
otherwise hardware replacement is needed.
Another way to look at the data is as follows:
@ -526,12 +559,12 @@ be disabled on a per-drive basis.
Diagnose: Slow network link - Measuring network performance
-----------------------------------------------------------
Network faults can cause performance between Swift nodes to degrade. The
following tests are recommended. Other methods (such as copying large
Network faults can cause performance between Swift nodes to degrade. Testing
with ``netperf`` is recommended. Other methods (such as copying large
files) may also work, but can produce inconclusive results.
Use netperf on all production systems. Install on all systems if not
already installed. And the UFW rules for its control port are in place.
Install ``netperf`` on all systems if not
already installed. Check that the UFW rules for its control port are in place.
However, there are no pre-opened ports for netperf's data connection. Pick a
port number. In this example, 12866 is used because it is one higher
than netperf's default control port number, 12865. If you get very
@ -542,7 +575,7 @@ command-line wrong.
Pick a ``source`` and ``target`` node. The source is often a proxy node
and the target is often an object node. Using the same source proxy you
can test communication to different object nodes in different AZs to
identity possible bottlekecks.
identify possible bottlenecks.
Running tests
^^^^^^^^^^^^^
@ -561,11 +594,11 @@ Running tests
#. On the ``source`` node, run the following command to check
throughput. Note the double-dash before the -P option.
The command takes 10 seconds to complete.
The command takes 10 seconds to complete. The ``target`` node is 192.168.245.5.
.. code::
$ netperf -H <redacted>.72.4
$ netperf -H 192.168.245.5 -- -P 12866
MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 12866 AF_INET to
<redacted>.72.4 (<redacted>.72.4) port 12866 AF_INET : demo
Recv Send Send
@ -578,7 +611,7 @@ Running tests
.. code::
$ netperf -H <redacted>.72.4 -t TCP_RR -- -P 12866
$ netperf -H 192.168.245.5 -t TCP_RR -- -P 12866
MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 12866
AF_INET to <redacted>.72.4 (<redacted>.72.4) port 12866 AF_INET : demo
: first burst 0
@ -763,7 +796,7 @@ Diagnose: High system latency
used by the monitor program happen to live on the bad object server.
- A general network problem within the data center. Compare the results
with the Pingdom monitors too see if they also have a problem.
with the Pingdom monitors to see if they also have a problem.
Diagnose: Interface reports errors
----------------------------------
If the nic supports self test, this can be performed with:
Self tests should read ``PASS`` if the nic is operating correctly.
Nic module drivers can be re-initialised by carefully removing and
re-installing the modules. Case in point being the mellanox drivers on
Swift Proxy servers. which use a two part driver mlx4_en and
re-installing the modules (this avoids rebooting the server).
For example, mellanox drivers use a two part driver mlx4_en and
mlx4_core. To reload these you must carefully remove the mlx4_en
(ethernet) then the mlx4_core modules, and reinstall them in the
reverse order.
As the interface will be disabled while the modules are unloaded, you
must be very careful not to lock the interface out. The following
script can be used to reload the melanox drivers, as a side effect, this
resets error counts on the interface.
Diagnose: CorruptDir diagnostic reports corrupt directories
-----------------------------------------------------------
From time to time Swift data structures may become corrupted by
misplaced files in filesystem locations that swift would normally place
a directory. This causes issues for swift when directory creation is
attempted at said location, it may fail due to the pre-existent file. If
the CorruptDir diagnostic reports Corrupt directories, they should be
checked to see if they exist.
Checking existence of entries
-----------------------------
Swift data filesystems are located under the ``/srv/node/disk``
mountpoints and contain accounts, containers and objects
subdirectories which in turn contain partition number subdirectories.
The partition number directories contain md5 hash subdirectories. md5
hash directories contain md5sum subdirectories. md5sum directories
contain the Swift data payload as either a database (.db), for
accounts and containers, or a data file (.data) for objects.
If the entries reported in diagnostics correspond to a partition
number, md5 hash or md5sum directory, check the entry with ``ls
-ld *entry*``.
If it turns out to be a file rather than a directory, it should be
carefully removed.
.. note::
Please do not ``ls`` the partition level directory contents, as
this *especially objects* may take a lot of time and system resources,
if you need to check the contents, use:
.. code::
echo /srv/node/disk#/type/partition#/
must be very careful not to lock yourself out so it may be better
to script this.
Diagnose: Hung swift object replicator
--------------------------------------
The swift diagnostic message ``Object replicator: remaining exceeds
100hrs:`` may indicate that the swift ``object-replicator`` is stuck and not
A replicator reports in its log that remaining time exceeds
100 hours. This may indicate that the swift ``object-replicator`` is stuck and not
making progress. Another useful way to check this is with the
'swift-recon -r' command on a swift proxy server:
@ -866,42 +861,41 @@ making progress. Another useful way to check this is with the
--> Starting reconnaissance on 384 hosts
===============================================================================
[2013-07-17 12:56:19] Checking on replication
http://<redacted>.72.63:6000/recon/replication: <urlopen error timed out>
[replication_time] low: 2, high: 80, avg: 28.8, total: 11037, Failed: 0.0%, no_result: 0, reported: 383
Oldest completion was 2013-06-12 22:46:50 (12 days ago) by <redacted>.31:6000.
Most recent completion was 2013-07-17 12:56:19 (5 seconds ago) by <redacted>.204.113:6000.
Oldest completion was 2013-06-12 22:46:50 (12 days ago) by 192.168.245.3:6000.
Most recent completion was 2013-07-17 12:56:19 (5 seconds ago) by 192.168.245.5:6000.
===============================================================================
The ``Oldest completion`` line in this example indicates that the
object-replicator on swift object server <redacted>.31 has not completed
object-replicator on swift object server 192.168.245.3 has not completed
the replication cycle in 12 days. This replicator is stuck. The object
replicator cycle is generally less than 1 hour. Though a replicator
cycle of 15-20 hours can occur if nodes are added to the system and a
new ring has been deployed.
You can further check if the object replicator is stuck by logging on
the the object server and checking the object replicator progress with
the object server and checking the object replicator progress with
the following command:
.. code::
# sudo grep object-rep /var/log/swift/background.log | grep -e "Starting object replication" -e "Object replication complete" -e "partitions rep"
Jul 16 06:25:46 <redacted> object-replicator 15344/16450 (93.28%) partitions replicated in 69018.48s (0.22/sec, 22h remaining)
Jul 16 06:30:46 <redacted> object-replicator 15344/16450 (93.28%) partitions replicated in 69318.58s (0.22/sec, 22h remaining)
Jul 16 06:35:46 <redacted> object-replicator 15344/16450 (93.28%) partitions replicated in 69618.63s (0.22/sec, 23h remaining)
Jul 16 06:40:46 <redacted> object-replicator 15344/16450 (93.28%) partitions replicated in 69918.73s (0.22/sec, 23h remaining)
Jul 16 06:45:46 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 70218.75s (0.22/sec, 24h remaining)
Jul 16 06:50:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 70518.85s (0.22/sec, 24h remaining)
Jul 16 06:55:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 70818.95s (0.22/sec, 25h remaining)
Jul 16 07:00:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 71119.05s (0.22/sec, 25h remaining)
Jul 16 07:05:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 71419.15s (0.21/sec, 26h remaining)
Jul 16 07:10:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 71719.25s (0.21/sec, 26h remaining)
Jul 16 07:15:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 72019.27s (0.21/sec, 27h remaining)
Jul 16 07:20:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 72319.37s (0.21/sec, 27h remaining)
Jul 16 07:25:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 72619.47s (0.21/sec, 28h remaining)
Jul 16 07:30:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 72919.56s (0.21/sec, 28h remaining)
Jul 16 07:35:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 73219.67s (0.21/sec, 29h remaining)
Jul 16 07:40:47 <redacted> object-replicator 15348/16450 (93.30%) partitions replicated in 73519.76s (0.21/sec, 29h remaining)
Jul 16 06:25:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69018.48s (0.22/sec, 22h remaining)
Jul 16 06:30:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69318.58s (0.22/sec, 22h remaining)
Jul 16 06:35:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69618.63s (0.22/sec, 23h remaining)
Jul 16 06:40:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69918.73s (0.22/sec, 23h remaining)
Jul 16 06:45:46 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 70218.75s (0.22/sec, 24h remaining)
Jul 16 06:50:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 70518.85s (0.22/sec, 24h remaining)
Jul 16 06:55:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 70818.95s (0.22/sec, 25h remaining)
Jul 16 07:00:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 71119.05s (0.22/sec, 25h remaining)
Jul 16 07:05:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 71419.15s (0.21/sec, 26h remaining)
Jul 16 07:10:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 71719.25s (0.21/sec, 26h remaining)
Jul 16 07:15:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72019.27s (0.21/sec, 27h remaining)
Jul 16 07:20:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72319.37s (0.21/sec, 27h remaining)
Jul 16 07:25:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72619.47s (0.21/sec, 28h remaining)
Jul 16 07:30:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72919.56s (0.21/sec, 28h remaining)
Jul 16 07:35:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 73219.67s (0.21/sec, 29h remaining)
Jul 16 07:40:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 73519.76s (0.21/sec, 29h remaining)
The above status is output every 5 minutes to ``/var/log/swift/background.log``.
@ -921,7 +915,7 @@ of a corrupted filesystem detected by the object replicator:
.. code::
# sudo bzgrep "Remote I/O error" /var/log/swift/background.log* |grep srv | - tail -1
Jul 12 03:33:30 <redacted> object-replicator STDOUT: ERROR:root:Error hashing suffix#012Traceback (most recent call last):#012 File
Jul 12 03:33:30 192.168.245.4 object-replicator STDOUT: ERROR:root:Error hashing suffix#012Traceback (most recent call last):#012 File
"/usr/lib/python2.7/dist-packages/swift/obj/replicator.py", line 199, in get_hashes#012 hashes[suffix] = hash_suffix(suffix_dir,
reclaim_age)#012 File "/usr/lib/python2.7/dist-packages/swift/obj/replicator.py", line 84, in hash_suffix#012 path_contents =
sorted(os.listdir(path))#012OSError: [Errno 121] Remote I/O error: '/srv/node/disk4/objects/1643763/b51'
@ -996,7 +990,7 @@ to repair the problem filesystem.
# sudo xfs_repair -P /dev/sde1
#. If the ``xfs_repair`` fails then it may be necessary to re-format the
filesystem. See Procedure: fix broken XFS filesystem. If the
filesystem. See :ref:`fix_broken_xfs_filesystem`. If the
``xfs_repair`` is successful, re-enable chef using the following command
and replication should commence again.
@ -1025,7 +1019,183 @@ load:
$ uptime
07:44:02 up 18:22, 1 user, load average: 407.12, 406.36, 404.59
.. toctree::
:maxdepth: 2
Further issues and resolutions
------------------------------
.. note::
The urgency levels in each **Action** column indicate whether or
not it is required to take immediate action, or if the problem can be worked
on during business hours.
.. list-table::
:widths: 33 33 33
:header-rows: 1
* - **Scenario**
- **Description**
- **Action**
* - ``/healthcheck`` latency is high.
- The ``/healthcheck`` test does not tax the proxy very much so any drop in value is probably related to
network issues, rather than the proxies being very busy. A very slow proxy might impact the average
number, but it would need to be very slow to shift the number that much.
- Check networks. Do a ``curl https://<ip-address>:<port>/healthcheck`` where
``ip-address`` is individual proxy IP address.
Repeat this for every proxy server to see if you can pin point the problem.
Urgency: If there are other indications that your system is slow, you should treat
this as an urgent problem.
* - Swift process is not running.
- You can use ``swift-init`` status to check if swift processes are running on any
given server.
- Run this command:
.. code::
sudo swift-init all start
Examine messages in the swift log files to see if there are any
error messages related to any of the swift processes since the time you
ran the ``swift-init`` command.
Take any corrective actions that seem necessary.
Urgency: If this only affects one server, and you have more than one,
identifying and fixing the problem can wait until business hours.
If this same problem affects many servers, then you need to take corrective
action immediately.
* - ntpd is not running.
- NTP is not running.
- Configure and start NTP.
Urgency: For proxy servers, this is vital.
* - Host clock is not syncd to an NTP server.
- Node time settings do not match NTP server time.
This may take some time to sync after a reboot.
- Assuming NTP is configured and running, you have to wait until the times sync.
* - A swift process has hundreds, to thousands of open file descriptors.
- May happen to any of the swift processes.
Known to have happened with a ``rsyslogd`` restart and where ``/tmp`` was hanging.
- Restart the swift processes on the affected node:
.. code::
% sudo swift-init all reload
Urgency:
If known performance problem: Immediate
If system seems fine: Medium
* - A swift process is not owned by the swift user.
- If the UID of the swift user has changed, then the processes might not be
owned by that UID.
- Urgency: If this only affects one server, and you have more than one,
identifying and fixing the problem can wait until business hours.
If this same problem affects many servers, then you need to take corrective
action immediately.
* - Object account or container files not owned by swift.
- This typically happens if during a reinstall or a re-image of a server that the UID
of the swift user was changed. The data files in the object account and container
directories are owned by the original swift UID. As a result, the current swift
user does not own these files.
- Correct the UID of the swift user to reflect that of the original UID. An alternate
action is to change the ownership of every file on all file systems. This alternate
action is often impractical and will take considerable time.
Urgency: If this only affects one server, and you have more than one,
identifying and fixing the problem can wait until business hours.
If this same problem affects many servers, then you need to take corrective
action immediately.
* - A disk drive has a high IO wait or service time.
- If high wait IO times are seen for a single disk, then the disk drive is the problem.
If most/all devices are slow, the controller is probably the source of the problem.
The controller cache may also be misconfigured which will cause similar long
wait or service times.
- As a first step, if your controllers have a cache, check that it is enabled and their battery/capacitor
is working.
Second, reboot the server.
If problem persists, file a DC ticket to have the drive or controller replaced.
See :ref:`diagnose_slow_disk_drives` on how to check the drive wait or service times.
Urgency: Medium
* - The network interface is not up.
- Use the ``ifconfig`` and ``ethtool`` commands to determine the network state.
- You can try restarting the interface. However, generally the interface
(or cable) is probably broken, especially if the interface is flapping.
Urgency: If this only affects one server, and you have more than one,
identifying and fixing the problem can wait until business hours.
If this same problem affects many servers, then you need to take corrective
action immediately.
* - Network interface card (NIC) is not operating at the expected speed.
- The NIC is running at a slower speed than its nominal rated speed.
For example, it is running at 100 Mb/s and the NIC is a 1Ge NIC.
- 1. Try resetting the interface with:
.. code::
sudo ethtool -s eth0 speed 1000
... and then run:
.. code::
sudo lshw -class network
See if size goes to the expected speed. Failing
that, check hardware (NIC cable/switch port).
2. If persistent, consider shutting down the server (especially if a proxy)
until the problem is identified and resolved. If you leave this server
running it can have a large impact on overall performance.
Urgency: High
* - The interface RX/TX error count is non-zero.
- A value of 0 is typical, but counts of 1 or 2 do not indicate a problem.
- 1. For low numbers (For example, 1 or 2), you can simply ignore. Numbers in the range
3-30 probably indicate that the error count has crept up slowly over a long time.
Consider rebooting the server to remove the report from the noise.
Typically, when a cable or interface is bad, the error count goes to 400+. For example,
it stands out. There may be other symptoms such as the interface going up and down or
not running at correct speed. A server with a high error count should be watched.
2. If the error count continues to climb, consider taking the server down until
it can be properly investigated. In any case, a reboot should be done to clear
the error count.
Urgency: High, if the error count is increasing.
* - In a swift log you see a message that a process has not replicated in over 24 hours.
- The replicator has not successfully completed a run in the last 24 hours.
This indicates that the replicator has probably hung.
- Use ``swift-init`` to stop and then restart the replicator process.
Urgency: Low. However if you
recently added or replaced disk drives then you should treat this urgently.
* - Container Updater has not run in 4 hour(s).
- The service may appear to be running however, it may be hung. Examine their swift
logs to see if there are any error messages relating to the container updater. This
may potentially explain why the container is not running.
- Urgency: Medium
This may have been triggered by a recent restart of the rsyslog daemon.
Restart the service with:
.. code::
sudo swift-init <service> reload
* - Object replicator: Reports the remaining time and that time is more than 100 hours.
- Each replication cycle the object replicator writes a log message to its log
reporting statistics about the current cycle. This includes an estimate for the
remaining time needed to replicate all objects. If this time is longer than
100 hours, there is a problem with the replication process.
- Urgency: Medium
Restart the service with:
.. code::
sudo swift-init object-replicator reload
Check that the remaining replication time is going down.
sec-furtherdiagnose.rst

View File

@ -1,36 +0,0 @@
==================
General Procedures
==================
Getting a swift account stats
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. note::
``swift-direct`` is specific to the HPE Helion Public Cloud. Go look at
``swifty`` for an alternate, this is an example.
This procedure describes how you determine the swift usage for a given
swift account, that is the number of containers, number of objects and
total bytes used. To do this you will need the project ID.
Log onto one of the swift proxy servers.
Use swift-direct to show this accounts usage:
.. code::
$ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_redacted-9a11-45f8-aa1c-9e7b1c7904c8
Status: 200
Content-Length: 0
Accept-Ranges: bytes
X-Timestamp: 1379698586.88364
X-Account-Bytes-Used: 67440225625994
X-Account-Container-Count: 1
Content-Type: text/plain; charset=utf-8
X-Account-Object-Count: 8436776
Status: 200
name: my_container count: 8436776 bytes: 67440225625994
This account has 1 container. That container has 8436776 objects. The
total bytes used is 67440225625994.

View File

@ -13,67 +13,15 @@ information, suggestions or recommendations. This document are provided
for reference only. We are not responsible for your use of any
information, suggestions or recommendations contained herein.
This document also contains references to certain tools that we use to
operate the Swift system within the HPE Helion Public Cloud.
Descriptions of these tools are provided for reference only, as the tools themselves
are not publically available at this time.
- ``swift-direct``: This is similar to the ``swiftly`` tool.
.. toctree::
:maxdepth: 2
general.rst
diagnose.rst
procedures.rst
maintenance.rst
troubleshooting.rst
Is the system up?
~~~~~~~~~~~~~~~~~
If you have a report that Swift is down, perform the following basic checks:
#. Run swift functional tests.
#. From a server in your data center, use ``curl`` to check ``/healthcheck``.
#. If you have a monitoring system, check your monitoring system.
#. Check on your hardware load balancers infrastructure.
#. Run swift-recon on a proxy node.
Run swift function tests
------------------------
We would recommend that you set up your function tests against your production
system.
A script for running the function tests is located in ``swift/.functests``.
External monitoring
-------------------
- We use pingdom.com to monitor the external Swift API. We suggest the
following:
- Do a GET on ``/healthcheck``
- Create a container, make it public (x-container-read:
.r\*,.rlistings), create a small file in the container; do a GET
on the object
Reference information
~~~~~~~~~~~~~~~~~~~~~
Reference: Swift startup/shutdown
---------------------------------
- Use reload - not stop/start/restart.
- Try to roll sets of servers (especially proxy) in groups of less
than 20% of your servers.

View File

@ -54,8 +54,8 @@ system. Rules-of-thumb for 'good' recon output are:
.. code::
\-> [http://<redacted>.29:6000/recon/load:] <urlopen error [Errno 111] ECONNREFUSED>
\-> [http://<redacted>.31:6000/recon/load:] <urlopen error timed out>
-> [http://<redacted>.29:6000/recon/load:] <urlopen error [Errno 111] ECONNREFUSED>
-> [http://<redacted>.31:6000/recon/load:] <urlopen error timed out>
- That could be okay or could require investigation.
@ -154,18 +154,18 @@ Running reccon shows some async pendings:
.. code::
bob@notso:~/swift-1.4.4/swift$ ssh \\-q <redacted>.132.7 sudo swift-recon \\-alr
bob@notso:~/swift-1.4.4/swift$ ssh -q <redacted>.132.7 sudo swift-recon -alr
===============================================================================
\[2012-03-14 17:25:55\\] Checking async pendings on 384 hosts...
[2012-03-14 17:25:55] Checking async pendings on 384 hosts...
Async stats: low: 0, high: 23, avg: 8, total: 3356
===============================================================================
\[2012-03-14 17:25:55\\] Checking replication times on 384 hosts...
\[Replication Times\\] shortest: 1.49303831657, longest: 39.6982825994, avg: 4.2418222066
[2012-03-14 17:25:55] Checking replication times on 384 hosts...
[Replication Times] shortest: 1.49303831657, longest: 39.6982825994, avg: 4.2418222066
===============================================================================
\[2012-03-14 17:25:56\\] Checking load avg's on 384 hosts...
\[5m load average\\] lowest: 2.35, highest: 8.88, avg: 4.45911458333
\[15m load average\\] lowest: 2.41, highest: 9.11, avg: 4.504765625
\[1m load average\\] lowest: 1.95, highest: 8.56, avg: 4.40588541667
[2012-03-14 17:25:56] Checking load avg's on 384 hosts...
[5m load average] lowest: 2.35, highest: 8.88, avg: 4.45911458333
[15m load average] lowest: 2.41, highest: 9.11, avg: 4.504765625
[1m load average] lowest: 1.95, highest: 8.56, avg: 4.40588541667
===============================================================================
Why? Running recon again with -av swift (not shown here) tells us that
@ -231,7 +231,7 @@ Procedure
This procedure should be run three times, each time specifying the
appropriate ``*.builder`` file.
#. Determine whether all three nodes are different Swift zones by
#. Determine whether all three nodes are in different Swift zones by
running the ring builder on a proxy node to determine which zones
the storage nodes are in. For example:
@ -241,22 +241,22 @@ Procedure
/etc/swift/object.builder, build version 1467
2097152 partitions, 3 replicas, 5 zones, 1320 devices, 0.02 balance
The minimum number of hours before a partition can be reassigned is 24
Devices: id zone ip address port name weight partitions balance meta
0 1 <redacted>.4 6000 disk0 1708.00 4259 -0.00
1 1 <redacted>.4 6000 disk1 1708.00 4260 0.02
2 1 <redacted>.4 6000 disk2 1952.00 4868 0.01
3 1 <redacted>.4 6000 disk3 1952.00 4868 0.01
4 1 <redacted>.4 6000 disk4 1952.00 4867 -0.01
Devices: id zone ip address port name weight partitions balance meta
0 1 <redacted>.4 6000 disk0 1708.00 4259 -0.00
1 1 <redacted>.4 6000 disk1 1708.00 4260 0.02
2 1 <redacted>.4 6000 disk2 1952.00 4868 0.01
3 1 <redacted>.4 6000 disk3 1952.00 4868 0.01
4 1 <redacted>.4 6000 disk4 1952.00 4867 -0.01
#. Here, node <redacted>.4 is in zone 1. If two or more of the three
nodes under consideration are in the same Swift zone, they do not
have any ring partitions in common; there is little/no data
availability risk if all three nodes are down.
#. If the nodes are in three distinct Swift zonesit is necessary to
#. If the nodes are in three distinct Swift zones it is necessary to
determine whether the nodes have ring partitions in common. Run
``swift-ring-builder`` again, this time with the ``list_parts`` option and specify
the nodes under consideration. For example (all on one line):
the nodes under consideration. For example:
.. code::
@ -302,12 +302,12 @@ Procedure
.. code::
% sudo swift-ring-builder /etc/swift/object.builder list_parts <redacted>.8 <redacted>.15 <redacted>.72.2 | grep “3$” - wc \\-l
% sudo swift-ring-builder /etc/swift/object.builder list_parts <redacted>.8 <redacted>.15 <redacted>.72.2 | grep "3$" | wc -l
30
#. In this case the nodes have 30 out of a total of 2097152 partitions
in common; about 0.001%. In this case the risk is small nonzero.
in common; about 0.001%. In this case the risk is small/nonzero.
Recall that a partition is simply a portion of the ring mapping
space, not actual data. So having partitions in common is a necessary
but not sufficient condition for data unavailability.
@ -320,3 +320,11 @@ Procedure
If three nodes that have 3 partitions in common are all down, there is
a nonzero probability that data are unavailable and we should work to
bring some or all of the nodes up ASAP.
Swift startup/shutdown
~~~~~~~~~~~~~~~~~~~~~~
- Use reload - not stop/start/restart.
- Try to roll sets of servers (especially proxy) in groups of less
than 20% of your servers.

View File

@ -2,6 +2,8 @@
Software configuration procedures
=================================
.. _fix_broken_gpt_table:
Fix broken GPT table (broken disk partition)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -102,6 +104,8 @@ Fix broken GPT table (broken disk partition)
$ sudo aptitude remove gdisk
.. _fix_broken_xfs_filesystem:
Procedure: Fix broken XFS filesystem
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -165,7 +169,7 @@ Procedure: Fix broken XFS filesystem
.. code::
$ sudo dd if=/dev/zero of=/dev/sdb2 bs=$((1024\*1024)) count=1
$ sudo dd if=/dev/zero of=/dev/sdb2 bs=$((1024*1024)) count=1
1+0 records in
1+0 records out
1048576 bytes (1.0 MB) copied, 0.00480617 s, 218 MB/s
@ -187,129 +191,173 @@ Procedure: Fix broken XFS filesystem
$ mount
.. _checking_if_account_ok:
Procedure: Checking if an account is okay
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. note::
``swift-direct`` is only available in the HPE Helion Public Cloud.
Use ``swiftly`` as an alternate.
Use ``swiftly`` as an alternate (or use ``swift-get-nodes`` as explained
here).
If you have a tenant ID you can check the account is okay as follows from a proxy.
You must know the tenant/project ID. You can check if the account is okay as follows from a proxy.
.. code::
$ sudo -u swift /opt/hp/swift/bin/swift-direct show <Api-Auth-Hash-or-TenantId>
$ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_<project-id>
The response will either be similar to a swift list of the account
containers, or an error indicating that the resource could not be found.
In the latter case you can establish if a backend database exists for
the tenantId by running the following on a proxy:
Alternatively, you can use ``swift-get-nodes`` to find the account database
files. Run the following on a proxy:
.. code::
$ sudo -u swift swift-get-nodes /etc/swift/account.ring.gz <Api-Auth-Hash-or-TenantId>
$ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_<project-id>
The response will list ssh commands that will list the replicated
account databases, if they exist.
The response will print curl/ssh commands that will list the replicated
account databases. Use the indicated ``curl`` or ``ssh`` commands to check
the status and existence of the account.
Procedure: Getting swift account stats
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. note::
``swift-direct`` is specific to the HPE Helion Public Cloud. Go look at
``swifty`` for an alternate or use ``swift-get-nodes`` as explained
in :ref:`checking_if_account_ok`.
This procedure describes how you determine the swift usage for a given
swift account, that is the number of containers, number of objects and
total bytes used. To do this you will need the project ID.
Log onto one of the swift proxy servers.
Use swift-direct to show this accounts usage:
.. code::
$ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_<project-id>
Status: 200
Content-Length: 0
Accept-Ranges: bytes
X-Timestamp: 1379698586.88364
X-Account-Bytes-Used: 67440225625994
X-Account-Container-Count: 1
Content-Type: text/plain; charset=utf-8
X-Account-Object-Count: 8436776
Status: 200
name: my_container count: 8436776 bytes: 67440225625994
This account has 1 container. That container has 8436776 objects. The
total bytes used is 67440225625994.
Procedure: Revive a deleted account
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Swift accounts are normally not recreated. If a tenant unsubscribes from
Swift, the account is deleted. To re-subscribe to Swift, you can create
a new tenant (new tenant ID), and subscribe to Swift. This creates a
new Swift account with the new tenant ID.
Swift accounts are normally not recreated. If a tenant/project is deleted,
the account can then be deleted. If the user wishes to use Swift again,
the normal process is to create a new tenant/project -- and hence a
new Swift account.
However, until the unsubscribe/new tenant process is supported, you may
hit a situation where a Swift account is deleted and the user is locked
out of Swift.
However, if the Swift account is deleted, but the tenant/project is not
deleted from Keystone, the user can no longer access the account. This
is because the account is marked deleted in Swift. You can revive
the account as described in this process.
Deleting the account database files
-----------------------------------
.. note::
Here is one possible solution. The containers and objects may be lost
forever. The solution is to delete the account database files and
re-create the account. This may only be done once the containers and
objects are completely deleted. This process is untested, but could
work as follows:
The containers and objects in the "old" account cannot be listed
anymore. In addition, if the Account Reaper process has not
finished reaping the containers and objects in the "old" account, these
are effectively orphaned and it is virtually impossible to find and delete
them to free up disk space.
#. Use swift-get-nodes to locate the account's database file (on three
servers).
The solution is to delete the account database files and
re-create the account as follows:
#. Rename the database files (on three servers).
#. You must know the tenant/project ID. The account name is AUTH_<project-id>.
In this example, the tenant/project is ``4ebe3039674d4864a11fe0864ae4d905``
so the Swift account name is ``AUTH_4ebe3039674d4864a11fe0864ae4d905``.
#. Use ``swiftly`` to create the account (use original name).
Renaming account database so it can be revived
----------------------------------------------
Get the locations of the database files that hold the account data.
#. Use ``swift-get-nodes`` to locate the account's database files (on three
servers). The output has been truncated so we can focus on the important pieces
of data:
.. code::
sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_redacted-1856-44ae-97db-31242f7ad7a1
$ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_4ebe3039674d4864a11fe0864ae4d905
...
curl -I -XHEAD "http://192.168.245.5:6002/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905"
curl -I -XHEAD "http://192.168.245.3:6002/disk0/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905"
curl -I -XHEAD "http://192.168.245.4:6002/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905"
...
Use your own device location of servers:
such as "export DEVICE=/srv/node"
ssh 192.168.245.5 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052"
ssh 192.168.245.3 "ls -lah ${DEVICE:-/srv/node*}/disk0/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052"
ssh 192.168.245.4 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052"
...
note: `/srv/node*` is used as default value of `devices`, the real value is set in the config file on each storage node.
Account AUTH_redacted-1856-44ae-97db-31242f7ad7a1
Container None
Object None
#. Before proceeding check that the account is really deleted by using curl. Execute the
commands printed by ``swift-get-nodes``. For example:
Partition 18914
.. code::
Hash 93c41ef56dd69173a9524193ab813e78
$ curl -I -XHEAD "http://192.168.245.5:6002/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905"
HTTP/1.1 404 Not Found
Content-Length: 0
Content-Type: text/html; charset=utf-8
Server:Port Device 15.184.9.126:6002 disk7
Server:Port Device 15.184.9.94:6002 disk11
Server:Port Device 15.184.9.103:6002 disk10
Server:Port Device 15.184.9.80:6002 disk2 [Handoff]
Server:Port Device 15.184.9.120:6002 disk2 [Handoff]
Server:Port Device 15.184.9.98:6002 disk2 [Handoff]
Repeat for the other two servers (192.168.245.3 and 192.168.245.4).
A ``404 Not Found`` indicates that the account is deleted (or never existed).
curl -I -XHEAD "`*http://15.184.9.126:6002/disk7/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.126:6002/disk7/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_
curl -I -XHEAD "`*http://15.184.9.94:6002/disk11/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.94:6002/disk11/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_
If you get a ``204 No Content`` response, do **not** proceed.
curl -I -XHEAD "`*http://15.184.9.103:6002/disk10/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.103:6002/disk10/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_
#. Use the ssh commands printed by ``swift-get-nodes`` to check if database
files exist. For example:
curl -I -XHEAD "`*http://15.184.9.80:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.80:6002/disk2/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_ # [Handoff]
curl -I -XHEAD "`*http://15.184.9.120:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.120:6002/disk2/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_ # [Handoff]
curl -I -XHEAD "`*http://15.184.9.98:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.98:6002/disk2/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_ # [Handoff]
.. code::
ssh 15.184.9.126 "ls -lah /srv/node/disk7/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"
ssh 15.184.9.94 "ls -lah /srv/node/disk11/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"
ssh 15.184.9.103 "ls -lah /srv/node/disk10/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"
ssh 15.184.9.80 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff]
ssh 15.184.9.120 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff]
ssh 15.184.9.98 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff]
$ ssh 192.168.245.5 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052"
total 20K
drwxr-xr-x 2 swift swift 110 Mar 9 10:22 .
drwxr-xr-x 3 swift swift 45 Mar 9 10:18 ..
-rw------- 1 swift swift 17K Mar 9 10:22 f5ecf8b40de3e1b0adb0dbe576874052.db
-rw-r--r-- 1 swift swift 0 Mar 9 10:22 f5ecf8b40de3e1b0adb0dbe576874052.db.pending
-rwxr-xr-x 1 swift swift 0 Mar 9 10:18 .lock
$ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH\_redacted-1856-44ae-97db-31242f7ad7a1Account AUTH_redacted-1856-44ae-97db-
31242f7ad7a1Container NoneObject NonePartition 18914Hash 93c41ef56dd69173a9524193ab813e78Server:Port Device 15.184.9.126:6002 disk7Server:Port Device 15.184.9.94:6002 disk11Server:Port Device 15.184.9.103:6002 disk10Server:Port Device 15.184.9.80:6002
disk2 [Handoff]Server:Port Device 15.184.9.120:6002 disk2 [Handoff]Server:Port Device 15.184.9.98:6002 disk2 [Handoff]curl -I -XHEAD
"`*http://15.184.9.126:6002/disk7/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"*<http://15.184.9.126:6002/disk7/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_ curl -I -XHEAD
Repeat for the other two servers (192.168.245.3 and 192.168.245.4).
"`*http://15.184.9.94:6002/disk11/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.94:6002/disk11/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_ curl -I -XHEAD
If no files exist, no further action is needed.
"`*http://15.184.9.103:6002/disk10/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.103:6002/disk10/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_ curl -I -XHEAD
#. Stop Swift processes on all nodes listed by ``swift-get-nodes``
(In this example, that is 192.168.245.3, 192.168.245.4 and 192.168.245.5).
"`*http://15.184.9.80:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.80:6002/disk2/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_ # [Handoff]curl -I -XHEAD
#. We recommend you make backup copies of the database files.
"`*http://15.184.9.120:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.120:6002/disk2/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_ # [Handoff]curl -I -XHEAD
#. Delete the database files. For example:
"`*http://15.184.9.98:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* <http://15.184.9.98:6002/disk2/18914/AUTH_cc9ebdb8-1856-44ae-97db-31242f7ad7a1>`_ # [Handoff]ssh 15.184.9.126
.. code::
"ls -lah /srv/node/disk7/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"ssh 15.184.9.94 "ls -lah /srv/node/disk11/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"ssh 15.184.9.103
"ls -lah /srv/node/disk10/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"ssh 15.184.9.80 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff]ssh 15.184.9.120
"ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff]ssh 15.184.9.98 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff]
$ ssh 192.168.245.5
$ cd /srv/node/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052
$ sudo rm *
Check that the handoff nodes do not have account databases:
Repeat for the other two servers (192.168.245.3 and 192.168.245.4).
.. code::
#. Restart Swift on all three servers
$ ssh 15.184.9.80 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"
ls: cannot access /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/: No such file or directory
At this stage, the account is fully deleted. If you enable the auto-create option, the
next time the user attempts to access the account, the account will be created.
You may also use swiftly to recreate the account.
If the handoff node has a database, wait for rebalancing to occur.
Procedure: Temporarily stop load balancers from directing traffic to a proxy server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -319,7 +367,7 @@ follows. This can be useful when a proxy is misbehaving but you need
Swift running to help diagnose the problem. By removing from the load
balancers, customers are not impacted by the misbehaving proxy.
#. Ensure that in proxyserver.com the ``disable_path`` variable is set to
#. Ensure that in /etc/swift/proxy-server.conf the ``disable_path`` variable is set to
``/etc/swift/disabled-by-file``.
#. Log onto the proxy node.
@ -330,9 +378,9 @@ balancers, customer's are not impacted by the misbehaving proxy.
sudo swift-init proxy shutdown
.. note::
.. note::
Shutdown, not stop.
Shutdown, not stop.
#. Create the ``/etc/swift/disabled-by-file`` file. For example:
@ -346,13 +394,10 @@ balancers, customer's are not impacted by the misbehaving proxy.
sudo swift-init proxy start
It works because the healthcheck middleware looks for this file. If it
find it, it will return 503 error instead of 200/OK. This means the load balancer
It works because the healthcheck middleware looks for /etc/swift/disabled-by-file.
If it exists, the middleware will return 503/error instead of 200/OK. This means the load balancer
should stop sending traffic to the proxy.
``/healthcheck`` will report
``FAIL: disabled by file`` if the ``disabled-by-file`` file exists.
Procedure: Ad-Hoc disk performance test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -1,177 +0,0 @@
==============================
Further issues and resolutions
==============================
.. note::
The urgency levels in each **Action** column indicates whether or
not it is required to take immediate action, or if the problem can be worked
on during business hours.
.. list-table::
:widths: 33 33 33
:header-rows: 1
* - **Scenario**
- **Description**
- **Action**
* - ``/healthcheck`` latency is high.
- The ``/healthcheck`` test does not tax the proxy very much so any drop in value is probably related to
network issues, rather than the proxies being very busy. A very slow proxy might impact the average
number, but it would need to be very slow to shift the number that much.
- Check networks. Do a ``curl https://<ip-address>/healthcheck where ip-address`` is individual proxy
IP address to see if you can pin point a problem in the network.
Urgency: If there are other indications that your system is slow, you should treat
this as an urgent problem.
* - Swift process is not running.
- You can use ``swift-init`` status to check if swift processes are running on any
given server.
- Run this command:
.. code::
sudo swift-init all start
Examine messages in the swift log files to see if there are any
error messages related to any of the swift processes since the time you
ran the ``swift-init`` command.
Take any corrective actions that seem necessary.
Urgency: If this only affects one server, and you have more than one,
identifying and fixing the problem can wait until business hours.
If this same problem affects many servers, then you need to take corrective
action immediately.
* - ntpd is not running.
- NTP is not running.
- Configure and start NTP.
Urgency: For proxy servers, this is vital.
* - Host clock is not syncd to an NTP server.
- Node time settings does not match NTP server time.
This may take some time to sync after a reboot.
- Assuming NTP is configured and running, you have to wait until the times sync.
* - A swift process has hundreds, to thousands of open file descriptors.
- May happen to any of the swift processes.
Known to have happened with a ``rsyslogd restart`` and where ``/tmp`` was hanging.
- Restart the swift processes on the affected node:
.. code::
% sudo swift-init all reload
Urgency:
If known performance problem: Immediate
If system seems fine: Medium
* - A swift process is not owned by the swift user.
- If the UID of the swift user has changed, then the processes might not be
owned by that UID.
- Urgency: If this only affects one server, and you have more than one,
identifying and fixing the problem can wait until business hours.
If this same problem affects many servers, then you need to take corrective
action immediately.
* - Object account or container files not owned by swift.
- This typically happens if during a reinstall or a re-image of a server that the UID
of the swift user was changed. The data files in the object account and container
directories are owned by the original swift UID. As a result, the current swift
user does not own these files.
- Correct the UID of the swift user to reflect that of the original UID. An alternate
action is to change the ownership of every file on all file systems. This alternate
action is often impractical and will take considerable time.
Urgency: If this only affects one server, and you have more than one,
identifying and fixing the problem can wait until business hours.
If this same problem affects many servers, then you need to take corrective
action immediately.
* - A disk drive has a high IO wait or service time.
- If high wait IO times are seen for a single disk, then the disk drive is the problem.
If most/all devices are slow, the controller is probably the source of the problem.
The controller cache may also be misconfigured which will cause similar long
wait or service times.
- As a first step, if your controllers have a cache, check that it is enabled and their battery/capacitor
is working.
Second, reboot the server.
If problem persists, file a DC ticket to have the drive or controller replaced.
See `Diagnose: Slow disk devices` on how to check the drive wait or service times.
Urgency: Medium
* - The network interface is not up.
- Use the ``ifconfig`` and ``ethtool`` commands to determine the network state.
- You can try restarting the interface. However, generally the interface
(or cable) is probably broken, especially if the interface is flapping.
Urgency: If this only affects one server, and you have more than one,
identifying and fixing the problem can wait until business hours.
If this same problem affects many servers, then you need to take corrective
action immediately.
* - Network interface card (NIC) is not operating at the expected speed.
- The NIC is running at a slower speed than its nominal rated speed.
For example, it is running at 100 Mb/s and the NIC is a 1Ge NIC.
- 1. Try resetting the interface with:
.. code::
sudo ethtool -s eth0 speed 1000
... and then run:
.. code::
sudo lshw -class
See if size goes to the expected speed. Failing
that, check hardware (NIC cable/switch port).
2. If persistent, consider shutting down the server (especially if a proxy)
until the problem is identified and resolved. If you leave this server
running it can have a large impact on overall performance.
Urgency: High
* - The interface RX/TX error count is non-zero.
- A value of 0 is typical, but counts of 1 or 2 do not indicate a problem.
- 1. For low numbers (For example, 1 or 2), you can simply ignore. Numbers in the range
3-30 probably indicate that the error count has crept up slowly over a long time.
Consider rebooting the server to remove the report from the noise.
Typically, when a cable or interface is bad, the error count goes to 400+. For example,
it stands out. There may be other symptoms such as the interface going up and down or
not running at correct speed. A server with a high error count should be watched.
2. If the error count continue to climb, consider taking the server down until
it can be properly investigated. In any case, a reboot should be done to clear
the error count.
Urgency: High, if the error count increasing.
* - In a swift log you see a message that a process has not replicated in over 24 hours.
- The replicator has not successfully completed a run in the last 24 hours.
This indicates that the replicator has probably hung.
- Use ``swift-init`` to stop and then restart the replicator process.
Urgency: Low (high if recent adding or replacement of disk drives), however if you
recently added or replaced disk drives then you should treat this urgently.
* - Container Updater has not run in 4 hour(s).
- The service may appear to be running however, it may be hung. Examine their swift
logs to see if there are any error messages relating to the container updater. This
may potentially explain why the container is not running.
- Urgency: Medium
This may have been triggered by a recent restart of the rsyslog daemon.
Restart the service with:
.. code::
sudo swift-init <service> reload
* - Object replicator: Reports the remaining time and that time is more than 100 hours.
- Each replication cycle the object replicator writes a log message to its log
reporting statistics about the current cycle. This includes an estimate for the
remaining time needed to replicate all objects. If this time is longer than
100 hours, there is a problem with the replication process.
- Urgency: Medium
Restart the service with:
.. code::
sudo swift-init object-replicator reload
Check that the remaining replication time is going down.

View File

@ -18,16 +18,14 @@ files. For example:
.. code::
$ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh
-w <redacted>.68.[4-11,132-139 4-11,132-139],<redacted>.132.[4-11,132-139
4-11,132-139] 'sudo bzgrep -w AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log\*'
dshbak -c
$ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh \
-w <redacted>.68.[4-11,132-139 4-11,132-139],<redacted>.132.[4-11,132-139] \
'sudo bzgrep -w AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log*' | dshbak -c
.
.
\---------------\-
----------------
<redacted>.132.6
\---------------\-
----------------
Feb 29 08:51:57 sw-aw2az2-proxy011 proxy-server <redacted>.16.132
<redacted>.66.8 29/Feb/2012/08/51/57 GET /v1.0/AUTH_redacted-4962-4692-98fb-52ddda82a5af
/%3Fformat%3Djson HTTP/1.0 404 - - <REDACTED>_4f4d50c5e4b064d88bd7ab82 - - -
@ -37,52 +35,49 @@ This shows a ``GET`` operation on the users account.
.. note::
The HTTP status returned is 404, not found, rather than 500 as reported by the user.
The HTTP status returned is 404, Not found, rather than 500 as reported by the user.
Using the transaction ID, ``tx429fc3be354f434ab7f9c6c4206c1dc3`` you can
search the swift object servers log files for this transaction ID:
.. code::
$ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername>
-R ssh
-w <redacted>.72.[4-67|4-67],<redacted>.[4-67|4-67],<redacted>.[4-67|4-67],<redacted>.204.[4-131| 4-131]
'sudo bzgrep tx429fc3be354f434ab7f9c6c4206c1dc3 /var/log/swift/server.log*'
| dshbak -c
$ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh \
-w <redacted>.72.[4-67|4-67],<redacted>.[4-67|4-67],<redacted>.[4-67|4-67],<redacted>.204.[4-131] \
'sudo bzgrep tx429fc3be354f434ab7f9c6c4206c1dc3 /var/log/swift/server.log*' | dshbak -c
.
.
\---------------\-
----------------
<redacted>.72.16
\---------------\-
----------------
Feb 29 08:51:57 sw-aw2az1-object013 account-server <redacted>.132.6 - -
[29/Feb/2012:08:51:57 +0000|] "GET /disk9/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"
404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-"
0.0016 ""
\---------------\-
<redacted>.31
\---------------\-
Feb 29 08:51:57 node-az2-object060 account-server <redacted>.132.6 - -
[29/Feb/2012:08:51:57 +0000|] "GET /disk6/198875/AUTH_redacted-4962-
4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0011 ""
\---------------\-
<redacted>.204.70
\---------------\-
----------------
<redacted>.31
----------------
Feb 29 08:51:57 node-az2-object060 account-server <redacted>.132.6 - -
[29/Feb/2012:08:51:57 +0000|] "GET /disk6/198875/AUTH_redacted-4962-
4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0011 ""
----------------
<redacted>.204.70
----------------
Feb 29 08:51:57 sw-aw2az3-object0067 account-server <redacted>.132.6 - -
[29/Feb/2012:08:51:57 +0000|] "GET /disk6/198875/AUTH_redacted-4962-
4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0014 ""
Feb 29 08:51:57 sw-aw2az3-object0067 account-server <redacted>.132.6 - -
[29/Feb/2012:08:51:57 +0000|] "GET /disk6/198875/AUTH_redacted-4962-
4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0014 ""
.. note::
The 3 GET operations to 3 different object servers that hold the 3
replicas of this users account. Each ``GET`` returns a HTTP status of 404,
not found.
Not found.
Next, use the ``swift-get-nodes`` command to determine exactly where the
users account data is stored:
user's account data is stored:
.. code::
@ -114,23 +109,23 @@ users account data is stored:
curl -I -XHEAD "`http://<redacted>.72.27:6002/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"
<http://15.185.72.27:6002/disk11/198875/AUTH_db0050ad-4962-4692-98fb-52ddda82a5af>`_ # [Handoff]
ssh <redacted>.31 "ls \-lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
ssh <redacted>.204.70 "ls \-lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
ssh <redacted>.72.16 "ls \-lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
ssh <redacted>.204.64 "ls \-lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff]
ssh <redacted>.26 "ls \-lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff]
ssh <redacted>.72.27 "ls \-lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff]
ssh <redacted>.31 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
ssh <redacted>.204.70 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
ssh <redacted>.72.16 "ls -lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
ssh <redacted>.204.64 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff]
ssh <redacted>.26 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff]
ssh <redacted>.72.27 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff]
Check each of the primary servers, <redacted>.31, <redacted>.204.70 and <redacted>.72.16, for
this users account. For example on <redacted>.72.16:
.. code::
$ ls \\-lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/
$ ls -lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/
total 1.0M
drwxrwxrwx 2 swift swift 98 2012-02-23 14:49 .
drwxrwxrwx 3 swift swift 45 2012-02-03 23:28 ..
-rw-\\-----\\- 1 swift swift 15K 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db
-rw------- 1 swift swift 15K 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db
-rw-rw-rw- 1 swift swift 0 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db.pending
So this users account db, an sqlite db is present. Use sqlite to
@ -155,7 +150,7 @@ checkout the account:
status_changed_at = 1330001026.00514
metadata =
.. note::
.. note::
The status is ``DELETED``. So this account was deleted. This explains
why the GET operations are returning 404, not found. Check the account
@ -174,14 +169,14 @@ server logs:
.. code::
$ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh -w <redacted>.68.[4-11,132-139 4-11,132-
139],<redacted>.132.[4-11,132-139|4-11,132-139] 'sudo bzgrep AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log\* | grep -w
DELETE |awk "{print \\$3,\\$10,\\$12}"' |- dshbak -c
$ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh \
-w <redacted>.68.[4-11,132-139 4-11,132-139],<redacted>.132.[4-11,132-139|4-11,132-139] \
'sudo bzgrep AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log* \
| grep -w DELETE | awk "{print $3,$10,$12}"' |- dshbak -c
.
.
Feb 23 12:43:46 sw-aw2az2-proxy001 proxy-server 15.203.233.76 <redacted>.66.7 23/Feb/2012/12/43/46 DELETE /v1.0/AUTH_redacted-4962-4692-98fb-
Feb 23 12:43:46 sw-aw2az2-proxy001 proxy-server <redacted> <redacted>.66.7 23/Feb/2012/12/43/46 DELETE /v1.0/AUTH_redacted-4962-4692-98fb-
52ddda82a5af/ HTTP/1.0 204 - Apache-HttpClient/4.1.2%20%28java%201.5%29 <REDACTED>_4f458ee4e4b02a869c3aad02 - - -
tx4471188b0b87406899973d297c55ab53 - 0.0086
From this you can see the operation that resulted in the account being deleted.
@ -252,8 +247,8 @@ Finally, use ``swift-direct`` to delete the container.
Procedure: Decommissioning swift nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Should Swift nodes need to be decommissioned. For example, where they are being
re-purposed, it is very important to follow the following steps.
Should Swift nodes need to be decommissioned (e.g., where they are being
re-purposed), it is very important to follow the following steps.
#. In the case of object servers, follow the procedure for removing
the node from the rings.

View File

@ -154,11 +154,14 @@ add the configuration for the authtoken middleware::
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
identity_uri = http://keystonehost:35357/
admin_tenant_name = service
admin_user = swift
admin_password = password
auth_uri = http://keystonehost:5000/
auth_url = http://keystonehost:35357/
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = swift
password = password
cache = swift.cache
include_service_catalog = False
delay_auth_decision = True
@ -166,16 +169,17 @@ add the configuration for the authtoken middleware::
The actual values for these variables will need to be set depending on
your situation, but in short:
* ``identity_uri`` points to the Keystone Admin service. This information is
used by the middleware to actually query Keystone about the validity of the
authentication tokens. It is not necessary to append any Keystone API version
number to this URI.
* The admin auth credentials (``admin_user``, ``admin_tenant_name``,
``admin_password``) will be used to retrieve an admin token. That
token will be used to authorize user tokens behind the scenes.
* ``auth_uri`` should point to a Keystone service from which users may
retrieve tokens. This value is used in the `WWW-Authenticate` header that
auth_token sends with any denial response.
* ``auth_url`` points to the Keystone Admin service. This information is
used by the middleware to actually query Keystone about the validity of the
authentication tokens. It is not necessary to append any Keystone API version
number to this URI.
* The auth credentials (``project_domain_id``, ``user_domain_id``,
``username``, ``project_name``, ``password``) will be used to retrieve an
admin token. That token will be used to authorize user tokens behind the
scenes.
* ``cache`` is set to ``swift.cache``. This means that the middleware
will get the Swift memcache from the request environment.
* ``include_service_catalog`` defaults to ``True`` if not set. This means

View File

@ -12,13 +12,6 @@ configure their cluster to allow/accept sync requests to/from other clusters,
and the user specifies where to sync their container to along with a secret
synchronization key.
.. note::
Container sync will sync object POSTs only if the proxy server is set to
use "object_post_as_copy = true" which is the default. So-called fast
object posts, "object_post_as_copy = false" do not update the container
listings and therefore can't be detected for synchronization.
.. note::
If you are using the large objects feature you will need to ensure both
@ -386,13 +379,6 @@ from ``sync-containers``.
cluster. Therefore, the container servers must be permitted to initiate
outbound connections to the remote proxy servers (or load balancers).
.. note::
Container sync will sync object POSTs only if the proxy server is set to
use "object_post_as_copy = true" which is the default. So-called fast
object posts, "object_post_as_copy = false" do not update the container
listings and therefore can't be detected for synchronization.
The actual syncing is slightly more complicated to make use of the three
(or number-of-replicas) main nodes for a container without each trying to
do the exact same work but also without missing work if one node happens to

View File

@ -182,17 +182,13 @@ similar to that of replication with a few notable exceptions:
Performance Considerations
--------------------------
Efforts are underway to characterize performance of various Erasure Code
schemes. One of the main goals of the beta release is to perform this
characterization and encourage others to do so and provide meaningful feedback
to the development community. There are many factors that will affect
performance of EC so it is vital that we have multiple characterization
activities happening.
In general, EC has different performance characteristics than replicated data.
EC requires substantially more CPU to read and write data, and is more suited
for larger objects that are not frequently accessed (eg backups).
Operators are encouraged to characterize the performance of various EC schemes
and share their observations with the developer community.
----------------------------
Using an Erasure Code Policy
----------------------------

View File

@ -37,8 +37,7 @@ There are many reasons why this might be desirable:
.. note::
Today, Swift supports two different policy types: Replication and Erasure
Code. Erasure Code policy is currently a beta release and should not be
used in a Production cluster. See :doc:`overview_erasure_code` for details.
Code. See :doc:`overview_erasure_code` for details.
Also note that Diskfile refers to backend object storage plug-in
architecture. See :doc:`development_ondisk_backends` for details.

View File

@ -103,10 +103,14 @@ meta string A general-use field for storing additional information for the
====== ======= ==============================================================
Note: The list of devices may contain holes, or indexes set to None, for
devices that have been removed from the cluster. Generally, device ids are not
reused. Also, some devices may be temporarily disabled by setting their weight
to 0.0. To obtain a list of active devices (for uptime polling, for example)
the Python code would look like: ``devices = list(self._iter_devs())``
devices that have been removed from the cluster. However, device ids are
reused. Device ids are reused to avoid potentially running out of device id
slots when there are available slots (from prior removal of devices). A
consequence of this device id reuse is that the device id (integer value) does
not necessarily correspond with the chronology of when the device was added to
the ring. Also, some devices may be temporarily disabled by setting their
weight to 0.0. To obtain a list of active devices (for uptime polling, for
example) the Python code would look like: ``devices = list(self._iter_devs())``
*************************
Partition Assignment List

View File

@ -40,7 +40,7 @@
# The endpoint is what the container sync daemon will use when sending out
# requests to that cluster. Keep in mind this endpoint must be reachable by all
# container servers, since that is where the container sync daemon runs. Note
# the the endpoint ends with /v1/ and that the container sync daemon will then
# that the endpoint ends with /v1/ and that the container sync daemon will then
# add the account/container/obj name after that.
#
# Distribute this container-sync-realms.conf file to all your proxy servers

View File

@ -118,14 +118,15 @@ use = egg:swift#object
# should not specify any value for "replication_server".
# replication_server = false
#
# Set to restrict the number of concurrent incoming REPLICATION requests
# Set to restrict the number of concurrent incoming SSYNC requests
# Set to 0 for unlimited
# Note that REPLICATION is currently an ssync only item
# Note that SSYNC requests are only used by the object reconstructor or the
# object replicator when configured to use ssync.
# replication_concurrency = 4
#
# Restricts incoming REPLICATION requests to one per device,
# Restricts incoming SSYNC requests to one per device,
# replication_concurrency above allowing. This can help control I/O to each
# device, but you may wish to set this to False to allow multiple REPLICATION
# device, but you may wish to set this to False to allow multiple SSYNC
# requests (up to the above replication_concurrency setting) per device.
# replication_one_per_device = True
#
@ -133,8 +134,8 @@ use = egg:swift#object
# giving up.
# replication_lock_timeout = 15
#
# These next two settings control when the REPLICATION subrequest handler will
# abort an incoming REPLICATION attempt. An abort will occur if there are at
# These next two settings control when the SSYNC subrequest handler will
# abort an incoming SSYNC attempt. An abort will occur if there are at
# least threshold number of failures and the value of failures / successes
# exceeds the ratio. The defaults of 100 and 1.0 means that at least 100
# failures have to occur and there have to be more failures than successes for
@ -305,6 +306,13 @@ use = egg:swift#recon
# points and report the result after a full scan.
# object_size_stats =
# The auditor will cleanup old rsync tempfiles after they are "old
# enough" to delete. You can configure the time elapsed in seconds
# before rsync tempfiles will be unlinked, or the default value of
# "auto" try to use object-replicator's rsync_timeout + 900 and fallback
# to 86400 (1 day).
# rsync_tempfile_timeout = auto
# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after healthcheck.
[filter:xprofile]

View File

@ -131,8 +131,7 @@ use = egg:swift#proxy
#
# Set object_post_as_copy = false to turn on fast posts where only the metadata
# changes are stored anew and the original data file is kept in place. This
# makes for quicker posts; but since the container metadata isn't updated in
# this mode, features like container sync won't be able to sync posts.
# makes for quicker posts.
# object_post_as_copy = true
#
# If set to 'true' authorized accounts that do not yet exist within the Swift
@ -164,13 +163,28 @@ use = egg:swift#proxy
# using affinity allows for finer control. In both the timing and
# affinity cases, equally-sorting nodes are still randomly chosen to
# spread load.
# The valid values for sorting_method are "affinity", "shuffle", and "timing".
# The valid values for sorting_method are "affinity", "shuffle", or "timing".
# sorting_method = shuffle
#
# If the "timing" sorting_method is used, the timings will only be valid for
# the number of seconds configured by timing_expiry.
# timing_expiry = 300
#
# By default on a GET/HEAD swift will connect to a storage node one at a time
# in a single thread. There is smarts in the order they are hit however. If you
# turn on concurrent_gets below, then replica count threads will be used.
# With addition of the concurrency_timeout option this will allow swift to send
# out GET/HEAD requests to the storage nodes concurrently and answer with the
# first to respond. With an EC policy the parameter only affects HEAD requests.
# concurrent_gets = off
#
# This parameter controls how long to wait before firing off the next
# concurrent_get thread. A value of 0 would be fully concurrent, any other
# number will stagger the firing of the threads. This number should be
# between 0 and node_timeout. The default is whatever you set for the
# conn_timeout parameter.
# concurrency_timeout = 0.5
#
# Set to the number of nodes to contact for a normal request. You can use
# '* replicas' at the end to have it use the number given times the number of
# replicas for the ring being used for the request.
@ -342,12 +356,6 @@ user_test5_tester5 = testing5 service
# you can set this to false.
# allow_overrides = true
#
# If is_admin is true, a user whose username is the same as the project name
# and who has any role on the project will have access rights elevated to be
# the same as if the user had an operator role. Note that the condition
# compares names rather than UUIDs. This option is deprecated.
# is_admin = false
#
# If the service_roles parameter is present, an X-Service-Token must be
# present in the request that when validated, grants at least one role listed
# in the parameter. The X-Service-Token may be scoped to any project.
@ -674,6 +682,12 @@ use = egg:swift#account_quotas
[filter:gatekeeper]
use = egg:swift#gatekeeper
# Set this to false if you want to allow clients to set arbitrary X-Timestamps
# on uploaded objects. This may be used to preserve timestamps when migrating
# from a previous storage system, but risks allowing users to upload
# difficult-to-delete data.
# shunt_inbound_x_timestamp = true
#
# You can override the default log routing for this filter here:
# set log_name = gatekeeper
# set log_facility = LOG_LOCAL0

View File

@ -51,8 +51,7 @@ aliases = yellow, orange
#policy_type = replication
# The following declares a storage policy of type 'erasure_coding' which uses
# Erasure Coding for data reliability. The 'erasure_coding' storage policy in
# Swift is available as a "beta". Please refer to Swift documentation for
# Erasure Coding for data reliability. Please refer to Swift documentation for
# details on how the 'erasure_coding' storage policy is implemented.
#
# Swift uses PyECLib, a Python Erasure coding API library, for encode/decode

View File

@ -171,7 +171,9 @@ class AccountReaper(Daemon):
container_shard = None
for container_shard, node in enumerate(nodes):
if is_local_device(self.myips, None, node['ip'], None) and \
(not self.bind_port or self.bind_port == node['port']):
(not self.bind_port or
self.bind_port == node['port']) and \
(device == node['device']):
break
else:
continue

View File

@ -21,6 +21,7 @@ from eventlet.green import urllib2, socket
from six.moves.urllib.parse import urlparse
from swift.common.utils import SWIFT_CONF_FILE
from swift.common.ring import Ring
from swift.common.storage_policy import POLICIES
from hashlib import md5
import eventlet
import json
@ -202,18 +203,19 @@ class SwiftRecon(object):
block = f.read(4096)
return md5sum.hexdigest()
def get_devices(self, region_filter, zone_filter, swift_dir, ring_name):
def get_hosts(self, region_filter, zone_filter, swift_dir, ring_names):
"""
Get a list of hosts in the ring
Get a list of hosts in the rings.
:param region_filter: Only list regions matching given filter
:param zone_filter: Only list zones matching given filter
:param swift_dir: Directory of swift config, usually /etc/swift
:param ring_name: Name of the ring, such as 'object'
:param ring_names: Collection of ring names, such as
['object', 'object-2']
:returns: a set of tuples containing the ip and port of hosts
"""
ring_data = Ring(swift_dir, ring_name=ring_name)
devs = [d for d in ring_data.devs if d]
rings = [Ring(swift_dir, ring_name=n) for n in ring_names]
devs = [d for r in rings for d in r.devs if d]
if region_filter is not None:
devs = [d for d in devs if d['region'] == region_filter]
if zone_filter is not None:
@ -913,6 +915,26 @@ class SwiftRecon(object):
matches, len(hosts), errors))
print("=" * 79)
def _get_ring_names(self, policy=None):
    '''
    Retrieve the names of the ring files to query.

    If no policy is passed and the server type is object, the ring
    names of all configured storage policies are returned.

    :param policy: name or index of a storage policy; only applicable
                   when server_type == 'object'.
    :returns: list of ring names (may be empty if ``policy`` matches no
              configured storage policy).
    '''
    if self.server_type == 'object':
        # Include a policy's ring when it matches by exact name, or by
        # numeric index when the given value is all digits; with no
        # policy given, include every configured policy's ring.
        # NOTE(review): the name comparison is case-sensitive — confirm
        # that is intended for user-supplied --policy values.
        ring_names = [p.ring_name for p in POLICIES if (
            p.name == policy or not policy or (
                policy.isdigit() and int(policy) == int(p)))]
    else:
        # Non-object server types (account, container) have exactly one
        # ring, named after the server type itself.
        ring_names = [self.server_type]
    return ring_names
def main(self):
"""
Retrieve and report cluster info from hosts running recon middleware.
@ -982,6 +1004,9 @@ class SwiftRecon(object):
default=5)
args.add_option('--swiftdir', default="/etc/swift",
help="Default = /etc/swift")
args.add_option('--policy', '-p',
help='Only query object servers in specified '
'storage policy (specified as name or index).')
options, arguments = args.parse_args()
if len(sys.argv) <= 1 or len(arguments) > 1:
@ -1003,8 +1028,14 @@ class SwiftRecon(object):
self.suppress_errors = options.suppress
self.timeout = options.timeout
hosts = self.get_devices(options.region, options.zone,
swift_dir, self.server_type)
ring_names = self._get_ring_names(options.policy)
if not ring_names:
print('Invalid Storage Policy')
args.print_help()
sys.exit(0)
hosts = self.get_hosts(options.region, options.zone,
swift_dir, ring_names)
print("--> Starting reconnaissance on %s hosts" % len(hosts))
print("=" * 79)

View File

@ -1183,7 +1183,7 @@ swift-ring-builder <builder_file> set_overload <overload>[%]
def main(arguments=None):
global argv, backup_dir, builder, builder_file, ring_file
if arguments:
if arguments is not None:
argv = arguments
else:
argv = sys_argv

View File

@ -45,7 +45,8 @@ DB_PREALLOCATION = False
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max number of pending entries
#: Max size of .pending file in bytes. When this is exceeded, the pending
# records will be merged.
PENDING_CAP = 131072

View File

@ -33,7 +33,7 @@ from swift.common.exceptions import ClientException
from swift.common.utils import Timestamp, FileLikeIter
from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
is_success, is_server_error
from swift.common.swob import HeaderKeyDict
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import quote

View File

@ -0,0 +1,63 @@
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
class HeaderKeyDict(dict):
    """
    A dict that title-cases all keys on the way in, so as to be
    case-insensitive.

    Values are coerced on the way in as well: unicode values are UTF-8
    encoded, any other non-None value is passed through ``str()``, and
    setting a key to ``None`` removes it from the dict.
    """
    def __init__(self, base_headers=None, **kwargs):
        # Seed from an existing mapping (or sequence of pairs) first, then
        # let keyword arguments override.
        if base_headers:
            self.update(base_headers)
        self.update(kwargs)

    def update(self, other):
        # Accept either a mapping (anything with .keys()) or an iterable of
        # (key, value) pairs; every key is title-cased via __setitem__'s
        # conventions by assigning through self.
        if hasattr(other, 'keys'):
            for key in other.keys():
                self[key.title()] = other[key]
        else:
            for key, value in other:
                self[key.title()] = value

    def __getitem__(self, key):
        # NOTE: returns None for missing keys instead of raising KeyError,
        # because dict.get() is used here rather than dict.__getitem__.
        return dict.get(self, key.title())

    def __setitem__(self, key, value):
        if value is None:
            # Assigning None is a deletion; ignore if the key is absent.
            self.pop(key.title(), None)
        elif isinstance(value, six.text_type):
            # NOTE(review): on Python 3 .encode() produces bytes values --
            # this coercion looks Python-2 oriented; confirm intent.
            return dict.__setitem__(self, key.title(), value.encode('utf-8'))
        else:
            # Coerce everything else (ints, Timestamps, ...) to a string.
            return dict.__setitem__(self, key.title(), str(value))

    def __contains__(self, key):
        return dict.__contains__(self, key.title())

    def __delitem__(self, key):
        return dict.__delitem__(self, key.title())

    def get(self, key, default=None):
        return dict.get(self, key.title(), default)

    def setdefault(self, key, value=None):
        if key not in self:
            self[key] = value
        return self[key]

    def pop(self, key, default=None):
        # NOTE: unlike dict.pop(), a missing key returns None rather than
        # raising KeyError when no default is given.
        return dict.pop(self, key.title(), default)

View File

@ -741,11 +741,25 @@ class SimpleClient(object):
def base_request(self, method, container=None, name=None, prefix=None,
headers=None, proxy=None, contents=None,
full_listing=None, logger=None, additional_info=None,
timeout=None):
timeout=None, marker=None):
# Common request method
trans_start = time()
url = self.url
if full_listing:
info, body_data = self.base_request(
method, container, name, prefix, headers, proxy,
timeout=timeout, marker=marker)
listing = body_data
while listing:
marker = listing[-1]['name']
info, listing = self.base_request(
method, container, name, prefix, headers, proxy,
timeout=timeout, marker=marker)
if listing:
body_data.extend(listing)
return [info, body_data]
if headers is None:
headers = {}
@ -762,6 +776,9 @@ class SimpleClient(object):
if prefix:
url += '&prefix=%s' % prefix
if marker:
url += '&marker=%s' % quote(marker)
req = urllib2.Request(url, headers=headers, data=contents)
if proxy:
proxy = urllib.parse.urlparse(proxy)
@ -769,6 +786,7 @@ class SimpleClient(object):
req.get_method = lambda: method
conn = urllib2.urlopen(req, timeout=timeout)
body = conn.read()
info = conn.info()
try:
body_data = json.loads(body)
except ValueError:
@ -792,13 +810,13 @@ class SimpleClient(object):
url,
conn.getcode(),
sent_content_length,
conn.info()['content-length'],
info['content-length'],
trans_start,
trans_stop,
trans_stop - trans_start,
additional_info
)))
return [None, body_data]
return [info, body_data]
def retry_request(self, method, **kwargs):
retries = kwargs.pop('retries', self.retries)
@ -837,6 +855,12 @@ class SimpleClient(object):
contents=contents.read(), **kwargs)
def head_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
return client.retry_request('HEAD', **kwargs)
def put_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)

View File

@ -561,8 +561,8 @@ class Server(object):
safe_kill(pid, sig, 'swift-%s' % self.server)
except InvalidPidFileException as e:
if kwargs.get('verbose'):
print(_('Removing pid file %s with wrong pid %d') % (
pid_file, pid))
print(_('Removing pid file %(pid_file)s with wrong pid '
'%(pid)d'), {'pid_file': pid_file, 'pid': pid})
remove_file(pid_file)
except OSError as e:
if e.errno == errno.ESRCH:

View File

@ -97,6 +97,11 @@ class ContainerSync(object):
req.environ.setdefault('swift.log_info', []).append(
'cs:no-local-user-key')
else:
# x-timestamp headers get shunted by gatekeeper
if 'x-backend-inbound-x-timestamp' in req.headers:
req.headers['x-timestamp'] = req.headers.pop(
'x-backend-inbound-x-timestamp')
expected = self.realms_conf.get_sig(
req.method, req.path,
req.headers.get('x-timestamp', '0'), nonce,

View File

@ -32,7 +32,7 @@ automatically inserted close to the start of the pipeline by the proxy server.
from swift.common.swob import Request
from swift.common.utils import get_logger
from swift.common.utils import get_logger, config_true_value
from swift.common.request_helpers import (
remove_items, get_sys_meta_prefix, get_object_transient_sysmeta_prefix
)
@ -72,6 +72,8 @@ class GatekeeperMiddleware(object):
self.logger = get_logger(conf, log_route='gatekeeper')
self.inbound_condition = make_exclusion_test(inbound_exclusions)
self.outbound_condition = make_exclusion_test(outbound_exclusions)
self.shunt_x_timestamp = config_true_value(
conf.get('shunt_inbound_x_timestamp', 'true'))
def __call__(self, env, start_response):
req = Request(env)
@ -79,6 +81,13 @@ class GatekeeperMiddleware(object):
if removed:
self.logger.debug('removed request headers: %s' % removed)
if 'X-Timestamp' in req.headers and self.shunt_x_timestamp:
ts = req.headers.pop('X-Timestamp')
req.headers['X-Backend-Inbound-X-Timestamp'] = ts
# log in a similar format as the removed headers
self.logger.debug('shunted request headers: %s' %
[('X-Timestamp', ts)])
def gatekeeper_response(status, response_headers, exc_info=None):
removed = filter(
lambda h: self.outbound_condition(h[0]),

View File

@ -75,12 +75,6 @@ class KeystoneAuth(object):
id.. For example, if the project id is ``1234``, the path is
``/v1/AUTH_1234``.
If the ``is_admin`` option is ``true``, a user whose username is the same
as the project name and who has any role on the project will have access
rights elevated to be the same as if the user had one of the
``operator_roles``. Note that the condition compares names rather than
UUIDs. This option is deprecated. It is ``false`` by default.
If you need to have a different reseller_prefix to be able to
mix different auth servers you can configure the option
``reseller_prefix`` in your keystoneauth entry like this::
@ -188,7 +182,11 @@ class KeystoneAuth(object):
self.reseller_admin_role = conf.get('reseller_admin_role',
'ResellerAdmin').lower()
config_is_admin = conf.get('is_admin', "false").lower()
self.is_admin = swift_utils.config_true_value(config_is_admin)
if swift_utils.config_true_value(config_is_admin):
self.logger.warning("The 'is_admin' option for keystoneauth is no "
"longer supported. Remove the 'is_admin' "
"option from your keystoneauth config")
config_overrides = conf.get('allow_overrides', 't').lower()
self.allow_overrides = swift_utils.config_true_value(config_overrides)
self.default_domain_id = conf.get('default_domain_id', 'default')
@ -484,14 +482,6 @@ class KeystoneAuth(object):
req.environ['swift_owner'] = True
return
# If user is of the same name of the tenant then make owner of it.
if self.is_admin and user_name == tenant_name:
self.logger.warning("the is_admin feature has been deprecated "
"and will be removed in the future "
"update your config file")
req.environ['swift_owner'] = True
return
if acl_authorized is not None:
return self.denied_response(req)

View File

@ -206,6 +206,9 @@ class ReconMiddleware(object):
"""list unmounted (failed?) devices"""
mountlist = []
for entry in os.listdir(self.devices):
if not os.path.isdir(os.path.join(self.devices, entry)):
continue
try:
mounted = check_mount(self.devices, entry)
except OSError as err:
@ -219,6 +222,9 @@ class ReconMiddleware(object):
"""get disk utilization statistics"""
devices = []
for entry in os.listdir(self.devices):
if not os.path.isdir(os.path.join(self.devices, entry)):
continue
try:
mounted = check_mount(self.devices, entry)
except OSError as err:

View File

@ -149,9 +149,15 @@ A GET request with the query parameter::
?multipart-manifest=get
Will return the actual manifest file itself. This is generated json and does
not match the data sent from the original multipart-manifest=put. This call's
main purpose is for debugging.
will return a transformed version of the original manifest, containing
additional fields and different key names.
A GET request with the query parameters::
?multipart-manifest=get&format=raw
will return the contents of the original manifest as it was sent by the client.
The main purpose for both calls is solely debugging.
When the manifest object is uploaded you are more or less guaranteed that
every segment in the manifest exists and matched the specifications.
@ -573,6 +579,9 @@ class SloGetContext(WSGIContext):
# Handle pass-through request for the manifest itself
if req.params.get('multipart-manifest') == 'get':
if req.params.get('format') == 'raw':
resp_iter = self.convert_segment_listing(
self._response_headers, resp_iter)
new_headers = []
for header, value in self._response_headers:
if header.lower() == 'content-type':
@ -606,7 +615,40 @@ class SloGetContext(WSGIContext):
req, resp_headers, resp_iter)
return response(req.environ, start_response)
def get_or_head_response(self, req, resp_headers, resp_iter):
def convert_segment_listing(self, resp_headers, resp_iter):
    """
    Converts the manifest data to match with the format
    that was put in through ?multipart-manifest=put

    Drops the server-generated fields (content_type, last_modified,
    sub_slo) and renames name/bytes/hash back to the client-facing
    path/size_bytes/etag keys.  Also rewrites Content-Length to match
    the re-serialized JSON body and stores the new header list on
    ``self._response_headers``.

    :param resp_headers: response headers
    :param resp_iter: a response iterable
    :returns: a one-element list containing the JSON body (bytes on py3)
    """
    segments = self._get_manifest_read(resp_iter)

    for seg_dict in segments:
        # These keys only exist in the transformed listing, not in the
        # original ?multipart-manifest=put payload.
        seg_dict.pop('content_type', None)
        seg_dict.pop('last_modified', None)
        seg_dict.pop('sub_slo', None)
        seg_dict['path'] = seg_dict.pop('name', None)
        seg_dict['size_bytes'] = seg_dict.pop('bytes', None)
        seg_dict['etag'] = seg_dict.pop('hash', None)

    json_data = json.dumps(segments)  # convert to string
    if six.PY3:
        json_data = json_data.encode('utf-8')

    new_headers = []
    for header, value in resp_headers:
        if header.lower() == 'content-length':
            # Body was re-serialized, so the original length is stale.
            new_headers.append(('Content-Length',
                                len(json_data)))
        else:
            new_headers.append((header, value))
    self._response_headers = new_headers

    return [json_data]
def _get_manifest_read(self, resp_iter):
with closing_if_possible(resp_iter):
resp_body = ''.join(resp_iter)
try:
@ -614,6 +656,11 @@ class SloGetContext(WSGIContext):
except ValueError:
segments = []
return segments
def get_or_head_response(self, req, resp_headers, resp_iter):
segments = self._get_manifest_read(resp_iter)
etag = md5()
content_length = 0
for seg_dict in segments:

View File

@ -169,8 +169,9 @@ from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlencode
from swift.proxy.controllers.base import get_account_info, get_container_info
from swift.common.swob import HeaderKeyDict, header_to_environ_key, \
HTTPUnauthorized, HTTPBadRequest
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import header_to_environ_key, HTTPUnauthorized, \
HTTPBadRequest
from swift.common.utils import split_path, get_valid_utf8_str, \
register_swift_info, get_hmac, streq_const_time, quote
@ -399,7 +400,7 @@ class TempURL(object):
def _start_response(status, headers, exc_info=None):
headers = self._clean_outgoing_headers(headers)
if env['REQUEST_METHOD'] == 'GET' and status[0] == '2':
if env['REQUEST_METHOD'] in ('GET', 'HEAD') and status[0] == '2':
# figure out the right value for content-disposition
# 1) use the value from the query string
# 2) use the value from the object metadata

View File

@ -443,10 +443,10 @@ class RingBuilder(object):
self._set_parts_wanted(replica_plan)
assign_parts = defaultdict(list)
# gather parts from failed devices
removed_devs = self._gather_parts_from_failed_devices(assign_parts)
# gather parts from replica count adjustment
self._adjust_replica2part2dev_size(assign_parts)
# gather parts from failed devices
removed_devs = self._gather_parts_from_failed_devices(assign_parts)
# gather parts for dispersion (N.B. this only picks up parts that
# *must* disperse according to the replica plan)
self._gather_parts_for_dispersion(assign_parts, replica_plan)
@ -1688,3 +1688,38 @@ class RingBuilder(object):
if matched:
matched_devs.append(dev)
return matched_devs
def increase_partition_power(self):
    """ Increases ring partition power by one.

    Devices will be assigned to partitions like this:

    OLD: 0, 3, 7, 5, 2, 1, ...
    NEW: 0, 0, 3, 3, 7, 7, 5, 5, 2, 2, 1, 1, ...

    Every partition is split in two, with both children staying on the
    device of the parent; per-device part counts, the last-part-moves
    array, part_power/parts, and the builder version are all updated
    to match.
    """
    new_replica2part2dev = []
    for replica in self._replica2part2dev:
        new_replica = array('H')
        for device in replica:
            new_replica.append(device)
            new_replica.append(device)  # append device a second time
        new_replica2part2dev.append(new_replica)
    self._replica2part2dev = new_replica2part2dev

    for device in self._iter_devs():
        # Each device now holds twice as many (half-sized) partitions.
        device['parts'] *= 2

    # We need to update the time when a partition has been moved the last
    # time. Since this is an array of all partitions, we need to double it
    # too
    new_last_part_moves = []
    for partition in self._last_part_moves:
        new_last_part_moves.append(partition)
        new_last_part_moves.append(partition)
    self._last_part_moves = new_last_part_moves

    self.part_power += 1
    self.parts *= 2
    self.version += 1

View File

@ -170,16 +170,13 @@ class BaseStoragePolicy(object):
if self.idx < 0:
raise PolicyError('Invalid index', idx)
self.alias_list = []
if not name or not self._validate_policy_name(name):
raise PolicyError('Invalid name %r' % name, idx)
self.alias_list.append(name)
self.add_name(name)
if aliases:
names_list = list_from_csv(aliases)
for alias in names_list:
if alias == name:
continue
self._validate_policy_name(alias)
self.alias_list.append(alias)
self.add_name(alias)
self.is_deprecated = config_true_value(is_deprecated)
self.is_default = config_true_value(is_default)
if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls:
@ -288,14 +285,16 @@ class BaseStoragePolicy(object):
to check policy names before setting them.
:param name: a name string for a single policy name.
:returns: true if the name is valid.
:raises: PolicyError if the policy name is invalid.
"""
if not name:
raise PolicyError('Invalid name %r' % name, self.idx)
# this is defensively restrictive, but could be expanded in the future
if not all(c in VALID_CHARS for c in name):
raise PolicyError('Names are used as HTTP headers, and can not '
'reliably contain any characters not in %r. '
'Invalid name %r' % (VALID_CHARS, name))
msg = 'Names are used as HTTP headers, and can not ' \
'reliably contain any characters not in %r. ' \
'Invalid name %r' % (VALID_CHARS, name)
raise PolicyError(msg, self.idx)
if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
msg = 'The name %s is reserved for policy index 0. ' \
'Invalid name %r' % (LEGACY_POLICY_NAME, name)
@ -305,8 +304,6 @@ class BaseStoragePolicy(object):
msg = 'The name %s is already assigned to this policy.' % name
raise PolicyError(msg, self.idx)
return True
def add_name(self, name):
"""
Adds an alias name to the storage policy. Shouldn't be called
@ -316,8 +313,8 @@ class BaseStoragePolicy(object):
:param name: a new alias for the storage policy
"""
if self._validate_policy_name(name):
self.alias_list.append(name)
self._validate_policy_name(name)
self.alias_list.append(name)
def remove_name(self, name):
"""

View File

@ -50,6 +50,7 @@ from six import BytesIO
from six import StringIO
from six.moves import urllib
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import reiterate, split_path, Timestamp, pairs, \
close_if_possible
from swift.common.exceptions import InvalidTimestamp
@ -271,53 +272,6 @@ class HeaderEnvironProxy(MutableMapping):
return keys
class HeaderKeyDict(dict):
"""
A dict that title-cases all keys on the way in, so as to be
case-insensitive.
"""
def __init__(self, base_headers=None, **kwargs):
if base_headers:
self.update(base_headers)
self.update(kwargs)
def update(self, other):
if hasattr(other, 'keys'):
for key in other.keys():
self[key.title()] = other[key]
else:
for key, value in other:
self[key.title()] = value
def __getitem__(self, key):
return dict.get(self, key.title())
def __setitem__(self, key, value):
if value is None:
self.pop(key.title(), None)
elif isinstance(value, six.text_type):
return dict.__setitem__(self, key.title(), value.encode('utf-8'))
else:
return dict.__setitem__(self, key.title(), str(value))
def __contains__(self, key):
return dict.__contains__(self, key.title())
def __delitem__(self, key):
return dict.__delitem__(self, key.title())
def get(self, key, default=None):
return dict.get(self, key.title(), default)
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def pop(self, key, default=None):
return dict.pop(self, key.title(), default)
def _resp_status_property():
"""
Set and retrieve the value of Response.status
@ -1345,7 +1299,7 @@ class Response(object):
object length and body or app_iter to reset the content_length
properties on the request.
It is ok to not call this method, the conditional resposne will be
It is ok to not call this method, the conditional response will be
maintained for you when you __call__ the response.
"""
self.response_iter = self._response_iter(self.app_iter, self._body)

View File

@ -68,6 +68,7 @@ from swift import gettext_ as _
import swift.common.exceptions
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \
HTTP_PRECONDITION_FAILED, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE
from swift.common.header_key_dict import HeaderKeyDict
if six.PY3:
stdlib_queue = eventlet.patcher.original('queue')
@ -2121,10 +2122,21 @@ def unlink_older_than(path, mtime):
Remove any file in a given path that that was last modified before mtime.
:param path: path to remove file from
:mtime: timestamp of oldest file to keep
:param mtime: timestamp of oldest file to keep
"""
for fname in listdir(path):
fpath = os.path.join(path, fname)
filepaths = map(functools.partial(os.path.join, path), listdir(path))
return unlink_paths_older_than(filepaths, mtime)
def unlink_paths_older_than(filepaths, mtime):
"""
Remove any files from the given list that that were
last modified before mtime.
:param filepaths: a list of strings, the full paths of files to check
:param mtime: timestamp of oldest file to keep
"""
for fpath in filepaths:
try:
if os.path.getmtime(fpath) < mtime:
os.unlink(fpath)
@ -2470,6 +2482,10 @@ class GreenAsyncPile(object):
finally:
self._inflight -= 1
@property
def inflight(self):
return self._inflight
def spawn(self, func, *args, **kwargs):
"""
Spawn a job in a green thread on the pile.
@ -2478,6 +2494,16 @@ class GreenAsyncPile(object):
self._inflight += 1
self._pool.spawn(self._run_func, func, args, kwargs)
def waitfirst(self, timeout):
"""
Wait up to timeout seconds for first result to come in.
:param timeout: seconds to wait for results
:returns: first item to come back, or None
"""
for result in self._wait(timeout, first_n=1):
return result
def waitall(self, timeout):
"""
Wait timeout seconds for any results to come in.
@ -2485,11 +2511,16 @@ class GreenAsyncPile(object):
:param timeout: seconds to wait for results
:returns: list of results accrued in that time
"""
return self._wait(timeout)
def _wait(self, timeout, first_n=None):
results = []
try:
with GreenAsyncPileWaitallTimeout(timeout):
while True:
results.append(next(self))
if first_n and len(results) >= first_n:
break
except (GreenAsyncPileWaitallTimeout, StopIteration):
pass
return results
@ -3648,7 +3679,6 @@ def parse_mime_headers(doc_file):
:param doc_file: binary file-like object containing a MIME document
:returns: a swift.common.swob.HeaderKeyDict containing the headers
"""
from swift.common.swob import HeaderKeyDict # avoid circular import
headers = []
while True:
line = doc_file.readline()

View File

@ -146,7 +146,7 @@ def update_new_item_from_existing(new_item, existing):
their timestamps are newer.
The multiple timestamps are encoded into a single string for storing
in the 'created_at' column of the the objects db table.
in the 'created_at' column of the objects db table.
:param new_item: A dict of object update attributes
:param existing: A dict of existing object attributes
@ -410,6 +410,7 @@ class ContainerBroker(DatabaseBroker):
:param name: object name to be deleted
:param timestamp: timestamp when the object was marked as deleted
:param storage_policy_index: the storage policy index for the object
"""
self.put_object(name, timestamp, 0, 'application/deleted', 'noetag',
deleted=1, storage_policy_index=storage_policy_index)
@ -670,6 +671,7 @@ class ContainerBroker(DatabaseBroker):
:param delimiter: delimiter for query
:param path: if defined, will set the prefix and delimiter based on
the path
:param storage_policy_index: storage policy index for query
:param reverse: reverse the result order.
:returns: list of tuples of (name, created_at, size, content_type,

View File

@ -41,10 +41,11 @@ from swift.common.exceptions import ConnectionTimeout
from swift.common.http import HTTP_NOT_FOUND, is_success
from swift.common.storage_policy import POLICIES
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPConflict, \
HTTPCreated, HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPException, HeaderKeyDict
HTTPInsufficientStorage, HTTPException
def gen_resp_headers(info, is_deleted=False):
@ -251,7 +252,7 @@ class ContainerController(BaseStorageServer):
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s during %s' %
broker.db_file, method)
(broker.db_file, method))
@public
@timing_stats()

View File

@ -18,7 +18,7 @@ import os
import uuid
from swift import gettext_ as _
from time import ctime, time
from random import choice, random, shuffle
from random import choice, random
from struct import unpack_from
from eventlet import sleep, Timeout
@ -29,7 +29,8 @@ from swift.container.backend import ContainerBroker
from swift.container.sync_store import ContainerSyncStore
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.internal_client import (
delete_object, put_object, InternalClient, UnexpectedResponse)
delete_object, put_object, head_object,
InternalClient, UnexpectedResponse)
from swift.common.exceptions import ClientException
from swift.common.ring import Ring
from swift.common.ring.utils import is_local_device
@ -39,7 +40,6 @@ from swift.common.utils import (
whataremyips, Timestamp, decode_timestamps)
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
from swift.common.storage_policy import POLICIES
from swift.common.wsgi import ConfigString
@ -100,13 +100,6 @@ class ContainerSync(Daemon):
If they exist, newer rows since the last sync will trigger PUTs or DELETEs
to the other container.
.. note::
Container sync will sync object POSTs only if the proxy server is set
to use "object_post_as_copy = true" which is the default. So-called
fast object posts, "object_post_as_copy = false" do not update the
container listings and therefore can't be detected for synchronization.
The actual syncing is slightly more complicated to make use of the three
(or number-of-replicas) main nodes for a container without each trying to
do the exact same work but also without missing work if one node happens to
@ -238,15 +231,6 @@ class ContainerSync(Daemon):
_('Unable to load internal client from config: %r (%s)') %
(internal_client_conf_path, err))
def get_object_ring(self, policy_idx):
"""
Get the ring object to use based on its policy.
:policy_idx: policy index as defined in swift.conf
:returns: appropriate ring object
"""
return POLICIES.get_object_ring(policy_idx, self.swift_dir)
def run_forever(self, *args, **kwargs):
"""
Runs container sync scans until stopped.
@ -364,8 +348,6 @@ class ContainerSync(Daemon):
row = rows[0]
if row['ROWID'] > sync_point1:
break
key = hash_path(info['account'], info['container'],
row['name'], raw_digest=True)
# This node will only initially sync out one third of the
# objects (if 3 replicas, 1/4 if 4, etc.) and will skip
# problematic rows as needed in case of faults.
@ -408,10 +390,84 @@ class ContainerSync(Daemon):
self.logger.exception(_('ERROR Syncing %s'),
broker if broker else path)
def _update_sync_to_headers(self, name, sync_to, user_key,
                            realm, realm_key, method, headers):
    """
    Updates container sync headers

    Mutates *headers* in place: when a realm and realm key are
    configured, a signed ``x-container-sync-auth`` header is added;
    otherwise the plain ``x-container-sync-key`` header is used.

    :param name: The name of the object
    :param sync_to: The URL to the remote container.
    :param user_key: The X-Container-Sync-Key to use when sending requests
                     to the other container.
    :param realm: The realm from self.realms_conf, if there is one.
                  If None, fallback to using the older allowed_sync_hosts
                  way of syncing.
    :param realm_key: The realm key from self.realms_conf, if there
                      is one. If None, fallback to using the older
                      allowed_sync_hosts way of syncing.
    :param method: HTTP method to create sig with
    :param headers: headers to update with container sync headers
    :returns: None; *headers* is updated in place
    """
    if realm and realm_key:
        # Fresh nonce per request; the signature covers method, path,
        # timestamp and nonce so it cannot be replayed elsewhere.
        nonce = uuid.uuid4().hex
        path = urlparse(sync_to).path + '/' + quote(name)
        sig = self.realms_conf.get_sig(method, path,
                                       headers.get('x-timestamp', 0),
                                       nonce, realm_key,
                                       user_key)
        headers['x-container-sync-auth'] = '%s %s %s' % (realm,
                                                         nonce,
                                                         sig)
    else:
        # Legacy allowed_sync_hosts style auth: shared key only.
        headers['x-container-sync-key'] = user_key
def _object_in_remote_container(self, name, sync_to, user_key,
                                realm, realm_key, timestamp):
    """
    Performs head object on remote to eliminate extra remote put and
    local get object calls

    :param name: The name of the object in the updated row in the local
                 database triggering the sync update.
    :param sync_to: The URL to the remote container.
    :param user_key: The X-Container-Sync-Key to use when sending requests
                     to the other container.
    :param realm: The realm from self.realms_conf, if there is one.
                  If None, fallback to using the older allowed_sync_hosts
                  way of syncing.
    :param realm_key: The realm key from self.realms_conf, if there
                      is one. If None, fallback to using the older
                      allowed_sync_hosts way of syncing.
    :param timestamp: last modified date of local object
    :returns: True if object already exists in remote
    :raises ClientException: on any HEAD failure other than 404
    """
    headers = {'x-timestamp': timestamp.internal}
    self._update_sync_to_headers(name, sync_to, user_key, realm,
                                 realm_key, 'HEAD', headers)
    try:
        # head_object returns (headers, body); body is unused for HEAD.
        metadata, _ = head_object(sync_to, name=name,
                                  headers=headers,
                                  proxy=self.select_http_proxy(),
                                  logger=self.logger,
                                  retries=0)
        remote_ts = Timestamp(metadata.get('x-timestamp', 0))
        self.logger.debug("remote obj timestamp %s local obj %s" %
                          (timestamp.internal, remote_ts.internal))
        if timestamp <= remote_ts:
            # Remote copy is at least as new; nothing to sync.
            return True
        # Object in remote should be updated
        return False
    except ClientException as http_err:
        # Object not in remote
        if http_err.http_status == 404:
            return False
        raise http_err
def container_sync_row(self, row, sync_to, user_key, broker, info,
realm, realm_key):
"""
Sends the update the row indicates to the sync_to container.
Update can be either delete or put.
:param row: The updated row in the local database triggering the sync
update.
@ -439,17 +495,9 @@ class ContainerSync(Daemon):
# timestamp of the source tombstone
try:
headers = {'x-timestamp': ts_data.internal}
if realm and realm_key:
nonce = uuid.uuid4().hex
path = urlparse(sync_to).path + '/' + quote(
row['name'])
sig = self.realms_conf.get_sig(
'DELETE', path, headers['x-timestamp'], nonce,
realm_key, user_key)
headers['x-container-sync-auth'] = '%s %s %s' % (
realm, nonce, sig)
else:
headers['x-container-sync-key'] = user_key
self._update_sync_to_headers(row['name'], sync_to,
user_key, realm, realm_key,
'DELETE', headers)
delete_object(sync_to, name=row['name'], headers=headers,
proxy=self.select_http_proxy(),
logger=self.logger,
@ -463,11 +511,10 @@ class ContainerSync(Daemon):
else:
# when sync'ing a live object, use ts_meta - this is the time
# at which the source object was last modified by a PUT or POST
part, nodes = \
self.get_object_ring(info['storage_policy_index']). \
get_nodes(info['account'], info['container'],
row['name'])
shuffle(nodes)
if self._object_in_remote_container(row['name'],
sync_to, user_key, realm,
realm_key, ts_meta):
return True
exc = None
# look up for the newest one
headers_out = {'X-Newest': True,
@ -502,16 +549,8 @@ class ContainerSync(Daemon):
if 'content-type' in headers:
headers['content-type'] = clean_content_type(
headers['content-type'])
if realm and realm_key:
nonce = uuid.uuid4().hex
path = urlparse(sync_to).path + '/' + quote(row['name'])
sig = self.realms_conf.get_sig(
'PUT', path, headers['x-timestamp'], nonce, realm_key,
user_key)
headers['x-container-sync-auth'] = '%s %s %s' % (
realm, nonce, sig)
else:
headers['x-container-sync-key'] = user_key
self._update_sync_to_headers(row['name'], sync_to, user_key,
realm, realm_key, 'PUT', headers)
put_object(sync_to, name=row['name'], headers=headers,
contents=FileLikeIter(body),
proxy=self.select_http_proxy(), logger=self.logger,

View File

@ -6,18 +6,18 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2014
# Ettore Atalan <atalanttore@googlemail.com>, 2014-2015
# Jonas John <jonas.john@e-werkzeug.eu>, 2015
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
# Frank Kloeker <eumel@arcor.de>, 2016. #zanata
# Monika Wolf <vcomas3@de.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.6.1.dev268\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-24 22:25+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"PO-Revision-Date: 2016-03-24 03:15+0000\n"
"Last-Translator: Monika Wolf <vcomas3@de.ibm.com>\n"
"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
@ -54,6 +54,16 @@ msgstr "%(ip)s/%(device)s zurückgemeldet als ausgehängt"
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
#, python-format
msgid ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) Partitionen von %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) Geräten rekonstruiert in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s verbleibend)"
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -218,6 +228,14 @@ msgstr "Kann nicht auf die Datei %s zugreifen."
msgid "Can not load profile data from %s."
msgstr "Die Profildaten von %s können nicht geladen werden."
#, python-format
msgid "Cannot read %s (%s)"
msgstr "%s (%s) kann nicht gelesen werden."
#, python-format
msgid "Cannot write %s (%s)"
msgstr "Schreiben von %s (%s) nicht möglich."
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "Client konnte nicht innerhalb von %ss vom Proxy lesen"
@ -228,6 +246,11 @@ msgstr "Client beim Lesen getrennt"
msgid "Client disconnected without sending enough data"
msgstr "Client getrennt ohne dem Senden von genügend Daten"
msgid "Client disconnected without sending last chunk"
msgstr ""
"Die Verbindung zum Client wurde getrennt, bevor der letzte Chunk gesendet "
"wurde. "
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata %(meta)s"
@ -235,6 +258,14 @@ msgstr ""
"Clientpfad %(client)s entspricht nicht dem in den Objektmetadaten "
"gespeicherten Pfad %(meta)s"
msgid ""
"Configuration option internal_client_conf_path not defined. Using default "
"configuration, See internal-client.conf-sample for options"
msgstr ""
"Konfigurationsoption internal_client_conf_path nicht definiert. "
"Standardkonfiguration wird verwendet. Informationen zu den Optionen finden "
"Sie in internal-client.conf-sample."
msgid "Connection refused"
msgstr "Verbindung abgelehnt"
@ -294,6 +325,11 @@ msgstr "Fehler beim Downloaden von Daten: %s"
msgid "Devices pass completed: %.02fs"
msgstr "Gerätedurchgang abgeschlossen: %.02fs"
#, python-format
msgid "Directory %r does not map to a valid policy (%s)"
msgstr ""
"Das Verzeichnis %r kann keiner gültigen Richtlinie (%s) zugeordnet werden."
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "FEHLER %(db_file)s: %(validate_sync_to_err)s"
@ -373,6 +409,10 @@ msgid "ERROR Exception causing client disconnect"
msgstr ""
"FEHLER Ausnahme, die zu einer Unterbrechung der Verbindung zum Client führt"
#, python-format
msgid "ERROR Exception transferring data to object servers %s"
msgstr "FEHLER: Ausnahme bei der Übertragung von Daten an die Ojektserver %s"
msgid "ERROR Failed to get my own IPs?"
msgstr "FEHLER Eigene IPs konnten nicht abgerufen werden?"
@ -465,7 +505,7 @@ msgstr ""
"FEHLER beim Synchronisieren von %(file)s Dateien mit dem Knoten %(node)s"
msgid "ERROR trying to replicate"
msgstr "FEHLER beim Versuch, zu replizieren"
msgstr "FEHLER beim Versuch zu replizieren"
#, python-format
msgid "ERROR while trying to clean up %s"
@ -526,10 +566,10 @@ msgid "Error on render profiling results: %s"
msgstr "Fehler beim Wiedergeben der Profilerstellungsergebnisse: %s"
msgid "Error parsing recon cache file"
msgstr "Fehler beim Analysieren von recon-Cachedatei"
msgstr "Fehler beim Analysieren von recon-Zwischenspeicherdatei"
msgid "Error reading recon cache file"
msgstr "Fehler beim Lesen von recon-Cachedatei"
msgstr "Fehler beim Lesen von recon-Zwischenspeicherdatei"
msgid "Error reading ringfile"
msgstr "Fehler beim Lesen der Ringdatei"
@ -550,6 +590,12 @@ msgstr "Fehler beim Syncen der Partition"
msgid "Error syncing with node: %s"
msgstr "Fehler beim Synchronisieren mit Knoten: %s"
#, python-format
msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
msgstr ""
"Fehler bei Versuch, erneuten Build zu erstellen für %(path)s policy#"
"%(policy)d frag#%(frag_index)s"
msgid "Error: An error occurred"
msgstr "Fehler: Ein Fehler ist aufgetreten"
@ -569,6 +615,9 @@ msgstr "Ausnahme in Reaper-Loop für Konto der höchsten Ebene"
msgid "Exception in top-level replication loop"
msgstr "Ausnahme in Replizierungsloop der höchsten Ebene"
msgid "Exception in top-levelreconstruction loop"
msgstr "Ausnahme in Rekonstruktionsloop der höchsten Ebene"
#, python-format
msgid "Exception while deleting container %s %s"
msgstr "Ausnahme beim Löschen von Container %s %s"
@ -606,6 +655,13 @@ msgstr "CNAME-Kette für %(given_domain)s bis %(found_domain)s wird gefolgt"
msgid "Found configs:"
msgstr "Gefundene Konfigurationen:"
msgid ""
"Handoffs first mode still has handoffs remaining. Aborting current "
"replication pass."
msgstr ""
"Der Modus 'handoffs_first' ist noch nicht abgeschlossen. Der aktuelle "
"Replikationsdurchgang wird abgebrochen."
msgid "Host unreachable"
msgstr "Host nicht erreichbar"
@ -645,6 +701,10 @@ msgstr ""
msgid "Killing long-running rsync: %s"
msgstr "Lange laufendes rsync wird gekillt: %s"
#, python-format
msgid "Loading JSON from %s failed (%s)"
msgstr "Laden von JSON aus %s fehlgeschlagen (%s)"
msgid "Lockup detected.. killing live coros."
msgstr "Suche erkannt. Live-Coros werden gelöscht."
@ -664,14 +724,26 @@ msgstr "Kein Cluster-Endpunkt für %r %r"
msgid "No permission to signal PID %d"
msgstr "Keine Berechtigung zu Signal-Programmkennung %d"
#, python-format
msgid "No policy with index %s"
msgstr "Keine Richtlinie mit Index %s"
#, python-format
msgid "No realm key for %r"
msgstr "Kein Bereichsschlüssel für %r"
#, python-format
msgid "No space left on device for %s (%s)"
msgstr "Kein freier Speicherplatz im Gerät für %s (%s) vorhanden."
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "Knotenfehler begrenzt %(ip)s:%(port)s (%(device)s)"
#, python-format
msgid "Not enough object servers ack'ed (got %d)"
msgstr "Es wurden nicht genügend Objektserver bestätigt (got %d)."
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
@ -719,13 +791,13 @@ msgstr ""
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
"Objektprüfung (%(type)s). Seit %(start_time)s: Lokal: %(passes)d übergeben, "
"%(quars)d unter Quarantäne gestellt, %(errors)d Fehlerdateien/s: "
"%(frate).2f , Bytes/s: %(brate).2f, Zeit insgesamt: %(total).2f, "
"%(quars)d unter Quarantäne gestellt, %(errors)d Fehler, Dateien/s: "
"%(frate).2f, Bytes/s: %(brate).2f, Zeit insgesamt: %(total).2f, "
"Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f"
#, python-format
@ -800,6 +872,14 @@ msgstr "Pfad in X-Container-Sync-To ist erforderlich"
msgid "Problem cleaning up %s"
msgstr "Problem bei der Bereinigung von %s"
#, python-format
msgid "Problem cleaning up %s (%s)"
msgstr "Problem bei der Bereinigung von %s (%s)"
#, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr "Problem beim Schreiben der langlebigen Statusdatei %s (%s)"
#, python-format
msgid "Profiling Error: %s"
msgstr "Fehler bei der Profilerstellung: %s"
@ -843,6 +923,14 @@ msgstr "%s Objekte werden entfernt"
msgid "Removing partition: %s"
msgstr "Partition wird entfernt: %s"
#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "PID-Datei %(pid_file)s mit falscher PID %(pid)d wird entfernt"
#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "PID-Datei %s mit ungültiger PID wird entfernt."
#, python-format
msgid "Removing stale pid file %s"
msgstr "Veraltete PID-Datei %s wird entfernt"
@ -951,10 +1039,22 @@ msgstr ""
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr "Zeitlimit %(action)s für memcached: %(server)s"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Zeitüberschreitungsausnahme bei %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Versuch, %(method)s %(path)s"
#, python-format
msgid "Trying to GET %(full_path)s"
msgstr "Versuch, %(full_path)s mit GET abzurufen"
#, python-format
msgid "Trying to get %s status of PUT to %s"
msgstr "Es wird versucht, %s-Status von PUT für %s abzurufen."
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "Versuch, den finalen Status von PUT für %s abzurufen"
@ -968,6 +1068,10 @@ msgstr "Versuch, während des GET-Vorgangs zu lesen (Wiederholung)"
msgid "Trying to send to client"
msgstr "Versuch, an den Client zu senden"
#, python-format
msgid "Trying to sync suffixes with %s"
msgstr "Es wird versucht, Suffixe mit %s zu synchronisieren."
#, python-format
msgid "Trying to write to %s"
msgstr "Versuch, an %s zu schreiben"
@ -979,11 +1083,24 @@ msgstr "NICHT ABGEFANGENE AUSNAHME"
msgid "Unable to find %s config section in %s"
msgstr "%s-Konfigurationsabschnitt in %s kann nicht gefunden werden"
#, python-format
msgid "Unable to load internal client from config: %r (%s)"
msgstr ""
"Interner Client konnte nicht aus der Konfiguration geladen werden: %r (%s)"
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr ""
"%s konnte nicht in libc gefunden werden. Wird als Nullbefehl verlassen."
#, python-format
msgid "Unable to locate config for %s"
msgstr "Konfiguration für %s wurde nicht gefunden."
#, python-format
msgid "Unable to locate config number %s for %s"
msgstr "Konfigurationsnummer %s für %s wurde nicht gefunden."
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
@ -1009,6 +1126,11 @@ msgstr "Unerwartete Antwort: %s"
msgid "Unhandled exception"
msgstr "Nicht behandelte Exception"
#, python-format
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr ""
"Unbekannte Ausnahme bei GET-Versuch: %(account)r %(container)r %(object)r"
#, python-format
msgid "Update report failed for %(container)s %(dbfile)s"
msgstr "Aktualisierungsbericht fehlgeschlagen für %(container)s %(dbfile)s"
@ -1043,6 +1165,10 @@ msgstr ""
msgid "Waited %s seconds for %s to die; giving up"
msgstr "Hat %s Sekunden für %s zum Erlöschen gewartet; Gibt auf"
#, python-format
msgid "Waited %s seconds for %s to die; killing"
msgstr "Hat %s Sekunden für %s zum Erlöschen gewartet. Wird abgebrochen."
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr ""
"Warnung: Geschwindigkeitsbegrenzung kann nicht ohne memcached-Client "

View File

@ -8,9 +8,9 @@
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.6.1.dev176\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-08 04:09+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -704,18 +704,6 @@ msgstr ""
"segundo: %(brate).2f, Tiempo de auditoría: %(audit).2f, Velocidad: "
"%(audit_rate).2f"
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
"Auditoría de objetos (%(type)s). Desde %(start_time)s: Localmente: "
"%(passes)d han pasado, %(quars)d en cuarentena, %(errors)d errores archivos "
"por segundo: %(frate).2f , bytes por segundo: %(brate).2f, Tiempo total: "
"%(total).2f, Tiempo de auditoría: %(audit).2f, Velocidad: %(audit_rate).2f"
#, python-format
msgid "Object audit stats: %s"
msgstr "Estadísticas de auditoría de objetos: %s"

View File

@ -6,16 +6,18 @@
# Maxime COQUEREL <max.coquerel@gmail.com>, 2014
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
# Angelique Pillal <pillal@fr.ibm.com>, 2016. #zanata
# Gael Rehault <gael_rehault@dell.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.7.1.dev4\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-25 11:23+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"PO-Revision-Date: 2016-03-25 03:29+0000\n"
"Last-Translator: Angelique Pillal <pillal@fr.ibm.com>\n"
"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
@ -52,6 +54,16 @@ msgstr "%(ip)s/%(device)s démonté (d'après la réponse)"
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
#, python-format
msgid ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions sur %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) périphériques reconstruites en %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -88,6 +100,10 @@ msgstr "%s n'existe pas"
msgid "%s is not mounted"
msgstr "%s n'est pas monté"
#, python-format
msgid "%s responded as unmounted"
msgstr "%s ont été identifié(es) comme étant démonté(es)"
#, python-format
msgid "%s running (%s - %s)"
msgstr "%s en cours d'exécution (%s - %s)"
@ -214,6 +230,14 @@ msgstr "Ne peut pas accéder au fichier %s."
msgid "Can not load profile data from %s."
msgstr "Impossible de charger des données de profil depuis %s."
#, python-format
msgid "Cannot read %s (%s)"
msgstr "Impossible de lire %s (%s)"
#, python-format
msgid "Cannot write %s (%s)"
msgstr "Impossible d'écrire %s (%s)"
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "Le client n'a pas lu les données du proxy en %s s"
@ -224,6 +248,9 @@ msgstr "Client déconnecté lors de la lecture"
msgid "Client disconnected without sending enough data"
msgstr "Client déconnecté avant l'envoi de toutes les données requises"
msgid "Client disconnected without sending last chunk"
msgstr "Le client a été déconnecté avant l'envoi du dernier bloc"
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata %(meta)s"
@ -231,14 +258,22 @@ msgstr ""
"Le chemin d'accès au client %(client)s ne correspond pas au chemin stocké "
"dans les métadonnées d'objet %(meta)s"
msgid ""
"Configuration option internal_client_conf_path not defined. Using default "
"configuration, See internal-client.conf-sample for options"
msgstr ""
"L'option de configuration internal_client_conf_path n'a pas été définie. La "
"configuration par défaut est utilisée. Consultez les options dans internal-"
"client.conf-sample."
msgid "Connection refused"
msgstr "Connexion refusé"
msgstr "Connexion refusée"
msgid "Connection timeout"
msgstr "Connexion timeout"
msgstr "Dépassement du délai d'attente de connexion"
msgid "Container"
msgstr "Containeur"
msgstr "Conteneur"
#, python-format
msgid "Container audit \"once\" mode completed: %.02fs"
@ -280,7 +315,7 @@ msgstr "Liaison impossible à %s:%s après une tentative de %s secondes"
#, python-format
msgid "Could not load %r: %s"
msgstr "Ne peut pas etre charger %r: %s"
msgstr "Impossible de charger %r: %s"
#, python-format
msgid "Data download error: %s"
@ -290,6 +325,10 @@ msgstr "Erreur de téléchargement des données: %s"
msgid "Devices pass completed: %.02fs"
msgstr "Session d'audit d'unité terminée : %.02fs"
#, python-format
msgid "Directory %r does not map to a valid policy (%s)"
msgstr "Le répertoire %r n'est pas mappé à une stratégie valide (%s)"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERREUR %(db_file)s : %(validate_sync_to_err)s"
@ -372,6 +411,11 @@ msgstr ""
msgid "ERROR Exception causing client disconnect"
msgstr "ERREUR Exception entraînant la déconnexion du client"
#, python-format
msgid "ERROR Exception transferring data to object servers %s"
msgstr ""
"ERREUR Exception lors du transfert de données vers des serveurs d'objets %s"
msgid "ERROR Failed to get my own IPs?"
msgstr "ERREUR Obtention impossible de mes propres adresses IP ?"
@ -550,6 +594,12 @@ msgstr "Erreur de synchronisation de la partition"
msgid "Error syncing with node: %s"
msgstr "Erreur de synchronisation avec le noeud : %s"
#, python-format
msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
msgstr ""
"Une erreur est survenue lors de la tentative de régénération de %(path)s "
"policy#%(policy)d frag#%(frag_index)s"
msgid "Error: An error occurred"
msgstr "Erreur : une erreur s'est produite"
@ -569,6 +619,9 @@ msgstr "Exception dans la boucle de collecteur de compte de niveau supérieur"
msgid "Exception in top-level replication loop"
msgstr "Exception dans la boucle de réplication de niveau supérieur"
msgid "Exception in top-levelreconstruction loop"
msgstr "Exception dans la boucle de reconstruction de niveau supérieur"
#, python-format
msgid "Exception while deleting container %s %s"
msgstr "Exception lors de la suppression du conteneur %s %s"
@ -606,7 +659,14 @@ msgstr ""
"Suivi de la chaîne CNAME pour %(given_domain)s jusqu'à %(found_domain)s"
msgid "Found configs:"
msgstr "Configurations trouvés:"
msgstr "Configurations trouvées :"
msgid ""
"Handoffs first mode still has handoffs remaining. Aborting current "
"replication pass."
msgstr ""
"Le premier mode de transferts contient d'autres transferts. Abandon de la "
"session de réplication en cours."
msgid "Host unreachable"
msgstr "Hôte inaccessible"
@ -627,6 +687,10 @@ msgstr "Hôte %r non valide dans X-Container-Sync-To"
msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Entrée en attente non valide %(file)s : %(entry)s"
#, python-format
msgid "Invalid response %(resp)s from %(full_path)s"
msgstr "Réponse %(resp)s non valide de %(full_path)s"
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "Réponse %(resp)s non valide de %(ip)s"
@ -643,6 +707,10 @@ msgstr ""
msgid "Killing long-running rsync: %s"
msgstr "Arrêt de l'opération Rsync à exécution longue : %s"
#, python-format
msgid "Loading JSON from %s failed (%s)"
msgstr "Echec du chargement du fichier JSON depuis %s (%s)"
msgid "Lockup detected.. killing live coros."
msgstr "Blocage détecté. Arrêt des coroutines actives."
@ -662,16 +730,29 @@ msgstr "Aucun noeud final de cluster pour %r %r"
msgid "No permission to signal PID %d"
msgstr "Aucun droit pour signaler le PID %d"
#, python-format
msgid "No policy with index %s"
msgstr "Aucune statégie avec un index de type %s"
#, python-format
msgid "No realm key for %r"
msgstr "Aucune clé de domaine pour %r"
#, python-format
msgid "No space left on device for %s (%s)"
msgstr "Plus d'espace disponible sur le périphérique pour %s (%s)"
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr ""
"Noeud marqué avec limite d'erreurs (error_limited) %(ip)s:%(port)s "
"(%(device)s)"
#, python-format
msgid "Not enough object servers ack'ed (got %d)"
msgstr ""
"Le nombre de serveurs d'objets reconnus n'est pas suffisant (%d obtenus)"
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
@ -680,6 +761,10 @@ msgstr ""
"Introuvable : %(sync_from)r => %(sync_to)r - objet "
"%(obj_name)r"
#, python-format
msgid "Nothing reconstructed for %s seconds."
msgstr "Aucun élément reconstruit pendant %s secondes."
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "Aucun élément répliqué pendant %s secondes."
@ -717,19 +802,29 @@ msgstr ""
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
"Audit d'objet (%(type)s). Depuis %(start_time)s, localement : %(passes)d "
"succès. %(quars)d en quarantaine. %(errors)d erreurs. Fichiers/sec : "
"%(frate).2f. octets/sec : %(brate).2f. Durée totale : %(total).2f. Durée "
"%(frate).2f. Octets/sec : %(brate).2f. Durée totale : %(total).2f. Durée "
"d'audit : %(audit).2f. Taux : %(audit_rate).2f"
#, python-format
msgid "Object audit stats: %s"
msgstr "Statistiques de l'audit d'objet : %s"
#, python-format
msgid "Object reconstruction complete (once). (%.02f minutes)"
msgstr ""
"La reconstruction d'objet en mode Once (une fois) est terminée. (%.02f "
"minutes)"
#, python-format
msgid "Object reconstruction complete. (%.02f minutes)"
msgstr "Reconstruction d'objet terminée. (%.02f minutes)"
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr ""
@ -791,6 +886,15 @@ msgstr "Chemin requis dans X-Container-Sync-To"
msgid "Problem cleaning up %s"
msgstr "Problème lors du nettoyage de %s"
#, python-format
msgid "Problem cleaning up %s (%s)"
msgstr "Problème lors du nettoyage de %s (%s)"
#, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr ""
"Un problème est survenu lors de l'écriture du fichier d'état durable %s (%s)"
#, python-format
msgid "Profiling Error: %s"
msgstr "Erreur de profilage : %s"
@ -834,6 +938,15 @@ msgstr "Suppression de %s objets"
msgid "Removing partition: %s"
msgstr "Suppression partition: %s"
#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr ""
"Suppression du fichier PID %(pid_file)s, comportant un PID incorrect %(pid)d"
#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "Suppression du fichier pid %s comportant un pid non valide"
#, python-format
msgid "Removing stale pid file %s"
msgstr "Suppression du fichier PID %s périmé"
@ -853,6 +966,11 @@ msgstr ""
"Renvoi de 498 pour %(meth)s jusqu'à %(acc)s/%(cont)s/%(obj)s . Ratelimit "
"(Max Sleep) %(e)s"
msgid "Ring change detected. Aborting current reconstruction pass."
msgstr ""
"Changement d'anneau détecté. Abandon de la session de reconstruction en "
"cours."
msgid "Ring change detected. Aborting current replication pass."
msgstr ""
"Changement d'anneau détecté. Abandon de la session de réplication en cours."
@ -861,6 +979,9 @@ msgstr ""
msgid "Running %s once"
msgstr "Exécution unique de %s"
msgid "Running object reconstructor in script mode."
msgstr "Exécution du reconstructeur d'objet en mode script."
msgid "Running object replicator in script mode."
msgstr "Exécution du réplicateur d'objet en mode script."
@ -902,6 +1023,12 @@ msgstr "%s est ignoré car il n'est pas monté"
msgid "Starting %s"
msgstr "Démarrage %s"
msgid "Starting object reconstruction pass."
msgstr "Démarrage de la session de reconstruction d'objet."
msgid "Starting object reconstructor in daemon mode."
msgstr "Démarrage du reconstructeur d'objet en mode démon."
msgid "Starting object replication pass."
msgstr "Démarrage de la session de réplication d'objet."
@ -927,10 +1054,24 @@ msgstr ""
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr "Délai d'attente de %(action)s dans memcached : %(server)s"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr ""
"Exception liée à un dépassement de délai concernant %(ip)s:%(port)s/"
"%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Tentative d'exécution de %(method)s %(path)s"
#, python-format
msgid "Trying to GET %(full_path)s"
msgstr "Tentative de lecture de %(full_path)s"
#, python-format
msgid "Trying to get %s status of PUT to %s"
msgstr "Tentative d'obtention du statut de l'opération PUT %s sur %s"
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "Tentative d'obtention du statut final de l'opération PUT sur %s"
@ -944,6 +1085,10 @@ msgstr "Tentative de lecture pendant une opération GET (nouvelle tentative)"
msgid "Trying to send to client"
msgstr "Tentative d'envoi au client"
#, python-format
msgid "Trying to sync suffixes with %s"
msgstr "Tentative de synchronisation de suffixes à l'aide de %s"
#, python-format
msgid "Trying to write to %s"
msgstr "Tentative d'écriture sur %s"
@ -953,13 +1098,26 @@ msgstr "EXCEPTION NON INTERCEPTEE"
#, python-format
msgid "Unable to find %s config section in %s"
msgstr "Impossuble de trouvé la section configuration %s dans %s"
msgstr "Impossible de trouver la section de configuration %s dans %s"
#, python-format
msgid "Unable to load internal client from config: %r (%s)"
msgstr ""
"Impossible de charger le client interne depuis la configuration : %r (%s)"
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr ""
"Impossible de localiser %s dans libc. Laissé comme action nulle (no-op)."
#, python-format
msgid "Unable to locate config for %s"
msgstr "Impossible de trouver la configuration pour %s"
#, python-format
msgid "Unable to locate config number %s for %s"
msgstr "Impossible de trouver la configuration portant le numéro %s pour %s"
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
@ -985,6 +1143,12 @@ msgstr "Réponse inattendue : %s"
msgid "Unhandled exception"
msgstr "Exception non prise en charge"
#, python-format
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr ""
"Une exception inconnue s'est produite pendant une opération GET: %(account)r "
"%(container)r %(object)r"
#, python-format
msgid "Update report failed for %(container)s %(dbfile)s"
msgstr "Echec du rapport de mise à jour pour %(container)s %(dbfile)s"
@ -1019,6 +1183,10 @@ msgstr ""
msgid "Waited %s seconds for %s to die; giving up"
msgstr "Attente de %s secondes pour la fin de %s ; abandon"
#, python-format
msgid "Waited %s seconds for %s to die; killing"
msgstr "Attente de %s secondes pour la fin de %s. En cours d'arrêt"
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr "Avertissement : impossible d'appliquer Ratelimit sans client memcached"

View File

@ -5,16 +5,18 @@
# Translators:
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
# Alessandra <alessandra@translated.net>, 2016. #zanata
# Remo Mattei <Remo@rm.ht>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.6.1.dev254\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-22 19:48+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"PO-Revision-Date: 2016-03-22 05:31+0000\n"
"Last-Translator: Remo Mattei <Remo@rm.ht>\n"
"Language: it\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
@ -51,6 +53,16 @@ msgstr "%(ip)s/%(device)s ha risposto come smontato"
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
#, python-format
msgid ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partizioni di %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) dispositivi ricostruiti in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s rimanenti)"
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -87,6 +99,10 @@ msgstr "%s non esiste"
msgid "%s is not mounted"
msgstr "%s non è montato"
#, python-format
msgid "%s responded as unmounted"
msgstr "%s ha risposto come smontato"
#, python-format
msgid "%s running (%s - %s)"
msgstr "%s in esecuzione (%s - %s)"
@ -196,7 +212,7 @@ msgid "Beginning pass on account %s"
msgstr "Avvio della trasmissione sull'account %s"
msgid "Beginning replication run"
msgstr "Avvio dell'esecuzione della replica"
msgstr "Avvio replica"
msgid "Broker error trying to rollback locked connection"
msgstr ""
@ -211,6 +227,14 @@ msgstr "Impossibile accedere al file %s."
msgid "Can not load profile data from %s."
msgstr "Impossibile caricare i dati del profilo da %s."
#, python-format
msgid "Cannot read %s (%s)"
msgstr "Non è possibile leggere %s (%s)"
#, python-format
msgid "Cannot write %s (%s)"
msgstr "Non è possibile scrivere %s (%s)"
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "Il client non ha eseguito la lettura dal proxy in %ss"
@ -221,6 +245,9 @@ msgstr "Client scollegato alla lettura"
msgid "Client disconnected without sending enough data"
msgstr "Client disconnesso senza inviare dati sufficienti"
msgid "Client disconnected without sending last chunk"
msgstr "Client disconnesso senza inviare l'ultima porzione"
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata %(meta)s"
@ -228,6 +255,14 @@ msgstr ""
"Il percorso del client %(client)s non corrisponde al percorso memorizzato "
"nei metadati dell'oggetto %(meta)s"
msgid ""
"Configuration option internal_client_conf_path not defined. Using default "
"configuration, See internal-client.conf-sample for options"
msgstr ""
"Opzione di configurazione internal_client_conf_path non definita. Viene "
"utilizzata la configurazione predefinita, vedere l'esempio internal-client."
"conf-sample per le opzioni"
msgid "Connection refused"
msgstr "Connessione rifiutata"
@ -289,6 +324,10 @@ msgstr "Errore di download dei dati: %s"
msgid "Devices pass completed: %.02fs"
msgstr "Trasmissione dei dispositivi completata: %.02fs"
#, python-format
msgid "Directory %r does not map to a valid policy (%s)"
msgstr "La directory %r non è associata ad una politica valida (%s)"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERRORE %(db_file)s: %(validate_sync_to_err)s"
@ -367,6 +406,11 @@ msgstr "ERRORE Errore di chiusura DiskFile %(data_file)s: %(exc)s : %(stack)s"
msgid "ERROR Exception causing client disconnect"
msgstr "ERRORE Eccezione che causa la disconnessione del client"
#, python-format
msgid "ERROR Exception transferring data to object servers %s"
msgstr ""
"ERRORE Eccezione durante il trasferimento di dati nel server degli oggetti %s"
msgid "ERROR Failed to get my own IPs?"
msgstr "ERRORE Impossibile ottenere i propri IP?"
@ -404,7 +448,7 @@ msgstr "ERRORE Eccezione non gestita nella richiesta"
#, python-format
msgid "ERROR __call__ error with %(method)s %(path)s "
msgstr "ERRORE Errore __call__ con %(method)s %(path)s "
msgstr "ERRORE errore __call__ con %(method)s %(path)s "
#, python-format
msgid ""
@ -545,8 +589,14 @@ msgstr "Errore durante la sincronizzazione della partizione"
msgid "Error syncing with node: %s"
msgstr "Errore durante la sincronizzazione con il nodo: %s"
#, python-format
msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
msgstr ""
"Errore nel tentativo di ricreare %(path)s policy#%(policy)d frag#"
"%(frag_index)s"
msgid "Error: An error occurred"
msgstr "Errore: si è verificato un errore"
msgstr "Errore: Si è verificato un errore"
msgid "Error: missing config path argument"
msgstr "Errore: Argomento path della configurazione mancante"
@ -556,7 +606,7 @@ msgid "Error: unable to locate %s"
msgstr "Errore: impossibile individuare %s"
msgid "Exception dumping recon cache"
msgstr "Eccezione durante il dump della cache di riconoscimento"
msgstr "Eccezione durante il dump della cache di recon"
msgid "Exception in top-level account reaper loop"
msgstr "Eccezione nel loop reaper dell'account di livello superiore"
@ -564,6 +614,9 @@ msgstr "Eccezione nel loop reaper dell'account di livello superiore"
msgid "Exception in top-level replication loop"
msgstr "Eccezione nel loop di replica di livello superiore"
msgid "Exception in top-levelreconstruction loop"
msgstr "Eccezione nel loop di ricostruzione di livello superiore"
#, python-format
msgid "Exception while deleting container %s %s"
msgstr "Eccezione durante l'eliminazione del contenitore %s %s"
@ -603,6 +656,13 @@ msgstr ""
msgid "Found configs:"
msgstr "Configurazioni trovate:"
msgid ""
"Handoffs first mode still has handoffs remaining. Aborting current "
"replication pass."
msgstr ""
"Nella prima modalità di passaggio ci sono ancora passaggi restanti. "
"Interruzione del passaggio di replica corrente."
msgid "Host unreachable"
msgstr "Host non raggiungibile"
@ -622,6 +682,10 @@ msgstr "Host non valido %r in X-Container-Sync-To"
msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Voce in sospeso non valida %(file)s: %(entry)s"
#, python-format
msgid "Invalid response %(resp)s from %(full_path)s"
msgstr "Risposta non valida %(resp)s da %(full_path)s"
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "Risposta non valida %(resp)s da %(ip)s"
@ -638,6 +702,10 @@ msgstr ""
msgid "Killing long-running rsync: %s"
msgstr "Chiusura rsync ad elaborazione prolungata: %s"
#, python-format
msgid "Loading JSON from %s failed (%s)"
msgstr "Caricamento JSON da %s fallito (%s)"
msgid "Lockup detected.. killing live coros."
msgstr "Blocco rilevato... chiusura dei coros attivi."
@ -657,20 +725,36 @@ msgstr "Nessun endpoint del cluster per %r %r"
msgid "No permission to signal PID %d"
msgstr "Nessuna autorizzazione per la segnalazione del PID %d"
#, python-format
msgid "No policy with index %s"
msgstr "Nessuna politica con indice %s"
#, python-format
msgid "No realm key for %r"
msgstr "Nessuna chiave dell'area di autenticazione per %r"
#, python-format
msgid "No space left on device for %s (%s)"
msgstr "Nessuno spazio rimasto sul dispositivo per %s (%s)"
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "Errore del nodo limitato %(ip)s:%(port)s (%(device)s)"
#, python-format
msgid "Not enough object servers ack'ed (got %d)"
msgstr "Server degli oggetti riconosciuti non sufficienti (%d ricevuti)"
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
"%(obj_name)r"
msgstr "%(sync_from)r => %(sync_to)r non trovato - oggetto %(obj_name)r"
#, python-format
msgid "Nothing reconstructed for %s seconds."
msgstr "Nessun elemento ricostruito per %s secondi."
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "Nessun elemento replicato per %s secondi."
@ -706,7 +790,7 @@ msgstr ""
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
@ -719,6 +803,14 @@ msgstr ""
msgid "Object audit stats: %s"
msgstr "Statistiche verifica oggetto: %s"
#, python-format
msgid "Object reconstruction complete (once). (%.02f minutes)"
msgstr "Ricostruzione dell'oggetto completata (una volta). (%.02f minuti)"
#, python-format
msgid "Object reconstruction complete. (%.02f minutes)"
msgstr "Ricostruzione dell'oggetto completata. (%.02f minuti)"
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "Replica dell'oggetto completata (una volta). (%.02f minuti)"
@ -778,6 +870,14 @@ msgstr "Percorso richiesto in X-Container-Sync-To"
msgid "Problem cleaning up %s"
msgstr "Problema durante la ripulitura di %s"
#, python-format
msgid "Problem cleaning up %s (%s)"
msgstr "Problema durante la ripulitura di %s (%s)"
#, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr "Problema durante la scrittura del file di stato durevole %s (%s)"
#, python-format
msgid "Profiling Error: %s"
msgstr "Errore di creazione dei profili: %s"
@ -821,6 +921,14 @@ msgstr "Rimozione di oggetti %s"
msgid "Removing partition: %s"
msgstr "Rimozione della partizione: %s"
#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "Rimozione del file pid %(pid_file)s con pid non valido %(pid)d"
#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "Rimozione del file pid %s con pid non valido"
#, python-format
msgid "Removing stale pid file %s"
msgstr "Rimozione del file pid %s obsoleto in corso"
@ -830,7 +938,7 @@ msgstr "Esecuzione della replica TERMINATA"
#, python-format
msgid "Returning 497 because of blacklisting: %s"
msgstr "Viene restituito 497 a causa della blacklist: %s"
msgstr "Viene restituito il codice 497 a causa della blacklist: %s"
#, python-format
msgid ""
@ -840,6 +948,11 @@ msgstr ""
"Viene restituito 498 per %(meth)s a %(acc)s/%(cont)s/%(obj)s . Ratelimit "
"(numero massimo sospensioni) %(e)s"
msgid "Ring change detected. Aborting current reconstruction pass."
msgstr ""
"Modifica ring rilevata. Interruzione della trasmissione della ricostruzione "
"corrente."
msgid "Ring change detected. Aborting current replication pass."
msgstr ""
"Modifica ring rilevata. Interruzione della trasmissione della replica "
@ -849,6 +962,10 @@ msgstr ""
msgid "Running %s once"
msgstr "Esecuzione di %s una volta"
msgid "Running object reconstructor in script mode."
msgstr ""
"Esecuzione del programma di ricostruzione dell'oggetto in modalità script."
msgid "Running object replicator in script mode."
msgstr "Esecuzione del programma di replica dell'oggetto in modalità script."
@ -892,6 +1009,12 @@ msgstr "%s viene ignorato perché non è montato"
msgid "Starting %s"
msgstr "Avvio di %s"
msgid "Starting object reconstruction pass."
msgstr "Avvio della trasmissione della ricostruzione dell'oggetto."
msgid "Starting object reconstructor in daemon mode."
msgstr "Avvio del programma di ricostruzione dell'oggetto in modalità daemon."
msgid "Starting object replication pass."
msgstr "Avvio della trasmissione della replica dell'oggetto."
@ -903,7 +1026,7 @@ msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
msgstr "Rsync di %(src)s eseguito correttamente su %(dst)s (%(time).03f)"
msgid "The file type are forbidden to access!"
msgstr "Non è consentito l'accesso al tipo di file."
msgstr "Non è consentito l'accesso a questo tipo di file!"
#, python-format
msgid ""
@ -917,10 +1040,22 @@ msgstr ""
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr "Timeout di %(action)s su memcached: %(server)s"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Eccezione di timeout con %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Tentativo di %(method)s %(path)s"
#, python-format
msgid "Trying to GET %(full_path)s"
msgstr "Tentativo di eseguire GET %(full_path)s"
#, python-format
msgid "Trying to get %s status of PUT to %s"
msgstr "Tentativo di acquisire lo stato %s di PUT su %s"
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "Tentativo di acquisire lo stato finale di PUT su %s"
@ -934,6 +1069,10 @@ msgstr "Tentativo di lettura durante GET (nuovo tentativo)"
msgid "Trying to send to client"
msgstr "Tentativo di invio al client"
#, python-format
msgid "Trying to sync suffixes with %s"
msgstr "Tentativo di sincronizzazione dei suffissi con %s"
#, python-format
msgid "Trying to write to %s"
msgstr "Tentativo di scrittura in %s"
@ -945,10 +1084,22 @@ msgstr "ECCEZIONE NON RILEVATA"
msgid "Unable to find %s config section in %s"
msgstr "Impossibile trovare la sezione di configurazione %s in %s"
#, python-format
msgid "Unable to load internal client from config: %r (%s)"
msgstr "Impossibile caricare il client interno dalla configurazione: %r (%s)"
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "Impossibile individuare %s in libc. Lasciato come no-op."
#, python-format
msgid "Unable to locate config for %s"
msgstr "Impossibile individuare la configurazione per %s"
#, python-format
msgid "Unable to locate config number %s for %s"
msgstr "Impossibile individuare il numero di configurazione %s per %s"
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
@ -974,6 +1125,12 @@ msgstr "Risposta imprevista: %s"
msgid "Unhandled exception"
msgstr "Eccezione non gestita"
#, python-format
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr ""
"Eccezione imprevista nel tentativo di eseguire GET: %(account)r "
"%(container)r %(object)r"
#, python-format
msgid "Update report failed for %(container)s %(dbfile)s"
msgstr "Report di aggiornamento non riuscito per %(container)s %(dbfile)s"
@ -1010,6 +1167,12 @@ msgstr ""
"Sono trascorsi %s secondi in attesa che %s venga interrotto; operazione "
"terminata"
#, python-format
msgid "Waited %s seconds for %s to die; killing"
msgstr ""
"Sono trascorsi %s secondi in attesa che %s venga interrotto; chiusura "
"forzata in corso"
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr "Avvertenza: impossibile eseguire ratelimit senza un client memcached"

View File

@ -7,16 +7,17 @@
# Akihiro Motoki <amotoki@gmail.com>, 2015. #zanata
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
# 笹原 昌美 <ebb0de1@jp.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.7.1.dev4\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-25 11:23+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-09-26 09:26+0000\n"
"Last-Translator: Akihiro Motoki <amotoki@gmail.com>\n"
"PO-Revision-Date: 2016-03-25 07:46+0000\n"
"Last-Translator: 笹原 昌美 <ebb0de1@jp.ibm.com>\n"
"Language: ja\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
@ -53,6 +54,16 @@ msgstr "%(ip)s/%(device)s はアンマウントとして応答しました"
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
#, python-format
msgid ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
"%(device)d/%(dtotal)d (%(dpercentage).2f%%) デバイスの %(reconstructed)d/"
"%(total)d (%(percentage).2f%%) パーティションが %(time).2fs で再構成されまし"
"た (%(rate).2f/秒、残り %(remaining)s)"
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -89,6 +100,10 @@ msgstr "%s が存在しません"
msgid "%s is not mounted"
msgstr "%s がマウントされていません"
#, python-format
msgid "%s responded as unmounted"
msgstr "%s はアンマウントとして応答しました"
#, python-format
msgid "%s running (%s - %s)"
msgstr "%s が実行中 (%s - %s)"
@ -209,6 +224,14 @@ msgstr "ファイル %s にアクセスできません。"
msgid "Can not load profile data from %s."
msgstr "プロファイルデータを %s からロードできません。"
#, python-format
msgid "Cannot read %s (%s)"
msgstr "%s を読み取ることができません (%s)"
#, python-format
msgid "Cannot write %s (%s)"
msgstr "%s を書き込むことができません (%s)"
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "クライアントは %s 内のプロキシーからの読み取りを行いませんでした"
@ -219,6 +242,9 @@ msgstr "クライアントが読み取り時に切断されました"
msgid "Client disconnected without sending enough data"
msgstr "十分なデータを送信せずにクライアントが切断されました"
msgid "Client disconnected without sending last chunk"
msgstr "最後のチャンクを送信せずにクライアントが切断されました"
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata %(meta)s"
@ -226,6 +252,14 @@ msgstr ""
"クライアントパス %(client)s はオブジェクトメタデータ %(meta)s に保管されたパ"
"スに一致しません"
msgid ""
"Configuration option internal_client_conf_path not defined. Using default "
"configuration, See internal-client.conf-sample for options"
msgstr ""
"設定オプション internal_client_conf_path が定義されていません。デフォルト設定"
"を使用しています。オプションについては internal-client.conf-sample を参照して"
"ください"
msgid "Connection refused"
msgstr "接続が拒否されました"
@ -283,6 +317,10 @@ msgstr "データダウンロードエラー: %s"
msgid "Devices pass completed: %.02fs"
msgstr "デバイスパスが完了しました: %.02fs"
#, python-format
msgid "Directory %r does not map to a valid policy (%s)"
msgstr "ディレクトリー %r は有効なポリシーにマップしていません (%s) "
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "エラー %(db_file)s: %(validate_sync_to_err)s"
@ -359,6 +397,10 @@ msgstr ""
msgid "ERROR Exception causing client disconnect"
msgstr "エラー: 例外によりクライアントが切断されています"
#, python-format
msgid "ERROR Exception transferring data to object servers %s"
msgstr "エラー: オブジェクトサーバー %s へのデータ転送で例外が発生しました"
msgid "ERROR Failed to get my own IPs?"
msgstr "エラー: 自分の IP の取得に失敗?"
@ -533,6 +575,12 @@ msgstr "パーティションとの同期エラー"
msgid "Error syncing with node: %s"
msgstr "ノードとの同期エラー: %s"
#, python-format
msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
msgstr ""
"%(path)s の再構築を試行中にエラーが発生しました。ポリシー #%(policy)d フラグ"
"メント #%(frag_index)s"
msgid "Error: An error occurred"
msgstr "エラー: エラーが発生しました"
@ -552,6 +600,9 @@ msgstr "最上位アカウントリーパーループで例外が発生しまし
msgid "Exception in top-level replication loop"
msgstr "最上位複製ループで例外が発生しました"
msgid "Exception in top-levelreconstruction loop"
msgstr "最上位再構成ループで例外が発生しました"
#, python-format
msgid "Exception while deleting container %s %s"
msgstr "コンテナー %s %s の削除中に例外が発生しました"
@ -590,6 +641,13 @@ msgstr "%(given_domain)s から %(found_domain)s へ CNAME チェーンをフォ
msgid "Found configs:"
msgstr "構成が見つかりました:"
msgid ""
"Handoffs first mode still has handoffs remaining. Aborting current "
"replication pass."
msgstr ""
"ハンドオフのファーストモードにハンドオフが残っています。現行複製パスを打ち切"
"ります。"
msgid "Host unreachable"
msgstr "ホストが到達不能です"
@ -609,6 +667,10 @@ msgstr "無効なホスト %r が X-Container-Sync-To にあります"
msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "無効な保留中項目 %(file)s: %(entry)s"
#, python-format
msgid "Invalid response %(resp)s from %(full_path)s"
msgstr "%(full_path)s からの応答 %(resp)s が無効です"
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "%(ip)s からの応答 %(resp)s が無効です"
@ -625,6 +687,10 @@ msgstr ""
msgid "Killing long-running rsync: %s"
msgstr "長期実行の再同期を強制終了中: %s"
#, python-format
msgid "Loading JSON from %s failed (%s)"
msgstr "%s からの JSON のロードが失敗しました (%s)"
msgid "Lockup detected.. killing live coros."
msgstr "ロックが検出されました.. ライブ coros を強制終了中"
@ -644,14 +710,26 @@ msgstr "%r %r のエンドポイントクラスターがありません"
msgid "No permission to signal PID %d"
msgstr "PID %d にシグナル通知する許可がありません"
#, python-format
msgid "No policy with index %s"
msgstr "インデックス %s のポリシーはありません"
#, python-format
msgid "No realm key for %r"
msgstr "%r のレルムキーがありません"
#, python-format
msgid "No space left on device for %s (%s)"
msgstr "%s 用のデバイス容量が残っていません (%s)"
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "ノードエラー制限 %(ip)s:%(port)s (%(device)s)"
#, python-format
msgid "Not enough object servers ack'ed (got %d)"
msgstr "肯定応答を返したオブジェクト・サーバーが不十分です (%d 取得)"
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
@ -660,6 +738,10 @@ msgstr ""
"不検出 %(sync_from)r => %(sync_to)r - オブジェクト "
"%(obj_name)r"
#, python-format
msgid "Nothing reconstructed for %s seconds."
msgstr "%s 秒間で何も再構成されませんでした。"
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "%s 秒間で何も複製されませんでした。"
@ -694,19 +776,27 @@ msgstr ""
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
"オブジェクト監査 (%(type)s)。%(start_time)s 以降: ローカル: パス済"
"%(passes)d、検疫済み %(quars)d、エラー %(errors)d、ファイル/秒:"
"%(frate).2f、バイト/秒: %(brate).2f、合計時間: %(total).2f、監査時間:"
"%(audit).2f、率: %(audit_rate).2f"
"オブジェクト監査 (%(type)s)。%(start_time)s 以降: ローカル: 合格した監査 "
"%(passes)d、検疫済み %(quars)d、エラー %(errors)d、ファイル/秒: %(frate).2f、"
"バイト/秒: %(brate).2f、合計時間: %(total).2f、監査時間: %(audit).2f、率: "
"%(audit_rate).2f"
#, python-format
msgid "Object audit stats: %s"
msgstr "オブジェクト監査統計: %s"
#, python-format
msgid "Object reconstruction complete (once). (%.02f minutes)"
msgstr "オブジェクト再構成が完了しました (1 回)。(%.02f 分)"
#, python-format
msgid "Object reconstruction complete. (%.02f minutes)"
msgstr "オブジェクト再構成が完了しました。(%.02f 分)"
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "オブジェクト複製が完了しました (1 回)。(%.02f 分)"
@ -768,6 +858,14 @@ msgstr "X-Container-Sync-To にパスが必要です"
msgid "Problem cleaning up %s"
msgstr "%s のクリーンアップ中に問題が発生しました"
#, python-format
msgid "Problem cleaning up %s (%s)"
msgstr "%s のクリーンアップ中に問題が発生しました (%s)"
#, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr "永続状態ファイル %s の書き込み中に問題が発生しました (%s)"
#, python-format
msgid "Profiling Error: %s"
msgstr "プロファイル作成エラー: %s"
@ -808,6 +906,14 @@ msgstr "%s オブジェクトの削除中"
msgid "Removing partition: %s"
msgstr "パーティションの削除中: %s"
#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "正しくない pid %(pid)d の pid ファイル %(pid_file)s を削除中"
#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "無効な pid の pid ファイル %s を削除中"
#, python-format
msgid "Removing stale pid file %s"
msgstr "失効した pid ファイル %s を削除中"
@ -827,6 +933,9 @@ msgstr ""
"%(acc)s/%(cont)s/%(obj)s に対する %(meth)s に関して 498 を返しています。"
"Ratelimit (最大スリープ) %(e)s"
msgid "Ring change detected. Aborting current reconstruction pass."
msgstr "リング変更が検出されました。現行再構成パスを打ち切ります。"
msgid "Ring change detected. Aborting current replication pass."
msgstr "リング変更が検出されました。現行複製パスを打ち切ります。"
@ -834,6 +943,9 @@ msgstr "リング変更が検出されました。現行複製パスを打ち切
msgid "Running %s once"
msgstr "%s を 1 回実行中"
msgid "Running object reconstructor in script mode."
msgstr "スクリプトモードでオブジェクトリコンストラクターを実行中です。"
msgid "Running object replicator in script mode."
msgstr "スクリプトモードでオブジェクトレプリケーターを実行中です。"
@ -876,6 +988,12 @@ msgstr "マウントされていないため、 %s をスキップします"
msgid "Starting %s"
msgstr "%s を開始しています"
msgid "Starting object reconstruction pass."
msgstr "オブジェクト再構成パスを開始中です。"
msgid "Starting object reconstructor in daemon mode."
msgstr "オブジェクトリコンストラクターをデーモンモードで開始中です。"
msgid "Starting object replication pass."
msgstr "オブジェクト複製パスを開始中です。"
@ -901,10 +1019,22 @@ msgstr ""
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr "memcached %(server)s に対する %(action)s がタイムアウトになりました"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s のタイムアウト例外"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "%(method)s %(path)s を試行中"
#, python-format
msgid "Trying to GET %(full_path)s"
msgstr "GET %(full_path)s を試行中"
#, python-format
msgid "Trying to get %s status of PUT to %s"
msgstr "PUT の状況 %s の取得を試行中 (宛先: %s)"
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "%s への PUT の最終状況の取得を試行中"
@ -918,6 +1048,10 @@ msgstr "GET 時に読み取りを試行中 (再試行中)"
msgid "Trying to send to client"
msgstr "クライアントへの送信を試行中"
#, python-format
msgid "Trying to sync suffixes with %s"
msgstr "%s でサフィックスの同期を試行中"
#, python-format
msgid "Trying to write to %s"
msgstr "%s への書き込みを試行中"
@ -929,10 +1063,22 @@ msgstr "キャッチされていない例外"
msgid "Unable to find %s config section in %s"
msgstr "%s 構成セクションが %s に見つかりません"
#, python-format
msgid "Unable to load internal client from config: %r (%s)"
msgstr "設定から内部クライアントをロードできません: %r (%s)"
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "%s が libc に見つかりません。no-op として終了します。"
#, python-format
msgid "Unable to locate config for %s"
msgstr "%s の設定が見つかりません"
#, python-format
msgid "Unable to locate config number %s for %s"
msgstr "%s の設定番号 %s が見つかりません"
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
@ -957,6 +1103,11 @@ msgstr "予期しない応答: %s"
msgid "Unhandled exception"
msgstr "未処理例外"
#, python-format
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr ""
"GET を試行中に不明な例外が発生しました: %(account)r %(container)r %(object)r"
#, python-format
msgid "Update report failed for %(container)s %(dbfile)s"
msgstr "%(container)s %(dbfile)s に関する更新レポートが失敗しました"
@ -985,6 +1136,10 @@ msgstr "警告: メモリー制限を変更できません。非ルートとし
msgid "Waited %s seconds for %s to die; giving up"
msgstr "%s 秒間、%s の停止を待機しました。中止します"
#, python-format
msgid "Waited %s seconds for %s to die; killing"
msgstr "%s 秒間、%s の停止を待機しました。強制終了します"
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr "警告: memcached クライアントなしで ratelimit を行うことはできません"

View File

@ -10,9 +10,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.6.1.dev32\n"
"Project-Id-Version: swift 2.6.1.dev176\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-30 00:30+0000\n"
"POT-Creation-Date: 2016-03-08 04:09+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -686,18 +686,6 @@ msgstr ""
"목: %(quars)d, 총 오류 수: %(errors)d, 총 파일/초: %(frate).2f, 총 바이트/"
"초: %(brate).2f, 감사 시간: %(audit).2f, 속도: %(audit_rate).2f"
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
"오브젝트 감사(%(type)s). %(start_time)s 이후: 로컬: %(passes)d개 패스, "
"%(quars)d개 격리, %(errors)d개 오류 파일/초: %(frate).2f ,바이트/초: "
"%(brate).2f, 총 시간: %(total).2f, 감사 시간: %(audit).2f, 속도: "
"%(audit_rate).2f"
#, python-format
msgid "Object audit stats: %s"
msgstr "오브젝트 감사 통계: %s"

View File

@ -9,16 +9,17 @@
# Volmar Oliveira Junior <volmar.oliveira.jr@gmail.com>, 2014
# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Carlos Marques <marquesc@br.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.6.1.dev254\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-22 19:48+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"PO-Revision-Date: 2016-03-22 02:07+0000\n"
"Last-Translator: Carlos Marques <marquesc@br.ibm.com>\n"
"Language: pt-BR\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
@ -55,6 +56,16 @@ msgstr "%(ip)s/%(device)s respondeu como desmontado"
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
#, python-format
msgid ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partições de %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) dispositivos reconstruídos em %(time).2fs "
"(%(rate).2f/sec, %(remaining)s restantes)"
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -91,6 +102,10 @@ msgstr "%s não existe"
msgid "%s is not mounted"
msgstr "%s não está montado"
#, python-format
msgid "%s responded as unmounted"
msgstr "%s respondeu como não montado"
#, python-format
msgid "%s running (%s - %s)"
msgstr "%s em execução (%s - %s)"
@ -212,6 +227,14 @@ msgstr "Não é possível acessar o arquivo %s."
msgid "Can not load profile data from %s."
msgstr "Não é possível carregar dados do perfil a partir de %s."
#, python-format
msgid "Cannot read %s (%s)"
msgstr "Não é possível ler %s (%s)"
#, python-format
msgid "Cannot write %s (%s)"
msgstr "Não é possível gravar %s (%s)"
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "O cliente não leu no proxy dentro de %ss"
@ -222,6 +245,9 @@ msgstr "Cliente desconectado durante leitura"
msgid "Client disconnected without sending enough data"
msgstr "Cliente desconectado sem ter enviado dados suficientes"
msgid "Client disconnected without sending last chunk"
msgstr "Cliente desconectado sem ter enviado o último chunk"
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata %(meta)s"
@ -229,6 +255,13 @@ msgstr ""
"Caminho do cliente %(client)s não corresponde ao caminho armazenado nos "
"metadados do objeto %(meta)s"
msgid ""
"Configuration option internal_client_conf_path not defined. Using default "
"configuration, See internal-client.conf-sample for options"
msgstr ""
"Opção de configuração internal_client_conf_path não definida. Usando a "
"configuração padrão. Consulte internal-client.conf-sample para obter opções"
msgid "Connection refused"
msgstr "Conexão recusada"
@ -288,6 +321,10 @@ msgstr "Erro ao fazer download de dados: %s"
msgid "Devices pass completed: %.02fs"
msgstr "Dispositivos finalizados: %.02fs"
#, python-format
msgid "Directory %r does not map to a valid policy (%s)"
msgstr "O diretório %r não está mapeado para uma política válida (%s)"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERRO %(db_file)s: %(validate_sync_to_err)s"
@ -363,6 +400,10 @@ msgstr "ERROR DiskFile %(data_file)s falha ao fechar: %(exc)s : %(stack)s"
msgid "ERROR Exception causing client disconnect"
msgstr "ERRO Exceção causando clientes a desconectar"
#, python-format
msgid "ERROR Exception transferring data to object servers %s"
msgstr "ERRO Exceção ao transferir dados para os servidores de objeto %s"
msgid "ERROR Failed to get my own IPs?"
msgstr "ERRO Falha ao pegar meu próprio IPs?"
@ -536,6 +577,11 @@ msgstr "Erro ao sincronizar partição"
msgid "Error syncing with node: %s"
msgstr "Erro ao sincronizar com o nó: %s"
#, python-format
msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
msgstr ""
"Erro ao tentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s"
msgid "Error: An error occurred"
msgstr "Erro: Ocorreu um erro"
@ -555,6 +601,9 @@ msgstr "Exceção no loop do removedor da conta de nível superior"
msgid "Exception in top-level replication loop"
msgstr "Exceção no loop de replicação de nível superior"
msgid "Exception in top-levelreconstruction loop"
msgstr "Exceção no loop de reconstrução de nível superior"
#, python-format
msgid "Exception while deleting container %s %s"
msgstr "Exceção ao excluir contêiner %s %s"
@ -592,6 +641,13 @@ msgstr "Cadeia CNAME a seguir para %(given_domain)s para%(found_domain)s"
msgid "Found configs:"
msgstr "Localizados arquivos de configuração:"
msgid ""
"Handoffs first mode still has handoffs remaining. Aborting current "
"replication pass."
msgstr ""
"O primeiro modo de handoffs ainda possui handoffs. Interrompendo a aprovação "
"da replicação atual."
msgid "Host unreachable"
msgstr "Destino inalcançável"
@ -611,6 +667,10 @@ msgstr "Host inválido %r em X-Container-Sync-To"
msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Entrada pendente inválida %(file)s: %(entry)s"
#, python-format
msgid "Invalid response %(resp)s from %(full_path)s"
msgstr "Resposta inválida %(resp)s a partir de %(full_path)s"
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "Resposta inválida %(resp)s a partir de %(ip)s"
@ -627,6 +687,10 @@ msgstr ""
msgid "Killing long-running rsync: %s"
msgstr "Eliminando a ressincronização de longa execução: %s"
#, python-format
msgid "Loading JSON from %s failed (%s)"
msgstr "Falha ao carregar JSON a partir do %s (%s)"
msgid "Lockup detected.. killing live coros."
msgstr "Bloqueio detectado... eliminando núcleos em tempo real."
@ -646,10 +710,18 @@ msgstr "Nenhum terminal de cluster para %r %r"
msgid "No permission to signal PID %d"
msgstr "Nenhuma permissão para PID do sinal %d"
#, python-format
msgid "No policy with index %s"
msgstr "Nenhuma política com índice %s"
#, python-format
msgid "No realm key for %r"
msgstr "Nenhuma chave do domínio para %r"
#, python-format
msgid "No space left on device for %s (%s)"
msgstr "Nenhum espaço deixado no dispositivo para %s (%s)"
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "Erro de nó limitado %(ip)s:%(port)s (%(device)s)"
@ -667,6 +739,10 @@ msgstr ""
"Não localizado %(sync_from)r => %(sync_to)r objeto "
"%(obj_name)r"
#, python-format
msgid "Nothing reconstructed for %s seconds."
msgstr "Nada foi reconstruído durante %s segundos."
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "Nada foi replicado para %s segundos."
@ -702,12 +778,12 @@ msgstr ""
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
"Auditoria de objeto (%(type)s). Desde %(start_time)s: Localmente: %(passes)d "
"aprovado, %(quars)d em quarentena, %(errors)d arquivos de erros/seg: "
"aprovado, %(quars)d em quarentena, %(errors)d erros, arquivos/s: "
"%(frate).2f, bytes/seg: %(brate).2f, Tempo total: %(total).2f, Tempo de "
"auditoria: %(audit).2f, Taxa: %(audit_rate).2f"
@ -715,6 +791,14 @@ msgstr ""
msgid "Object audit stats: %s"
msgstr "Estatísticas de auditoria do objeto: %s"
#, python-format
msgid "Object reconstruction complete (once). (%.02f minutes)"
msgstr "Reconstrução do objeto concluída (única). (%.02f minutos)"
#, python-format
msgid "Object reconstruction complete. (%.02f minutes)"
msgstr "Reconstrução do objeto concluída. (%.02f minutos)"
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "Replicação completa do objeto (única). (%.02f minutos)"
@ -775,6 +859,14 @@ msgstr "Caminho necessário em X-Container-Sync-To"
msgid "Problem cleaning up %s"
msgstr "Problema ao limpar %s"
#, python-format
msgid "Problem cleaning up %s (%s)"
msgstr "Problema ao limpar %s (%s)"
#, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr "Problema ao gravar arquivo de estado durável %s (%s)"
#, python-format
msgid "Profiling Error: %s"
msgstr "Erro da Criação de Perfil: %s"
@ -817,6 +909,14 @@ msgstr "Removendo %s objetos"
msgid "Removing partition: %s"
msgstr "Removendo partição: %s"
#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "Removendo arquivo pid %(pid_file)s com pid errado %(pid)d"
#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "Removendo o arquivo pid %s com pid inválido"
#, python-format
msgid "Removing stale pid file %s"
msgstr "Removendo o arquivo pid %s antigo"
@ -836,6 +936,10 @@ msgstr ""
"Retornando 498 para %(meth)s para %(acc)s/%(cont)s/%(obj)s. Limite de taxa "
"(Suspensão Máxima) %(e)s"
msgid "Ring change detected. Aborting current reconstruction pass."
msgstr ""
"Mudança no anel detectada. Interrompendo a aprovação da reconstrução atual."
msgid "Ring change detected. Aborting current replication pass."
msgstr ""
"Alteração do anel detectada. Interrompendo a aprovação da replicação atual."
@ -844,6 +948,9 @@ msgstr ""
msgid "Running %s once"
msgstr "Executando %s uma vez,"
msgid "Running object reconstructor in script mode."
msgstr "Executando o reconstrutor do objeto no modo de script."
msgid "Running object replicator in script mode."
msgstr "Executando replicador do objeto no modo de script."
@ -887,6 +994,12 @@ msgstr "Pulando %s porque não está montado"
msgid "Starting %s"
msgstr "Iniciando %s"
msgid "Starting object reconstruction pass."
msgstr "Iniciando a aprovação da reconstrução de objeto."
msgid "Starting object reconstructor in daemon mode."
msgstr "Iniciando o reconstrutor do objeto no modo daemon."
msgid "Starting object replication pass."
msgstr "Iniciando a aprovação da replicação de objeto."
@ -912,10 +1025,22 @@ msgstr ""
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr "Tempo limite %(action)s para memcached: %(server)s"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Exceção de tempo limite com %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Tentando %(method)s %(path)s"
#, python-format
msgid "Trying to GET %(full_path)s"
msgstr "Tentando GET %(full_path)s"
#, python-format
msgid "Trying to get %s status of PUT to %s"
msgstr "Tentando obter o status %s do PUT para o %s"
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "Tentando obter o status final do PUT para o %s"
@ -929,6 +1054,10 @@ msgstr "Tentando ler durante GET (tentando novamente)"
msgid "Trying to send to client"
msgstr "Tentando enviar para o cliente"
#, python-format
msgid "Trying to sync suffixes with %s"
msgstr "Tentando sincronizar sufixos com %s"
#, python-format
msgid "Trying to write to %s"
msgstr "Tentando escrever para %s"
@ -940,6 +1069,11 @@ msgstr "EXCEÇÃO NÃO CAPTURADA"
msgid "Unable to find %s config section in %s"
msgstr "Não é possível localizar %s da seção de configuração em %s"
#, python-format
msgid "Unable to load internal client from config: %r (%s)"
msgstr ""
"Não é possível carregar cliente interno a partir da configuração: %r (%s)"
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "Não é possível localizar %s em libc. Saindo como um não operacional."
@ -948,6 +1082,10 @@ msgstr "Não é possível localizar %s em libc. Saindo como um não operacional.
msgid "Unable to locate config for %s"
msgstr "Não é possível localizar configuração para %s"
#, python-format
msgid "Unable to locate config number %s for %s"
msgstr "Não é possível localizar o número de configuração %s para %s"
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
@ -973,6 +1111,10 @@ msgstr "Resposta inesperada: %s"
msgid "Unhandled exception"
msgstr "Exceção não-tratada"
#, python-format
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr "Exceção desconhecida ao tentar GET: %(account)r %(container)r %(object)r"
#, python-format
msgid "Update report failed for %(container)s %(dbfile)s"
msgstr "Atualize o relatório com falha para %(container)s %(dbfile)s"
@ -1006,6 +1148,10 @@ msgstr ""
msgid "Waited %s seconds for %s to die; giving up"
msgstr "Esperou %s segundos para %s eliminar; desistindo"
#, python-format
msgid "Waited %s seconds for %s to die; killing"
msgstr "Esperou %s segundos para %s eliminar; eliminando"
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr "Aviso: Não é possível um limite de taxa sem um cliente memcached"

View File

@ -6,16 +6,17 @@
# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Filatov Sergey <filatecs@gmail.com>, 2016. #zanata
# Grigory Mokhin <mokhin@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.6.1.dev244\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-22 03:44+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-01-17 10:49+0000\n"
"Last-Translator: Filatov Sergey <filatecs@gmail.com>\n"
"PO-Revision-Date: 2016-03-21 07:06+0000\n"
"Last-Translator: Grigory Mokhin <mokhin@gmail.com>\n"
"Language: ru\n"
"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
@ -235,6 +236,9 @@ msgstr "Клиент отключен во время чтения"
msgid "Client disconnected without sending enough data"
msgstr "Клиент отключен без отправки данных"
msgid "Client disconnected without sending last chunk"
msgstr "Клиент отключился, не отправив последний фрагмент данных"
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata %(meta)s"
@ -390,6 +394,11 @@ msgstr "Ошибка: ошибка закрытия DiskFile %(data_file)s: %(ex
msgid "ERROR Exception causing client disconnect"
msgstr "Ошибка. Исключительная ситуация при отключении клиента"
#, python-format
msgid "ERROR Exception transferring data to object servers %s"
msgstr ""
"ОШИБКА. Исключительная ситуация при передаче данных на серверы объектов %s"
msgid "ERROR Failed to get my own IPs?"
msgstr "Ошибка: не удалось получить собственные IP-адреса?"
@ -568,6 +577,12 @@ msgstr "Ошибка синхронизации раздела"
msgid "Error syncing with node: %s"
msgstr "Ошибка синхронизации с узлом %s"
#, python-format
msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
msgstr ""
"Ошибка при попытке перекомпоновки стратегии %(path)s: номер#%(policy)d "
"фрагмент#%(frag_index)s"
msgid "Error: An error occurred"
msgstr "Ошибка: произошла ошибка"
@ -629,6 +644,13 @@ msgstr "Следующая цепочка CNAME для %(given_domain)s в %(fou
msgid "Found configs:"
msgstr "Обнаружены конфигурации:"
msgid ""
"Handoffs first mode still has handoffs remaining. Aborting current "
"replication pass."
msgstr ""
"В режиме передачи управления не все операции завершены. Принудительное "
"завершение текущего прохода репликации."
msgid "Host unreachable"
msgstr "Хост недоступен"
@ -703,6 +725,10 @@ msgstr "На устройстве %s (%s) закончилось место"
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "Ограниченная ошибка узла %(ip)s:%(port)s (%(device)s)"
#, python-format
msgid "Not enough object servers ack'ed (got %d)"
msgstr "Недостаточное число подтверждений с серверов объектов (получено %d)"
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
@ -750,7 +776,7 @@ msgstr ""
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
@ -767,6 +793,10 @@ msgstr "Состояние контроля объекта: %s"
msgid "Object reconstruction complete (once). (%.02f minutes)"
msgstr "Реконструкция объекта выполнена (однократно). (%.02f мин.)"
#, python-format
msgid "Object reconstruction complete. (%.02f minutes)"
msgstr "Реконструкция объекта выполнена. (%.02f мин.)"
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "Репликация объекта выполнена (однократно). (%.02f мин.)"
@ -830,9 +860,9 @@ msgstr "Неполадка при очистке %s"
msgid "Problem cleaning up %s (%s)"
msgstr "Возникла проблема при очистке %s (%s)"
#, fuzzy, python-format
#, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr "Возникла проблема при записи файла состояния %s (%s)"
msgstr "Возникла неполадка при записи файла сохраняемого состояния %s (%s)"
#, python-format
msgid "Profiling Error: %s"
@ -878,12 +908,12 @@ msgid "Removing partition: %s"
msgstr "Удаление раздела: %s"
#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "Удаление pid файла %s с неверным pid-ом"
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "Удаление файла pid %(pid_file)s с ошибочным pid %(pid)d"
#, python-format
msgid "Removing pid file %s with wrong pid %d"
msgstr "Удаление pid файла %s с неверным pid-ом %d"
msgid "Removing pid file %s with invalid pid"
msgstr "Удаление pid файла %s с неверным pid-ом"
#, python-format
msgid "Removing stale pid file %s"
@ -1007,6 +1037,10 @@ msgstr "Попытка выполнения метода %(method)s %(path)s"
msgid "Trying to GET %(full_path)s"
msgstr "Попытка GET-запроса %(full_path)s"
#, python-format
msgid "Trying to get %s status of PUT to %s"
msgstr "Попытка получения состояния %s операции PUT в %s"
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "Попытка получения конечного состояния PUT в %s"
@ -1020,6 +1054,10 @@ msgstr "Попытка чтения во время операции GET (вып
msgid "Trying to send to client"
msgstr "Попытка отправки клиенту"
#, python-format
msgid "Trying to sync suffixes with %s"
msgstr "Попытка синхронизации суффиксов с %s"
#, python-format
msgid "Trying to write to %s"
msgstr "Попытка записи в %s"
@ -1043,6 +1081,10 @@ msgstr "Не удалось найти %s в libc. Оставлено как no
msgid "Unable to locate config for %s"
msgstr "Не удалось найти конфигурационный файл для %s"
#, python-format
msgid "Unable to locate config number %s for %s"
msgstr "Не удается найти конфигурацию с номером %s для %s"
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""

View File

@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.6.1.dev244\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-18 06:20+0000\n"
"POT-Creation-Date: 2016-03-22 06:16+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@ -63,8 +63,8 @@ msgstr ""
msgid "ERROR Could not get account info %s"
msgstr ""
#: swift/account/reaper.py:139 swift/common/utils.py:2128
#: swift/obj/diskfile.py:296 swift/obj/updater.py:88 swift/obj/updater.py:131
#: swift/account/reaper.py:139 swift/common/utils.py:2342
#: swift/obj/diskfile.py:361 swift/obj/updater.py:88 swift/obj/updater.py:131
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr ""
@ -159,8 +159,8 @@ msgstr ""
msgid "Exception with objects for container %(container)s for account %(account)s"
msgstr ""
#: swift/account/server.py:276 swift/container/server.py:589
#: swift/obj/server.py:964
#: swift/account/server.py:276 swift/container/server.py:607
#: swift/obj/server.py:1038
#, python-format
msgid "ERROR __call__ error with %(method)s %(path)s "
msgstr ""
@ -181,16 +181,16 @@ msgstr ""
msgid "Error in %r with mtime_check_interval: %s"
msgstr ""
#: swift/common/db.py:352
#: swift/common/db.py:353
#, python-format
msgid "Quarantined %s to %s due to %s database"
msgstr ""
#: swift/common/db.py:407
#: swift/common/db.py:408
msgid "Broker error trying to rollback locked connection"
msgstr ""
#: swift/common/db.py:610
#: swift/common/db.py:611
#, python-format
msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr ""
@ -210,7 +210,7 @@ msgstr ""
msgid "Removed %(remove)d dbs"
msgstr ""
#: swift/common/db_replicator.py:215
#: swift/common/db_replicator.py:215 swift/obj/replicator.py:514
#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr ""
@ -340,7 +340,7 @@ msgstr ""
#: swift/common/manager.py:564
#, python-format
msgid "Removing pid file %s with wrong pid %d"
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr ""
#: swift/common/manager.py:571
@ -384,125 +384,125 @@ msgstr ""
msgid "%s does not exist"
msgstr ""
#: swift/common/memcached.py:197
#: swift/common/memcached.py:166
#, python-format
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr ""
#: swift/common/memcached.py:200
#: swift/common/memcached.py:169
#, python-format
msgid "Error %(action)s to memcached: %(server)s"
msgstr ""
#: swift/common/memcached.py:225
#: swift/common/memcached.py:194
#, python-format
msgid "Error limiting server %s"
msgstr ""
#: swift/common/request_helpers.py:107
#: swift/common/request_helpers.py:109
#, python-format
msgid "No policy with index %s"
msgstr ""
#: swift/common/request_helpers.py:454
#: swift/common/request_helpers.py:456
msgid "ERROR: An error occurred while retrieving segments"
msgstr ""
#: swift/common/utils.py:392
#: swift/common/utils.py:397
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr ""
#: swift/common/utils.py:583
#: swift/common/utils.py:591
msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
#: swift/common/utils.py:667
#: swift/common/utils.py:675
#, python-format
msgid "Unable to perform fsync() on directory %s: %s"
msgstr ""
#: swift/common/utils.py:1103
#: swift/common/utils.py:1244
#, python-format
msgid "%s: Connection reset by peer"
msgstr ""
#: swift/common/utils.py:1105 swift/common/utils.py:1108
#: swift/common/utils.py:1246 swift/common/utils.py:1249
#, python-format
msgid "%s: %s"
msgstr ""
#: swift/common/utils.py:1320
#: swift/common/utils.py:1497
msgid "Connection refused"
msgstr ""
#: swift/common/utils.py:1322
#: swift/common/utils.py:1499
msgid "Host unreachable"
msgstr ""
#: swift/common/utils.py:1324
#: swift/common/utils.py:1501
msgid "Connection timeout"
msgstr ""
#: swift/common/utils.py:1602
#: swift/common/utils.py:1779
msgid "UNCAUGHT EXCEPTION"
msgstr ""
#: swift/common/utils.py:1657
#: swift/common/utils.py:1834
msgid "Error: missing config path argument"
msgstr ""
#: swift/common/utils.py:1662
#: swift/common/utils.py:1839
#, python-format
msgid "Error: unable to locate %s"
msgstr ""
#: swift/common/utils.py:1986
#: swift/common/utils.py:2200
#, python-format
msgid "Unable to read config from %s"
msgstr ""
#: swift/common/utils.py:1992
#: swift/common/utils.py:2206
#, python-format
msgid "Unable to find %s config section in %s"
msgstr ""
#: swift/common/utils.py:2357
#: swift/common/utils.py:2591
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr ""
#: swift/common/utils.py:2362
#: swift/common/utils.py:2596
#, python-format
msgid "No realm key for %r"
msgstr ""
#: swift/common/utils.py:2366
#: swift/common/utils.py:2600
#, python-format
msgid "No cluster endpoint for %r %r"
msgstr ""
#: swift/common/utils.py:2375
#: swift/common/utils.py:2609
#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
msgstr ""
#: swift/common/utils.py:2379
#: swift/common/utils.py:2613
msgid "Path required in X-Container-Sync-To"
msgstr ""
#: swift/common/utils.py:2382
#: swift/common/utils.py:2616
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr ""
#: swift/common/utils.py:2387
#: swift/common/utils.py:2621
#, python-format
msgid "Invalid host %r in X-Container-Sync-To"
msgstr ""
#: swift/common/utils.py:2579
#: swift/common/utils.py:2815
msgid "Exception dumping recon cache"
msgstr ""
@ -568,15 +568,15 @@ msgstr ""
msgid "Error listing devices"
msgstr ""
#: swift/common/middleware/recon.py:259
#: swift/common/middleware/recon.py:265
msgid "Error reading ringfile"
msgstr ""
#: swift/common/middleware/recon.py:273
#: swift/common/middleware/recon.py:279
msgid "Error reading swift.conf"
msgstr ""
#: swift/common/middleware/xprofile.py:243
#: swift/common/middleware/xprofile.py:226
#, python-format
msgid "Error on render profiling results: %s"
msgstr ""
@ -658,82 +658,82 @@ msgstr ""
msgid "ERROR Could not get container info %s"
msgstr ""
#: swift/container/server.py:181
#: swift/container/server.py:186
#, python-format
msgid ""
"ERROR Account update failed: different numbers of hosts and devices in "
"request: \"%s\" vs \"%s\""
msgstr ""
#: swift/container/server.py:226
#: swift/container/server.py:231
#, python-format
msgid ""
"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
"later): Response %(status)s %(reason)s"
msgstr ""
#: swift/container/server.py:235
#: swift/container/server.py:240
#, python-format
msgid ""
"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
"later)"
msgstr ""
#: swift/container/sync.py:218
#: swift/container/sync.py:225
msgid ""
"Configuration option internal_client_conf_path not defined. Using default"
" configuration, See internal-client.conf-sample for options"
msgstr ""
#: swift/container/sync.py:231
#: swift/container/sync.py:238
#, python-format
msgid "Unable to load internal client from config: %r (%s)"
msgstr ""
#: swift/container/sync.py:265
#: swift/container/sync.py:269
msgid "Begin container sync \"once\" mode"
msgstr ""
#: swift/container/sync.py:277
#: swift/container/sync.py:278
#, python-format
msgid "Container sync \"once\" mode completed: %.02fs"
msgstr ""
#: swift/container/sync.py:285
#: swift/container/sync.py:286
#, python-format
msgid ""
"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], "
"%(skip)s skipped, %(fail)s failed"
msgstr ""
#: swift/container/sync.py:338
#: swift/container/sync.py:352
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr ""
#: swift/container/sync.py:394
#: swift/container/sync.py:408
#, python-format
msgid "ERROR Syncing %s"
msgstr ""
#: swift/container/sync.py:477
#: swift/container/sync.py:492
#, python-format
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr ""
#: swift/container/sync.py:511
#: swift/container/sync.py:525
#, python-format
msgid "Unauth %(sync_from)r => %(sync_to)r"
msgstr ""
#: swift/container/sync.py:517
#: swift/container/sync.py:531
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
"%(obj_name)r"
msgstr ""
#: swift/container/sync.py:524 swift/container/sync.py:531
#: swift/container/sync.py:538 swift/container/sync.py:545
#, python-format
msgid "ERROR Syncing %(db_file)s %(row)s"
msgstr ""
@ -744,7 +744,7 @@ msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr ""
#: swift/container/updater.py:92 swift/obj/reconstructor.py:822
#: swift/obj/replicator.py:590 swift/obj/replicator.py:706
#: swift/obj/replicator.py:598 swift/obj/replicator.py:715
#, python-format
msgid "%s is not mounted"
msgstr ""
@ -798,17 +798,17 @@ msgid ""
"later): "
msgstr ""
#: swift/obj/auditor.py:80
#: swift/obj/auditor.py:78
#, python-format
msgid " - parallel, %s"
msgstr ""
#: swift/obj/auditor.py:82
#: swift/obj/auditor.py:80
#, python-format
msgid " - %s"
msgstr ""
#: swift/obj/auditor.py:83
#: swift/obj/auditor.py:81
#, python-format
msgid "Begin object audit \"%s\" mode (%s%s)"
msgstr ""
@ -817,7 +817,7 @@ msgstr ""
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d "
"passed, %(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f ,"
"passed, %(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f,"
" bytes/sec: %(brate).2f, Total time: %(total).2f, Auditing time: "
"%(audit).2f, Rate: %(audit_rate).2f"
msgstr ""
@ -836,73 +836,88 @@ msgstr ""
msgid "Object audit stats: %s"
msgstr ""
#: swift/obj/auditor.py:187
#: swift/obj/auditor.py:190
#, python-format
msgid "ERROR Trying to audit %s"
msgstr ""
#: swift/obj/auditor.py:224
#: swift/obj/auditor.py:227
#, python-format
msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
msgstr ""
#: swift/obj/auditor.py:275
#: swift/obj/auditor.py:279
#, python-format
msgid "ERROR: Unable to run auditing: %s"
msgstr ""
#: swift/obj/auditor.py:346 swift/obj/auditor.py:367
#: swift/obj/auditor.py:350 swift/obj/auditor.py:371
#, python-format
msgid "ERROR auditing: %s"
msgstr ""
#: swift/obj/diskfile.py:306 swift/obj/updater.py:162
#: swift/obj/diskfile.py:371 swift/obj/updater.py:162
#, python-format
msgid "Directory %r does not map to a valid policy (%s)"
msgstr ""
#: swift/obj/diskfile.py:700
#: swift/obj/diskfile.py:413
#, python-format
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgid "Cannot read %s (%s)"
msgstr ""
#: swift/obj/diskfile.py:783
msgid "Error hashing suffix"
#: swift/obj/diskfile.py:418
#, python-format
msgid "Loading JSON from %s failed (%s)"
msgstr ""
#: swift/obj/diskfile.py:433
#, python-format
msgid "Cannot write %s (%s)"
msgstr ""
#: swift/obj/diskfile.py:904
#, python-format
msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr ""
#: swift/obj/diskfile.py:1134
#: swift/obj/diskfile.py:1024
msgid "Error hashing suffix"
msgstr ""
#: swift/obj/diskfile.py:1188
#, python-format
msgid "Problem cleaning up %s"
msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
msgstr ""
#: swift/obj/diskfile.py:1441
#, python-format
msgid "Problem cleaning up %s"
msgstr ""
#: swift/obj/diskfile.py:1786
#, python-format
msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
msgstr ""
#: swift/obj/diskfile.py:1751
#: swift/obj/diskfile.py:2114
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata "
"%(meta)s"
msgstr ""
#: swift/obj/diskfile.py:2140
#: swift/obj/diskfile.py:2522
#, python-format
msgid "No space left on device for %s (%s)"
msgstr ""
#: swift/obj/diskfile.py:2149
#: swift/obj/diskfile.py:2531
#, python-format
msgid "Problem cleaning up %s (%s)"
msgstr ""
#: swift/obj/diskfile.py:2152
#: swift/obj/diskfile.py:2534
#, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr ""
@ -959,14 +974,14 @@ msgid ""
"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:511
#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:519
#, python-format
msgid ""
"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
"synced"
msgstr ""
#: swift/obj/reconstructor.py:383 swift/obj/replicator.py:518
#: swift/obj/reconstructor.py:383 swift/obj/replicator.py:526
#, python-format
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
@ -976,7 +991,7 @@ msgstr ""
msgid "Nothing reconstructed for %s seconds."
msgstr ""
#: swift/obj/reconstructor.py:420 swift/obj/replicator.py:555
#: swift/obj/reconstructor.py:420 swift/obj/replicator.py:563
msgid "Lockup detected.. killing live coros."
msgstr ""
@ -990,7 +1005,7 @@ msgstr ""
msgid "%s responded as unmounted"
msgstr ""
#: swift/obj/reconstructor.py:893 swift/obj/replicator.py:364
#: swift/obj/reconstructor.py:893 swift/obj/replicator.py:369
#, python-format
msgid "Removing partition: %s"
msgstr ""
@ -1040,91 +1055,97 @@ msgstr ""
msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
msgstr ""
#: swift/obj/replicator.py:334
#: swift/obj/replicator.py:335
#, python-format
msgid "Removing %s objects"
msgstr ""
#: swift/obj/replicator.py:353
#: swift/obj/replicator.py:356
msgid "Error syncing handoff partition"
msgstr ""
#: swift/obj/replicator.py:429
#: swift/obj/replicator.py:434
#, python-format
msgid "%(ip)s/%(device)s responded as unmounted"
msgstr ""
#: swift/obj/replicator.py:436
#: swift/obj/replicator.py:441
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr ""
#: swift/obj/replicator.py:480
#: swift/obj/replicator.py:485
#, python-format
msgid "Error syncing with node: %s"
msgstr ""
#: swift/obj/replicator.py:485
#: swift/obj/replicator.py:490
msgid "Error syncing partition"
msgstr ""
#: swift/obj/replicator.py:500
#: swift/obj/replicator.py:505
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
#: swift/obj/replicator.py:526
#: swift/obj/replicator.py:534
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr ""
#: swift/obj/replicator.py:709
#: swift/obj/replicator.py:721
msgid ""
"Handoffs first mode still has handoffs remaining. Aborting current "
"replication pass."
msgstr ""
#: swift/obj/replicator.py:727
msgid "Ring change detected. Aborting current replication pass."
msgstr ""
#: swift/obj/replicator.py:737
#: swift/obj/replicator.py:755
msgid "Exception in top-level replication loop"
msgstr ""
#: swift/obj/replicator.py:747
#: swift/obj/replicator.py:765
msgid "Running object replicator in script mode."
msgstr ""
#: swift/obj/replicator.py:765
#: swift/obj/replicator.py:783
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr ""
#: swift/obj/replicator.py:776
#: swift/obj/replicator.py:794
msgid "Starting object replicator in daemon mode."
msgstr ""
#: swift/obj/replicator.py:780
#: swift/obj/replicator.py:798
msgid "Starting object replication pass."
msgstr ""
#: swift/obj/replicator.py:785
#: swift/obj/replicator.py:803
#, python-format
msgid "Object replication complete. (%.02f minutes)"
msgstr ""
#: swift/obj/server.py:240
#: swift/obj/server.py:241
#, python-format
msgid ""
"ERROR Container update failed (saving for async update later): %(status)d"
" response from %(ip)s:%(port)s/%(dev)s"
msgstr ""
#: swift/obj/server.py:247
#: swift/obj/server.py:248
#, python-format
msgid ""
"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for "
"async update later)"
msgstr ""
#: swift/obj/server.py:282
#: swift/obj/server.py:284
#, python-format
msgid ""
"ERROR Container update failed: different numbers of hosts and devices in "
@ -1178,162 +1199,162 @@ msgstr ""
msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
msgstr ""
#: swift/proxy/server.py:416
#: swift/proxy/server.py:418
msgid "ERROR Unhandled exception in request"
msgstr ""
#: swift/proxy/server.py:471
#: swift/proxy/server.py:473
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr ""
#: swift/proxy/server.py:488 swift/proxy/server.py:506
#: swift/proxy/server.py:490 swift/proxy/server.py:508
#, python-format
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr ""
#: swift/proxy/server.py:529
#: swift/proxy/server.py:531
#, python-format
msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
msgstr ""
#: swift/proxy/controllers/account.py:65
#: swift/proxy/controllers/account.py:67
msgid "Account"
msgstr ""
#: swift/proxy/controllers/base.py:809 swift/proxy/controllers/base.py:848
#: swift/proxy/controllers/base.py:940 swift/proxy/controllers/obj.py:338
#: swift/proxy/controllers/obj.py:875 swift/proxy/controllers/obj.py:924
#: swift/proxy/controllers/obj.py:938 swift/proxy/controllers/obj.py:1759
#: swift/proxy/controllers/obj.py:1996 swift/proxy/controllers/obj.py:2121
#: swift/proxy/controllers/obj.py:2353
#: swift/proxy/controllers/base.py:813 swift/proxy/controllers/base.py:852
#: swift/proxy/controllers/base.py:944 swift/proxy/controllers/obj.py:340
#: swift/proxy/controllers/obj.py:885 swift/proxy/controllers/obj.py:934
#: swift/proxy/controllers/obj.py:948 swift/proxy/controllers/obj.py:1769
#: swift/proxy/controllers/obj.py:2007 swift/proxy/controllers/obj.py:2145
#: swift/proxy/controllers/obj.py:2379
msgid "Object"
msgstr ""
#: swift/proxy/controllers/base.py:810 swift/proxy/controllers/base.py:849
#: swift/proxy/controllers/base.py:814 swift/proxy/controllers/base.py:853
msgid "Trying to read during GET (retrying)"
msgstr ""
#: swift/proxy/controllers/base.py:941
#: swift/proxy/controllers/base.py:945
msgid "Trying to read during GET"
msgstr ""
#: swift/proxy/controllers/base.py:945
#: swift/proxy/controllers/base.py:949
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr ""
#: swift/proxy/controllers/base.py:950
#: swift/proxy/controllers/base.py:954
msgid "Client disconnected on read"
msgstr ""
#: swift/proxy/controllers/base.py:952
#: swift/proxy/controllers/base.py:956
msgid "Trying to send to client"
msgstr ""
#: swift/proxy/controllers/base.py:1003 swift/proxy/controllers/base.py:1415
#: swift/proxy/controllers/base.py:998 swift/proxy/controllers/base.py:1437
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr ""
#: swift/proxy/controllers/base.py:1042 swift/proxy/controllers/base.py:1403
#: swift/proxy/controllers/obj.py:361 swift/proxy/controllers/obj.py:915
#: swift/proxy/controllers/obj.py:2113 swift/proxy/controllers/obj.py:2398
#: swift/proxy/controllers/base.py:1037 swift/proxy/controllers/base.py:1425
#: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:925
#: swift/proxy/controllers/obj.py:2137 swift/proxy/controllers/obj.py:2424
msgid "ERROR Insufficient Storage"
msgstr ""
#: swift/proxy/controllers/base.py:1045
#: swift/proxy/controllers/base.py:1040
#, python-format
msgid "ERROR %(status)d %(body)s From %(type)s Server"
msgstr ""
#: swift/proxy/controllers/base.py:1406
#: swift/proxy/controllers/base.py:1428
#, python-format
msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server"
msgstr ""
#: swift/proxy/controllers/base.py:1536
#: swift/proxy/controllers/base.py:1558
#, python-format
msgid "%(type)s returning 503 for %(statuses)s"
msgstr ""
#: swift/proxy/controllers/container.py:98
#: swift/proxy/controllers/container.py:100
msgid "Container"
msgstr ""
#: swift/proxy/controllers/obj.py:339
#: swift/proxy/controllers/obj.py:341
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr ""
#: swift/proxy/controllers/obj.py:365 swift/proxy/controllers/obj.py:2403
#: swift/proxy/controllers/obj.py:368 swift/proxy/controllers/obj.py:2429
#, python-format
msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
msgstr ""
#: swift/proxy/controllers/obj.py:571
#: swift/proxy/controllers/obj.py:579
#, python-format
msgid "Object PUT returning 412, %(statuses)r"
msgstr ""
#: swift/proxy/controllers/obj.py:584
#: swift/proxy/controllers/obj.py:592
#, python-format
msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
msgstr ""
#: swift/proxy/controllers/obj.py:919 swift/proxy/controllers/obj.py:2116
#: swift/proxy/controllers/obj.py:929 swift/proxy/controllers/obj.py:2140
#, python-format
msgid "ERROR %(status)d Expect: 100-continue From Object Server"
msgstr ""
#: swift/proxy/controllers/obj.py:925 swift/proxy/controllers/obj.py:2122
#: swift/proxy/controllers/obj.py:935 swift/proxy/controllers/obj.py:2146
#, python-format
msgid "Expect: 100-continue on %s"
msgstr ""
#: swift/proxy/controllers/obj.py:939 swift/proxy/controllers/obj.py:1760
#: swift/proxy/controllers/obj.py:949 swift/proxy/controllers/obj.py:1770
#, python-format
msgid "Trying to write to %s"
msgstr ""
#: swift/proxy/controllers/obj.py:990 swift/proxy/controllers/obj.py:2287
#: swift/proxy/controllers/obj.py:1000 swift/proxy/controllers/obj.py:2311
#, python-format
msgid "ERROR Client read timeout (%ss)"
msgstr ""
#: swift/proxy/controllers/obj.py:998 swift/proxy/controllers/obj.py:2293
#: swift/proxy/controllers/obj.py:1008 swift/proxy/controllers/obj.py:2317
msgid "Client disconnected without sending last chunk"
msgstr ""
#: swift/proxy/controllers/obj.py:1003 swift/proxy/controllers/obj.py:2300
#: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2324
msgid "ERROR Exception causing client disconnect"
msgstr ""
#: swift/proxy/controllers/obj.py:1007 swift/proxy/controllers/obj.py:2304
#: swift/proxy/controllers/obj.py:1017 swift/proxy/controllers/obj.py:2328
#, python-format
msgid "ERROR Exception transferring data to object servers %s"
msgstr ""
#: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2218
#: swift/proxy/controllers/obj.py:1023 swift/proxy/controllers/obj.py:2242
msgid "Client disconnected without sending enough data"
msgstr ""
#: swift/proxy/controllers/obj.py:1059
#: swift/proxy/controllers/obj.py:1069
#, python-format
msgid "Object servers returned %s mismatched etags"
msgstr ""
#: swift/proxy/controllers/obj.py:1063 swift/proxy/controllers/obj.py:2264
#: swift/proxy/controllers/obj.py:2487
#: swift/proxy/controllers/obj.py:1073 swift/proxy/controllers/obj.py:2288
#: swift/proxy/controllers/obj.py:2513
msgid "Object PUT"
msgstr ""
#: swift/proxy/controllers/obj.py:2257
#: swift/proxy/controllers/obj.py:2281
#, python-format
msgid "Not enough object servers ack'ed (got %d)"
msgstr ""
#: swift/proxy/controllers/obj.py:2354
#: swift/proxy/controllers/obj.py:2380
#, python-format
msgid "Trying to get %s status of PUT to %s"
msgstr ""

View File

@ -7,9 +7,9 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.6.1.dev235\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-18 23:11+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -738,18 +738,6 @@ msgstr ""
"%(frate).2f, Toplam bayt/sn: %(brate).2f, Denetleme zamanı: %(audit).2f, "
"Oran: %(audit_rate).2f"
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
"Nesne denedimi (%(type)s). %(start_time)s den beri: Yerel olarak: %(passes)d "
"geçti, %(quars)d karantinaya alındı, %(errors)d hata dosya/sn: %(frate).2f , "
"bayt/sn: %(brate).2f, Toplam süre: %(total).2f, Denetleme süresi: "
"%(audit).2f, Oran: %(audit_rate).2f"
#, python-format
msgid "Object audit stats: %s"
msgstr "Nesne denetim istatistikleri: %s"
@ -872,10 +860,6 @@ msgstr "Bölüm kaldırılıyor: %s"
msgid "Removing pid file %s with invalid pid"
msgstr "Geçersiz pid'e sahip pid dosyası %s siliniyor"
#, python-format
msgid "Removing pid file %s with wrong pid %d"
msgstr "%s pid dosyası %d yanlış pid'ine sahip siliniyor"
#, python-format
msgid "Removing stale pid file %s"
msgstr "Askıdaki pid dosyası siliniyor %s"

View File

@ -6,16 +6,17 @@
# Pearl Yajing Tan(Seagate Tech) <pearl.y.tan@seagate.com>, 2014
# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Linda <duleish@cn.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.6.1.dev254\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-22 19:48+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"PO-Revision-Date: 2016-03-22 10:30+0000\n"
"Last-Translator: Linda <duleish@cn.ibm.com>\n"
"Language: zh-CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
@ -50,6 +51,16 @@ msgstr "%(ip)s/%(device)s的回应为未挂载"
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
#, python-format
msgid ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
"%(device)d/%(dtotal)d (%(dpercentage).2f%%) 设备的 %(reconstructed)d/"
"%(total)d (%(percentage).2f%%) 分区已于 %(time).2fs 重构(%(rate).2f/秒,剩"
"余 %(remaining)s"
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -86,6 +97,10 @@ msgstr "%s不存在"
msgid "%s is not mounted"
msgstr "%s未挂载"
#, python-format
msgid "%s responded as unmounted"
msgstr "%s 响应为未安装"
#, python-format
msgid "%s running (%s - %s)"
msgstr "%s运行(%s - %s)"
@ -206,6 +221,14 @@ msgstr "无法访问文件%s"
msgid "Can not load profile data from %s."
msgstr "无法从%s下载分析数据"
#, python-format
msgid "Cannot read %s (%s)"
msgstr "无法读取 %s (%s)"
#, python-format
msgid "Cannot write %s (%s)"
msgstr "无法写入 %s (%s)"
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "客户尚未从代理处读取%ss"
@ -216,11 +239,21 @@ msgstr "客户读取时中断"
msgid "Client disconnected without sending enough data"
msgstr "客户中断 尚未发送足够"
msgid "Client disconnected without sending last chunk"
msgstr "客户机已断开连接而未发送最后一个数据块"
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata %(meta)s"
msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符"
msgid ""
"Configuration option internal_client_conf_path not defined. Using default "
"configuration, See internal-client.conf-sample for options"
msgstr ""
"未定义配置选项 internal_client_conf_path。正在使用缺省配置。请参阅 internal-"
"client.conf-sample 以了解各个选项"
msgid "Connection refused"
msgstr "连接被拒绝"
@ -278,6 +311,10 @@ msgstr "数据下载错误:%s"
msgid "Devices pass completed: %.02fs"
msgstr "设备通过完成: %.02fs"
#, python-format
msgid "Directory %r does not map to a valid policy (%s)"
msgstr "目录 %r 未映射至有效策略 (%s)"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "错误 %(db_file)s: %(validate_sync_to_err)s"
@ -349,6 +386,10 @@ msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s"
msgid "ERROR Exception causing client disconnect"
msgstr "错误 异常导致客户端中断连接"
#, python-format
msgid "ERROR Exception transferring data to object servers %s"
msgstr "错误:向对象服务器 %s 传输数据时发生异常"
msgid "ERROR Failed to get my own IPs?"
msgstr "错误 无法获得我方IPs?"
@ -516,6 +557,10 @@ msgstr "执行同步分区时发生错误"
msgid "Error syncing with node: %s"
msgstr "执行同步时节点%s发生错误"
#, python-format
msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
msgstr "尝试重建 %(path)s 策略时出错:#%(policy)d frag#%(frag_index)s"
msgid "Error: An error occurred"
msgstr "错误:一个错误发生了"
@ -535,6 +580,9 @@ msgstr "异常出现在top-level账号reaper环"
msgid "Exception in top-level replication loop"
msgstr "top-level复制圈出现异常"
msgid "Exception in top-levelreconstruction loop"
msgstr " top-levelreconstruction 环中发生异常"
#, python-format
msgid "Exception while deleting container %s %s"
msgstr "执行删除容器时出现异常 %s %s"
@ -571,6 +619,11 @@ msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s"
msgid "Found configs:"
msgstr "找到配置"
msgid ""
"Handoffs first mode still has handoffs remaining. Aborting current "
"replication pass."
msgstr "Handoffs 优先方式仍有 handoffs。正在中止当前复制过程。"
msgid "Host unreachable"
msgstr "无法连接到主机"
@ -590,6 +643,10 @@ msgstr "X-Container-Sync-To中无效主机%r"
msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "不可用的等待输入%(file)s: %(entry)s"
#, python-format
msgid "Invalid response %(resp)s from %(full_path)s"
msgstr "从 %(full_path)s 返回了无效响应 %(resp)s"
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "无效的回应%(resp)s来自%(ip)s"
@ -605,6 +662,10 @@ msgstr ""
msgid "Killing long-running rsync: %s"
msgstr "终止long-running同步: %s"
#, python-format
msgid "Loading JSON from %s failed (%s)"
msgstr "从 %s 读取 JSON 失败 (%s)"
msgid "Lockup detected.. killing live coros."
msgstr "检测到lockup。终止正在执行的coros"
@ -624,20 +685,36 @@ msgstr "%r %r的集群节点不存在"
msgid "No permission to signal PID %d"
msgstr "无权限发送信号PID%d"
#, python-format
msgid "No policy with index %s"
msgstr "没有具备索引 %s 的策略"
#, python-format
msgid "No realm key for %r"
msgstr "%r权限key不存在"
#, python-format
msgid "No space left on device for %s (%s)"
msgstr "设备上没有可容纳 %s (%s) 的空间"
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)"
#, python-format
msgid "Not enough object servers ack'ed (got %d)"
msgstr "没有足够的对象服务器应答(收到 %d"
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
"%(obj_name)r"
msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r"
#, python-format
msgid "Nothing reconstructed for %s seconds."
msgstr "过去 %s 秒未重构任何对象。"
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "%s秒无复制"
@ -672,18 +749,27 @@ msgstr ""
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
"对象审计 (%(type)s). 自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d "
"隔离, %(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: "
"%(total).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
"对象审计 (%(type)s). 自 %(start_time)s 开始: 本地:%(passes)d 通"
"过,%(quars)d 隔离,%(errors)d 错误,文件/秒:%(frate).2f,字节/秒:"
"%(brate).2f,总时间:%(total).2f,审计时间:%(audit).2f,速率:"
"%(audit_rate).2f"
#, python-format
msgid "Object audit stats: %s"
msgstr "对象审计统计:%s"
#, python-format
msgid "Object reconstruction complete (once). (%.02f minutes)"
msgstr "对象重构完成(一次)。(%.02f 分钟)"
#, python-format
msgid "Object reconstruction complete. (%.02f minutes)"
msgstr "对象重构完成。(%.02f 分钟)"
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "对象复制完成(一次)。(%.02f minutes)"
@ -740,6 +826,14 @@ msgstr "在X-Container-Sync-To中路径是必须的"
msgid "Problem cleaning up %s"
msgstr "问题清除%s"
#, python-format
msgid "Problem cleaning up %s (%s)"
msgstr "清除 %s (%s) 时发生了问题"
#, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr "编写可持续状态文件 %s (%s) 时发生了问题"
#, python-format
msgid "Profiling Error: %s"
msgstr "分析代码时出现错误:%s"
@ -777,6 +871,14 @@ msgstr "正在移除 %s 个对象"
msgid "Removing partition: %s"
msgstr "移除分区:%s"
#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "正在移除具有错误 pid %(pid)d 的 pid 文件 %(pid_file)s"
#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "正在移除带有无效 pid 的 pid 文件 %s"
#, python-format
msgid "Removing stale pid file %s"
msgstr "移除原有pid文件%s"
@ -796,6 +898,9 @@ msgstr ""
"返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s流量控制(Max \"\n"
"\"Sleep) %(e)s"
msgid "Ring change detected. Aborting current reconstruction pass."
msgstr "检测到环更改。正在中止当前重构过程。"
msgid "Ring change detected. Aborting current replication pass."
msgstr "Ring改变被检测到。退出现有的复制通过"
@ -803,6 +908,9 @@ msgstr "Ring改变被检测到。退出现有的复制通过"
msgid "Running %s once"
msgstr "运行%s一次"
msgid "Running object reconstructor in script mode."
msgstr "正以脚本方式运行对象重构程序。"
msgid "Running object replicator in script mode."
msgstr "在加密模式下执行对象复制"
@ -842,6 +950,12 @@ msgstr "挂载失败 跳过%s"
msgid "Starting %s"
msgstr "启动%s"
msgid "Starting object reconstruction pass."
msgstr "正在启动对象重构过程。"
msgid "Starting object reconstructor in daemon mode."
msgstr "正以守护程序方式启动对象重构程序。"
msgid "Starting object replication pass."
msgstr "开始通过对象复制"
@ -865,10 +979,22 @@ msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)"
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr "%(action)s超时 高性能内存对象缓存: %(server)s"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s 发生超时异常"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "尝试执行%(method)s %(path)s"
#, python-format
msgid "Trying to GET %(full_path)s"
msgstr "正尝试获取 %(full_path)s"
#, python-format
msgid "Trying to get %s status of PUT to %s"
msgstr "正尝试将 PUT 的 %s 状态发送至 %s"
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "尝试执行获取最后的PUT状态%s"
@ -882,6 +1008,10 @@ msgstr "执行GET时尝试读取(重新尝试)"
msgid "Trying to send to client"
msgstr "尝试发送到客户端"
#, python-format
msgid "Trying to sync suffixes with %s"
msgstr "正尝试使后缀与 %s 同步"
#, python-format
msgid "Trying to write to %s"
msgstr "尝试执行书写%s"
@ -893,10 +1023,22 @@ msgstr "未捕获的异常"
msgid "Unable to find %s config section in %s"
msgstr "无法在%s中查找到%s设置部分"
#, python-format
msgid "Unable to load internal client from config: %r (%s)"
msgstr "无法从配置装入内部客户机:%r (%s)"
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "无法查询到%s 保留为no-op"
#, python-format
msgid "Unable to locate config for %s"
msgstr "找不到 %s 的配置"
#, python-format
msgid "Unable to locate config number %s for %s"
msgstr "找不到 %s 的配置编号 %s"
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr "无法查询到fallocate posix_fallocate。保存为no-op"
@ -920,6 +1062,10 @@ msgstr "意外响应:%s"
msgid "Unhandled exception"
msgstr "未处理的异常"
#, python-format
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr "尝试获取 %(account)r %(container)r %(object)r 时发生未知异常"
#, python-format
msgid "Update report failed for %(container)s %(dbfile)s"
msgstr "%(container)s %(dbfile)s更新报告失败"
@ -946,6 +1092,10 @@ msgstr "警告无法修改内存极限是否按非root运行"
msgid "Waited %s seconds for %s to die; giving up"
msgstr "等待%s秒直到%s停止放弃"
#, python-format
msgid "Waited %s seconds for %s to die; killing"
msgstr "已消耗 %s 秒等待 %s 终止;正在终止"
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr "警告:缺失缓存客户端 无法控制流量 "

View File

@ -5,16 +5,17 @@
# Translators:
# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Jennifer <cristxu@tw.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev267\n"
"Project-Id-Version: swift 2.6.1.dev268\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"POT-Creation-Date: 2016-03-24 22:25+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"PO-Revision-Date: 2016-03-24 01:54+0000\n"
"Last-Translator: Jennifer <cristxu@tw.ibm.com>\n"
"Language: zh-TW\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
@ -50,6 +51,16 @@ msgstr "%(ip)s/%(device)s 已回應為未裝載"
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
#, python-format
msgid ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
"在 %(time).2fs 內重新建構了 %(device)d/%(dtotal)d (%(dpercentage).2f%%) 個裝"
"置的 %(reconstructed)d/%(total)d (%(percentage).2f%%) 個分割區(%(rate).2f/"
"秒,剩餘 %(remaining)s"
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -86,6 +97,10 @@ msgstr "%s 不存在"
msgid "%s is not mounted"
msgstr "未裝載 %s"
#, python-format
msgid "%s responded as unmounted"
msgstr "%s 已回應為未裝載"
#, python-format
msgid "%s running (%s - %s)"
msgstr "%s 在執行中 (%s - %s)"
@ -206,6 +221,14 @@ msgstr "無法存取檔案 %s。"
msgid "Can not load profile data from %s."
msgstr "無法從 %s 中載入設定檔資料。"
#, python-format
msgid "Cannot read %s (%s)"
msgstr "無法讀取 %s (%s)"
#, python-format
msgid "Cannot write %s (%s)"
msgstr "無法寫入 %s (%s)"
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "用戶端未在 %s 秒內從 Proxy 中讀取"
@ -216,11 +239,21 @@ msgstr "用戶端在讀取時中斷連線"
msgid "Client disconnected without sending enough data"
msgstr "用戶端已中斷連線,未傳送足夠的資料"
msgid "Client disconnected without sending last chunk"
msgstr "用戶端已中斷連線,未傳送最後一個片段"
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata %(meta)s"
msgstr "用戶端路徑 %(client)s 不符合物件 meta 資料%(meta)s 中儲存的路徑"
msgid ""
"Configuration option internal_client_conf_path not defined. Using default "
"configuration, See internal-client.conf-sample for options"
msgstr ""
"未定義配置選項 internal_client_conf_path。將使用預設配置請參閱 internal-"
"client.conf-sample 以取得選項"
msgid "Connection refused"
msgstr "連線遭拒"
@ -278,6 +311,10 @@ msgstr "資料下載錯誤:%s"
msgid "Devices pass completed: %.02fs"
msgstr "裝置通過已完成:%.02fs"
#, python-format
msgid "Directory %r does not map to a valid policy (%s)"
msgstr "目錄 %r 未對映至有效的原則 (%s)"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "錯誤:%(db_file)s%(validate_sync_to_err)s"
@ -349,6 +386,10 @@ msgstr "錯誤:磁碟檔 %(data_file)s 關閉失敗:%(exc)s%(stack)s"
msgid "ERROR Exception causing client disconnect"
msgstr "錯誤:異常狀況造成用戶端中斷連線"
#, python-format
msgid "ERROR Exception transferring data to object servers %s"
msgstr "錯誤:將資料轉送至物件伺服器 %s 時發生異常狀況"
msgid "ERROR Failed to get my own IPs?"
msgstr "錯誤:無法取得我自己的 IP"
@ -517,6 +558,10 @@ msgstr "同步分割區時發生錯誤"
msgid "Error syncing with node: %s"
msgstr "與節點同步時發生錯誤:%s"
#, python-format
msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
msgstr "嘗試重建 %(path)s 原則 #%(policy)d 分段 #%(frag_index)s 時發生錯誤"
msgid "Error: An error occurred"
msgstr "錯誤:發生錯誤"
@ -536,6 +581,9 @@ msgstr "最上層帳戶 Reaper 迴圈發生異常狀況"
msgid "Exception in top-level replication loop"
msgstr "最上層抄寫迴圈中發生異常狀況"
msgid "Exception in top-levelreconstruction loop"
msgstr "最上層重新建構迴圈中發生異常狀況"
#, python-format
msgid "Exception while deleting container %s %s"
msgstr "刪除儲存器 %s %s 時發生異常狀況"
@ -572,6 +620,11 @@ msgstr "遵循 %(given_domain)s 到 %(found_domain)s 的 CNAME 鏈"
msgid "Found configs:"
msgstr "找到配置:"
msgid ""
"Handoffs first mode still has handoffs remaining. Aborting current "
"replication pass."
msgstr "「遞交作業最先」模式仍有剩餘的遞交作業。正在中斷現行抄寫傳遞。"
msgid "Host unreachable"
msgstr "無法抵達主機"
@ -591,6 +644,10 @@ msgstr "X-Container-Sync-To 中的主機 %r 無效"
msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "無效的擱置項目 %(file)s%(entry)s"
#, python-format
msgid "Invalid response %(resp)s from %(full_path)s"
msgstr "來自 %(full_path)s 的回應 %(resp)s 無效"
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "來自 %(ip)s 的回應 %(resp)s 無效"
@ -606,6 +663,10 @@ msgstr ""
msgid "Killing long-running rsync: %s"
msgstr "正在結束長時間執行的遠端同步:%s"
#, python-format
msgid "Loading JSON from %s failed (%s)"
msgstr "從 %s 載入 JSON 失敗 (%s)"
msgid "Lockup detected.. killing live coros."
msgstr "偵測到鎖定。正在結束即時 coro。"
@ -625,14 +686,26 @@ msgstr "沒有 %r %r 的叢集端點"
msgid "No permission to signal PID %d"
msgstr "沒有信號 PID %d 的許可權"
#, python-format
msgid "No policy with index %s"
msgstr "沒有具有索引 %s 的原則"
#, python-format
msgid "No realm key for %r"
msgstr "沒有 %r 的範圍金鑰"
#, python-format
msgid "No space left on device for %s (%s)"
msgstr "裝置上沒有用於 %s 的剩餘空間 (%s)"
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "節點錯誤限制 %(ip)s:%(port)s (%(device)s)"
#, python-format
msgid "Not enough object servers ack'ed (got %d)"
msgstr "未確認足夠的物件伺服器(已取得 %d"
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
@ -640,6 +713,10 @@ msgid ""
msgstr ""
"找不到 %(sync_from)r => %(sync_to)r - 物件%(obj_name)r"
#, python-format
msgid "Nothing reconstructed for %s seconds."
msgstr "%s 秒未重新建構任何內容。"
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "未抄寫任何項目達 %s 秒。"
@ -672,19 +749,27 @@ msgstr ""
#, python-format
msgid ""
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
"%(audit_rate).2f"
msgstr ""
"物件審核 (%(type)s)。自 %(start_time)s 以來:本端:%(passes)d 個已通"
"過,%(quars)d 個已隔離,%(errors)d 個錯誤檔案/秒:%(frate).2f,位元組數/秒:"
"%(brate).2f,時間總計:%(total).2f,審核時間:%(audit).2f,速率:"
"過,%(quars)d 個已隔離,%(errors)d 個錯誤檔案/秒:%(frate).2f,位元組數/"
"秒:%(brate).2f,時間總計:%(total).2f,審核時間:%(audit).2f,速率:"
"%(audit_rate).2f"
#, python-format
msgid "Object audit stats: %s"
msgstr "物件審核統計資料:%s"
#, python-format
msgid "Object reconstruction complete (once). (%.02f minutes)"
msgstr "物件重新建構完成(一次性)。(%.02f 分鐘)"
#, python-format
msgid "Object reconstruction complete. (%.02f minutes)"
msgstr "物件重新建構完成。(%.02f 分鐘)"
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "物件抄寫完成(一次性)。(%.02f 分鐘)"
@ -743,6 +828,14 @@ msgstr "X-Container-Sync-To 中需要路徑"
msgid "Problem cleaning up %s"
msgstr "清除 %s 時發生問題"
#, python-format
msgid "Problem cleaning up %s (%s)"
msgstr "清除 %s 時發生問題 (%s)"
#, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr "寫入可延續狀態檔 %s 時發生問題 (%s)"
#, python-format
msgid "Profiling Error: %s"
msgstr "側寫錯誤:%s"
@ -780,6 +873,14 @@ msgstr "正在移除 %s 物件"
msgid "Removing partition: %s"
msgstr "正在移除分割區:%s"
#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "正在移除具有錯誤 PID %(pid)d 的 PID 檔 %(pid_file)s"
#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "正在移除具有無效 PID 的 PID 檔 %s"
#, python-format
msgid "Removing stale pid file %s"
msgstr "正在移除過時 PID 檔案 %s"
@ -799,6 +900,9 @@ msgstr ""
"正在將 %(meth)s 的 498 傳回至 %(acc)s/%(cont)s/%(obj)s。Ratelimit休眠上"
"限)%(e)s"
msgid "Ring change detected. Aborting current reconstruction pass."
msgstr "偵測到環變更。正在中斷現行重新建構傳遞。"
msgid "Ring change detected. Aborting current replication pass."
msgstr "偵測到環變更。正在中斷現行抄寫傳遞。"
@ -806,6 +910,9 @@ msgstr "偵測到環變更。正在中斷現行抄寫傳遞。"
msgid "Running %s once"
msgstr "正在執行 %s 一次"
msgid "Running object reconstructor in script mode."
msgstr "正在 Script 模式下執行物件重新建構器。"
msgid "Running object replicator in script mode."
msgstr "正在 Script 模式下執行物件抄寫器"
@ -847,6 +954,12 @@ msgstr "正在跳過 %s原因是它未裝載"
msgid "Starting %s"
msgstr "正在啟動 %s"
msgid "Starting object reconstruction pass."
msgstr "正在啟動物件重新建構傳遞。"
msgid "Starting object reconstructor in daemon mode."
msgstr "正在常駐程式模式下啟動物件重新建構器。"
msgid "Starting object replication pass."
msgstr "正在啟動物件抄寫傳遞。"
@ -871,10 +984,22 @@ msgstr ""
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr "對 memcached %(server)s 執行%(action)s作業時逾時"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s 發生逾時異常狀況"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "正在嘗試 %(method)s %(path)s"
#, python-format
msgid "Trying to GET %(full_path)s"
msgstr "正在嘗試對 %(full_path)s 執行 GET 動作"
#, python-format
msgid "Trying to get %s status of PUT to %s"
msgstr "正在嘗試使 PUT 的 %s 狀態為 %s"
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "正在嘗試使 PUT 的最終狀態為 %s"
@ -888,6 +1013,10 @@ msgstr "正在嘗試於 GET 期間讀取(正在重試)"
msgid "Trying to send to client"
msgstr "正在嘗試傳送至用戶端"
#, python-format
msgid "Trying to sync suffixes with %s"
msgstr "正在嘗試與 %s 同步字尾"
#, python-format
msgid "Trying to write to %s"
msgstr "正在嘗試寫入至 %s"
@ -899,10 +1028,22 @@ msgstr "未捕捉的異常狀況"
msgid "Unable to find %s config section in %s"
msgstr "找不到 %s 配置區段(在 %s 中)"
#, python-format
msgid "Unable to load internal client from config: %r (%s)"
msgstr "無法從配置載入內部用戶端:%r (%s)"
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "在 libc 中找不到 %s。保留為 no-op。"
#, python-format
msgid "Unable to locate config for %s"
msgstr "找不到 %s 的配置"
#, python-format
msgid "Unable to locate config number %s for %s"
msgstr "找不到配置號碼 %s針對 %s"
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr "在 libc 中找不到 fallocate、posix_fallocate。保留為 no-op。"
@ -926,6 +1067,11 @@ msgstr "非預期的回應:%s"
msgid "Unhandled exception"
msgstr "無法處理的異常狀況"
#, python-format
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr ""
"嘗試執行 GET 動作時發生不明異常狀況:%(account)r %(container)r %(object)r"
#, python-format
msgid "Update report failed for %(container)s %(dbfile)s"
msgstr "%(container)s %(dbfile)s 的更新報告失敗"
@ -953,6 +1099,10 @@ msgstr "警告:無法修改記憶體限制。以非 root 使用者身分執行
msgid "Waited %s seconds for %s to die; giving up"
msgstr "已等待 %s 秒以讓 %s 當掉;正在放棄"
#, python-format
msgid "Waited %s seconds for %s to die; killing"
msgstr "已等待 %s 秒以讓 %s 當掉;正在強制終止"
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr "警告:無法在沒有 memcached 用戶端的情況下限制速率"

View File

@ -18,18 +18,23 @@ import os
import sys
import time
import signal
import re
from random import shuffle
from swift import gettext_ as _
from contextlib import closing
from eventlet import Timeout
from swift.obj import diskfile
from swift.common.utils import get_logger, ratelimit_sleep, dump_recon_cache, \
list_from_csv, listdir
from swift.obj import diskfile, replicator
from swift.common.utils import (
get_logger, ratelimit_sleep, dump_recon_cache, list_from_csv, listdir,
unlink_paths_older_than, readconf, config_auto_int_value)
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist
from swift.common.daemon import Daemon
from swift.common.storage_policy import POLICIES
# This matches rsync tempfiles, like ".<timestamp>.data.Xy095a"
RE_RSYNC_TEMPFILE = re.compile(r'^\..*\.([a-zA-Z0-9_]){6}$')
class AuditorWorker(object):
"""Walk through file system to audit objects"""
@ -42,6 +47,27 @@ class AuditorWorker(object):
self.max_files_per_second = float(conf.get('files_per_second', 20))
self.max_bytes_per_second = float(conf.get('bytes_per_second',
10000000))
try:
# ideally unless ops overrides the rsync_tempfile_timeout in the
# auditor section we can base our behavior on whatever they
# configure for their replicator
replicator_config = readconf(self.conf['__file__'],
'object-replicator')
except (KeyError, SystemExit):
# if we can't parse the real config (generally a KeyError on
# __file__, or SystemExit on no object-replicator section) we use
# a very conservative default
default = 86400
else:
replicator_rsync_timeout = int(replicator_config.get(
'rsync_timeout', replicator.DEFAULT_RSYNC_TIMEOUT))
# Here we can do some light math for ops and use the *replicator's*
# rsync_timeout (plus 15 mins to avoid deleting local tempfiles
# before the remote replicator kills it's rsync)
default = replicator_rsync_timeout + 900
self.rsync_tempfile_timeout = config_auto_int_value(
self.conf.get('rsync_tempfile_timeout'), default)
self.auditor_type = 'ALL'
self.zero_byte_only_at_fps = zero_byte_only_at_fps
if self.zero_byte_only_at_fps:
@ -95,7 +121,9 @@ class AuditorWorker(object):
# can find all diskfile locations regardless of policy -- so for now
# just use Policy-0's manager.
all_locs = (self.diskfile_router[POLICIES[0]]
.object_audit_location_generator(device_dirs=device_dirs))
.object_audit_location_generator(
device_dirs=device_dirs,
auditor_type=self.auditor_type))
for location in all_locs:
loop_time = time.time()
self.failsafe_object_audit(location)
@ -156,6 +184,9 @@ class AuditorWorker(object):
self.logger.info(
_('Object audit stats: %s') % json.dumps(self.stats_buckets))
# Unset remaining partitions to not skip them in the next run
diskfile.clear_auditor_status(self.devices, self.auditor_type)
def record_stats(self, obj_size):
"""
Based on config's object_size_stats will keep track of how many objects
@ -195,34 +226,46 @@ class AuditorWorker(object):
raise DiskFileQuarantined(msg)
diskfile_mgr = self.diskfile_router[location.policy]
# this method doesn't normally raise errors, even if the audit
# location does not exist; if this raises an unexpected error it
# will get logged in failsafe
df = diskfile_mgr.get_diskfile_from_audit_location(location)
reader = None
try:
df = diskfile_mgr.get_diskfile_from_audit_location(location)
with df.open():
metadata = df.get_metadata()
obj_size = int(metadata['Content-Length'])
if self.stats_sizes:
self.record_stats(obj_size)
if self.zero_byte_only_at_fps and obj_size:
self.passes += 1
return
reader = df.reader(_quarantine_hook=raise_dfq)
with closing(reader):
for chunk in reader:
chunk_len = len(chunk)
self.bytes_running_time = ratelimit_sleep(
self.bytes_running_time,
self.max_bytes_per_second,
incr_by=chunk_len)
self.bytes_processed += chunk_len
self.total_bytes_processed += chunk_len
if obj_size and not self.zero_byte_only_at_fps:
reader = df.reader(_quarantine_hook=raise_dfq)
if reader:
with closing(reader):
for chunk in reader:
chunk_len = len(chunk)
self.bytes_running_time = ratelimit_sleep(
self.bytes_running_time,
self.max_bytes_per_second,
incr_by=chunk_len)
self.bytes_processed += chunk_len
self.total_bytes_processed += chunk_len
except DiskFileNotExist:
return
pass
except DiskFileQuarantined as err:
self.quarantines += 1
self.logger.error(_('ERROR Object %(obj)s failed audit and was'
' quarantined: %(err)s'),
{'obj': location, 'err': err})
self.passes += 1
# _ondisk_info attr is initialized to None and filled in by open
ondisk_info_dict = df._ondisk_info or {}
if 'unexpected' in ondisk_info_dict:
is_rsync_tempfile = lambda fpath: RE_RSYNC_TEMPFILE.match(
os.path.basename(fpath))
rsync_tempfile_paths = filter(is_rsync_tempfile,
ondisk_info_dict['unexpected'])
mtime = time.time() - self.rsync_tempfile_timeout
unlink_paths_older_than(rsync_tempfile_paths, mtime)
class ObjectAuditor(Daemon):
@ -279,6 +322,7 @@ class ObjectAuditor(Daemon):
"""Parallel audit loop"""
self.clear_recon_cache('ALL')
self.clear_recon_cache('ZBF')
once = kwargs.get('mode') == 'once'
kwargs['device_dirs'] = override_devices
if parent:
kwargs['zero_byte_fps'] = zbo_fps
@ -305,13 +349,18 @@ class ObjectAuditor(Daemon):
if len(pids) == parallel_proc:
pid = os.wait()[0]
pids.remove(pid)
# ZBF scanner must be restarted as soon as it finishes
if self.conf_zero_byte_fps and pid == zbf_pid:
if self.conf_zero_byte_fps and pid == zbf_pid and once:
# If we're only running one pass and the ZBF scanner
# finished, don't bother restarting it.
zbf_pid = -100
elif self.conf_zero_byte_fps and pid == zbf_pid:
# When we're running forever, the ZBF scanner must
# be restarted as soon as it finishes.
kwargs['device_dirs'] = override_devices
# sleep between ZBF scanner forks
self._sleep()
zbf_pid = self.fork_child(zero_byte_fps=True,
**kwargs)
zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
pids.append(zbf_pid)
else:
kwargs['device_dirs'] = [device_list.pop()]
@ -319,8 +368,9 @@ class ObjectAuditor(Daemon):
while pids:
pid = os.wait()[0]
# ZBF scanner must be restarted as soon as it finishes
# unless we're in run-once mode
if self.conf_zero_byte_fps and pid == zbf_pid and \
len(pids) > 1:
len(pids) > 1 and not once:
kwargs['device_dirs'] = override_devices
# sleep between ZBF scanner forks
self._sleep()

View File

@ -33,6 +33,7 @@ are also not considered part of the backend API.
import six.moves.cPickle as pickle
import errno
import fcntl
import json
import os
import time
import uuid
@ -72,6 +73,7 @@ from functools import partial
PICKLE_PROTOCOL = 2
ONE_WEEK = 604800
HASH_FILE = 'hashes.pkl'
HASH_INVALIDATIONS_FILE = 'hashes.invalid'
METADATA_KEY = 'user.swift.metadata'
DROP_CACHE_WINDOW = 1024 * 1024
# These are system-set metadata keys that cannot be changed with a POST.
@ -221,6 +223,73 @@ def quarantine_renamer(device_path, corrupted_file_path):
return to_dir
def consolidate_hashes(partition_dir):
    """
    Take what's in hashes.pkl and hashes.invalid, combine them, write the
    result back to hashes.pkl, and clear out hashes.invalid.

    :param partition_dir: absolute path to partition dir containing
                          hashes.pkl and hashes.invalid

    :returns: the loaded hashes dict (with invalidated suffixes set to
              None), None if there's no hashes.pkl, or None if the
              pickle could not be parsed
    """
    hashes_file = join(partition_dir, HASH_FILE)
    invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)

    if not os.path.exists(hashes_file):
        if os.path.exists(invalidations_file):
            # no hashes at all -> everything's invalid, so empty the file with
            # the invalid suffixes in it, if it exists
            try:
                with open(invalidations_file, 'wb'):
                    pass
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        return None

    # hold the partition lock so readers/writers of hashes.pkl and
    # hashes.invalid don't race with the consolidation
    with lock_path(partition_dir):
        try:
            with open(hashes_file, 'rb') as hashes_fp:
                pickled_hashes = hashes_fp.read()
        except (IOError, OSError):
            hashes = {}
        else:
            try:
                hashes = pickle.loads(pickled_hashes)
            except Exception:
                # pickle.loads() can raise a wide variety of exceptions when
                # given invalid input depending on the way in which the
                # input is invalid.
                hashes = None

        # replay each invalidated suffix recorded in hashes.invalid onto
        # the loaded dict; a suffix entry is reset to None so it gets
        # re-hashed on next use
        modified = False
        try:
            with open(invalidations_file, 'rb') as inv_fh:
                for line in inv_fh:
                    suffix = line.strip()
                    if hashes is not None and hashes.get(suffix) is not None:
                        hashes[suffix] = None
                        modified = True
        except (IOError, OSError) as e:
            if e.errno != errno.ENOENT:
                raise

        if modified:
            write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL)

        # Now that all the invalidations are reflected in hashes.pkl, it's
        # safe to clear out the invalidations file.
        try:
            with open(invalidations_file, 'w') as inv_fh:
                pass
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        return hashes
def invalidate_hash(suffix_dir):
"""
Invalidates the hash for a suffix_dir in the partition's hashes file.
@ -234,16 +303,11 @@ def invalidate_hash(suffix_dir):
hashes_file = join(partition_dir, HASH_FILE)
if not os.path.exists(hashes_file):
return
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
with lock_path(partition_dir):
try:
with open(hashes_file, 'rb') as fp:
hashes = pickle.load(fp)
if suffix in hashes and not hashes[suffix]:
return
except Exception:
return
hashes[suffix] = None
write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL)
with open(invalidations_file, 'ab') as inv_fh:
inv_fh.write(suffix + "\n")
class AuditLocation(object):
@ -263,7 +327,7 @@ class AuditLocation(object):
def object_audit_location_generator(devices, mount_check=True, logger=None,
device_dirs=None):
device_dirs=None, auditor_type="ALL"):
"""
Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all
objects stored under that directory if device_dirs isn't set. If
@ -277,7 +341,8 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
:param mount_check: flag to check if a mount check should be performed
on devices
:param logger: a logger object
:device_dirs: a list of directories under devices to traverse
:param device_dirs: a list of directories under devices to traverse
:param auditor_type: either ALL or ZBF
"""
if not device_dirs:
device_dirs = listdir(devices)
@ -296,7 +361,15 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
_('Skipping %s as it is not mounted'), device)
continue
# loop through object dirs for all policies
for dir_ in os.listdir(os.path.join(devices, device)):
device_dir = os.path.join(devices, device)
try:
dirs = os.listdir(device_dir)
except OSError as e:
if logger:
logger.debug(
_('Skipping %s: %s') % (device_dir, e.strerror))
continue
for dir_ in dirs:
if not dir_.startswith(DATADIR_BASE):
continue
try:
@ -307,8 +380,12 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
'to a valid policy (%s)') % (dir_, e))
continue
datadir_path = os.path.join(devices, device, dir_)
partitions = listdir(datadir_path)
for partition in partitions:
partitions = get_auditor_status(datadir_path, logger, auditor_type)
for pos, partition in enumerate(partitions):
update_auditor_status(datadir_path, logger,
partitions[pos:], auditor_type)
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
@ -329,6 +406,51 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
yield AuditLocation(hsh_path, device, partition,
policy)
update_auditor_status(datadir_path, logger, [], auditor_type)
def get_auditor_status(datadir_path, logger, auditor_type):
    """
    Return the list of partitions still to be audited for auditor_type.

    Reads the persisted auditor status file from datadir_path. If the
    status file is missing, unreadable or not valid JSON, fall back to a
    full listing of datadir_path so a fresh audit pass starts from scratch.

    :param datadir_path: path to a policy's objects directory on a device
    :param logger: optional logger for warnings (may be None)
    :param auditor_type: either "ALL" or "ZBF"
    :returns: list of partition directory names remaining to be audited
    """
    auditor_status = os.path.join(
        datadir_path, "auditor_status_%s.json" % auditor_type)
    status = {}
    try:
        with open(auditor_status) as statusfile:
            status = statusfile.read()
    except (OSError, IOError) as e:
        # ENOENT just means no earlier audit pass was interrupted; anything
        # else is worth a warning before falling back to a full listing
        if e.errno != errno.ENOENT and logger:
            logger.warning(_('Cannot read %s (%s)') % (auditor_status, e))
        return listdir(datadir_path)
    try:
        status = json.loads(status)
    except ValueError as e:
        # guard against a None logger, consistent with the branch above
        # (previously this raised AttributeError when logger was None)
        if logger:
            logger.warning(_('Loading JSON from %s failed (%s)') % (
                auditor_status, e))
        return listdir(datadir_path)
    return status['partitions']
def update_auditor_status(datadir_path, logger, partitions, auditor_type):
    """
    Persist the list of partitions still to be audited for auditor_type.

    Failures to write the status file are logged (when a logger is given)
    and otherwise ignored: auditor progress tracking is best-effort.

    :param datadir_path: path to a policy's objects directory on a device
    :param logger: optional logger for warnings (may be None)
    :param partitions: list of partition directory names not yet audited
    :param auditor_type: either "ALL" or "ZBF"
    """
    status = json.dumps({'partitions': partitions})
    auditor_status = os.path.join(
        datadir_path, "auditor_status_%s.json" % auditor_type)
    try:
        with open(auditor_status, "wb") as statusfile:
            # json.dumps returns text; encode so the binary-mode write works
            # on both Python 2 and Python 3 (output is ASCII-only, so utf8
            # encoding is an identity transform on py2 str)
            statusfile.write(status.encode('utf8'))
    except (OSError, IOError) as e:
        if logger:
            logger.warning(_('Cannot write %s (%s)') % (auditor_status, e))
def clear_auditor_status(devices, auditor_type="ALL"):
    """
    Remove the auditor status file from every objects directory on every
    device, so the next audit pass of auditor_type starts from scratch.

    :param devices: path to the devices directory (e.g. "/srv/node")
    :param auditor_type: either "ALL" or "ZBF"
    """
    status_name = "auditor_status_%s.json" % auditor_type
    for device in os.listdir(devices):
        device_path = os.path.join(devices, device)
        for dir_ in os.listdir(device_path):
            # only policy data dirs ("objects", "objects-1", ...) carry a
            # status file
            if not dir_.startswith("objects"):
                continue
            remove_file(os.path.join(device_path, dir_, status_name))
def strip_self(f):
"""
@ -395,6 +517,7 @@ class BaseDiskFileManager(object):
diskfile_cls = None # must be set by subclasses
invalidate_hash = strip_self(invalidate_hash)
consolidate_hashes = strip_self(consolidate_hashes)
quarantine_renamer = strip_self(quarantine_renamer)
def __init__(self, conf, logger):
@ -626,7 +749,10 @@ class BaseDiskFileManager(object):
# dicts for the files having that extension. The file_info dicts are of
# the form returned by parse_on_disk_filename, with the filename added.
# Each list is sorted in reverse timestamp order.
#
# the results dict is used to collect results of file filtering
results = {}
# The exts dict will be modified during subsequent processing as files
# are removed to be discarded or ignored.
exts = defaultdict(list)
@ -637,16 +763,15 @@ class BaseDiskFileManager(object):
file_info['filename'] = afile
exts[file_info['ext']].append(file_info)
except DiskFileError as e:
self.logger.warning('Unexpected file %s: %s' %
(os.path.join(datadir or '', afile), e))
file_path = os.path.join(datadir or '', afile)
self.logger.warning('Unexpected file %s: %s',
file_path, e)
results.setdefault('unexpected', []).append(file_path)
for ext in exts:
# For each extension sort files into reverse chronological order.
exts[ext] = sorted(
exts[ext], key=lambda info: info['timestamp'], reverse=True)
# the results dict is used to collect results of file filtering
results = {}
if exts.get('.ts'):
# non-tombstones older than or equal to latest tombstone are
# obsolete
@ -751,20 +876,6 @@ class BaseDiskFileManager(object):
results['files'] = files
return results
def hash_cleanup_listdir(self, hsh_path, reclaim_age=ONE_WEEK):
"""
List contents of a hash directory and clean up any old files.
For EC policy, delete files older than a .durable or .ts file.
:param hsh_path: object hash path
:param reclaim_age: age in seconds at which to remove tombstones
:returns: list of files remaining in the directory, reverse sorted
"""
# maintain compatibility with 'legacy' hash_cleanup_listdir
# return value
return self.cleanup_ondisk_files(
hsh_path, reclaim_age=reclaim_age)['files']
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
@ -889,12 +1000,22 @@ class BaseDiskFileManager(object):
recalculate = []
try:
with open(hashes_file, 'rb') as fp:
hashes = pickle.load(fp)
mtime = getmtime(hashes_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
try:
hashes = self.consolidate_hashes(partition_path)
except Exception:
do_listdir = True
force_rewrite = True
else:
if hashes is None: # no hashes.pkl file; let's build it
do_listdir = True
force_rewrite = True
hashes = {}
if do_listdir:
for suff in os.listdir(partition_path):
if len(suff) == 3:
@ -1021,14 +1142,17 @@ class BaseDiskFileManager(object):
policy=policy, use_splice=self.use_splice,
pipe_size=self.pipe_size, **kwargs)
def object_audit_location_generator(self, device_dirs=None):
def object_audit_location_generator(self, device_dirs=None,
auditor_type="ALL"):
"""
Yield an AuditLocation for all objects stored under device_dirs.
:param device_dirs: directory of target device
:param auditor_type: either ALL or ZBF
"""
return object_audit_location_generator(self.devices, self.mount_check,
self.logger, device_dirs)
self.logger, device_dirs,
auditor_type)
def get_diskfile_from_audit_location(self, audit_location):
"""
@ -1065,8 +1189,8 @@ class BaseDiskFileManager(object):
dev_path, get_data_dir(policy), str(partition), object_hash[-3:],
object_hash)
try:
filenames = self.hash_cleanup_listdir(object_path,
self.reclaim_age)
filenames = self.cleanup_ondisk_files(object_path,
self.reclaim_age)['files']
except OSError as err:
if err.errno == errno.ENOTDIR:
quar_path = self.quarantine_renamer(dev_path, object_path)
@ -1322,7 +1446,7 @@ class BaseDiskFileWriter(object):
self._put_succeeded = True
if cleanup:
try:
self.manager.hash_cleanup_listdir(self._datadir)
self.manager.cleanup_ondisk_files(self._datadir)['files']
except OSError:
logging.exception(_('Problem cleaning up %s'), self._datadir)
@ -2411,7 +2535,7 @@ class ECDiskFileWriter(BaseDiskFileWriter):
exc = DiskFileNoSpace(str(err))
else:
try:
self.manager.hash_cleanup_listdir(self._datadir)
self.manager.cleanup_ondisk_files(self._datadir)['files']
except OSError as os_err:
self.manager.logger.exception(
_('Problem cleaning up %s (%s)') %

View File

@ -43,17 +43,37 @@ class InMemoryFileSystem(object):
self._filesystem = {}
def get_object(self, name):
    """
    Look up an object held in memory.

    :param name: standard object name
    :return (fp, metadata): fp is `StringIO` in-memory representation
                            object (or None). metadata is a dictionary
                            of metadata (or None)
    """
    entry = self._filesystem.get(name)
    if entry is not None:
        # stored value is already an (fp, metadata) pair
        return entry
    return (None, None)
def put_object(self, name, data, metadata):
self._filesystem[name] = (data, metadata)
def put_object(self, name, fp, metadata):
    """
    Store an object in memory.

    :param name: standard object name
    :param fp: `StringIO` in-memory representation object
    :param metadata: dictionary of metadata to be written
    """
    self._filesystem[name] = fp, metadata
def del_object(self, name):
    """
    Remove an object from memory.

    :param name: standard object name
    :raises KeyError: if no object of that name is stored
    """
    # pop without a default raises KeyError for a missing name, exactly
    # like the del statement it replaces
    self._filesystem.pop(name)
def get_diskfile(self, account, container, obj, **kwargs):

View File

@ -32,7 +32,7 @@ from swift.common.utils import (
whataremyips, unlink_older_than, compute_eta, get_logger,
dump_recon_cache, mkdirs, config_true_value, list_from_csv, get_hub,
tpool_reraise, GreenAsyncPile, Timestamp, remove_file)
from swift.common.swob import HeaderKeyDict
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.ring.utils import is_local_device
@ -68,7 +68,7 @@ def _get_partners(frag_index, part_nodes):
class RebuildingECDiskFileStream(object):
"""
This class wraps the the reconstructed fragment archive data and
This class wraps the reconstructed fragment archive data and
metadata in the DiskFile interface for ssync.
"""

View File

@ -41,6 +41,7 @@ from swift.obj import ssync_sender
from swift.obj.diskfile import DiskFileManager, get_data_dir, get_tmp_dir
from swift.common.storage_policy import POLICIES, REPL_POLICY
DEFAULT_RSYNC_TIMEOUT = 900
hubs.use_hub(get_hub())
@ -76,7 +77,8 @@ class ObjectReplicator(Daemon):
self.partition_times = []
self.interval = int(conf.get('interval') or
conf.get('run_pause') or 30)
self.rsync_timeout = int(conf.get('rsync_timeout', 900))
self.rsync_timeout = int(conf.get('rsync_timeout',
DEFAULT_RSYNC_TIMEOUT))
self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
self.rsync_compress = config_true_value(

View File

@ -44,14 +44,15 @@ from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
from swift.obj import ssync_receiver
from swift.common.http import is_success
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.request_helpers import get_name_and_placement, \
is_user_meta, is_sys_or_user_meta, is_object_transient_sysmeta
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HeaderKeyDict, \
HTTPConflict, HTTPServerError
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \
HTTPServerError
from swift.obj.diskfile import DATAFILE_SYSTEM_META, DiskFileRouter
@ -250,7 +251,8 @@ class ObjectController(BaseStorageServer):
{'ip': ip, 'port': port, 'dev': contdevice})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
timestamp = headers_out['x-timestamp']
timestamp = headers_out.get('x-meta-timestamp',
headers_out.get('x-timestamp'))
self._diskfile_router[policy].pickle_async_update(
objdevice, account, container, obj, data, timestamp, policy)
@ -566,6 +568,7 @@ class ObjectController(BaseStorageServer):
content_type_headers['Content-Type'] += (';swift_bytes=%s'
% swift_bytes)
# object POST updates are PUT to the container server
self.container_update(
'PUT', account, container, obj, request,
HeaderKeyDict({
@ -1110,7 +1113,7 @@ def global_conf_callback(preloaded_app_conf, global_conf):
"""
Callback for swift.common.wsgi.run_wsgi during the global_conf
creation so that we can add our replication_semaphore, used to
limit the number of concurrent REPLICATION_REQUESTS across all
limit the number of concurrent SSYNC_REQUESTS across all
workers.
:param preloaded_app_conf: The preloaded conf for the WSGI app.

View File

@ -66,7 +66,6 @@ def encode_wanted(remote, local):
The decoder for this line is
:py:func:`~swift.obj.ssync_sender.decode_wanted`
"""
want = {}
if 'ts_data' in local:
# we have something, let's get just the right stuff
@ -248,7 +247,7 @@ class Receiver(object):
raise swob.HTTPInsufficientStorage(drive=self.device)
self.fp = self.request.environ['wsgi.input']
def _check_local(self, object_hash):
def _check_local(self, remote, make_durable=True):
"""
Parse local diskfile and return results of current
representative for comparison to remote.
@ -257,21 +256,42 @@ class Receiver(object):
"""
try:
df = self.diskfile_mgr.get_diskfile_from_hash(
self.device, self.partition, object_hash,
self.device, self.partition, remote['object_hash'],
self.policy, frag_index=self.frag_index)
except exceptions.DiskFileNotExist:
return {}
try:
df.open()
except exceptions.DiskFileDeleted as err:
return {'ts_data': err.timestamp}
except exceptions.DiskFileError as err:
return {}
return {
'ts_data': df.data_timestamp,
'ts_meta': df.timestamp,
'ts_ctype': df.content_type_timestamp,
}
result = {'ts_data': err.timestamp}
except exceptions.DiskFileError:
result = {}
else:
result = {
'ts_data': df.data_timestamp,
'ts_meta': df.timestamp,
'ts_ctype': df.content_type_timestamp,
}
if (make_durable and df.fragments and
remote['ts_data'] in df.fragments and
self.frag_index in df.fragments[remote['ts_data']] and
(df.durable_timestamp is None or
df.durable_timestamp < remote['ts_data'])):
# We have the frag, just missing a .durable, so try to create the
# .durable now. Try this just once to avoid looping if it fails.
try:
with df.create() as writer:
writer.commit(remote['ts_data'])
return self._check_local(remote, make_durable=False)
except Exception:
# if commit fails then log exception and fall back to wanting
# a full update
self.app.logger.exception(
'%s/%s/%s EXCEPTION in replication.Receiver while '
'attempting commit of %s'
% (self.request.remote_addr, self.device, self.partition,
df._datadir))
return result
def _check_missing(self, line):
"""
@ -282,7 +302,7 @@ class Receiver(object):
Anchor point for tests to mock legacy protocol changes.
"""
remote = decode_missing(line)
local = self._check_local(remote['object_hash'])
local = self._check_local(remote)
return encode_wanted(remote, local)
def missing_check(self):

View File

@ -60,10 +60,12 @@ class AccountController(Controller):
return resp
partition = self.app.account_ring.get_part(self.account_name)
concurrency = self.app.account_ring.replica_count \
if self.app.concurrent_gets else 1
node_iter = self.app.iter_nodes(self.app.account_ring, partition)
resp = self.GETorHEAD_base(
req, _('Account'), node_iter, partition,
req.swift_entity_path.rstrip('/'))
req.swift_entity_path.rstrip('/'), concurrency)
if resp.status_int == HTTP_NOT_FOUND:
if resp.headers.get('X-Account-Status', '').lower() == 'deleted':
resp.status = HTTP_GONE

View File

@ -47,11 +47,12 @@ from swift.common.utils import Timestamp, config_true_value, \
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ChunkReadTimeout, ChunkWriteTimeout, \
ConnectionTimeout, RangeAlreadyComplete
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.http import is_informational, is_success, is_redirection, \
is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \
HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED, HTTP_CONTINUE
from swift.common.swob import Request, Response, HeaderKeyDict, Range, \
from swift.common.swob import Request, Response, Range, \
HTTPException, HTTPRequestedRangeNotSatisfiable, HTTPServiceUnavailable, \
status_map
from swift.common.request_helpers import strip_sys_meta_prefix, \
@ -629,7 +630,8 @@ def bytes_to_skip(record_size, range_start):
class ResumingGetter(object):
def __init__(self, app, req, server_type, node_iter, partition, path,
backend_headers, client_chunk_size=None, newest=None):
backend_headers, concurrency=1, client_chunk_size=None,
newest=None):
self.app = app
self.node_iter = node_iter
self.server_type = server_type
@ -640,6 +642,7 @@ class ResumingGetter(object):
self.skip_bytes = 0
self.used_nodes = []
self.used_source_etag = ''
self.concurrency = concurrency
# stuff from request
self.req_method = req.method
@ -655,6 +658,7 @@ class ResumingGetter(object):
self.reasons = []
self.bodies = []
self.source_headers = []
self.sources = []
# populated from response headers
self.start_byte = self.end_byte = self.length = None
@ -977,93 +981,111 @@ class ResumingGetter(object):
else:
return None
def _make_node_request(self, node, node_timeout, logger_thread_locals):
    """
    Attempt to fetch a response for self.path from a single backend node.

    Outcomes are recorded in the self.statuses / self.reasons /
    self.bodies / self.source_headers accumulators; a usable response is
    additionally appended to self.sources together with its node.

    :param node: ring node dict ('ip', 'port', 'device' keys are used)
    :param node_timeout: timeout for reading the backend response
    :param logger_thread_locals: logger thread-locals captured by the
        spawning greenthread, re-installed here so log lines from this
        worker stay associated with the originating request
    :returns: True when a good source was found and self.newest is not
        set (the caller may stop spawning further requests); False in
        every other case
    """
    self.app.logger.thread_locals = logger_thread_locals
    if node in self.used_nodes:
        # this node already served (part of) the response on an earlier
        # attempt; don't ask it again when resuming
        return False
    start_node_timing = time.time()
    try:
        with ConnectionTimeout(self.app.conn_timeout):
            conn = http_connect(
                node['ip'], node['port'], node['device'],
                self.partition, self.req_method, self.path,
                headers=self.backend_headers,
                query_string=self.req_query_string)
        self.app.set_node_timing(node, time.time() - start_node_timing)
        with Timeout(node_timeout):
            possible_source = conn.getresponse()
            # See NOTE: swift_conn at top of file about this.
            possible_source.swift_conn = conn
    except (Exception, Timeout):
        self.app.exception_occurred(
            node, self.server_type,
            _('Trying to %(method)s %(path)s') %
            {'method': self.req_method, 'path': self.req_path})
        return False
    if self.is_good_source(possible_source):
        # 404 if we know we don't have a synced copy
        if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
            self.statuses.append(HTTP_NOT_FOUND)
            self.reasons.append('')
            self.bodies.append('')
            self.source_headers.append([])
            close_swift_conn(possible_source)
        else:
            if self.used_source_etag:
                # when resuming, reject a source whose etag differs from
                # the one we have already served bytes from
                src_headers = dict(
                    (k.lower(), v) for k, v in
                    possible_source.getheaders())
                if self.used_source_etag != src_headers.get(
                        'x-object-sysmeta-ec-etag',
                        src_headers.get('etag', '')).strip('"'):
                    self.statuses.append(HTTP_NOT_FOUND)
                    self.reasons.append('')
                    self.bodies.append('')
                    self.source_headers.append([])
                    return False
            # good, matching source: record it with a None body placeholder
            # (the body is streamed later, not buffered here)
            self.statuses.append(possible_source.status)
            self.reasons.append(possible_source.reason)
            self.bodies.append(None)
            self.source_headers.append(possible_source.getheaders())
            self.sources.append((possible_source, node))
            if not self.newest:  # one good source is enough
                return True
    else:
        # bad source: buffer the (error) body for possible use in the
        # final client response and apply error accounting
        self.statuses.append(possible_source.status)
        self.reasons.append(possible_source.reason)
        self.bodies.append(possible_source.read())
        self.source_headers.append(possible_source.getheaders())
        if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
            self.app.error_limit(node, _('ERROR Insufficient Storage'))
        elif is_server_error(possible_source.status):
            self.app.error_occurred(
                node, _('ERROR %(status)d %(body)s '
                        'From %(type)s Server') %
                {'status': possible_source.status,
                 'body': self.bodies[-1][:1024],
                 'type': self.server_type})
    return False
def _get_source_and_node(self):
self.statuses = []
self.reasons = []
self.bodies = []
self.source_headers = []
sources = []
self.sources = []
nodes = GreenthreadSafeIterator(self.node_iter)
node_timeout = self.app.node_timeout
if self.server_type == 'Object' and not self.newest:
node_timeout = self.app.recoverable_node_timeout
for node in self.node_iter:
if node in self.used_nodes:
continue
start_node_timing = time.time()
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(
node['ip'], node['port'], node['device'],
self.partition, self.req_method, self.path,
headers=self.backend_headers,
query_string=self.req_query_string)
self.app.set_node_timing(node, time.time() - start_node_timing)
with Timeout(node_timeout):
possible_source = conn.getresponse()
# See NOTE: swift_conn at top of file about this.
possible_source.swift_conn = conn
except (Exception, Timeout):
self.app.exception_occurred(
node, self.server_type,
_('Trying to %(method)s %(path)s') %
{'method': self.req_method, 'path': self.req_path})
continue
if self.is_good_source(possible_source):
# 404 if we know we don't have a synced copy
if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
self.statuses.append(HTTP_NOT_FOUND)
self.reasons.append('')
self.bodies.append('')
self.source_headers.append([])
close_swift_conn(possible_source)
else:
if self.used_source_etag:
src_headers = dict(
(k.lower(), v) for k, v in
possible_source.getheaders())
pile = GreenAsyncPile(self.concurrency)
if self.used_source_etag != src_headers.get(
'x-object-sysmeta-ec-etag',
src_headers.get('etag', '')).strip('"'):
self.statuses.append(HTTP_NOT_FOUND)
self.reasons.append('')
self.bodies.append('')
self.source_headers.append([])
continue
for node in nodes:
pile.spawn(self._make_node_request, node, node_timeout,
self.app.logger.thread_locals)
_timeout = self.app.concurrency_timeout \
if pile.inflight < self.concurrency else None
if pile.waitfirst(_timeout):
break
else:
# ran out of nodes, see if any stragglers will finish
any(pile)
self.statuses.append(possible_source.status)
self.reasons.append(possible_source.reason)
self.bodies.append(None)
self.source_headers.append(possible_source.getheaders())
sources.append((possible_source, node))
if not self.newest: # one good source is enough
break
else:
self.statuses.append(possible_source.status)
self.reasons.append(possible_source.reason)
self.bodies.append(possible_source.read())
self.source_headers.append(possible_source.getheaders())
if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
self.app.error_limit(node, _('ERROR Insufficient Storage'))
elif is_server_error(possible_source.status):
self.app.error_occurred(
node, _('ERROR %(status)d %(body)s '
'From %(type)s Server') %
{'status': possible_source.status,
'body': self.bodies[-1][:1024],
'type': self.server_type})
if sources:
sources.sort(key=lambda s: source_key(s[0]))
source, node = sources.pop()
for src, _junk in sources:
if self.sources:
self.sources.sort(key=lambda s: source_key(s[0]))
source, node = self.sources.pop()
for src, _junk in self.sources:
close_swift_conn(src)
self.used_nodes.append(node)
src_headers = dict(
(k.lower(), v) for k, v in
possible_source.getheaders())
source.getheaders())
# Save off the source etag so that, if we lose the connection
# and have to resume from a different node, we can be sure that
@ -1619,7 +1641,7 @@ class Controller(object):
self.app.logger.warning('Could not autocreate account %r' % path)
def GETorHEAD_base(self, req, server_type, node_iter, partition, path,
client_chunk_size=None):
concurrency=1, client_chunk_size=None):
"""
Base handler for HTTP GET or HEAD requests.
@ -1628,6 +1650,7 @@ class Controller(object):
:param node_iter: an iterator to obtain nodes from
:param partition: partition
:param path: path for the request
:param concurrency: number of requests to run concurrently
:param client_chunk_size: chunk size for response body iterator
:returns: swob.Response object
"""
@ -1636,6 +1659,7 @@ class Controller(object):
handler = GetOrHeadHandler(self.app, req, self.server_type, node_iter,
partition, path, backend_headers,
concurrency,
client_chunk_size=client_chunk_size)
res = handler.get_working_response(req)

View File

@ -93,10 +93,12 @@ class ContainerController(Controller):
return HTTPNotFound(request=req)
part = self.app.container_ring.get_part(
self.account_name, self.container_name)
concurrency = self.app.container_ring.replica_count \
if self.app.concurrent_gets else 1
node_iter = self.app.iter_nodes(self.app.container_ring, part)
resp = self.GETorHEAD_base(
req, _('Container'), node_iter, part,
req.swift_entity_path)
req.swift_entity_path, concurrency)
if 'swift.authorize' in req.environ:
req.acl = resp.headers.get('x-container-read')
aresp = req.environ['swift.authorize'](req)

View File

@ -56,19 +56,20 @@ from swift.common.exceptions import ChunkReadTimeout, \
ChunkWriteTimeout, ConnectionTimeout, ResponseTimeout, \
InsufficientStorage, FooterNotSupported, MultiphasePUTNotSupported, \
PutterConnectError, ChunkReadError
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.http import (
is_informational, is_success, is_client_error, is_server_error,
HTTP_CONTINUE, HTTP_CREATED, HTTP_MULTIPLE_CHOICES,
is_redirection, HTTP_CONTINUE, HTTP_CREATED, HTTP_MULTIPLE_CHOICES,
HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE,
HTTP_INSUFFICIENT_STORAGE, HTTP_PRECONDITION_FAILED, HTTP_CONFLICT,
HTTP_UNPROCESSABLE_ENTITY)
HTTP_UNPROCESSABLE_ENTITY, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE)
from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY,
ECDriverError, PolicyError)
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation, ResumingGetter
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, Request, HeaderKeyDict, \
HTTPServerError, HTTPServiceUnavailable, Request, \
HTTPClientDisconnect, HTTPUnprocessableEntity, Response, HTTPException, \
HTTPRequestedRangeNotSatisfiable, Range, HTTPInternalServerError
from swift.common.request_helpers import is_sys_or_user_meta, is_sys_meta, \
@ -867,9 +868,11 @@ class BaseObjectController(Controller):
class ReplicatedObjectController(BaseObjectController):
def _get_or_head_response(self, req, node_iter, partition, policy):
concurrency = self.app.get_object_ring(policy.idx).replica_count \
if self.app.concurrent_gets else 1
resp = self.GETorHEAD_base(
req, _('Object'), node_iter, partition,
req.swift_entity_path)
req.swift_entity_path, concurrency)
return resp
def _connect_put_node(self, nodes, part, path, headers,
@ -1758,7 +1761,7 @@ class ECPutter(object):
@classmethod
def connect(cls, node, part, path, headers, conn_timeout, node_timeout,
chunked=False):
chunked=False, expected_frag_archive_size=None):
"""
Connect to a backend node and send the headers.
@ -1780,9 +1783,10 @@ class ECPutter(object):
# we must use chunked encoding.
headers['Transfer-Encoding'] = 'chunked'
headers['Expect'] = '100-continue'
if 'Content-Length' in headers:
headers['X-Backend-Obj-Content-Length'] = \
headers.pop('Content-Length')
# make sure this isn't there
headers.pop('Content-Length')
headers['X-Backend-Obj-Content-Length'] = expected_frag_archive_size
headers['X-Backend-Obj-Multipart-Mime-Boundary'] = mime_boundary
@ -1988,9 +1992,10 @@ class ECObjectController(BaseObjectController):
# no fancy EC decoding here, just one plain old HEAD request to
# one object server because all fragments hold all metadata
# information about the object.
concurrency = policy.ec_ndata if self.app.concurrent_gets else 1
resp = self.GETorHEAD_base(
req, _('Object'), node_iter, partition,
req.swift_entity_path)
req.swift_entity_path, concurrency)
else: # GET request
orig_range = None
range_specs = []
@ -1999,6 +2004,12 @@ class ECObjectController(BaseObjectController):
range_specs = self._convert_range(req, policy)
safe_iter = GreenthreadSafeIterator(node_iter)
# Sending the request concurrently to all nodes, and responding
# with the first response isn't something useful for EC as all
# nodes contain different fragments. Also EC has implemented it's
# own specific implementation of concurrent gets to ec_ndata nodes.
# So we don't need to worry about plumbing and sending a
# concurrency value to ResumingGetter.
with ContextPool(policy.ec_ndata) as pool:
pile = GreenAsyncPile(pool)
for _junk in range(policy.ec_ndata):
@ -2053,8 +2064,12 @@ class ECObjectController(BaseObjectController):
headers=resp_headers,
conditional_response=True,
app_iter=app_iter)
resp.accept_ranges = 'bytes'
app_iter.kickoff(req, resp)
try:
app_iter.kickoff(req, resp)
except HTTPException as err_resp:
# catch any HTTPException response here so that we can
# process response headers uniformly in _fix_response
resp = err_resp
else:
statuses = []
reasons = []
@ -2074,10 +2089,12 @@ class ECObjectController(BaseObjectController):
def _fix_response(self, resp):
# EC fragment archives each have different bytes, hence different
# etags. However, they all have the original object's etag stored in
# sysmeta, so we copy that here so the client gets it.
# sysmeta, so we copy that here (if it exists) so the client gets it.
resp.headers['Etag'] = resp.headers.get('X-Object-Sysmeta-Ec-Etag')
if (is_success(resp.status_int) or is_redirection(resp.status_int) or
resp.status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE):
resp.accept_ranges = 'bytes'
if is_success(resp.status_int):
resp.headers['Etag'] = resp.headers.get(
'X-Object-Sysmeta-Ec-Etag')
resp.headers['Content-Length'] = resp.headers.get(
'X-Object-Sysmeta-Ec-Content-Length')
resp.fix_conditional_response()
@ -2094,16 +2111,41 @@ class ECObjectController(BaseObjectController):
# the object server will get different bytes, so these
# values do not apply (Content-Length might, in general, but
# in the specific case of replication vs. EC, it doesn't).
headers.pop('Content-Length', None)
client_cl = headers.pop('Content-Length', None)
headers.pop('Etag', None)
expected_frag_size = None
if client_cl:
policy_index = int(headers.get('X-Backend-Storage-Policy-Index'))
policy = POLICIES.get_by_index(policy_index)
# TODO: PyECLib <= 1.2.0 looks to return the segment info
# different from the input for aligned data efficiency but
# Swift never does. So calculate the fragment length Swift
# will actually send to object sever by making two different
# get_segment_info calls (until PyECLib fixed).
# policy.fragment_size makes the call using segment size,
# and the next call is to get info for the last segment
# get number of fragments except the tail - use truncation //
num_fragments = int(client_cl) // policy.ec_segment_size
expected_frag_size = policy.fragment_size * num_fragments
# calculate the tail fragment_size by hand and add it to
# expected_frag_size
last_segment_size = int(client_cl) % policy.ec_segment_size
if last_segment_size:
last_info = policy.pyeclib_driver.get_segment_info(
last_segment_size, policy.ec_segment_size)
expected_frag_size += last_info['fragment_size']
self.app.logger.thread_locals = logger_thread_locals
for node in node_iter:
try:
putter = ECPutter.connect(
node, part, path, headers,
conn_timeout=self.app.conn_timeout,
node_timeout=self.app.node_timeout)
node_timeout=self.app.node_timeout,
expected_frag_archive_size=expected_frag_size)
self.app.set_node_timing(node, putter.connect_duration)
return putter
except InsufficientStorage:

View File

@ -147,6 +147,10 @@ class Application(object):
self.node_timings = {}
self.timing_expiry = int(conf.get('timing_expiry', 300))
self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
self.concurrent_gets = \
config_true_value(conf.get('concurrent_gets'))
self.concurrency_timeout = float(conf.get('concurrency_timeout',
self.conn_timeout))
value = conf.get('request_node_count', '2 * replicas').lower().split()
if len(value) == 1:
rnc_value = int(value[0])

View File

@ -577,10 +577,13 @@ def setup_package():
# if the test.conf file is not found, or does not provide a usable
# configuration.
config.update(get_config('func_test'))
if config:
in_process = False
else:
if not config:
in_process = True
# else... leave in_process value unchanged. It may be that
# setup_package is called twice, in which case in_process_setup may
# have loaded config before we reach here a second time, so the
# existence of config is not reliable to determine that in_process
# should be False. Anyway, it's default value is False.
else:
# Explicitly set to False, do not attempt to use in-process
# functional tests, be sure we attempt to read from local
@ -775,10 +778,12 @@ def teardown_package():
# clean up containers and objects left behind after running tests
global config
conn = Connection(config)
conn.authenticate()
account = Account(conn, config.get('account', config['username']))
account.delete_containers()
if config:
conn = Connection(config)
conn.authenticate()
account = Account(conn, config.get('account', config['username']))
account.delete_containers()
global in_process
global _test_socks

View File

@ -460,6 +460,7 @@ class Account(Base):
def delete_containers(self):
for c in listing_items(self.containers):
cont = self.container(c)
cont.update_metadata(hdrs={'x-versions-location': ''})
if not cont.delete_recursive():
return False

View File

@ -91,40 +91,55 @@ TEST_CASE_FORMAT = (
# A scenario of put for account, container and object with
# several roles.
RBAC_PUT = [
# PUT container in own account: ok
('PUT', None, None, 'UUID', None, None,
None, 'tester', 'tester', None, 201),
('PUT', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester', 201),
# PUT container in other users account: not allowed for role admin
('PUT', None, None, 'UUID', None, None,
None, 'tester2', 'tester', None, 403),
('PUT', None, None, 'UUID', None, None,
None, 'tester4', 'tester', None, 403),
# PUT container in other users account: not allowed for role _member_
('PUT', None, None, 'UUID', None, None,
None, 'tester3', 'tester3', None, 403),
('PUT', None, None, 'UUID', None, None,
None, 'tester2', 'tester3', None, 403),
('PUT', None, None, 'UUID', None, None,
None, 'tester4', 'tester3', None, 403),
# PUT container in other users account: allowed for role ResellerAdmin
('PUT', None, None, 'UUID', None, None,
None, 'tester6', 'tester6', None, 201),
('PUT', None, None, 'UUID', None, None,
None, 'tester2', 'tester6', None, 201),
('PUT', None, None, 'UUID', None, None,
None, 'tester4', 'tester6', None, 201),
# PUT object in own account: ok
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', None, 201),
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester', 201),
# PUT object in other users account: not allowed for role admin
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester', None, 403),
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester', None, 403),
# PUT object in other users account: not allowed for role _member_
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester3', 'tester3', None, 403),
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester3', None, 403),
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester3', None, 403),
# PUT object in other users account: allowed for role ResellerAdmin
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester6', 'tester6', None, 201),
('PUT', None, None, 'UUID', 'UUID', None,
@ -135,8 +150,11 @@ RBAC_PUT = [
RBAC_PUT_WITH_SERVICE_PREFIX = [
# PUT container in own account: ok
('PUT', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester5', 201),
# PUT container in other users account: not allowed for role service
('PUT', None, None, 'UUID', None, None,
None, 'tester', 'tester3', 'tester5', 403),
('PUT', None, None, 'UUID', None, None,
@ -147,8 +165,12 @@ RBAC_PUT_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('PUT', None, None, 'UUID', None, None,
None, 'tester4', 'tester5', None, 403),
# PUT object in own account: ok
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester5', 201),
# PUT object in other users account: not allowed for role service
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester3', 'tester5', 403),
('PUT', None, None, 'UUID', 'UUID', None,
@ -159,8 +181,14 @@ RBAC_PUT_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester5', None, 403),
# All following actions are using SERVICE prefix
# PUT container in own account: ok
('PUT', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester5', 201),
# PUT container fails if wrong user, or only one token sent
('PUT', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('PUT', None, None, 'UUID', None, None,
@ -169,8 +197,12 @@ RBAC_PUT_WITH_SERVICE_PREFIX = [
'SERVICE', 'tester', 'tester', 'tester', 403),
('PUT', None, None, 'UUID', None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# PUT object in own account: ok
('PUT', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester5', 201),
# PUT object fails if wrong user, or only one token sent
('PUT', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('PUT', None, None, 'UUID', 'UUID', None,
@ -185,40 +217,55 @@ RBAC_PUT_WITH_SERVICE_PREFIX = [
# A scenario of delete for account, container and object with
# several roles.
RBAC_DELETE = [
# DELETE container in own account: ok
('DELETE', None, None, 'UUID', None, None,
None, 'tester', 'tester', None, 204),
('DELETE', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester', 204),
# DELETE container in other users account: not allowed for role admin
('DELETE', None, None, 'UUID', None, None,
None, 'tester2', 'tester', None, 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester4', 'tester', None, 403),
# DELETE container in other users account: not allowed for role _member_
('DELETE', None, None, 'UUID', None, None,
None, 'tester3', 'tester3', None, 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester2', 'tester3', None, 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester4', 'tester3', None, 403),
# DELETE container in other users account: allowed for role ResellerAdmin
('DELETE', None, None, 'UUID', None, None,
None, 'tester6', 'tester6', None, 204),
('DELETE', None, None, 'UUID', None, None,
None, 'tester2', 'tester6', None, 204),
('DELETE', None, None, 'UUID', None, None,
None, 'tester4', 'tester6', None, 204),
# DELETE object in own account: ok
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', None, 204),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester', 204),
# DELETE object in other users account: not allowed for role admin
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester', None, 403),
# DELETE object in other users account: not allowed for role _member_
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester3', 'tester3', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester3', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester3', None, 403),
# DELETE object in other users account: allowed for role ResellerAdmin
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester6', 'tester6', None, 204),
('DELETE', None, None, 'UUID', 'UUID', None,
@ -229,8 +276,11 @@ RBAC_DELETE = [
RBAC_DELETE_WITH_SERVICE_PREFIX = [
# DELETE container in own account: ok
('DELETE', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester5', 204),
# DELETE container in other users account: not allowed for role service
('DELETE', None, None, 'UUID', None, None,
None, 'tester', 'tester3', 'tester5', 403),
('DELETE', None, None, 'UUID', None, None,
@ -241,8 +291,12 @@ RBAC_DELETE_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester4', 'tester5', None, 403),
# DELETE object in own account: ok
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester5', 204),
# DELETE object in other users account: not allowed for role service
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester3', 'tester5', 403),
('DELETE', None, None, 'UUID', 'UUID', None,
@ -253,8 +307,14 @@ RBAC_DELETE_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester5', None, 403),
# All following actions are using SERVICE prefix
# DELETE container in own account: ok
('DELETE', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester5', 204),
# DELETE container fails if wrong user, or only one token sent
('DELETE', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('DELETE', None, None, 'UUID', None, None,
@ -263,8 +323,12 @@ RBAC_DELETE_WITH_SERVICE_PREFIX = [
'SERVICE', 'tester', 'tester', 'tester', 403),
('DELETE', None, None, 'UUID', None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# DELETE object in own account: ok
('DELETE', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester5', 204),
# DELETE object fails if wrong user, or only one token sent
('DELETE', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('DELETE', None, None, 'UUID', 'UUID', None,
@ -279,60 +343,83 @@ RBAC_DELETE_WITH_SERVICE_PREFIX = [
# A scenario of get for account, container and object with
# several roles.
RBAC_GET = [
# GET own account: ok
('GET', None, None, None, None, None,
None, 'tester', 'tester', None, 200),
('GET', None, None, None, None, None,
None, 'tester', 'tester', 'tester', 200),
# GET other users account: not allowed for role admin
('GET', None, None, None, None, None,
None, 'tester2', 'tester', None, 403),
('GET', None, None, None, None, None,
None, 'tester4', 'tester', None, 403),
# GET other users account: not allowed for role _member_
('GET', None, None, None, None, None,
None, 'tester3', 'tester3', None, 403),
('GET', None, None, None, None, None,
None, 'tester2', 'tester3', None, 403),
('GET', None, None, None, None, None,
None, 'tester4', 'tester3', None, 403),
# GET other users account: allowed for role ResellerAdmin
('GET', None, None, None, None, None,
None, 'tester6', 'tester6', None, 200),
('GET', None, None, None, None, None,
None, 'tester2', 'tester6', None, 200),
('GET', None, None, None, None, None,
None, 'tester4', 'tester6', None, 200),
# GET container in own account: ok
('GET', None, None, 'UUID', None, None,
None, 'tester', 'tester', None, 200),
('GET', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester', 200),
# GET container in other users account: not allowed for role admin
('GET', None, None, 'UUID', None, None,
None, 'tester2', 'tester', None, 403),
('GET', None, None, 'UUID', None, None,
None, 'tester4', 'tester', None, 403),
# GET container in other users account: not allowed for role _member_
('GET', None, None, 'UUID', None, None,
None, 'tester3', 'tester3', None, 403),
('GET', None, None, 'UUID', None, None,
None, 'tester2', 'tester3', None, 403),
('GET', None, None, 'UUID', None, None,
None, 'tester4', 'tester3', None, 403),
# GET container in other users account: allowed for role ResellerAdmin
('GET', None, None, 'UUID', None, None,
None, 'tester6', 'tester6', None, 200),
('GET', None, None, 'UUID', None, None,
None, 'tester2', 'tester6', None, 200),
('GET', None, None, 'UUID', None, None,
None, 'tester4', 'tester6', None, 200),
# GET object in own account: ok
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', None, 200),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester', 200),
# GET object in other users account: not allowed for role admin
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester', None, 403),
# GET object in other users account: not allowed for role _member_
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester3', 'tester3', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester3', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester3', None, 403),
# GET object in other users account: allowed for role ResellerAdmin
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester6', 'tester6', None, 200),
('GET', None, None, 'UUID', 'UUID', None,
@ -343,8 +430,11 @@ RBAC_GET = [
RBAC_GET_WITH_SERVICE_PREFIX = [
# GET own account: ok
('GET', None, None, None, None, None,
None, 'tester', 'tester', 'tester5', 200),
# GET other account: not allowed for role service
('GET', None, None, None, None, None,
None, 'tester', 'tester3', 'tester5', 403),
('GET', None, None, None, None, None,
@ -355,8 +445,12 @@ RBAC_GET_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('GET', None, None, None, None, None,
None, 'tester4', 'tester5', None, 403),
# GET container in own account: ok
('GET', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester5', 200),
# GET container in other users account: not allowed for role service
('GET', None, None, 'UUID', None, None,
None, 'tester', 'tester3', 'tester5', 403),
('GET', None, None, 'UUID', None, None,
@ -367,8 +461,12 @@ RBAC_GET_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('GET', None, None, 'UUID', None, None,
None, 'tester4', 'tester5', None, 403),
# GET object in own account: ok
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester5', 200),
# GET object fails if wrong user, or only one token sent
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester3', 'tester5', 403),
('GET', None, None, 'UUID', 'UUID', None,
@ -379,8 +477,14 @@ RBAC_GET_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester5', None, 403),
# All following actions are using SERVICE prefix
# GET own account: ok
('GET', None, None, None, None, None,
'SERVICE', 'tester', 'tester', 'tester5', 200),
# GET other account: not allowed for role service
('GET', None, None, None, None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('GET', None, None, None, None, None,
@ -389,8 +493,12 @@ RBAC_GET_WITH_SERVICE_PREFIX = [
'SERVICE', 'tester', 'tester', 'tester', 403),
('GET', None, None, None, None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# GET container in own account: ok
('GET', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester5', 200),
# GET container fails if wrong user, or only one token sent
('GET', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('GET', None, None, 'UUID', None, None,
@ -399,8 +507,12 @@ RBAC_GET_WITH_SERVICE_PREFIX = [
'SERVICE', 'tester', 'tester', 'tester', 403),
('GET', None, None, 'UUID', None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# GET object in own account: ok
('GET', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester5', 200),
# GET object fails if wrong user, or only one token sent
('GET', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('GET', None, None, 'UUID', 'UUID', None,
@ -415,60 +527,84 @@ RBAC_GET_WITH_SERVICE_PREFIX = [
# A scenario of head for account, container and object with
# several roles.
RBAC_HEAD = [
# HEAD own account: ok
('HEAD', None, None, None, None, None,
None, 'tester', 'tester', None, 204),
('HEAD', None, None, None, None, None,
None, 'tester', 'tester', 'tester', 204),
# HEAD other users account: not allowed for role admin
('HEAD', None, None, None, None, None,
None, 'tester2', 'tester', None, 403),
('HEAD', None, None, None, None, None,
None, 'tester4', 'tester', None, 403),
# HEAD other users account: not allowed for role _member_
('HEAD', None, None, None, None, None,
None, 'tester3', 'tester3', None, 403),
('HEAD', None, None, None, None, None,
None, 'tester2', 'tester3', None, 403),
('HEAD', None, None, None, None, None,
None, 'tester4', 'tester3', None, 403),
# HEAD other users account: allowed for role ResellerAdmin
('HEAD', None, None, None, None, None,
None, 'tester6', 'tester6', None, 204),
('HEAD', None, None, None, None, None,
None, 'tester2', 'tester6', None, 204),
('HEAD', None, None, None, None, None,
None, 'tester4', 'tester6', None, 204),
# HEAD container in own account: ok
('HEAD', None, None, 'UUID', None, None,
None, 'tester', 'tester', None, 204),
('HEAD', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester', 204),
# HEAD container in other users account: not allowed for role admin
('HEAD', None, None, 'UUID', None, None,
None, 'tester2', 'tester', None, 403),
('HEAD', None, None, 'UUID', None, None,
None, 'tester4', 'tester', None, 403),
# HEAD container in other users account: not allowed for role _member_
('HEAD', None, None, 'UUID', None, None,
None, 'tester3', 'tester3', None, 403),
('HEAD', None, None, 'UUID', None, None,
None, 'tester2', 'tester3', None, 403),
('HEAD', None, None, 'UUID', None, None,
None, 'tester4', 'tester3', None, 403),
# HEAD container in other users account: allowed for role ResellerAdmin
('HEAD', None, None, 'UUID', None, None,
None, 'tester6', 'tester6', None, 204),
('HEAD', None, None, 'UUID', None, None,
None, 'tester2', 'tester6', None, 204),
('HEAD', None, None, 'UUID', None, None,
None, 'tester4', 'tester6', None, 204),
# HEAD object in own account: ok
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', None, 200),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester', 200),
# HEAD object in other users account: not allowed for role admin
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester', None, 403),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester', None, 403),
# HEAD object in other users account: not allowed for role _member_
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester3', 'tester3', None, 403),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester3', None, 403),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester3', None, 403),
# HEAD object in other users account: allowed for role ResellerAdmin
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester6', 'tester6', None, 200),
('HEAD', None, None, 'UUID', 'UUID', None,
@ -479,8 +615,11 @@ RBAC_HEAD = [
RBAC_HEAD_WITH_SERVICE_PREFIX = [
# HEAD own account: ok
('HEAD', None, None, None, None, None,
None, 'tester', 'tester', 'tester5', 204),
# HEAD other account: not allowed for role service
('HEAD', None, None, None, None, None,
None, 'tester', 'tester3', 'tester5', 403),
('HEAD', None, None, None, None, None,
@ -491,8 +630,12 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('HEAD', None, None, None, None, None,
None, 'tester4', 'tester5', None, 403),
# HEAD container in own account: ok
('HEAD', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester5', 204),
# HEAD container in other users account: not allowed for role service
('HEAD', None, None, 'UUID', None, None,
None, 'tester', 'tester3', 'tester5', 403),
('HEAD', None, None, 'UUID', None, None,
@ -503,8 +646,12 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('HEAD', None, None, 'UUID', None, None,
None, 'tester4', 'tester5', None, 403),
# HEAD object in own account: ok
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester5', 200),
# HEAD object fails if wrong user, or only one token sent
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester3', 'tester5', 403),
('HEAD', None, None, 'UUID', 'UUID', None,
@ -515,8 +662,14 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester5', None, 403),
# All following actions are using SERVICE prefix
# HEAD own account: ok
('HEAD', None, None, None, None, None,
'SERVICE', 'tester', 'tester', 'tester5', 204),
# HEAD other account: not allowed for role service
('HEAD', None, None, None, None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('HEAD', None, None, None, None, None,
@ -525,8 +678,12 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [
'SERVICE', 'tester', 'tester', 'tester', 403),
('HEAD', None, None, None, None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# HEAD container in own account: ok
('HEAD', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester5', 204),
# HEAD container in other users account: not allowed for role service
('HEAD', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('HEAD', None, None, 'UUID', None, None,
@ -535,8 +692,12 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [
'SERVICE', 'tester', 'tester', 'tester', 403),
('HEAD', None, None, 'UUID', None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# HEAD object in own account: ok
('HEAD', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester5', 200),
# HEAD object fails if wrong user, or only one token sent
('HEAD', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('HEAD', None, None, 'UUID', 'UUID', None,
@ -551,60 +712,83 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [
# A scenario of post for account, container and object with
# several roles.
RBAC_POST = [
# POST own account: ok
('POST', None, None, None, None, None,
None, 'tester', 'tester', None, 204),
('POST', None, None, None, None, None,
None, 'tester', 'tester', 'tester', 204),
# POST other users account: not allowed for role admin
('POST', None, None, None, None, None,
None, 'tester2', 'tester', None, 403),
('POST', None, None, None, None, None,
None, 'tester4', 'tester', None, 403),
# POST other users account: not allowed for role _member_
('POST', None, None, None, None, None,
None, 'tester3', 'tester3', None, 403),
('POST', None, None, None, None, None,
None, 'tester2', 'tester3', None, 403),
('POST', None, None, None, None, None,
None, 'tester4', 'tester3', None, 403),
# POST other users account: allowed for role ResellerAdmin
('POST', None, None, None, None, None,
None, 'tester6', 'tester6', None, 204),
('POST', None, None, None, None, None,
None, 'tester2', 'tester6', None, 204),
('POST', None, None, None, None, None,
None, 'tester4', 'tester6', None, 204),
# POST container in own account: ok
('POST', None, None, 'UUID', None, None,
None, 'tester', 'tester', None, 204),
('POST', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester', 204),
# POST container in other users account: not allowed for role admin
('POST', None, None, 'UUID', None, None,
None, 'tester2', 'tester', None, 403),
('POST', None, None, 'UUID', None, None,
None, 'tester4', 'tester', None, 403),
# POST container in other users account: not allowed for role _member_
('POST', None, None, 'UUID', None, None,
None, 'tester3', 'tester3', None, 403),
('POST', None, None, 'UUID', None, None,
None, 'tester2', 'tester3', None, 403),
('POST', None, None, 'UUID', None, None,
None, 'tester4', 'tester3', None, 403),
# POST container in other users account: allowed for role ResellerAdmin
('POST', None, None, 'UUID', None, None,
None, 'tester6', 'tester6', None, 204),
('POST', None, None, 'UUID', None, None,
None, 'tester2', 'tester6', None, 204),
('POST', None, None, 'UUID', None, None,
None, 'tester4', 'tester6', None, 204),
# POST object in own account: ok
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', None, 202),
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester', 202),
# POST object in other users account: not allowed for role admin
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester', None, 403),
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester', None, 403),
# POST object in other users account: not allowed for role _member_
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester3', 'tester3', None, 403),
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester3', None, 403),
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester3', None, 403),
# POST object in other users account: allowed for role ResellerAdmin
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester6', 'tester6', None, 202),
('POST', None, None, 'UUID', 'UUID', None,
@ -615,8 +799,11 @@ RBAC_POST = [
RBAC_POST_WITH_SERVICE_PREFIX = [
# POST own account: ok
('POST', None, None, None, None, None,
None, 'tester', 'tester', 'tester5', 204),
# POST own account: ok
('POST', None, None, None, None, None,
None, 'tester', 'tester3', 'tester5', 403),
('POST', None, None, None, None, None,
@ -627,8 +814,12 @@ RBAC_POST_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('POST', None, None, None, None, None,
None, 'tester4', 'tester5', None, 403),
# POST container in own account: ok
('POST', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester5', 204),
# POST container in other users account: not allowed for role service
('POST', None, None, 'UUID', None, None,
None, 'tester', 'tester3', 'tester5', 403),
('POST', None, None, 'UUID', None, None,
@ -639,8 +830,12 @@ RBAC_POST_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('POST', None, None, 'UUID', None, None,
None, 'tester4', 'tester5', None, 403),
# POST object in own account: ok
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester5', 202),
# POST object fails if wrong user, or only one token sent
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester3', 'tester5', 403),
('POST', None, None, 'UUID', 'UUID', None,
@ -651,8 +846,14 @@ RBAC_POST_WITH_SERVICE_PREFIX = [
None, 'tester2', 'tester5', None, 403),
('POST', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester5', None, 403),
# All following actions are using SERVICE prefix
# POST own account: ok
('POST', None, None, None, None, None,
'SERVICE', 'tester', 'tester', 'tester5', 204),
# POST other account: not allowed for role service
('POST', None, None, None, None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('POST', None, None, None, None, None,
@ -661,8 +862,12 @@ RBAC_POST_WITH_SERVICE_PREFIX = [
'SERVICE', 'tester', 'tester', 'tester', 403),
('POST', None, None, None, None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# POST container in own account: ok
('POST', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester5', 204),
# POST container in other users account: not allowed for role service
('POST', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('POST', None, None, 'UUID', None, None,
@ -671,8 +876,12 @@ RBAC_POST_WITH_SERVICE_PREFIX = [
'SERVICE', 'tester', 'tester', 'tester', 403),
('POST', None, None, 'UUID', None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# POST object in own account: ok
('POST', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester5', 202),
# POST object fails if wrong user, or only one token sent
('POST', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('POST', None, None, 'UUID', 'UUID', None,
@ -687,6 +896,8 @@ RBAC_POST_WITH_SERVICE_PREFIX = [
# A scenario of options for account, container and object with
# several roles.
RBAC_OPTIONS = [
# OPTIONS request is always ok
('OPTIONS', None, None, None, None, None,
None, 'tester', 'tester', None, 200),
('OPTIONS', None, None, None, None, None,
@ -786,11 +997,15 @@ RBAC_OPTIONS = [
None, 'UUID', None,
{"X-Container-Meta-Access-Control-Allow-Origin": "*"},
None, 'tester', 'tester', None, 200),
# Not OK for container: wrong origin
('OPTIONS',
{"Origin": "http://localhost", "Access-Control-Request-Method": "GET"},
None, 'UUID', None,
{"X-Container-Meta-Access-Control-Allow-Origin": "http://invalid.com"},
None, 'tester', 'tester', None, 401),
# Not OK for object: missing X-Container-Meta-Access-Control-Allow-Origin
('OPTIONS',
{"Origin": "http://localhost", "Access-Control-Request-Method": "GET"},
None, 'UUID', 'UUID', None, None, 'tester', 'tester', None, 401),
@ -799,6 +1014,8 @@ RBAC_OPTIONS = [
None, 'UUID', 'UUID',
{"X-Container-Meta-Access-Control-Allow-Origin": "*"},
None, 'tester', None, None, 200),
# Not OK for object: wrong origin
('OPTIONS',
{"Origin": "http://localhost", "Access-Control-Request-Method": "GET"},
None, 'UUID', 'UUID',
@ -808,6 +1025,8 @@ RBAC_OPTIONS = [
RBAC_OPTIONS_WITH_SERVICE_PREFIX = [
# OPTIONS request is always ok
('OPTIONS', None, None, None, None, None,
None, 'tester', 'tester', 'tester5', 200),
('OPTIONS', None, None, None, None, None,

View File

@ -167,11 +167,28 @@ class TestObject(unittest2.TestCase):
'Content-Length': '0',
'X-Timestamp': '-1'})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', '%s/%s/%s' % (parsed.path, self.container,
'too_small_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0'})
return check_response(conn)
ts_before = time.time()
resp = retry(put)
body = resp.read()
self.assertEqual(resp.status, 400)
self.assertIn(
'X-Timestamp should be a UNIX timestamp float value', body)
ts_after = time.time()
if resp.status == 400:
# shunt_inbound_x_timestamp must be false
self.assertIn(
'X-Timestamp should be a UNIX timestamp float value', body)
else:
self.assertEqual(resp.status, 201)
self.assertEqual(body, '')
resp = retry(head)
resp.read()
self.assertGreater(float(resp.headers['x-timestamp']), ts_before)
self.assertLess(float(resp.headers['x-timestamp']), ts_after)
def test_too_big_x_timestamp(self):
def put(url, token, parsed, conn):
@ -181,11 +198,28 @@ class TestObject(unittest2.TestCase):
'Content-Length': '0',
'X-Timestamp': '99999999999.9999999999'})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', '%s/%s/%s' % (parsed.path, self.container,
'too_big_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0'})
return check_response(conn)
ts_before = time.time()
resp = retry(put)
body = resp.read()
self.assertEqual(resp.status, 400)
self.assertIn(
'X-Timestamp should be a UNIX timestamp float value', body)
ts_after = time.time()
if resp.status == 400:
# shunt_inbound_x_timestamp must be false
self.assertIn(
'X-Timestamp should be a UNIX timestamp float value', body)
else:
self.assertEqual(resp.status, 201)
self.assertEqual(body, '')
resp = retry(head)
resp.read()
self.assertGreater(float(resp.headers['x-timestamp']), ts_before)
self.assertLess(float(resp.headers['x-timestamp']), ts_after)
def test_x_delete_after(self):
def put(url, token, parsed, conn):

View File

@ -90,6 +90,14 @@ class Base(unittest2.TestCase):
'Status returned: %d Expected: %s' %
(self.env.conn.response.status, status_or_statuses))
def assert_header(self, header_name, expected_value):
try:
actual_value = self.env.conn.response.getheader(header_name)
except KeyError:
self.fail(
'Expected header name %r not found in response.' % header_name)
self.assertEqual(expected_value, actual_value)
class Base2(object):
def setUp(self):
@ -1640,32 +1648,35 @@ class TestFile(Base):
self.assert_status(416)
else:
self.assertEqual(file_item.read(hdrs=hdrs), data[-i:])
self.assert_header('etag', file_item.md5)
self.assert_header('accept-ranges', 'bytes')
range_string = 'bytes=%d-' % (i)
hdrs = {'Range': range_string}
self.assertTrue(
file_item.read(hdrs=hdrs) == data[i - file_length:],
self.assertEqual(
file_item.read(hdrs=hdrs), data[i - file_length:],
range_string)
range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000)
hdrs = {'Range': range_string}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
self.assert_header('etag', file_item.md5)
self.assert_header('accept-ranges', 'bytes')
range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)
hdrs = {'Range': range_string}
self.assertTrue(
file_item.read(hdrs=hdrs) == data[-1000:], range_string)
self.assertEqual(file_item.read(hdrs=hdrs), data[-1000:], range_string)
hdrs = {'Range': '0-4'}
self.assertTrue(file_item.read(hdrs=hdrs) == data, range_string)
self.assertEqual(file_item.read(hdrs=hdrs), data, '0-4')
# RFC 2616 14.35.1
# "If the entity is shorter than the specified suffix-length, the
# entire entity-body is used."
range_string = 'bytes=-%d' % (file_length + 10)
hdrs = {'Range': range_string}
self.assertTrue(file_item.read(hdrs=hdrs) == data, range_string)
self.assertEqual(file_item.read(hdrs=hdrs), data, range_string)
def testMultiRangeGets(self):
file_length = 10000
@ -2536,6 +2547,7 @@ class TestFileComparison(Base):
hdrs = {'If-Match': 'bogus'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_header('etag', file_item.md5)
def testIfMatchMultipleEtags(self):
for file_item in self.env.files:
@ -2545,6 +2557,7 @@ class TestFileComparison(Base):
hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_header('etag', file_item.md5)
def testIfNoneMatch(self):
for file_item in self.env.files:
@ -2554,6 +2567,8 @@ class TestFileComparison(Base):
hdrs = {'If-None-Match': file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
self.assert_header('etag', file_item.md5)
self.assert_header('accept-ranges', 'bytes')
def testIfNoneMatchMultipleEtags(self):
for file_item in self.env.files:
@ -2564,6 +2579,8 @@ class TestFileComparison(Base):
'"bogus1", "bogus2", "%s"' % file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
self.assert_header('etag', file_item.md5)
self.assert_header('accept-ranges', 'bytes')
def testIfModifiedSince(self):
for file_item in self.env.files:
@ -2574,8 +2591,12 @@ class TestFileComparison(Base):
hdrs = {'If-Modified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
self.assert_header('etag', file_item.md5)
self.assert_header('accept-ranges', 'bytes')
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(304)
self.assert_header('etag', file_item.md5)
self.assert_header('accept-ranges', 'bytes')
def testIfUnmodifiedSince(self):
for file_item in self.env.files:
@ -2586,8 +2607,10 @@ class TestFileComparison(Base):
hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_header('etag', file_item.md5)
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(412)
self.assert_header('etag', file_item.md5)
def testIfMatchAndUnmodified(self):
for file_item in self.env.files:
@ -2599,33 +2622,38 @@ class TestFileComparison(Base):
'If-Unmodified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_header('etag', file_item.md5)
hdrs = {'If-Match': file_item.md5,
'If-Unmodified-Since': self.env.time_old_f3}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_header('etag', file_item.md5)
def testLastModified(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file = self.env.container.file(file_name)
file.content_type = content_type
resp = file.write_random_return_resp(self.env.file_size)
file_item = self.env.container.file(file_name)
file_item.content_type = content_type
resp = file_item.write_random_return_resp(self.env.file_size)
put_last_modified = resp.getheader('last-modified')
etag = file_item.md5
file = self.env.container.file(file_name)
info = file.info()
file_item = self.env.container.file(file_name)
info = file_item.info()
self.assertIn('last_modified', info)
last_modified = info['last_modified']
self.assertEqual(put_last_modified, info['last_modified'])
hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file.read, hdrs=hdrs)
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
self.assert_header('etag', etag)
self.assert_header('accept-ranges', 'bytes')
hdrs = {'If-Unmodified-Since': last_modified}
self.assertTrue(file.read(hdrs=hdrs))
self.assertTrue(file_item.read(hdrs=hdrs))
class TestFileComparisonUTF8(Base2, TestFileComparison):
@ -3214,6 +3242,39 @@ class TestSlo(Base):
self.assertEqual(value[1]['name'],
'/%s/seg_b' % self.env.container.name.decode("utf-8"))
def test_slo_get_raw_the_manifest_with_details_from_server(self):
manifest = self.env.container.file("manifest-db")
got_body = manifest.read(parms={'multipart-manifest': 'get',
'format': 'raw'})
self.assertEqual('application/json; charset=utf-8',
manifest.content_type)
try:
value = json.loads(got_body)
except ValueError:
msg = "GET with multipart-manifest=get&format=raw got invalid json"
self.fail(msg)
self.assertEqual(
set(value[0].keys()), set(('size_bytes', 'etag', 'path')))
self.assertEqual(len(value), 2)
self.assertEqual(value[0]['size_bytes'], 1024 * 1024)
self.assertEqual(value[0]['etag'],
hashlib.md5('d' * 1024 * 1024).hexdigest())
self.assertEqual(value[0]['path'],
'/%s/seg_d' % self.env.container.name.decode("utf-8"))
self.assertEqual(value[1]['size_bytes'], 1024 * 1024)
self.assertEqual(value[1]['etag'],
hashlib.md5('b' * 1024 * 1024).hexdigest())
self.assertEqual(value[1]['path'],
'/%s/seg_b' % self.env.container.name.decode("utf-8"))
file_item = self.env.container.file("manifest-from-get-raw")
file_item.write(got_body, parms={'multipart-manifest': 'put'})
file_contents = file_item.read()
self.assertEqual(2 * 1024 * 1024, len(file_contents))
def test_slo_head_the_manifest(self):
manifest = self.env.container.file("manifest-abcde")
got_info = manifest.info(parms={'multipart-manifest': 'get'})

View File

@ -266,6 +266,26 @@ class TestContainerSync(ReplProbeTest):
% item) for item in mismatched_headers])
self.fail(msg)
def test_sync_newer_remote(self):
source_container, dest_container = self._setup_synced_containers()
# upload to source
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
'old-source-body')
# upload to dest with same name
client.put_object(self.url, self.token, dest_container, object_name,
'new-test-body')
# cycle container-sync
Manager(['container-sync']).once()
# verify that the remote object did not change
resp_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'new-test-body')
if __name__ == "__main__":
unittest.main()

View File

@ -6,6 +6,7 @@ auth_ssl = no
auth_prefix = /auth/
## sample config for Swift with Keystone v2 API
# For keystone v2 change auth_version to 2 and auth_prefix to /v2.0/
# And "allow_account_management" should not be set to "true"
#auth_version = 3
#auth_host = localhost
#auth_port = 5000

View File

@ -34,7 +34,8 @@ from tempfile import mkdtemp
from shutil import rmtree
from swift.common.utils import Timestamp, NOTICE
from test import get_config
from swift.common import swob, utils
from swift.common import utils
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.ring import Ring, RingData
from hashlib import md5
import logging.handlers
@ -901,7 +902,7 @@ def fake_http_connect(*code_iter, **kwargs):
else:
etag = '"68b329da9893e34099c7d8ad5cb9c940"'
headers = swob.HeaderKeyDict({
headers = HeaderKeyDict({
'content-length': len(self.body),
'content-type': 'x-application/test',
'x-timestamp': self.timestamp,
@ -960,7 +961,7 @@ def fake_http_connect(*code_iter, **kwargs):
eventlet.sleep(value)
def getheader(self, name, default=None):
return swob.HeaderKeyDict(self.getheaders()).get(name, default)
return HeaderKeyDict(self.getheaders()).get(name, default)
def close(self):
pass

View File

@ -100,15 +100,23 @@ class FakeRing(object):
self.nodes = [{'id': '1',
'ip': '10.10.10.1',
'port': 6002,
'device': None},
'device': 'sda1'},
{'id': '2',
'ip': '10.10.10.2',
'port': 6002,
'device': None},
'device': 'sda1'},
{'id': '3',
'ip': '10.10.10.3',
'port': 6002,
'device': None},
{'id': '4',
'ip': '10.10.10.1',
'port': 6002,
'device': 'sda2'},
{'id': '5',
'ip': '10.10.10.1',
'port': 6002,
'device': 'sda3'},
]
def get_nodes(self, *args, **kwargs):
@ -124,6 +132,12 @@ acc_nodes = [{'device': 'sda1',
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''}]
@ -134,6 +148,12 @@ cont_nodes = [{'device': 'sda1',
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''}]
@ -184,11 +204,11 @@ class TestReaper(unittest.TestCase):
if self.reap_obj_fail:
raise Exception
def prepare_data_dir(self, ts=False):
def prepare_data_dir(self, ts=False, device='sda1'):
devices_path = tempfile.mkdtemp()
# will be deleted by teardown
self.to_delete.append(devices_path)
path = os.path.join(devices_path, 'sda1', DATADIR)
path = os.path.join(devices_path, device, DATADIR)
os.makedirs(path)
path = os.path.join(path, '100',
'a86', 'a8c682d2472e1720f2d81ff8993aba6')
@ -436,7 +456,7 @@ class TestReaper(unittest.TestCase):
self.get_fail = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 2
self.max_delete_fail = 4
with patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container), \
patch('swift.account.reaper.direct_delete_container',
@ -446,7 +466,7 @@ class TestReaper(unittest.TestCase):
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 2)
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 4)
self.assertEqual(r.stats_containers_possibly_remaining, 1)
def test_reap_container_full_fail(self):
@ -454,7 +474,7 @@ class TestReaper(unittest.TestCase):
self.get_fail = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 3
self.max_delete_fail = 5
with patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container), \
patch('swift.account.reaper.direct_delete_container',
@ -464,7 +484,7 @@ class TestReaper(unittest.TestCase):
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 3)
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 5)
self.assertEqual(r.stats_containers_remaining, 1)
@patch('swift.account.reaper.Ring',
@ -518,7 +538,7 @@ class TestReaper(unittest.TestCase):
container_shard=container_shard))
self.assertEqual(self.called_amount, 4)
info_lines = r.logger.get_lines_for_level('info')
self.assertEqual(len(info_lines), 6)
self.assertEqual(len(info_lines), 10)
for start_line, stat_line in zip(*[iter(info_lines)] * 2):
self.assertEqual(start_line, 'Beginning pass on account a')
self.assertTrue(stat_line.find('1 containers deleted'))
@ -604,6 +624,42 @@ class TestReaper(unittest.TestCase):
# 10.10.10.2 is second node from ring
self.assertEqual(container_shard_used[0], 1)
def test_reap_device_with_sharding_and_various_devices(self):
devices = self.prepare_data_dir(device='sda2')
conf = {'devices': devices}
r = self.init_reaper(conf)
container_shard_used = [-1]
def fake_reap_account(*args, **kwargs):
container_shard_used[0] = kwargs.get('container_shard')
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
fake_reap_account):
r.reap_device('sda2')
# the node with device sda2 is the fourth node (index 3) in the ring
self.assertEqual(container_shard_used[0], 3)
devices = self.prepare_data_dir(device='sda3')
conf = {'devices': devices}
r = self.init_reaper(conf)
container_shard_used = [-1]
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
fake_reap_account):
r.reap_device('sda3')
# the node with device sda3 is the fifth node (index 4) in the ring
self.assertEqual(container_shard_used[0], 4)
def test_reap_account_with_sharding(self):
devices = self.prepare_data_dir()
self.called_amount = 0
@ -632,20 +688,31 @@ class TestReaper(unittest.TestCase):
fake_list_containers_iter), \
patch('swift.account.reaper.AccountReaper.reap_container',
fake_reap_container):
fake_broker = FakeAccountBroker(['c', 'd', 'e'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 0)
self.assertEqual(container_reaped[0], 1)
fake_broker = FakeAccountBroker(['c', 'd', 'e'])
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 0)
self.assertEqual(container_reaped[0], 0)
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
container_reaped[0] = 0
r.reap_account(fake_broker, 10, fake_ring.nodes, 1)
self.assertEqual(container_reaped[0], 2)
self.assertEqual(container_reaped[0], 1)
container_reaped[0] = 0
fake_broker = FakeAccountBroker(['c', 'd', 'e'])
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 2)
self.assertEqual(container_reaped[0], 0)
container_reaped[0] = 0
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 3)
self.assertEqual(container_reaped[0], 3)
container_reaped[0] = 0
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 4)
self.assertEqual(container_reaped[0], 1)
def test_run_once(self):
def prepare_data_dir():
devices_path = tempfile.mkdtemp()

View File

@ -20,7 +20,7 @@ import mock
from swift.account import utils, backend
from swift.common.storage_policy import POLICIES
from swift.common.utils import Timestamp
from swift.common.swob import HeaderKeyDict
from swift.common.header_key_dict import HeaderKeyDict
from test.unit import patch_policies

View File

@ -16,12 +16,12 @@
import json
import mock
import os
import random
import re
import string
import tempfile
import time
import unittest
import shutil
import sys
from eventlet.green import urllib2, socket
from six import StringIO
@ -30,6 +30,9 @@ from six.moves import urllib
from swift.cli import recon
from swift.common import utils
from swift.common.ring import builder
from swift.common.ring import utils as ring_utils
from swift.common.storage_policy import StoragePolicy, POLICIES
from test.unit import patch_policies
class TestHelpers(unittest.TestCase):
@ -135,22 +138,50 @@ class TestScout(unittest.TestCase):
self.assertEqual(status, -1)
@patch_policies
class TestRecon(unittest.TestCase):
def setUp(self, *_args, **_kwargs):
self.recon_instance = recon.SwiftRecon()
self.swift_dir = tempfile.gettempdir()
self.ring_name = "test_object_%s" % (
''.join(random.choice(string.digits) for x in range(6)))
self.tmpfile_name = "%s/%s.ring.gz" % (self.swift_dir, self.ring_name)
self.swift_dir = tempfile.mkdtemp()
self.ring_name = POLICIES.legacy.ring_name
self.tmpfile_name = os.path.join(
self.swift_dir, self.ring_name + '.ring.gz')
self.ring_name2 = POLICIES[1].ring_name
self.tmpfile_name2 = os.path.join(
self.swift_dir, self.ring_name2 + '.ring.gz')
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
def tearDown(self, *_args, **_kwargs):
try:
os.remove(self.tmpfile_name)
except OSError:
pass
shutil.rmtree(self.swift_dir, ignore_errors=True)
def _make_object_rings(self):
ringbuilder = builder.RingBuilder(2, 3, 1)
devs = [
'r0z0-127.0.0.1:10000/sda1',
'r0z1-127.0.0.1:10001/sda1',
'r1z0-127.0.0.1:10002/sda1',
'r1z1-127.0.0.1:10003/sda1',
]
for raw_dev_str in devs:
dev = ring_utils.parse_add_value(raw_dev_str)
dev['weight'] = 1.0
ringbuilder.add_dev(dev)
ringbuilder.rebalance()
ringbuilder.get_ring().save(self.tmpfile_name)
ringbuilder = builder.RingBuilder(2, 2, 1)
devs = [
'r0z0-127.0.0.1:10000/sda1',
'r0z1-127.0.0.2:10004/sda1',
]
for raw_dev_str in devs:
dev = ring_utils.parse_add_value(raw_dev_str)
dev['weight'] = 1.0
ringbuilder.add_dev(dev)
ringbuilder.rebalance()
ringbuilder.get_ring().save(self.tmpfile_name2)
def test_gen_stats(self):
stats = self.recon_instance._gen_stats((1, 4, 10, None), 'Sample')
@ -176,47 +207,56 @@ class TestRecon(unittest.TestCase):
self.assertEqual(timestamp2, "2013-12-17 10:00:00")
mock_gmtime.assert_called_with()
def test_get_devices(self):
ringbuilder = builder.RingBuilder(2, 3, 1)
ringbuilder.add_dev({'id': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000,
'device': 'sda1', 'region': 0})
ringbuilder.add_dev({'id': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001,
'device': 'sda1', 'region': 0})
ringbuilder.add_dev({'id': 2, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002,
'device': 'sda1', 'region': 1})
ringbuilder.add_dev({'id': 3, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003,
'device': 'sda1', 'region': 1})
ringbuilder.rebalance()
ringbuilder.get_ring().save(self.tmpfile_name)
def test_get_hosts(self):
self._make_object_rings()
ips = self.recon_instance.get_devices(
None, None, self.swift_dir, self.ring_name)
ips = self.recon_instance.get_hosts(
None, None, self.swift_dir, [self.ring_name])
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001),
('127.0.0.1', 10002), ('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_devices(
0, None, self.swift_dir, self.ring_name)
ips = self.recon_instance.get_hosts(
0, None, self.swift_dir, [self.ring_name])
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001)]), ips)
ips = self.recon_instance.get_devices(
1, None, self.swift_dir, self.ring_name)
ips = self.recon_instance.get_hosts(
1, None, self.swift_dir, [self.ring_name])
self.assertEqual(
set([('127.0.0.1', 10002), ('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_devices(
0, 0, self.swift_dir, self.ring_name)
ips = self.recon_instance.get_hosts(
0, 0, self.swift_dir, [self.ring_name])
self.assertEqual(set([('127.0.0.1', 10000)]), ips)
ips = self.recon_instance.get_devices(
1, 1, self.swift_dir, self.ring_name)
ips = self.recon_instance.get_hosts(
1, 1, self.swift_dir, [self.ring_name])
self.assertEqual(set([('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_hosts(
None, None, self.swift_dir, [self.ring_name, self.ring_name2])
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001),
('127.0.0.1', 10002), ('127.0.0.1', 10003),
('127.0.0.2', 10004)]), ips)
ips = self.recon_instance.get_hosts(
0, None, self.swift_dir, [self.ring_name, self.ring_name2])
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001),
('127.0.0.2', 10004)]), ips)
ips = self.recon_instance.get_hosts(
1, None, self.swift_dir, [self.ring_name, self.ring_name2])
self.assertEqual(
set([('127.0.0.1', 10002), ('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_hosts(
0, 1, self.swift_dir, [self.ring_name, self.ring_name2])
self.assertEqual(set([('127.0.0.1', 10001),
('127.0.0.2', 10004)]), ips)
def test_get_ringmd5(self):
for server_type in ('account', 'container', 'object', 'object-1'):
ring_name = '%s.ring.gz' % server_type
@ -343,6 +383,89 @@ class TestRecon(unittest.TestCase):
" Failed: %s%%, no_result: %s, reported: %s"
% expected)
def test_get_ring_names(self):
self.recon_instance.server_type = 'not-object'
self.assertEqual(self.recon_instance._get_ring_names(), ['not-object'])
self.recon_instance.server_type = 'object'
with patch_policies([StoragePolicy(0, 'zero', is_default=True)]):
self.assertEqual(self.recon_instance._get_ring_names(),
['object'])
with patch_policies([StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one')]):
self.assertEqual(self.recon_instance._get_ring_names(),
['object', 'object-1'])
self.assertEqual(self.recon_instance._get_ring_names('0'),
['object'])
self.assertEqual(self.recon_instance._get_ring_names('zero'),
['object'])
self.assertEqual(self.recon_instance._get_ring_names('1'),
['object-1'])
self.assertEqual(self.recon_instance._get_ring_names('one'),
['object-1'])
self.assertEqual(self.recon_instance._get_ring_names('3'), [])
self.assertEqual(self.recon_instance._get_ring_names('wrong'),
[])
def test_main_object_hosts_default_all_policies(self):
self._make_object_rings()
discovered_hosts = set()
def server_type_check(hosts):
for h in hosts:
discovered_hosts.add(h)
self.recon_instance.server_type_check = server_type_check
with mock.patch.object(sys, 'argv', [
"prog", "object", "--swiftdir=%s" % self.swift_dir,
"--validate-servers"]):
self.recon_instance.main()
expected = set([
('127.0.0.1', 10000),
('127.0.0.1', 10001),
('127.0.0.1', 10002),
('127.0.0.1', 10003),
('127.0.0.2', 10004),
])
self.assertEqual(expected, discovered_hosts)
def test_main_object_hosts_default_unu(self):
self._make_object_rings()
discovered_hosts = set()
def server_type_check(hosts):
for h in hosts:
discovered_hosts.add(h)
self.recon_instance.server_type_check = server_type_check
with mock.patch.object(sys, 'argv', [
"prog", "object", "--swiftdir=%s" % self.swift_dir,
"--validate-servers", '--policy=unu']):
self.recon_instance.main()
expected = set([
('127.0.0.1', 10000),
('127.0.0.2', 10004),
])
self.assertEqual(expected, discovered_hosts)
def test_main_object_hosts_default_invalid(self):
self._make_object_rings()
stdout = StringIO()
with mock.patch.object(sys, 'argv', [
"prog", "object", "--swiftdir=%s" % self.swift_dir,
"--validate-servers", '--policy=invalid']),\
mock.patch('sys.stdout', stdout):
self.assertRaises(SystemExit, recon.main)
self.assertIn('Invalid Storage Policy', stdout.getvalue())
class TestReconCommands(unittest.TestCase):
def setUp(self):

File diff suppressed because it is too large Load Diff

View File

@ -19,6 +19,7 @@ from collections import defaultdict
from hashlib import md5
from swift.common import swob
from swift.common.swob import HTTPException
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import split_path
from test.unit import FakeLogger, FakeRing
@ -85,18 +86,18 @@ class FakeSwift(object):
try:
resp_class, raw_headers, body = self._find_response(method, path)
headers = swob.HeaderKeyDict(raw_headers)
headers = HeaderKeyDict(raw_headers)
except KeyError:
if (env.get('QUERY_STRING')
and (method, env['PATH_INFO']) in self._responses):
resp_class, raw_headers, body = self._find_response(
method, env['PATH_INFO'])
headers = swob.HeaderKeyDict(raw_headers)
headers = HeaderKeyDict(raw_headers)
elif method == 'HEAD' and ('GET', path) in self._responses:
resp_class, raw_headers, body = self._find_response(
'GET', path)
body = None
headers = swob.HeaderKeyDict(raw_headers)
headers = HeaderKeyDict(raw_headers)
elif method == 'GET' and obj and path in self.uploaded:
resp_class = swob.HTTPOk
headers, body = self.uploaded[path]

View File

@ -29,9 +29,10 @@ from eventlet import sleep
from mock import patch, call
from test.unit.common.middleware.helpers import FakeSwift
from swift.common import utils, constraints
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.middleware import bulk
from swift.common.swob import Request, Response, HTTPException, \
HTTPNoContent, HTTPCreated, HeaderKeyDict
HTTPNoContent, HTTPCreated
from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED

View File

@ -42,7 +42,10 @@ class FakeApp(object):
body = 'Response to Authorized Request'
else:
body = 'Pass-Through Response'
start_response('200 OK', [('Content-Length', str(len(body)))])
headers = [('Content-Length', str(len(body)))]
if 'HTTP_X_TIMESTAMP' in env:
headers.append(('X-Timestamp', env['HTTP_X_TIMESTAMP']))
start_response('200 OK', headers)
return body
@ -214,18 +217,20 @@ cluster_dfw1 = http://dfw1.host/v1/
req.environ.get('swift.log_info'))
def test_valid_sig(self):
ts = '1455221706.726999_0123456789abcdef'
sig = self.sync.realms_conf.get_sig(
'GET', '/v1/a/c', '0', 'nonce',
'GET', '/v1/a/c', ts, 'nonce',
self.sync.realms_conf.key('US'), 'abc')
req = swob.Request.blank(
'/v1/a/c', headers={'x-container-sync-auth': 'US nonce ' + sig})
req = swob.Request.blank('/v1/a/c', headers={
'x-container-sync-auth': 'US nonce ' + sig,
'x-backend-inbound-x-timestamp': ts})
req.environ[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'}
resp = req.get_response(self.sync)
self.assertEqual(resp.status, '200 OK')
self.assertEqual(resp.body, 'Response to Authorized Request')
self.assertTrue(
'cs:valid' in req.environ.get('swift.log_info'),
req.environ.get('swift.log_info'))
self.assertIn('cs:valid', req.environ.get('swift.log_info'))
self.assertIn('X-Timestamp', resp.headers)
self.assertEqual(ts, resp.headers['X-Timestamp'])
def test_valid_sig2(self):
sig = self.sync.realms_conf.get_sig(

View File

@ -24,6 +24,7 @@ import time
import unittest
from swift.common import exceptions, swob
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.middleware import dlo
from swift.common.utils import closing_if_possible
from test.unit.common.middleware.helpers import FakeSwift
@ -248,7 +249,7 @@ class TestDloHeadManifest(DloTestCase):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], expected_etag)
self.assertEqual(headers["Content-Length"], "25")
@ -257,7 +258,7 @@ class TestDloHeadManifest(DloTestCase):
environ={'REQUEST_METHOD': 'HEAD'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
# etag is manifest's etag
self.assertEqual(headers["Etag"], "etag-manyseg")
@ -267,7 +268,7 @@ class TestDloHeadManifest(DloTestCase):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-no-segments',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], '"%s"' % md5hex(""))
self.assertEqual(headers["Content-Length"], "0")
@ -291,7 +292,7 @@ class TestDloGetManifest(DloTestCase):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], expected_etag)
self.assertEqual(headers["Content-Length"], "25")
self.assertEqual(body, 'aaaaabbbbbcccccdddddeeeee')
@ -336,7 +337,7 @@ class TestDloGetManifest(DloTestCase):
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'multipart-manifest=get'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], "manifest-etag")
self.assertEqual(body, "manifest-contents")
@ -354,7 +355,7 @@ class TestDloGetManifest(DloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=8-17'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "10")
self.assertEqual(body, "bbcccccddd")
@ -368,7 +369,7 @@ class TestDloGetManifest(DloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=10-19'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "10")
self.assertEqual(body, "cccccddddd")
@ -378,7 +379,7 @@ class TestDloGetManifest(DloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-0'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "1")
self.assertEqual(body, "a")
@ -388,7 +389,7 @@ class TestDloGetManifest(DloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=24-24'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "1")
self.assertEqual(body, "e")
@ -398,7 +399,7 @@ class TestDloGetManifest(DloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=18-30'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "7")
self.assertEqual(headers["Content-Range"], "bytes 18-24/25")
@ -417,7 +418,7 @@ class TestDloGetManifest(DloTestCase):
headers={'Range': 'bytes=3-12'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "10")
# The /15 here indicates that this is a 15-byte object. DLO can't tell
@ -448,7 +449,7 @@ class TestDloGetManifest(DloTestCase):
headers={'Range': 'bytes=10-22'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
# this requires multiple pages of container listing, so we can't send
# a Content-Length header
@ -460,7 +461,7 @@ class TestDloGetManifest(DloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-40'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "25")
self.assertEqual(body, "aaaaabbbbbcccccdddddeeeee")
@ -471,7 +472,7 @@ class TestDloGetManifest(DloTestCase):
headers={'Range': 'bytes=-5'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
self.assertEqual(headers.get("Content-Length"), None)
self.assertEqual(headers.get("Content-Range"), None)
@ -485,7 +486,7 @@ class TestDloGetManifest(DloTestCase):
headers={'Range': 'bytes=5-9,15-19'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
self.assertEqual(headers.get("Content-Length"), None)
self.assertEqual(headers.get("Content-Range"), None)
@ -500,7 +501,7 @@ class TestDloGetManifest(DloTestCase):
headers={'If-Match': manifest_etag})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '25')
@ -512,7 +513,7 @@ class TestDloGetManifest(DloTestCase):
headers={'If-Match': 'not it'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(headers['Content-Length'], '0')
@ -527,7 +528,7 @@ class TestDloGetManifest(DloTestCase):
headers={'If-None-Match': manifest_etag})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '304 Not Modified')
self.assertEqual(headers['Content-Length'], '0')
@ -539,7 +540,7 @@ class TestDloGetManifest(DloTestCase):
headers={'If-None-Match': 'not it'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '25')
@ -582,7 +583,7 @@ class TestDloGetManifest(DloTestCase):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertTrue(isinstance(exc, exceptions.SegmentError))
self.assertEqual(status, "200 OK")
@ -628,7 +629,7 @@ class TestDloGetManifest(DloTestCase):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertTrue(isinstance(exc, exceptions.SegmentError))
self.assertEqual(status, "200 OK")
@ -653,7 +654,7 @@ class TestDloGetManifest(DloTestCase):
req = swob.Request.blank('/v1/AUTH_test/mani/festo',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"],
'"' + hashlib.md5("abcdef").hexdigest() + '"')
@ -729,7 +730,7 @@ class TestDloGetManifest(DloTestCase):
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(headers.get('Content-Length'), '25') # sanity check
@ -762,7 +763,7 @@ class TestDloGetManifest(DloTestCase):
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(headers.get('Content-Length'), '25') # sanity check
@ -781,7 +782,7 @@ class TestDloGetManifest(DloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-14'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content') # sanity check
self.assertEqual(headers.get('Content-Length'), '15') # sanity check

View File

@ -77,6 +77,7 @@ class TestGatekeeper(unittest.TestCase):
object_transient_sysmeta_headers = {
'x-object-transient-sysmeta-': 'value',
'x-object-transient-sysmeta-foo': 'value'}
x_timestamp_headers = {'X-Timestamp': '1455952805.719739'}
forbidden_headers_out = dict(sysmeta_headers.items() +
x_backend_headers.items() +
@ -84,6 +85,7 @@ class TestGatekeeper(unittest.TestCase):
forbidden_headers_in = dict(sysmeta_headers.items() +
x_backend_headers.items() +
object_transient_sysmeta_headers.items())
shunted_headers_in = dict(x_timestamp_headers.items())
def _assertHeadersEqual(self, expected, actual):
for key in expected:
@ -112,20 +114,63 @@ class TestGatekeeper(unittest.TestCase):
def _test_reserved_header_removed_inbound(self, method):
headers = dict(self.forbidden_headers_in)
headers.update(self.allowed_headers)
headers.update(self.shunted_headers_in)
req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
fake_app = FakeApp()
app = self.get_app(fake_app, {})
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self._assertHeadersEqual(self.allowed_headers, fake_app.req.headers)
self._assertHeadersAbsent(self.forbidden_headers_in,
fake_app.req.headers)
expected_headers = dict(self.allowed_headers)
# shunt_inbound_x_timestamp should be enabled by default
expected_headers.update({'X-Backend-Inbound-' + k: v
for k, v in self.shunted_headers_in.items()})
self._assertHeadersEqual(expected_headers, fake_app.req.headers)
unexpected_headers = dict(self.forbidden_headers_in.items() +
self.shunted_headers_in.items())
self._assertHeadersAbsent(unexpected_headers, fake_app.req.headers)
def test_reserved_header_removed_inbound(self):
    """Run the inbound header-removal check for every supported verb."""
    for verb in self.methods:
        self._test_reserved_header_removed_inbound(verb)
def _test_reserved_header_shunted_inbound(self, method):
    """With shunt_inbound_x_timestamp explicitly enabled, inbound
    shunted headers must arrive at the backend renamed with the
    X-Backend-Inbound- prefix, while allowed headers pass untouched."""
    request_headers = dict(self.shunted_headers_in)
    request_headers.update(self.allowed_headers)
    req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method},
                        headers=request_headers)
    backend = FakeApp()
    gatekeeper = self.get_app(backend, {},
                              shunt_inbound_x_timestamp='true')
    resp = req.get_response(gatekeeper)
    self.assertEqual('200 OK', resp.status)
    # the shunted headers should show up under their prefixed names...
    want = dict(self.allowed_headers)
    for name, value in self.shunted_headers_in.items():
        want['X-Backend-Inbound-' + name] = value
    self._assertHeadersEqual(want, backend.req.headers)
    # ...and must not reach the backend under their original names
    self._assertHeadersAbsent(self.shunted_headers_in,
                              backend.req.headers)
def test_reserved_header_shunted_inbound(self):
    """Run the shunt-enabled check for every supported verb."""
    for verb in self.methods:
        self._test_reserved_header_shunted_inbound(verb)
def _test_reserved_header_shunt_bypassed_inbound(self, method):
    """With shunt_inbound_x_timestamp disabled, inbound shunted
    headers must pass straight through to the backend unchanged."""
    request_headers = dict(self.shunted_headers_in)
    request_headers.update(self.allowed_headers)
    req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method},
                        headers=request_headers)
    backend = FakeApp()
    gatekeeper = self.get_app(backend, {},
                              shunt_inbound_x_timestamp='false')
    resp = req.get_response(gatekeeper)
    self.assertEqual('200 OK', resp.status)
    # with the shunt bypassed, every header reaches the backend as-is
    want = dict(self.allowed_headers)
    want.update(self.shunted_headers_in)
    self._assertHeadersEqual(want, backend.req.headers)
def test_reserved_header_shunt_bypassed_inbound(self):
    """Run the shunt-disabled check for every supported verb."""
    for verb in self.methods:
        self._test_reserved_header_shunt_bypassed_inbound(verb)
def _test_reserved_header_removed_outbound(self, method):
headers = dict(self.forbidden_headers_out)
headers.update(self.allowed_headers)

View File

@ -647,21 +647,16 @@ class TestAuthorize(BaseTestAuthorize):
req = self._check_authenticate(identity=identity)
self.assertTrue(req.environ.get('swift_owner'))
def _check_authorize_for_tenant_owner_match(self, exception=None):
def test_authorize_fails_same_user_and_tenant(self):
# Historically the is_admin option allowed access when user_name
# matched tenant_name, but it is no longer supported. This test is a
# sanity check that the option no longer works.
self.test_auth.is_admin = True
identity = self._get_identity(user_name='same_name',
tenant_name='same_name')
req = self._check_authenticate(identity=identity, exception=exception)
expected = bool(exception is None)
self.assertEqual(bool(req.environ.get('swift_owner')), expected)
def test_authorize_succeeds_as_owner_for_tenant_owner_match(self):
self.test_auth.is_admin = True
self._check_authorize_for_tenant_owner_match()
def test_authorize_fails_as_owner_for_tenant_owner_match(self):
self.test_auth.is_admin = False
self._check_authorize_for_tenant_owner_match(
exception=HTTP_FORBIDDEN)
req = self._check_authenticate(identity=identity,
exception=HTTP_FORBIDDEN)
self.assertFalse(bool(req.environ.get('swift_owner')))
def test_authorize_succeeds_for_container_sync(self):
env = {'swift_sync_key': 'foo', 'REMOTE_ADDR': '127.0.0.1'}

View File

@ -98,18 +98,25 @@ class OpenAndReadTester(object):
class MockOS(object):
def __init__(self, ls_out=None, im_out=False, statvfs_out=None):
def __init__(self, ls_out=None, isdir_out=None, ismount_out=False,
statvfs_out=None):
self.ls_output = ls_out
self.ismount_output = im_out
self.isdir_output = isdir_out
self.ismount_output = ismount_out
self.statvfs_output = statvfs_out
self.listdir_calls = []
self.statvfs_calls = []
self.isdir_calls = []
self.ismount_calls = []
self.statvfs_calls = []
def fake_listdir(self, *args, **kwargs):
self.listdir_calls.append((args, kwargs))
return self.ls_output
def fake_isdir(self, *args, **kwargs):
self.isdir_calls.append((args, kwargs))
return self.isdir_output
def fake_ismount(self, *args, **kwargs):
self.ismount_calls.append((args, kwargs))
if isinstance(self.ismount_output, Exception):
@ -164,7 +171,7 @@ class FakeRecon(object):
def fake_unmounted(self):
return {'unmountedtest': "1"}
def fake_no_unmounted(self):
def fake_unmounted_empty(self):
return []
def fake_diskusage(self):
@ -214,9 +221,11 @@ class TestReconSuccess(TestCase):
self.mockos = MockOS()
self.fakecache = FakeFromCache()
self.real_listdir = os.listdir
self.real_isdir = os.path.isdir
self.real_ismount = utils.ismount
self.real_statvfs = os.statvfs
os.listdir = self.mockos.fake_listdir
os.path.isdir = self.mockos.fake_isdir
utils.ismount = self.mockos.fake_ismount
os.statvfs = self.mockos.fake_statvfs
self.real_from_cache = self.app._from_recon_cache
@ -241,6 +250,7 @@ class TestReconSuccess(TestCase):
def tearDown(self):
os.listdir = self.real_listdir
os.path.isdir = self.real_isdir
utils.ismount = self.real_ismount
os.statvfs = self.real_statvfs
del self.mockos
@ -931,39 +941,63 @@ class TestReconSuccess(TestCase):
unmounted_resp = [{'device': 'fakeone', 'mounted': False},
{'device': 'faketwo', 'mounted': False}]
self.mockos.ls_output = ['fakeone', 'faketwo']
self.mockos.isdir_output = True
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/fakeone',), {}),
(('/srv/node/faketwo',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_everything_normal(self):
def test_get_unmounted_excludes_files(self):
unmounted_resp = []
self.mockos.ls_output = ['somerando.log']
self.mockos.isdir_output = False
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/somerando.log',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_all_mounted(self):
    """get_unmounted reports nothing when every device dir is mounted."""
    self.mockos.ls_output = ['fakeone', 'faketwo']
    self.mockos.isdir_output = True
    self.mockos.ismount_output = True
    result = self.app.get_unmounted()
    # the device root is listed once, and each entry is isdir-checked
    self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
    self.assertEqual(self.mockos.isdir_calls,
                     [(('/srv/node/fakeone',), {}),
                      (('/srv/node/faketwo',), {})])
    self.assertEqual(result, [])
def test_get_unmounted_checkmount_fail(self):
    """An OSError raised by ismount is reported as the device's
    'mounted' value rather than propagating."""
    self.mockos.ls_output = ['fakeone']
    self.mockos.isdir_output = True
    self.mockos.ismount_output = OSError('brokendrive')
    result = self.app.get_unmounted()
    self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
    self.assertEqual(self.mockos.isdir_calls,
                     [(('/srv/node/fakeone',), {})])
    self.assertEqual(self.mockos.ismount_calls,
                     [(('/srv/node/fakeone',), {})])
    self.assertEqual(result,
                     [{'device': 'fakeone', 'mounted': 'brokendrive'}])
def test_no_get_unmounted(self):
def test_get_unmounted_no_mounts(self):
def fake_checkmount_true(*args):
return True
unmounted_resp = []
self.mockos.ls_output = []
self.mockos.isdir_output = False
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls, [])
self.assertEqual(rv, unmounted_resp)
def test_get_diskusage(self):
@ -977,20 +1011,37 @@ class TestReconSuccess(TestCase):
du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696,
'mounted': True, 'used': 3890520064, 'size': 8041205760}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.isdir_output = True
self.mockos.statvfs_output = statvfs_content
self.mockos.ismount_output = True
rv = self.app.get_diskusage()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(self.mockos.statvfs_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(rv, du_resp)
def test_get_diskusage_excludes_files(self):
du_resp = []
self.mockos.ls_output = ['somerando.log']
self.mockos.isdir_output = False
rv = self.app.get_diskusage()
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/somerando.log',), {})])
self.assertEqual(self.mockos.statvfs_calls, [])
self.assertEqual(rv, du_resp)
def test_get_diskusage_checkmount_fail(self):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': 'brokendrive', 'used': '', 'size': ''}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.isdir_output = True
self.mockos.ismount_output = OSError('brokendrive')
rv = self.app.get_diskusage()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(self.mockos.ismount_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(rv, du_resp)
@ -1000,6 +1051,7 @@ class TestReconSuccess(TestCase):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': 'Input/Output Error', 'used': '', 'size': ''}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.isdir_output = True
rv = self.app.get_diskusage()
self.assertEqual(rv, du_resp)
@ -1256,9 +1308,9 @@ class TestReconMiddleware(unittest.TestCase):
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_unmounted_resp)
def test_recon_no_get_unmounted(self):
def test_recon_get_unmounted_empty(self):
get_unmounted_resp = '[]'
self.app.get_unmounted = self.frecon.fake_no_unmounted
self.app.get_unmounted = self.frecon.fake_unmounted_empty
req = Request.blank('/recon/unmounted',
environ={'REQUEST_METHOD': 'GET'})
resp = ''.join(self.app(req.environ, start_response))
@ -1340,7 +1392,7 @@ class TestReconMiddleware(unittest.TestCase):
os.listdir = fail_os_listdir
resp = self.real_app_get_device_info()
os.listdir = self.real_listdir
device_path = resp.keys()[0]
device_path = list(resp)[0]
self.assertIsNone(resp[device_path])
def test_get_swift_conf_md5(self):

View File

@ -24,6 +24,7 @@ from mock import patch
from hashlib import md5
from swift.common import swob, utils
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.middleware import slo
from swift.common.swob import Request, Response, HTTPException
from swift.common.utils import quote, closing_if_possible, close_if_possible
@ -1054,7 +1055,7 @@ class TestSloHeadManifest(SloTestCase):
'/v1/AUTH_test/headtest/man',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers.get('Etag', '').strip("'\""),
@ -1071,6 +1072,103 @@ class TestSloHeadManifest(SloTestCase):
self.assertEqual(status, '304 Not Modified')
class TestSloGetRawManifest(SloTestCase):
    # Tests for GET ?multipart-manifest=get&format=raw, which returns the
    # stored manifest translated back into the path/etag/size_bytes form a
    # client would use to PUT it.

    def setUp(self):
        super(TestSloGetRawManifest, self).setUp()

        # Manifest of two plain segments plus one sub-SLO entry; the
        # sub-SLO's content_type carries a swift_bytes= parameter.
        _bc_manifest_json = json.dumps(
            [{'name': '/gettest/b_10', 'hash': md5hex('b' * 10),
              'bytes': '10',
              'content_type': 'text/plain',
              'last_modified': '1970-01-01T00:00:00.000000'},
             {'name': '/gettest/c_15', 'hash': md5hex('c' * 15),
              'bytes': '15',
              'content_type': 'text/plain',
              'last_modified': '1970-01-01T00:00:00.000000'},
             {'name': '/gettest/d_10',
              'hash': md5hex(md5hex("e" * 5) + md5hex("f" * 5)),
              'bytes': '10',
              'content_type': 'application/json;swift_bytes=10',
              'sub_slo': True,
              'last_modified': '1970-01-01T00:00:00.000000'}])
        self.bc_etag = md5hex(_bc_manifest_json)
        self.app.register(
            'GET', '/v1/AUTH_test/gettest/manifest-bc',
            swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=35',
                          'X-Static-Large-Object': 'true',
                          'X-Object-Meta-Plant': 'Ficus',
                          'Etag': md5hex(_bc_manifest_json)},
            _bc_manifest_json)

        # Same two plain segments, but with explicit 'range' entries that
        # the raw output is expected to carry through.
        _bc_manifest_json_ranges = json.dumps(
            [{'name': '/gettest/b_10', 'hash': md5hex('b' * 10),
              'bytes': '10',
              'last_modified': '1970-01-01T00:00:00.000000',
              'content_type': 'text/plain', 'range': '1-99'},
             {'name': '/gettest/c_15', 'hash': md5hex('c' * 15),
              'bytes': '15',
              'last_modified': '1970-01-01T00:00:00.000000',
              'content_type': 'text/plain', 'range': '100-200'}])
        self.app.register(
            'GET', '/v1/AUTH_test/gettest/manifest-bc-r',
            swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=25',
                          'X-Static-Large-Object': 'true',
                          'X-Object-Meta-Plant': 'Ficus',
                          'Etag': md5hex(_bc_manifest_json_ranges)},
            _bc_manifest_json_ranges)

    def test_get_raw_manifest(self):
        # The raw response echoes the stored Etag and the SLO marker
        # header, and rewrites each entry to etag/size_bytes/path keys.
        req = Request.blank(
            '/v1/AUTH_test/gettest/manifest-bc'
            '?multipart-manifest=get&format=raw',
            environ={'REQUEST_METHOD': 'GET',
                     'HTTP_ACCEPT': 'application/json'})
        status, headers, body = self.call_slo(req)

        self.assertEqual(status, '200 OK')
        self.assertTrue(('Etag', self.bc_etag) in headers, headers)
        self.assertTrue(('X-Static-Large-Object', 'true') in headers,
                        headers)
        self.assertTrue(
            ('Content-Type', 'application/json; charset=utf-8') in headers,
            headers)
        try:
            resp_data = json.loads(body)
        except ValueError:
            self.fail("Invalid JSON in manifest GET: %r" % body)
        self.assertEqual(
            resp_data,
            [{'etag': md5hex('b' * 10), 'size_bytes': '10',
              'path': '/gettest/b_10'},
             {'etag': md5hex('c' * 15), 'size_bytes': '15',
              'path': '/gettest/c_15'},
             {'etag': md5hex(md5hex("e" * 5) + md5hex("f" * 5)),
              'size_bytes': '10',
              'path': '/gettest/d_10'}])

    def test_get_raw_manifest_passthrough_with_ranges(self):
        # 'range' entries stored in the manifest are preserved verbatim
        # in the raw output.
        req = Request.blank(
            '/v1/AUTH_test/gettest/manifest-bc-r'
            '?multipart-manifest=get&format=raw',
            environ={'REQUEST_METHOD': 'GET',
                     'HTTP_ACCEPT': 'application/json'})
        status, headers, body = self.call_slo(req)

        self.assertEqual(status, '200 OK')
        self.assertTrue(
            ('Content-Type', 'application/json; charset=utf-8') in headers,
            headers)
        try:
            resp_data = json.loads(body)
        except ValueError:
            self.fail("Invalid JSON in manifest GET: %r" % body)
        self.assertEqual(
            resp_data,
            [{'etag': md5hex('b' * 10), 'size_bytes': '10',
              'path': '/gettest/b_10', 'range': '1-99'},
             {'etag': md5hex('c' * 15), 'size_bytes': '15',
              'path': '/gettest/c_15', 'range': '100-200'}],
            body)
class TestSloGetManifest(SloTestCase):
def setUp(self):
super(TestSloGetManifest, self).setUp()
@ -1331,7 +1429,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/gettest/manifest-bc',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
manifest_etag = md5hex(md5hex("b" * 10) + md5hex("c" * 15))
self.assertEqual(status, '200 OK')
@ -1382,7 +1480,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/gettest/manifest-aabbccdd',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(body, (
@ -1469,7 +1567,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': self.manifest_abcd_etag})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '304 Not Modified')
self.assertEqual(headers['Content-Length'], '0')
@ -1481,7 +1579,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': "not-%s" % self.manifest_abcd_etag})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(
@ -1493,7 +1591,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': self.manifest_abcd_etag})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(
@ -1505,7 +1603,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': "not-%s" % self.manifest_abcd_etag})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(headers['Content-Length'], '0')
@ -1518,7 +1616,7 @@ class TestSloGetManifest(SloTestCase):
headers={'If-Match': self.manifest_abcd_etag,
'Range': 'bytes=3-6'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '4')
@ -1529,7 +1627,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '50')
@ -1543,7 +1641,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=3-17'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '15')
@ -1582,7 +1680,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-999999999'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(
@ -1619,7 +1717,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=100000-199999'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
count_e = sum(1 if x == 'e' else 0 for x in body)
@ -1656,7 +1754,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-999999999'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(
@ -1678,7 +1776,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=5-29'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '25')
@ -1706,7 +1804,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-0'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '1')
@ -1726,7 +1824,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=25-30'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '6')
self.assertEqual(body, 'cccccd')
@ -1747,7 +1845,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=45-55'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '5')
@ -1769,14 +1867,14 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-0,2-2'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '50')
self.assertEqual(
body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd')
def test_get_segment_with_non_ascii_name(self):
def test_get_segment_with_non_ascii_path(self):
segment_body = u"a møøse once bit my sister".encode("utf-8")
self.app.register(
'GET', u'/v1/AUTH_test/ünicode/öbject-segment'.encode('utf-8'),
@ -1799,7 +1897,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/ünicode/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(body, segment_body)
@ -1808,7 +1906,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/gettest/manifest-abcd-ranges',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '32')
@ -1850,7 +1948,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/gettest/manifest-abcd-subranges',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '17')
@ -1899,7 +1997,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=7-26'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '20')
@ -1937,7 +2035,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=4-12'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '9')
@ -1988,7 +2086,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-999999999'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '32')
@ -2025,7 +2123,7 @@ class TestSloGetManifest(SloTestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-0,2-2'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Type'], 'application/json')
@ -2039,7 +2137,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/gettest/manifest-badjson',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '0')
@ -2113,7 +2211,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '50')
@ -2171,7 +2269,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/gettest/man1',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_slo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertIsInstance(exc, ListingIterError)
# we don't know at header-sending time that things are going to go
@ -2319,7 +2417,7 @@ class TestSloGetManifest(SloTestCase):
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_slo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
headers = HeaderKeyDict(headers)
self.assertIsInstance(exc, SegmentError)
self.assertEqual(status, '200 OK')

View File

@ -35,7 +35,8 @@ from hashlib import sha1
from time import time
from swift.common.middleware import tempauth, tempurl
from swift.common.swob import Request, Response, HeaderKeyDict
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import Request, Response
from swift.common import utils
@ -214,6 +215,31 @@ class TestTempURL(unittest.TestCase):
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
def test_head_and_get_headers_match(self):
    """A signed HEAD and a signed GET for the same object must produce
    identical response headers through the tempurl middleware."""
    expires = int(time() + 86400)
    path = '/v1/a/c/o'
    key = 'abc'

    def signed_response(verb):
        # sign the (method, expires, path) tuple and issue the request
        sig = hmac.new(key, '%s\n%s\n%s' % (verb, expires, path),
                       sha1).hexdigest()
        req = self._make_request(path, keys=[key], environ={
            'REQUEST_METHOD': verb,
            'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s'
            % (sig, expires)})
        self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
        return req.get_response(self.tempurl)

    head_resp = signed_response('HEAD')
    get_resp = signed_response('GET')
    self.assertEqual(head_resp.headers, get_resp.headers)
def test_get_valid_with_filename_and_inline(self):
method = 'GET'
expires = int(time() + 86400)

View File

@ -458,17 +458,17 @@ class Test_html_viewer(unittest.TestCase):
self.log_files)
def test_format_source_code(self):
nfl_os = '%s:%d(%s)' % (os.__file__[:-1], 136, 'makedirs')
self.assertTrue('makedirs' in self.viewer.format_source_code(nfl_os))
self.assertFalse('makedirsXYZ' in
self.viewer.format_source_code(nfl_os))
nfl_illegal = '%s:136(makedirs)' % os.__file__
self.assertTrue(_('The file type are forbidden to access!') in
self.viewer.format_source_code(nfl_illegal))
nfl_not_exist = '%s.py:136(makedirs)' % os.__file__
expected_msg = _('Can not access the file %s.') % os.__file__
self.assertTrue(expected_msg in
self.viewer.format_source_code(nfl_not_exist))
osfile = os.__file__.rstrip('c')
nfl_os = '%s:%d(%s)' % (osfile, 136, 'makedirs')
self.assertIn('makedirs', self.viewer.format_source_code(nfl_os))
self.assertNotIn('makedirsXYZ', self.viewer.format_source_code(nfl_os))
nfl_illegal = '%sc:136(makedirs)' % osfile
self.assertIn(_('The file type are forbidden to access!'),
self.viewer.format_source_code(nfl_illegal))
nfl_not_exist = '%s.py:136(makedirs)' % osfile
expected_msg = _('Can not access the file %s.py.') % osfile
self.assertIn(expected_msg,
self.viewer.format_source_code(nfl_not_exist))
class TestStats2(unittest.TestCase):

View File

@ -26,6 +26,7 @@ from math import ceil
from tempfile import mkdtemp
from shutil import rmtree
import random
import uuid
from six.moves import range
@ -46,7 +47,7 @@ class TestRingBuilder(unittest.TestCase):
def _partition_counts(self, builder, key='id'):
"""
Returns a dictionary mapping the given device key to (number of
partitions assigned to to that key).
partitions assigned to that key).
"""
counts = defaultdict(int)
for part2dev_id in builder._replica2part2dev:
@ -1384,6 +1385,21 @@ class TestRingBuilder(unittest.TestCase):
rb.rebalance() # this would crash since parts_wanted was not set
rb.validate()
def test_reduce_replicas_after_remove_device(self):
    """After removing a device, rebalance fails validation until the
    replica count is reduced to fit the remaining devices."""
    builder = ring.RingBuilder(8, 3, 1)
    for dev_id in range(3):
        builder.add_dev({'id': dev_id, 'region': 0, 'zone': 0,
                         'weight': 3, 'ip': '127.0.0.1', 'port': 10000,
                         'device': 'sda'})
    builder.rebalance()
    builder.remove_dev(0)
    # two devices can't hold three replicas
    self.assertRaises(exceptions.RingValidationError, builder.rebalance)
    builder.set_replicas(2)
    builder.rebalance()
    builder.validate()
def test_rebalance_post_upgrade(self):
rb = ring.RingBuilder(8, 3, 1)
# 5 devices: 5 is the smallest number that does not divide 3 * 2^8,
@ -2424,6 +2440,72 @@ class TestRingBuilder(unittest.TestCase):
except exceptions.DuplicateDeviceError:
self.fail("device hole not reused")
def test_increase_partition_power(self):
    """Doubling the partition power must not move any data: old
    partition X maps to new partitions 2*X and 2*X + 1 on the same
    devices."""
    rb = ring.RingBuilder(8, 3.0, 1)
    self.assertEqual(rb.part_power, 8)

    # add more devices than replicas to the ring
    for i in range(10):
        dev = "sdx%s" % i
        rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': dev})
    rb.rebalance(seed=1)

    # Let's save the ring, and get the nodes for an object
    ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz')
    rd = rb.get_ring()
    rd.save(ring_file)
    r = ring.Ring(ring_file)
    old_part, old_nodes = r.get_nodes("acc", "cont", "obj")
    old_version = rb.version

    rb.increase_partition_power()
    rb.validate()

    # a rebalance right after the power increase must not reassign
    # any partition or remove any device
    changed_parts, _balance, removed_devs = rb.rebalance()
    self.assertEqual(changed_parts, 0)
    self.assertEqual(removed_devs, 0)

    old_ring = r
    rd = rb.get_ring()
    rd.save(ring_file)
    r = ring.Ring(ring_file)
    new_part, new_nodes = r.get_nodes("acc", "cont", "obj")

    # sanity checks
    self.assertEqual(rb.part_power, 9)
    self.assertEqual(rb.version, old_version + 2)

    # make sure there is always the same device assigned to every pair of
    # partitions
    for replica in rb._replica2part2dev:
        for part in range(0, len(replica), 2):
            dev = replica[part]
            next_dev = replica[part + 1]
            self.assertEqual(dev, next_dev)

    # same for last_part moves
    for part in range(0, rb.parts, 2):
        this_last_moved = rb._last_part_moves[part]
        next_last_moved = rb._last_part_moves[part + 1]
        self.assertEqual(this_last_moved, next_last_moved)

    # spot-check 100 random object names against both rings
    for i in range(100):
        suffix = uuid.uuid4()
        account = 'account_%s' % suffix
        container = 'container_%s' % suffix
        obj = 'obj_%s' % suffix
        old_part, old_nodes = old_ring.get_nodes(account, container, obj)
        new_part, new_nodes = r.get_nodes(account, container, obj)

        # Due to the increased partition power, the partition each object
        # is assigned to has changed. If the old partition was X, it will
        # now be either located in 2*X or 2*X+1
        self.assertTrue(new_part in [old_part * 2, old_part * 2 + 1])

        # Importantly, we expect the objects to be placed on the same
        # nodes after increasing the partition power
        self.assertEqual(old_nodes, new_nodes)
class TestGetRequiredOverload(unittest.TestCase):

View File

@ -26,8 +26,9 @@ from six.moves import urllib
from swift.common import direct_client
from swift.common.exceptions import ClientException
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import Timestamp
from swift.common.swob import HeaderKeyDict, RESPONSE_REASONS
from swift.common.swob import RESPONSE_REASONS
from swift.common.storage_policy import POLICIES
from six.moves.http_client import HTTPException

View File

@ -0,0 +1,75 @@
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.header_key_dict import HeaderKeyDict
class TestHeaderKeyDict(unittest.TestCase):
    """Exercise the case-insensitive dict behaviour of HeaderKeyDict."""

    def test_case_insensitive(self):
        # the last write wins regardless of key case, and reads are
        # case-insensitive too
        headers = HeaderKeyDict()
        headers['Content-Length'] = 0
        headers['CONTENT-LENGTH'] = 10
        headers['content-length'] = 20
        for variant in ('Content-Length', 'content-length',
                        'CONTENT-LENGTH'):
            self.assertEqual(headers[variant], '20')

    def test_setdefault(self):
        headers = HeaderKeyDict()

        # a missing key gets set
        headers.setdefault('x-rubber-ducky', 'the one')
        self.assertEqual(headers['X-Rubber-Ducky'], 'the one')

        # the stored value is returned, both on first set and thereafter
        self.assertEqual(headers.setdefault('x-boat', 'dinghy'), 'dinghy')
        self.assertEqual(headers.setdefault('x-boat', 'yacht'), 'dinghy')

        # a None default shouldn't crash
        headers.setdefault('x-sir-not-appearing-in-this-request', None)

    def test_del_contains(self):
        headers = HeaderKeyDict()
        headers['Content-Length'] = 0
        self.assertTrue('Content-Length' in headers)
        del headers['Content-Length']
        self.assertTrue('Content-Length' not in headers)

    def test_update(self):
        # update accepts both mappings and iterables of pairs
        headers = HeaderKeyDict()
        headers.update({'Content-Length': '0'})
        headers.update([('Content-Type', 'text/plain')])
        self.assertEqual(headers['Content-Length'], '0')
        self.assertEqual(headers['Content-Type'], 'text/plain')

    def test_get(self):
        headers = HeaderKeyDict()
        headers['content-length'] = 20
        self.assertEqual(headers.get('CONTENT-LENGTH'), '20')
        self.assertEqual(headers.get('something-else'), None)
        self.assertEqual(headers.get('something-else', True), True)

    def test_keys(self):
        # keys come back in their canonicalized (title-cased) form
        headers = HeaderKeyDict()
        headers['content-length'] = 20
        headers['cOnTent-tYpe'] = 'text/plain'
        headers['SomeThing-eLse'] = 'somevalue'
        expected = set(('Content-Length', 'Content-Type', 'Something-Else'))
        self.assertEqual(set(headers.keys()), expected)

View File

@ -27,12 +27,26 @@ from six.moves.urllib.parse import quote
from test.unit import FakeLogger
from eventlet.green import urllib2
from swift.common import exceptions, internal_client, swob
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import StoragePolicy
from test.unit import with_tempdir, write_fake_ring, patch_policies
from test.unit.common.middleware.helpers import FakeSwift
class FakeConn(object):
    """Minimal stand-in for a urllib2 connection object.

    Serves a canned JSON body and an empty header mapping, which is
    all the SimpleClient code paths under test ever look at.
    """

    def __init__(self, body=None):
        # default to an empty JSON list so read() always succeeds
        self.body = [] if body is None else body

    def read(self):
        # the client expects the payload JSON-serialised
        return json.dumps(self.body)

    def info(self):
        # no interesting response headers
        return {}
def not_sleep(seconds):
    """No-op stand-in for sleep() so retry loops run instantly in tests."""
    return None
@ -338,20 +352,37 @@ class TestInternalClient(unittest.TestCase):
# verify that base_request passes timeout arg on to urlopen
body = {"some": "content"}
class FakeConn(object):
def read(self):
return json.dumps(body)
for timeout in (0.0, 42.0, None):
mocked_func = 'swift.common.internal_client.urllib2.urlopen'
with mock.patch(mocked_func) as mock_urlopen:
mock_urlopen.side_effect = [FakeConn()]
mock_urlopen.side_effect = [FakeConn(body)]
sc = internal_client.SimpleClient('http://0.0.0.0/')
_, resp_body = sc.base_request('GET', timeout=timeout)
mock_urlopen.assert_called_once_with(mock.ANY, timeout=timeout)
# sanity check
self.assertEqual(body, resp_body)
def test_base_full_listing(self):
body1 = [{'name': 'a'}, {'name': "b"}, {'name': "c"}]
body2 = [{'name': 'd'}]
body3 = []
mocked_func = 'swift.common.internal_client.urllib2.urlopen'
with mock.patch(mocked_func) as mock_urlopen:
mock_urlopen.side_effect = [
FakeConn(body1), FakeConn(body2), FakeConn(body3)]
sc = internal_client.SimpleClient('http://0.0.0.0/')
_, resp_body = sc.base_request('GET', full_listing=True)
self.assertEqual(body1 + body2, resp_body)
self.assertEqual(3, mock_urlopen.call_count)
actual_requests = map(
lambda call: call[0][0], mock_urlopen.call_args_list)
self.assertEqual('/?format=json', actual_requests[0].get_selector())
self.assertEqual(
'/?format=json&marker=c', actual_requests[1].get_selector())
self.assertEqual(
'/?format=json&marker=d', actual_requests[2].get_selector())
def test_make_request_method_path_headers(self):
class InternalClient(internal_client.InternalClient):
def __init__(self):
@ -1027,7 +1058,7 @@ class TestInternalClient(unittest.TestCase):
'user-agent': 'test', # from InternalClient.make_request
})
self.assertEqual(app.calls_with_headers, [(
'GET', path_info, swob.HeaderKeyDict(req_headers))])
'GET', path_info, HeaderKeyDict(req_headers))])
def test_iter_object_lines(self):
class InternalClient(internal_client.InternalClient):
@ -1180,76 +1211,84 @@ class TestGetAuth(unittest.TestCase):
'http://127.0.0.1', 'user', 'key', auth_version=2.0)
# Module-level fake clock state; mock_time() below advances it.
mock_time_value = 1401224049.98


def mock_time():
    """Deterministic time() replacement: advance one second per call."""
    global mock_time_value
    mock_time_value = mock_time_value + 1
    return mock_time_value
class TestSimpleClient(unittest.TestCase):
    def _test_get_head(self, request, urlopen, method):
        """Shared checks for GET and HEAD retry_request behaviour.

        ``request`` and ``urlopen`` are the mocked
        ``eventlet.green.urllib2`` entry points supplied by the
        ``@mock.patch`` decorators on the calling test; ``method`` is
        the HTTP verb ('GET' or 'HEAD') to exercise.
        """
        # a one-element list (not a global) so the nested closure can
        # mutate the fake clock under Python 2
        mock_time_value = [1401224049.98]

        def mock_time():
            # deterministic time(): advance one second per call
            mock_time_value[0] += 1
            return mock_time_value[0]

        with mock.patch('swift.common.internal_client.time', mock_time):
            # basic request, only url as kwarg
            request.return_value.get_type.return_value = "http"
            urlopen.return_value.read.return_value = ''
            urlopen.return_value.getcode.return_value = 200
            urlopen.return_value.info.return_value = {'content-length': '345'}
            sc = internal_client.SimpleClient(url='http://127.0.0.1')
            logger = FakeLogger()
            retval = sc.retry_request(
                method, headers={'content-length': '123'}, logger=logger)
            self.assertEqual(urlopen.call_count, 1)
            request.assert_called_with('http://127.0.0.1?format=json',
                                       headers={'content-length': '123'},
                                       data=None)
            # empty body -> None payload, but response headers come back
            self.assertEqual([{'content-length': '345'}, None], retval)
            self.assertEqual(method, request.return_value.get_method())
            # one debug line: timestamps come from the fake clock above
            self.assertEqual(logger.log_dict['debug'], [(
                ('-> 2014-05-27T20:54:11 ' + method +
                 ' http://127.0.0.1%3Fformat%3Djson 200 '
                 '123 345 1401224050.98 1401224051.98 1.0 -',), {})])

            # Check if JSON is decoded
            urlopen.return_value.read.return_value = '{}'
            retval = sc.retry_request(method)
            self.assertEqual([{'content-length': '345'}, {}], retval)

            # same as above, now with token
            sc = internal_client.SimpleClient(url='http://127.0.0.1',
                                              token='token')
            retval = sc.retry_request(method)
            request.assert_called_with('http://127.0.0.1?format=json',
                                       headers={'X-Auth-Token': 'token'},
                                       data=None)
            self.assertEqual([{'content-length': '345'}, {}], retval)

            # same as above, now with prefix
            sc = internal_client.SimpleClient(url='http://127.0.0.1',
                                              token='token')
            retval = sc.retry_request(method, prefix="pre_")
            request.assert_called_with(
                'http://127.0.0.1?format=json&prefix=pre_',
                headers={'X-Auth-Token': 'token'}, data=None)
            self.assertEqual([{'content-length': '345'}, {}], retval)

            # same as above, now with container name
            retval = sc.retry_request(method, container='cont')
            request.assert_called_with('http://127.0.0.1/cont?format=json',
                                       headers={'X-Auth-Token': 'token'},
                                       data=None)
            self.assertEqual([{'content-length': '345'}, {}], retval)

            # same as above, now with object name
            retval = sc.retry_request(method, container='cont', name='obj')
            request.assert_called_with('http://127.0.0.1/cont/obj',
                                       headers={'X-Auth-Token': 'token'},
                                       data=None)
            self.assertEqual([{'content-length': '345'}, {}], retval)
@mock.patch('eventlet.green.urllib2.urlopen')
@mock.patch('eventlet.green.urllib2.Request')
@mock.patch('swift.common.internal_client.time', mock_time)
def test_get(self, request, urlopen):
# basic GET request, only url as kwarg
request.return_value.get_type.return_value = "http"
urlopen.return_value.read.return_value = ''
urlopen.return_value.getcode.return_value = 200
urlopen.return_value.info.return_value = {'content-length': '345'}
sc = internal_client.SimpleClient(url='http://127.0.0.1')
logger = FakeLogger()
retval = sc.retry_request(
'GET', headers={'content-length': '123'}, logger=logger)
self.assertEqual(urlopen.call_count, 1)
request.assert_called_with('http://127.0.0.1?format=json',
headers={'content-length': '123'},
data=None)
self.assertEqual([None, None], retval)
self.assertEqual('GET', request.return_value.get_method())
self.assertEqual(logger.log_dict['debug'], [(
('-> 2014-05-27T20:54:11 GET http://127.0.0.1%3Fformat%3Djson 200 '
'123 345 1401224050.98 1401224051.98 1.0 -',), {})])
self._test_get_head(request, urlopen, 'GET')
# Check if JSON is decoded
urlopen.return_value.read.return_value = '{}'
retval = sc.retry_request('GET')
self.assertEqual([None, {}], retval)
# same as above, now with token
sc = internal_client.SimpleClient(url='http://127.0.0.1',
token='token')
retval = sc.retry_request('GET')
request.assert_called_with('http://127.0.0.1?format=json',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([None, {}], retval)
# same as above, now with prefix
sc = internal_client.SimpleClient(url='http://127.0.0.1',
token='token')
retval = sc.retry_request('GET', prefix="pre_")
request.assert_called_with('http://127.0.0.1?format=json&prefix=pre_',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([None, {}], retval)
# same as above, now with container name
retval = sc.retry_request('GET', container='cont')
request.assert_called_with('http://127.0.0.1/cont?format=json',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([None, {}], retval)
# same as above, now with object name
retval = sc.retry_request('GET', container='cont', name='obj')
request.assert_called_with('http://127.0.0.1/cont/obj',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([None, {}], retval)
    @mock.patch('eventlet.green.urllib2.urlopen')
    @mock.patch('eventlet.green.urllib2.Request')
    def test_head(self, request, urlopen):
        # run the shared GET/HEAD assertions with the HEAD verb
        self._test_get_head(request, urlopen, 'HEAD')
@mock.patch('eventlet.green.urllib2.urlopen')
@mock.patch('eventlet.green.urllib2.Request')
@ -1271,6 +1310,7 @@ class TestSimpleClient(unittest.TestCase):
request.return_value.get_type.return_value = "http"
mock_resp = mock.MagicMock()
mock_resp.read.return_value = ''
mock_resp.info.return_value = {}
urlopen.side_effect = [urllib2.URLError(''), mock_resp]
sc = internal_client.SimpleClient(url='http://127.0.0.1', retries=1,
token='token')
@ -1282,13 +1322,14 @@ class TestSimpleClient(unittest.TestCase):
self.assertEqual(urlopen.call_count, 2)
request.assert_called_with('http://127.0.0.1?format=json', data=None,
headers={'X-Auth-Token': 'token'})
self.assertEqual([None, None], retval)
self.assertEqual([{}, None], retval)
self.assertEqual(sc.attempts, 2)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_get_with_retries_param(self, mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = ''
mock_response.info.return_value = {}
mock_urlopen.side_effect = internal_client.httplib.BadStatusLine('')
c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
self.assertEqual(c.retries, 5)
@ -1314,7 +1355,7 @@ class TestSimpleClient(unittest.TestCase):
retval = c.retry_request('GET', retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
self.assertEqual([None, None], retval)
self.assertEqual([{}, None], retval)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_request_with_retries_with_HTTPError(self, mock_urlopen):
@ -1378,10 +1419,6 @@ class TestSimpleClient(unittest.TestCase):
proxy = '%s://%s' % (scheme, proxy_host)
url = 'https://127.0.0.1:1/a'
class FakeConn(object):
def read(self):
return 'irrelevant'
mocked = 'swift.common.internal_client.urllib2.urlopen'
# module level methods

Some files were not shown because too many files have changed in this diff Show More