Merge branch 'master' into feature/crypto
Change-Id: If3f11ec3755803e5a64a42cf3c94e0f76fc30fa9
commit 942c9bb45c
@@ -76,8 +76,9 @@ def report(success):
        return
    next_report = time() + 5
    eta, eta_unit = compute_eta(begun, created, need_to_create)
-    print '\r\x1B[KCreating %s: %d of %d, %d%s left, %d retries' % (item_type,
-        created, need_to_create, round(eta), eta_unit, retries_done),
+    print ('\r\x1B[KCreating %s: %d of %d, %d%s left, %d retries'
+           % (item_type, created, need_to_create, round(eta), eta_unit,
+              retries_done)),
    stdout.flush()

@@ -132,6 +133,9 @@ Usage: %%prog [options] [conf_file]
    retries = int(conf.get('retries', 5))
    concurrency = int(conf.get('concurrency', 25))
    endpoint_type = str(conf.get('endpoint_type', 'publicURL'))
+    user_domain_name = str(conf.get('user_domain_name', ''))
+    project_domain_name = str(conf.get('project_domain_name', ''))
+    project_name = str(conf.get('project_name', ''))
    insecure = options.insecure \
        or config_true_value(conf.get('keystone_api_insecure', 'no'))
    container_populate = config_true_value(
@@ -146,6 +150,12 @@ Usage: %%prog [options] [conf_file]
    retries_done = 0

+    os_options = {'endpoint_type': endpoint_type}
+    if user_domain_name:
+        os_options['user_domain_name'] = user_domain_name
+    if project_domain_name:
+        os_options['project_domain_name'] = project_domain_name
+    if project_name:
+        os_options['project_name'] = project_name
+
    url, token = get_auth(conf['auth_url'], conf['auth_user'],
                          conf['auth_key'],
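The two hunks above assemble the keystone v3 options and hand them to get_auth. A minimal sketch of that flow, assuming python-swiftclient's get_auth with its documented os_options/auth_version keywords (the hunk truncates the real call, so the exact keywords are an assumption):

.. code-block:: python

    # Hedged sketch: collect only the v3 options the operator actually set,
    # then authenticate once with swiftclient.
    from swiftclient.client import get_auth

    conf = {
        'auth_url': 'http://localhost:5000/v3/',
        'auth_user': 'user',
        'auth_key': 'password',
        'endpoint_type': 'publicURL',
        'user_domain_name': 'default',
        'project_domain_name': 'default',
        'project_name': 'dpstats',
    }

    os_options = {'endpoint_type': conf['endpoint_type']}
    for key in ('user_domain_name', 'project_domain_name', 'project_name'):
        if conf.get(key):  # skip options left blank in the conf file
            os_options[key] = conf[key]

    url, token = get_auth(conf['auth_url'], conf['auth_user'],
                          conf['auth_key'], auth_version='3',
                          os_options=os_options)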
@@ -126,7 +126,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
        if not json_output:
            print '\r\x1B[KQuerying containers: %d of %d, %d%s left, %d ' \
                  'retries' % (containers_queried[0], containers_listed,
-                              round(eta), eta_unit, retries_done[0]),
+                             round(eta), eta_unit, retries_done[0]),
            stdout.flush()
    container_parts = {}
    for container in containers:
@@ -145,7 +145,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
    if not json_output:
        print '\r\x1B[KQueried %d containers for dispersion reporting, ' \
              '%d%s, %d retries' % (containers_listed, round(elapsed),
-                                   elapsed_unit, retries_done[0])
+                                  elapsed_unit, retries_done[0])
    if containers_listed - distinct_partitions:
        print 'There were %d overlapping partitions' % (
            containers_listed - distinct_partitions)
@@ -255,7 +255,7 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
    if not json_output:
        print '\r\x1B[KQueried %d objects for dispersion reporting, ' \
              '%d%s, %d retries' % (objects_listed, round(elapsed),
-                                   elapsed_unit, retries_done[0])
+                                  elapsed_unit, retries_done[0])
    if objects_listed - distinct_partitions:
        print 'There were %d overlapping partitions' % (
            objects_listed - distinct_partitions)
@@ -363,6 +363,9 @@ Usage: %%prog [options] [conf_file]
        and not options.container_only
    if not (object_report or container_report):
        exit("Neither container or object report is set to run")
+    user_domain_name = str(conf.get('user_domain_name', ''))
+    project_domain_name = str(conf.get('project_domain_name', ''))
+    project_name = str(conf.get('project_name', ''))
    insecure = options.insecure \
        or config_true_value(conf.get('keystone_api_insecure', 'no'))
    if options.debug:
@@ -371,6 +374,12 @@ Usage: %%prog [options] [conf_file]
    coropool = GreenPool(size=concurrency)

+    os_options = {'endpoint_type': endpoint_type}
+    if user_domain_name:
+        os_options['user_domain_name'] = user_domain_name
+    if project_domain_name:
+        os_options['project_domain_name'] = project_domain_name
+    if project_name:
+        os_options['project_name'] = project_name
+
    url, token = get_auth(conf['auth_url'], conf['auth_user'],
                          conf['auth_key'],
@@ -188,12 +188,6 @@ Number of replication workers to spawn. The default is 8.
Time in seconds to wait between replication passes. The default is 30.
.IP \fBinterval\fR
Replaces run_pause with the more standard "interval", which means the replicator won't pause unless it takes less than the interval set. The default is 30.
-.IP \fBerror_suppression_interval\fR
-How long without an error before a node's error count is reset. This will also be how long before a node is re-enabled after suppression is triggered.
-The default is 60 seconds.
-.IP \fBerror_suppression_limit\fR
-How many errors can accumulate before a node is temporarily ignored. The default
-is 10 seconds.
.IP \fBnode_timeout\fR
Request timeout to external services. The default is 10 seconds.
.IP \fBconn_timeout\fR
@@ -43,7 +43,13 @@ Authentication system URL
.IP "\fBauth_user\fR"
Authentication system account/user name
.IP "\fBauth_key\fR"
-Authentication system account/user password
+Authentication system account/user password
+.IP "\fBproject_name\fR"
+Project name in case of keystone auth version 3
+.IP "\fBproject_domain_name\fR"
+Project domain name in case of keystone auth version 3
+.IP "\fBuser_domain_name\fR"
+User domain name in case of keystone auth version 3
.IP "\fBswift_dir\fR"
Location of openstack-swift configuration and ring files
.IP "\fBdispersion_coverage\fR"
@@ -70,6 +76,9 @@ Whether to run the object report. The default is yes.
.IP "auth_key = dpstats"
.IP "swift_dir = /etc/swift"
.IP "# keystone_api_insecure = no"
+.IP "# project_name = dpstats"
+.IP "# project_domain_name = default"
+.IP "# user_domain_name = default"
.IP "# dispersion_coverage = 1.0"
.IP "# retries = 5"
.IP "# concurrency = 25"
@@ -85,6 +85,9 @@ Example \fI/etc/swift/dispersion.conf\fR:
.IP "auth_user = dpstats:dpstats"
.IP "auth_key = dpstats"
.IP "swift_dir = /etc/swift"
+.IP "# project_name = dpstats"
+.IP "# project_domain_name = default"
+.IP "# user_domain_name = default"
.IP "# dispersion_coverage = 1.0"
.IP "# retries = 5"
.IP "# concurrency = 25"
@@ -101,6 +101,9 @@ Example \fI/etc/swift/dispersion.conf\fR:
.IP "auth_user = dpstats:dpstats"
.IP "auth_key = dpstats"
.IP "swift_dir = /etc/swift"
+.IP "# project_name = dpstats"
+.IP "# project_domain_name = default"
+.IP "# user_domain_name = default"
.IP "# dispersion_coverage = 1.0"
.IP "# retries = 5"
.IP "# concurrency = 25"
@@ -270,7 +270,8 @@ configuration file, /etc/swift/dispersion.conf. Example conf file::

There are also options for the conf file for specifying the dispersion coverage
(defaults to 1%), retries, concurrency, etc. though usually the defaults are
-fine.
+fine. If you want to use keystone v3 for authentication there are options like
+auth_version, user_domain_name, project_domain_name and project_name.

Once the configuration is in place, run `swift-dispersion-populate` to populate
the containers and objects throughout the cluster.
@@ -2,7 +2,7 @@
CORS
====

-CORS_ is a mechanisim to allow code running in a browser (Javascript for
+CORS_ is a mechanism to allow code running in a browser (Javascript for
example) to make requests to a domain other than the one from which it originated.

Swift supports CORS requests to containers and objects.
@@ -340,7 +340,7 @@ paste.deploy works (at least at the time of this writing.)

`name3` got the local value from the `app:myapp` subsection because it is using
the special paste.deploy syntax of ``set option_name = value``. So, if you want
-a default value for most app/filters but want to overridde it in one
+a default value for most app/filters but want to override it in one
subsection, this is how you do it.

`name4` got the global value from `DEFAULT` since it's only in that section
@@ -42,7 +42,7 @@ To execute the unit tests:

Remarks:
If you installed using: `cd ~/swift; sudo python setup.py develop`,
-you may need to do: `cd ~/swift; sudo chown -R swift:swift swift.egg-info`
+you may need to do: `cd ~/swift; sudo chown -R ${USER}:${USER} swift.egg-info`
prior to running tox.

* Optionally, run only specific tox builds:
@@ -4,7 +4,7 @@ Pluggable On-Disk Back-end APIs

The internal REST API used between the proxy server and the account, container
and object server is almost identical to the public Swift REST API, but with a few
-internal extentsions (for example, update an account with a new container).
+internal extensions (for example, update an account with a new container).

The pluggable back-end APIs for the three REST API servers (account,
container, object) abstract the needs for servicing the various REST APIs
@@ -0,0 +1,152 @@
+===========================
+First Contribution to Swift
+===========================
+
+-------------
+Getting Swift
+-------------
+
+Swift's source code is hosted on github and managed with git. The current
+trunk can be checked out like this:
+
+``git clone https://github.com/openstack/swift.git``
+
+This will clone the Swift repository under your account.
+
+A source tarball for the latest release of Swift is available on the
+`launchpad project page <https://launchpad.net/swift>`_.
+
+Prebuilt packages for Ubuntu and RHEL variants are available.
+
+* `Swift Ubuntu Packages <https://launchpad.net/ubuntu/+source/swift>`_
+* `Swift RDO Packages <https://www.rdoproject.org/Repositories>`_
+
+--------------------
+Source Control Setup
+--------------------
+
+Swift uses `git` for source control. The OpenStack
+`Developer's Guide <http://docs.openstack.org/infra/manual/developers.html>`_
+describes the steps for setting up Git and all the necessary accounts for
+contributing code to Swift.
+
+----------------
+Changes to Swift
+----------------
+
+Once you have the source code and source control set up, you can make your
+changes to Swift.
+
+-------
+Testing
+-------
+
+The `Development Guidelines <development_guidelines>`_ describe the testing
+requirements before submitting Swift code.
+
+In summary, you can execute tox from the swift home directory (where you
+checked out the source code):
+
+``tox``
+
+Tox will present the test results. Notice that in the beginning, it is very common
+to break many coding style guidelines.
+
+--------------------------
+Proposing changes to Swift
+--------------------------
+
+The OpenStack
+`Developer's Guide <http://docs.openstack.org/infra/manual/developers.html>`_
+describes the most common `git` commands that you will need.
+
+Following is a list of the commands that you need to know for your first
+contribution to Swift:
+
+To clone a copy of Swift:
+
+``git clone https://github.com/openstack/swift.git``
+
+Under the swift directory, set up the Gerrit repository. The following command
+configures the repository to know about Gerrit and installs the Change-Id
+commit hook. You only need to do this once:
+
+``git review -s``
+
+To create your development branch (substitute branch_name for a name of your
+choice):
+
+``git checkout -b <branch_name>``
+
+To check the files that have been updated in your branch:
+
+``git status``
+
+To check the differences between your branch and the repository:
+
+``git diff``
+
+Assuming you have not added new files, you commit all your changes using:
+
+``git commit -a``
+
+Read the `Summary of Git commit message structure <https://wiki.openstack.org/wiki/GitCommitMessages?%22Summary%20of%20Git%20commit%20message%20structure%22#Summary_of_Git_commit_message_structure>`_
+for best practices on writing the commit message. When you are ready to send
+your changes for review, use:
+
+``git review``
+
+If successful, the Git response message will contain a URL you can use to track your
+changes.
+
+If you need to make further changes to the same review, you can commit them
+using:
+
+``git commit -a --amend``
+
+This will commit the changes under the same set of changes you issued earlier.
+Notice that in order to send your latest version for review, you will still
+need to call:
+
+``git review``
+
+---------------------
+Tracking your changes
+---------------------
+
+After you have proposed your changes to Swift, you can track the review at:
+
+* `<https://review.openstack.org>`_
+
+---------------
+Troubleshooting
+---------------
+
+You may run into the following error when starting Swift if you rebase
+your commit using:
+
+``git rebase``
+
+.. code-block:: python
+
+    Traceback (most recent call last):
+      File "/usr/local/bin/swift-init", line 5, in <module>
+        from pkg_resources import require
+      File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 2749, in <module>
+        working_set = WorkingSet._build_master()
+      File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 446, in _build_master
+        return cls._build_from_requirements(__requires__)
+      File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 459, in _build_from_requirements
+        dists = ws.resolve(reqs, Environment())
+      File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 628, in resolve
+        raise DistributionNotFound(req)
+    pkg_resources.DistributionNotFound: swift==2.3.1.devXXX
+
+(where XXX represents a dev version of Swift).
+
+This happens because `git rebase` will retrieve code for a different version of
+Swift in the development stream, but the start scripts under `/usr/local/bin` have
+not been updated. The solution is to execute the following command under the swift
+directory (which contains `setup.py`):
+
+``sudo python setup.py develop``
@@ -18,23 +18,6 @@ Swift is written in Python and has these dependencies:

There is no current support for Python 3.

--------------
-Getting Swift
--------------
-
-Swift's source code is hosted on github and managed with git. The current
-trunk can be checked out like this:
-
-``git clone https://github.com/openstack/swift.git``
-
-A source tarball for the latest release of Swift is available on the
-`launchpad project page <https://launchpad.net/swift>`_.
-
-Prebuilt packages for Ubuntu and RHEL variants are available.
-
-* `Swift Ubuntu Packages <https://launchpad.net/ubuntu/+source/swift>`_
-* `Swift RDO Packages <https://openstack.redhat.com/Repositories>`_
-
-----------
Development
-----------
@@ -42,9 +25,9 @@ Development
To get started with development with Swift, or to just play around, the
following docs will be useful:

-* :doc:`Swift All in One <development_saio>` - Set up a VM with Swift
-  installed
+* :doc:`Swift All in One <development_saio>` - Set up a VM with Swift installed
* :doc:`Development Guidelines <development_guidelines>`
+* :doc:`First Contribution to Swift <first_contribution_swift>`
* :doc:`Associated Projects <associated_projects>`

--------------------------
@@ -68,6 +68,7 @@ Developer Documentation

    development_guidelines
    development_saio
+   first_contribution_swift
    policies_saio
    development_auth
    development_middleware
@@ -59,7 +59,7 @@ client_etag        The etag header value given by the client.
transaction_id     The transaction id of the request.
headers            The headers given in the request.
request_time       The duration of the request.
-source             The "source" of the reuqest. This may be set for requests
+source             The "source" of the request. This may be set for requests
                   that are generated in order to fulfill client requests,
                   e.g. bulk uploads.
log_info           Various info that may be useful for diagnostics, e.g. the
@@ -13,7 +13,7 @@ architecture. For each request, it will look up the location of the account,
container, or object in the ring (see below) and route the request accordingly.
For Erasure Code type policies, the Proxy Server is also responsible for
encoding and decoding object data. See :doc:`overview_erasure_code` for
-complete information on Erasure Code suport. The public API is also exposed
+complete information on Erasure Code support. The public API is also exposed
through the Proxy Server.

A large number of failures are also handled in the Proxy Server. For
@@ -425,7 +425,7 @@ The basic flow looks like this:
* The proxy waits for a minimal number of two object servers to respond with a
  success (2xx) status before responding to the client with a successful
  status. In this particular case it was decided that two responses was
-  the mininum amount to know that the file would be propagated in case of
+  the minimum amount to know that the file would be propagated in case of
  failure from the others, and because a greater number would potentially
  mean more latency, which should be avoided if possible.
@@ -100,13 +100,6 @@ use = egg:swift#recon
# run_pause is deprecated, use interval instead
# run_pause = 30
#
-# How long without an error before a node's error count is reset. This will
-# also be how long before a node is reenabled after suppression is triggered.
-# error_suppression_interval = 60
-#
-# How many errors can accumulate before a node is temporarily ignored.
-# error_suppression_limit = 10
-#
# node_timeout = 10
# conn_timeout = 0.5
#
@@ -13,6 +13,16 @@ auth_key = testing
# auth_key = password
# auth_version = 2.0
#
+# NOTE: If you want to use keystone (auth version 3.0), then its configuration
+# would look something like:
+# auth_url = http://localhost:5000/v3/
+# auth_user = user
+# auth_key = password
+# auth_version = 3.0
+# project_name = project
+# project_domain_name = project_domain
+# user_domain_name = user_domain
+#
# endpoint_type = publicURL
# keystone_api_insecure = no
#
@@ -122,11 +122,10 @@ class AccountAuditor(Daemon):
                continue
            raise InvalidAccountInfo(_(
                'The total %(key)s for the container (%(total)s) does not '
-                'match the sum of %(key)s across policies (%(sum)s)') % {
-                'key': key,
-                'total': info[key],
-                'sum': policy_totals[key],
-            })
+                'match the sum of %(key)s across policies (%(sum)s)')
+                % {'key': key,
+                   'total': info[key],
+                   'sum': policy_totals[key]})

    def account_audit(self, path):
        """
@@ -18,7 +18,7 @@ Pluggable Back-end for Account Server

from uuid import uuid4
import time
-import cPickle as pickle
+import six.moves.cPickle as pickle

import sqlite3
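This is the first of several hunks that swap cPickle for six.moves.cPickle. A tiny sketch of why the shim works, assuming six is installed: the one import line resolves to cPickle on Python 2 and to pickle on Python 3, so no call site changes.

.. code-block:: python

    import six.moves.cPickle as pickle

    record = {'account': 'AUTH_test', 'container_count': 3}
    blob = pickle.dumps(record, protocol=2)  # protocol 2 loads on py2 and py3
    assert pickle.loads(blob) == record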
@@ -365,7 +365,8 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
        datadir = DATADIR_BASE

    # try to extract policy index from datafile disk path
-    policy_index = int(extract_policy(datafile) or POLICIES.legacy)
+    fullpath = os.path.abspath(datafile)
+    policy_index = int(extract_policy(fullpath) or POLICIES.legacy)

    try:
        if policy_index:
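The fix above normalizes the datafile path before sniffing the policy index out of it. A hedged sketch of the failure mode, using a simplified, hypothetical stand-in for swift's extract_policy() (the real helper parses more of the on-disk layout):

.. code-block:: python

    import os
    import tempfile

    def extract_policy_index(path):
        # hypothetical, simplified: look for an "objects-<N>" path component
        for part in path.split(os.sep):
            if part.startswith('objects-'):
                return int(part.split('-', 1)[1])
            if part == 'objects':
                return 0
        return None

    datadir = os.path.join(tempfile.mkdtemp(), 'objects-1', '123')
    os.makedirs(datadir)
    os.chdir(datadir)
    rel = '1425348910.12345.data'
    print(extract_policy_index(rel))                   # None: bare name, no hint
    print(extract_policy_index(os.path.abspath(rel)))  # 1: abspath restores it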
@@ -96,26 +96,30 @@ ARG_PARSER.add_argument(
    help="Path to the scenario file")


+class ParseCommandError(ValueError):
+
+    def __init__(self, name, round_index, command_index, msg):
+        msg = "Invalid %s (round %s, command %s): %s" % (
+            name, round_index, command_index, msg)
+        super(ParseCommandError, self).__init__(msg)
+
+
def _parse_weight(round_index, command_index, weight_str):
    try:
        weight = float(weight_str)
    except ValueError as err:
-        raise ValueError(
-            "Invalid weight %r (round %d, command %d): %s"
-            % (weight_str, round_index, command_index, err))
+        raise ParseCommandError('weight', round_index, command_index, err)
    if weight < 0:
-        raise ValueError(
-            "Negative weight (round %d, command %d)"
-            % (round_index, command_index))
+        raise ParseCommandError('weight', round_index, command_index,
+                                'cannot be negative')
    return weight


def _parse_add_command(round_index, command_index, command):
    if len(command) != 3:
-        raise ValueError(
-            "Invalid add command (round %d, command %d): expected array of "
-            "length 3, but got %d"
-            % (round_index, command_index, len(command)))
+        raise ParseCommandError(
+            'add command', round_index, command_index,
+            'expected array of length 3, but got %r' % command)

    dev_str = command[1]
    weight_str = command[2]
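A runnable sketch of the exception subclass introduced above: it owns the message template, so every raise site shrinks to a field name, a location, and a detail.

.. code-block:: python

    class ParseCommandError(ValueError):

        def __init__(self, name, round_index, command_index, msg):
            super(ParseCommandError, self).__init__(
                "Invalid %s (round %s, command %s): %s"
                % (name, round_index, command_index, msg))

    try:
        raise ParseCommandError('weight', 0, 2, 'cannot be negative')
    except ValueError as err:
        print(err)  # Invalid weight (round 0, command 2): cannot be negative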
@@ -123,43 +127,47 @@ def _parse_add_command(round_index, command_index, command):
    try:
        dev = parse_add_value(dev_str)
    except ValueError as err:
-        raise ValueError(
-            "Invalid device specifier '%s' in add (round %d, command %d): %s"
-            % (dev_str, round_index, command_index, err))
+        raise ParseCommandError('device specifier', round_index,
+                                command_index, err)

    dev['weight'] = _parse_weight(round_index, command_index, weight_str)

    if dev['region'] is None:
        dev['region'] = 1

    default_key_map = {
        'replication_ip': 'ip',
        'replication_port': 'port',
    }
    for empty_key, default_key in default_key_map.items():
        if dev[empty_key] is None:
            dev[empty_key] = dev[default_key]

    return ['add', dev]


def _parse_remove_command(round_index, command_index, command):
    if len(command) != 2:
-        raise ValueError(
-            "Invalid remove command (round %d, command %d): expected array of "
-            "length 2, but got %d"
-            % (round_index, command_index, len(command)))
+        raise ParseCommandError('remove command', round_index, command_index,
+                                "expected array of length 2, but got %r" %
+                                (command,))

    dev_str = command[1]

    try:
        dev_id = int(dev_str)
    except ValueError as err:
-        raise ValueError(
-            "Invalid device ID '%s' in remove (round %d, command %d): %s"
-            % (dev_str, round_index, command_index, err))
+        raise ParseCommandError('device ID in remove',
+                                round_index, command_index, err)

    return ['remove', dev_id]


def _parse_set_weight_command(round_index, command_index, command):
    if len(command) != 3:
-        raise ValueError(
-            "Invalid remove command (round %d, command %d): expected array of "
-            "length 3, but got %d"
-            % (round_index, command_index, len(command)))
+        raise ParseCommandError('set_weight command', round_index,
+                                command_index,
+                                "expected array of length 3, but got %r" %
+                                (command,))

    dev_str = command[1]
    weight_str = command[2]
@@ -167,14 +175,21 @@ def _parse_set_weight_command(round_index, command_index, command):
    try:
        dev_id = int(dev_str)
    except ValueError as err:
-        raise ValueError(
-            "Invalid device ID '%s' in set_weight (round %d, command %d): %s"
-            % (dev_str, round_index, command_index, err))
+        raise ParseCommandError('device ID in set_weight',
+                                round_index, command_index, err)

    weight = _parse_weight(round_index, command_index, weight_str)
    return ['set_weight', dev_id, weight]


+def _parse_save_command(round_index, command_index, command):
+    if len(command) != 2:
+        raise ParseCommandError(
+            command, round_index, command_index,
+            "expected array of length 2 but got %r" % (command,))
+    return ['save', command[1]]
+
+
def parse_scenario(scenario_data):
    """
    Takes a serialized scenario and turns it into a data structure suitable
@@ -236,9 +251,12 @@ def parse_scenario(scenario_data):
    if not isinstance(raw_scenario['rounds'], list):
        raise ValueError("rounds must be an array")

-    parser_for_command = {'add': _parse_add_command,
-                          'remove': _parse_remove_command,
-                          'set_weight': _parse_set_weight_command}
+    parser_for_command = {
+        'add': _parse_add_command,
+        'remove': _parse_remove_command,
+        'set_weight': _parse_set_weight_command,
+        'save': _parse_save_command,
+    }

    parsed_scenario['rounds'] = []
    for round_index, raw_round in enumerate(raw_scenario['rounds']):
@@ -268,18 +286,24 @@ def run_scenario(scenario):

    rb = builder.RingBuilder(scenario['part_power'], scenario['replicas'], 1)
    rb.set_overload(scenario['overload'])

+    command_map = {
+        'add': rb.add_dev,
+        'remove': rb.remove_dev,
+        'set_weight': rb.set_dev_weight,
+        'save': rb.save,
+    }
+
    for round_index, commands in enumerate(scenario['rounds']):
        print "Round %d" % (round_index + 1)

        for command in commands:
-            if command[0] == 'add':
-                rb.add_dev(command[1])
-            elif command[0] == 'remove':
-                rb.remove_dev(command[1])
-            elif command[0] == 'set_weight':
-                rb.set_dev_weight(command[1], command[2])
-            else:
-                raise ValueError("unknown command %r" % (command[0],))
+            key = command.pop(0)
+            try:
+                command_f = command_map[key]
+            except KeyError:
+                raise ValueError("unknown command %r" % key)
+            command_f(*command)

    rebalance_number = 1
    parts_moved, old_balance = rb.rebalance(seed=seed)
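A sketch of the dispatch-table refactor in run_scenario, with a stub builder standing in for RingBuilder: popping the command name and splatting the rest of the command means a new verb like 'save' only needs a map entry, not another elif branch.

.. code-block:: python

    class StubBuilder(object):
        # stand-in for swift.common.ring.builder.RingBuilder
        def add_dev(self, dev):
            print('add %r' % dev)

        def set_dev_weight(self, dev_id, weight):
            print('set %d -> %.1f' % (dev_id, weight))

    rb = StubBuilder()
    command_map = {'add': rb.add_dev, 'set_weight': rb.set_dev_weight}

    for command in [['add', {'ip': '10.0.0.1'}], ['set_weight', 0, 2.0]]:
        key = command.pop(0)
        try:
            command_f = command_map[key]
        except KeyError:
            raise ValueError("unknown command %r" % key)
        command_f(*command)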
@@ -930,13 +930,19 @@ swift-ring-builder <builder_file> dispersion <search_filter> [options]
        for tier_name, dispersion in report['graph']:
            replica_counts_repr = replica_counts_tmpl % tuple(
                dispersion['replicas'])
-            print('%-' + str(tier_width) + 's ' + part_count_width +
-                  ' %6.02f %6d %s') % (tier_name,
-                                       dispersion['placed_parts'],
-                                       dispersion['dispersion'],
-                                       dispersion['max_replicas'],
-                                       replica_counts_repr,
-                                       )
+            template = ''.join([
+                '%-', str(tier_width), 's ',
+                part_count_width,
+                ' %6.02f %6d %s',
+            ])
+            args = (
+                tier_name,
+                dispersion['placed_parts'],
+                dispersion['dispersion'],
+                dispersion['max_replicas'],
+                replica_counts_repr,
+            )
+            print(template % args)
    exit(status)

def validate():
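The old code applied % to the value returned by print(), which parses under Python 2's print statement but is None % args once print is a function. Building the template first, then formatting, then printing works either way. A sketch with made-up widths:

.. code-block:: python

    tier_width, part_count_width = 30, '%10d'  # illustrative values
    template = ''.join([
        '%-', str(tier_width), 's ',
        part_count_width,
        ' %6.02f %6d %s',
    ])
    args = ('r1z1-127.0.0.1', 4096, 0.0, 3, '100.00')
    print(template % args)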
@@ -17,7 +17,8 @@ import os
import urllib
import time
from urllib import unquote
-from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+
+from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError

from swift.common import utils, exceptions
from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \
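The same six.moves pattern covers ConfigParser, which Python 3 renames to configparser. A small sketch, assuming six is installed:

.. code-block:: python

    from six.moves.configparser import ConfigParser, NoSectionError

    parser = ConfigParser()
    parser.add_section('app:proxy-server')
    parser.set('app:proxy-server', 'workers', '8')
    print(parser.get('app:proxy-server', 'workers'))
    try:
        parser.get('missing', 'x')
    except NoSectionError as err:
        print(err)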
@@ -13,13 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import ConfigParser
import errno
import hashlib
import hmac
import os
import time

+from six.moves import configparser
+
from swift import gettext_ as _
from swift.common.utils import get_valid_utf8_str
@@ -61,9 +62,9 @@ class ContainerSyncRealms(object):
            if mtime != self.conf_path_mtime:
                self.conf_path_mtime = mtime
                try:
-                    conf = ConfigParser.SafeConfigParser()
+                    conf = configparser.SafeConfigParser()
                    conf.read(self.conf_path)
-                except ConfigParser.ParsingError as err:
+                except configparser.ParsingError as err:
                    self.logger.error(
                        _('Could not load %r: %s'), self.conf_path, err)
                else:
@@ -72,11 +73,11 @@ class ContainerSyncRealms(object):
                            'DEFAULT', 'mtime_check_interval')
                        self.next_mtime_check = \
                            now + self.mtime_check_interval
-                    except ConfigParser.NoOptionError:
+                    except configparser.NoOptionError:
                        self.mtime_check_interval = 300
                        self.next_mtime_check = \
                            now + self.mtime_check_interval
-                    except (ConfigParser.ParsingError, ValueError) as err:
+                    except (configparser.ParsingError, ValueError) as err:
                        self.logger.error(
                            _('Error in %r with mtime_check_interval: %s'),
                            self.conf_path, err)
@@ -23,7 +23,7 @@ from uuid import uuid4
import sys
import time
import errno
-import cPickle as pickle
+import six.moves.cPickle as pickle
from swift import gettext_ as _
from tempfile import mkstemp
@@ -20,10 +20,10 @@ through the proxy.

import os
import socket
-from httplib import HTTPException
from time import time

from eventlet import sleep, Timeout
+from six.moves.http_client import HTTPException

from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ClientException
@@ -401,7 +401,7 @@ def direct_put_object(node, part, account, container, name, contents,
        headers['Content-Length'] = '0'
    if isinstance(contents, basestring):
        contents = [contents]
-    #Incase the caller want to insert an object with specific age
+    # In case the caller wants to insert an object with a specific age
    add_ts = 'X-Timestamp' not in headers

    if content_length is None:
@@ -543,8 +543,8 @@ def retry(func, *args, **kwargs):
    # Shouldn't actually get down here, but just in case.
    if args and 'ip' in args[0]:
        raise ClientException('Raise too many retries',
-                              http_host=args[
-                                  0]['ip'], http_port=args[0]['port'],
+                              http_host=args[0]['ip'],
+                              http_port=args[0]['port'],
                              http_device=args[0]['device'])
    else:
        raise ClientException('Raise too many retries')
@@ -67,7 +67,7 @@ def is_server_error(status):
# List of HTTP status codes

###############################################################################
-## 1xx Informational
+# 1xx Informational
###############################################################################

HTTP_CONTINUE = 100
@@ -77,7 +77,7 @@ HTTP_CHECKPOINT = 103
HTTP_REQUEST_URI_TOO_LONG = 122

###############################################################################
-## 2xx Success
+# 2xx Success
###############################################################################

HTTP_OK = 200
@@ -91,7 +91,7 @@ HTTP_MULTI_STATUS = 207  # WebDAV
HTTP_IM_USED = 226

###############################################################################
-## 3xx Redirection
+# 3xx Redirection
###############################################################################

HTTP_MULTIPLE_CHOICES = 300
@@ -105,7 +105,7 @@ HTTP_TEMPORARY_REDIRECT = 307
HTTP_RESUME_INCOMPLETE = 308

###############################################################################
-## 4xx Client Error
+# 4xx Client Error
###############################################################################

HTTP_BAD_REQUEST = 400
@@ -141,7 +141,7 @@ HTTP_BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450
HTTP_CLIENT_CLOSED_REQUEST = 499

###############################################################################
-## 5xx Server Error
+# 5xx Server Error
###############################################################################

HTTP_INTERNAL_SERVER_ERROR = 500
@@ -704,7 +704,7 @@ class Server(object):
                pid = self.spawn(conf_file, **kwargs)
            except OSError as e:
                if e.errno == errno.ENOENT:
-                    #TODO(clayg): should I check if self.cmd exists earlier?
+                    # TODO(clayg): should I check if self.cmd exists earlier?
                    print(_("%s does not exist") % self.cmd)
                    break
                else:
@@ -44,7 +44,7 @@ version is at:
http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
"""

-import cPickle as pickle
+import six.moves.cPickle as pickle
import logging
import time
from bisect import bisect
@@ -14,7 +14,9 @@
# limitations under the License.

import os
-from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+
+from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError
+
from hashlib import md5
from swift.common import constraints
from swift.common.exceptions import ListingIterError, SegmentError
@@ -410,7 +410,7 @@ class KeystoneAuth(object):
        user_id, user_name = env_identity['user']
        referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))

-        #allow OPTIONS requests to proceed as normal
+        # allow OPTIONS requests to proceed as normal
        if req.method == 'OPTIONS':
            return
@@ -526,7 +526,7 @@ class KeystoneAuth(object):
        except ValueError:
            return HTTPNotFound(request=req)

-        #allow OPTIONS requests to proceed as normal
+        # allow OPTIONS requests to proceed as normal
        if req.method == 'OPTIONS':
            return
@@ -14,7 +14,8 @@
# limitations under the License.

import os
-from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+
+from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError

from swift.common.memcached import (MemcacheRing, CONN_TIMEOUT, POOL_TIMEOUT,
                                    IO_TIMEOUT, TRY_COUNT)
@@ -340,7 +340,7 @@ class ReconMiddleware(object):
        elif rcheck == 'replication' and rtype in all_rtypes:
            content = self.get_replication_info(rtype)
        elif rcheck == 'replication' and rtype is None:
-            #handle old style object replication requests
+            # handle old style object replication requests
            content = self.get_replication_info('object')
        elif rcheck == "devices":
            content = self.get_device_info()
@@ -531,7 +531,7 @@ class TempAuth(object):
            return None

        if req.method == 'OPTIONS':
-            #allow OPTIONS requests to proceed as normal
+            # allow OPTIONS requests to proceed as normal
            self.logger.debug("Allow OPTIONS request.")
            return None
@@ -674,15 +674,15 @@ class TempAuth(object):
            user = req.headers.get('x-auth-user')
            if not user or ':' not in user:
                self.logger.increment('token_denied')
-                return HTTPUnauthorized(request=req, headers=
-                                        {'Www-Authenticate':
-                                         'Swift realm="%s"' % account})
+                auth = 'Swift realm="%s"' % account
+                return HTTPUnauthorized(request=req,
+                                        headers={'Www-Authenticate': auth})
            account2, user = user.split(':', 1)
            if account != account2:
                self.logger.increment('token_denied')
-                return HTTPUnauthorized(request=req, headers=
-                                        {'Www-Authenticate':
-                                         'Swift realm="%s"' % account})
+                auth = 'Swift realm="%s"' % account
+                return HTTPUnauthorized(request=req,
+                                        headers={'Www-Authenticate': auth})
            key = req.headers.get('x-storage-pass')
            if not key:
                key = req.headers.get('x-auth-key')
@@ -692,9 +692,9 @@ class TempAuth(object):
            user = req.headers.get('x-storage-user')
            if not user or ':' not in user:
                self.logger.increment('token_denied')
-                return HTTPUnauthorized(request=req, headers=
-                                        {'Www-Authenticate':
-                                         'Swift realm="unknown"'})
+                auth = 'Swift realm="unknown"'
+                return HTTPUnauthorized(request=req,
+                                        headers={'Www-Authenticate': auth})
            account, user = user.split(':', 1)
        key = req.headers.get('x-auth-key')
        if not key:
@@ -711,14 +711,14 @@ class TempAuth(object):
        account_user = account + ':' + user
        if account_user not in self.users:
            self.logger.increment('token_denied')
-            return HTTPUnauthorized(request=req, headers=
-                                    {'Www-Authenticate':
-                                     'Swift realm="%s"' % account})
+            auth = 'Swift realm="%s"' % account
+            return HTTPUnauthorized(request=req,
+                                    headers={'Www-Authenticate': auth})
        if self.users[account_user]['key'] != key:
            self.logger.increment('token_denied')
-            return HTTPUnauthorized(request=req, headers=
-                                    {'Www-Authenticate':
-                                     'Swift realm="unknown"'})
+            auth = 'Swift realm="unknown"'
+            return HTTPUnauthorized(request=req,
+                                    headers={'Www-Authenticate': auth})
        account_id = self.users[account_user]['url'].rsplit('/', 1)[-1]
        # Get memcache client
        memcache_client = cache_from_env(req.environ)
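The TempAuth hunks all apply one mechanical cleanup: hoist the realm string into a local so the HTTPUnauthorized call fits pep8's line length without the awkward trailing "headers=" break. A sketch with a stub response function (the request/headers keywords mirror the swob calls in the diff):

.. code-block:: python

    def HTTPUnauthorized(request=None, headers=None):
        # stub standing in for swift.common.swob.HTTPUnauthorized
        return '401 Unauthorized', headers

    def challenge(req, account):
        auth = 'Swift realm="%s"' % account
        return HTTPUnauthorized(request=req,
                                headers={'Www-Authenticate': auth})

    print(challenge(None, 'test'))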
@@ -423,7 +423,7 @@ class HTMLViewer(object):
            plt.yticks(y_pos, nfls)
            plt.xlabel(names[metric_selected])
            plt.title('Profile Statistics (by %s)' % names[metric_selected])
-            #plt.gcf().tight_layout(pad=1.2)
+            # plt.gcf().tight_layout(pad=1.2)
            with tempfile.TemporaryFile() as profile_img:
                plt.savefig(profile_img, format='png', dpi=300)
                profile_img.seek(0)
@@ -20,7 +20,7 @@ import itertools
import logging
import math
import random
-import cPickle as pickle
+import six.moves.cPickle as pickle
from copy import deepcopy

from array import array
@@ -14,7 +14,7 @@
# limitations under the License.

import array
-import cPickle as pickle
+import six.moves.cPickle as pickle
import inspect
from collections import defaultdict
from gzip import GzipFile
@@ -11,12 +11,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-from ConfigParser import ConfigParser
-
import os
import string
import textwrap
+import six
+
+from six.moves.configparser import ConfigParser
+
from swift.common.utils import (
    config_true_value, SWIFT_CONF_FILE, whataremyips)
from swift.common.ring import Ring, RingData
@@ -131,7 +131,7 @@ class _UTC(tzinfo):
UTC = _UTC()


-class WsgiStringIO(BytesIO):
+class WsgiBytesIO(BytesIO):
    """
    This class adds support for the additional wsgi.input methods defined on
    eventlet.wsgi.Input to the BytesIO class which would otherwise be a fine
@@ -760,16 +760,16 @@ def _req_environ_property(environ_field):
def _req_body_property():
    """
    Set and retrieve the Request.body parameter. It consumes wsgi.input and
-    returns the results. On assignment, uses a WsgiStringIO to create a new
+    returns the results. On assignment, uses a WsgiBytesIO to create a new
    wsgi.input.
    """
    def getter(self):
        body = self.environ['wsgi.input'].read()
-        self.environ['wsgi.input'] = WsgiStringIO(body)
+        self.environ['wsgi.input'] = WsgiBytesIO(body)
        return body

    def setter(self, value):
-        self.environ['wsgi.input'] = WsgiStringIO(value)
+        self.environ['wsgi.input'] = WsgiBytesIO(value)
        self.environ['CONTENT_LENGTH'] = str(len(value))

    return property(getter, setter, doc="Get and set the request body str")
@@ -837,7 +837,7 @@ class Request(object):
        :param path: encoded, parsed, and unquoted into PATH_INFO
        :param environ: WSGI environ dictionary
        :param headers: HTTP headers
-        :param body: stuffed in a WsgiStringIO and hung on wsgi.input
+        :param body: stuffed in a WsgiBytesIO and hung on wsgi.input
        :param kwargs: any environ key with a property setter
        """
        headers = headers or {}
@@ -872,10 +872,10 @@ class Request(object):
        }
        env.update(environ)
        if body is not None:
-            env['wsgi.input'] = WsgiStringIO(body)
+            env['wsgi.input'] = WsgiBytesIO(body)
            env['CONTENT_LENGTH'] = str(len(body))
        elif 'wsgi.input' not in env:
-            env['wsgi.input'] = WsgiStringIO()
+            env['wsgi.input'] = WsgiBytesIO()
        req = Request(env)
        for key, val in headers.items():
            req.headers[key] = val
@@ -982,7 +982,7 @@ class Request(object):
        env.update({
            'REQUEST_METHOD': 'GET',
            'CONTENT_LENGTH': '0',
-            'wsgi.input': WsgiStringIO(),
+            'wsgi.input': WsgiBytesIO(),
        })
        return Request(env)

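WsgiStringIO becomes WsgiBytesIO to say what it actually holds: bytes. A hedged sketch of the idea behind the class, a BytesIO that also answers the extra wsgi.input methods eventlet defines; the method names below follow eventlet.wsgi.Input, but the no-op bodies are an assumption for illustration:

.. code-block:: python

    from io import BytesIO

    class WsgiBytesIO(BytesIO):
        # buffered bodies have nothing to negotiate, so 100-continue
        # support can be a no-op
        def set_hundred_continue_response_headers(self, headers):
            pass

        def send_hundred_continue_response(self):
            pass

    body = b'hello'
    env = {'wsgi.input': WsgiBytesIO(body), 'CONTENT_LENGTH': str(len(body))}
    print(env['wsgi.input'].read())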
@@ -38,16 +38,14 @@ from urllib import quote as _quote
from contextlib import contextmanager, closing
import ctypes
import ctypes.util
-from ConfigParser import ConfigParser, NoSectionError, NoOptionError, \
-    RawConfigParser
from optparse import OptionParser
-from Queue import Queue, Empty

from tempfile import mkstemp, NamedTemporaryFile
try:
    import simplejson as json
except ImportError:
    import json
-import cPickle as pickle
+import six.moves.cPickle as pickle
import glob
from urlparse import urlparse as stdlib_urlparse, ParseResult
import itertools
@@ -64,6 +62,9 @@ import netifaces
import codecs
utf8_decoder = codecs.getdecoder('utf-8')
utf8_encoder = codecs.getencoder('utf-8')
+from six.moves.configparser import ConfigParser, NoSectionError, \
+    NoOptionError, RawConfigParser
+from six.moves.queue import Queue, Empty
from six.moves import range

from swift import gettext_ as _
@@ -568,9 +569,9 @@ class FallocateWrapper(object):
            self.func_name = 'posix_fallocate'
            self.fallocate = noop_libc_function
            return
-        ## fallocate is preferred because we need the on-disk size to match
-        ## the allocated size. Older versions of sqlite require that the
-        ## two sizes match. However, fallocate is Linux only.
+        # fallocate is preferred because we need the on-disk size to match
+        # the allocated size. Older versions of sqlite require that the
+        # two sizes match. However, fallocate is Linux only.
        for func in ('fallocate', 'posix_fallocate'):
            self.func_name = func
            self.fallocate = load_libc_function(func, log_error=False)
@@ -461,10 +461,14 @@ class WorkersStrategy(object):

    def loop_timeout(self):
        """
-        :returns: None; to block in :py:func:`green.os.wait`
+        We want to keep from busy-waiting, but we also need a non-None value so
+        the main loop gets a chance to tell whether it should keep running or
+        not (e.g. SIGHUP received).
+
+        So we return 0.5.
        """

-        return None
+        return 0.5

    def bind_ports(self):
        """
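The loop_timeout() change trades a fully blocking wait for a half-second poll so the main loop can notice a pending signal. A sketch of the effect, assuming a POSIX platform for SIGHUP (time.sleep stands in for the real green.os.wait):

.. code-block:: python

    import signal
    import time

    hup_received = []
    signal.signal(signal.SIGHUP, lambda sig, frame: hup_received.append(sig))

    def run_once(loop_timeout=0.5):
        deadline = time.time() + 2  # bounded here so the example terminates
        while time.time() < deadline:
            if hup_received:
                return 'reload'          # noticed within ~0.5s
            time.sleep(loop_timeout)
        return 'timeout'

    print(run_once())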
@@ -19,7 +19,7 @@ Pluggable Back-ends for Container Server
import os
from uuid import uuid4
import time
-import cPickle as pickle
+import six.moves.cPickle as pickle

from six.moves import range
import sqlite3
@@ -185,7 +185,11 @@ class ContainerController(BaseStorageServer):
            return HTTPBadRequest(req=req)

        if account_partition:
-            updates = zip(account_hosts, account_devices)
+            # zip is lazy on py3, but we need a list, so force evaluation.
+            # On py2 it's an extra list copy, but the list is so small
+            # (one element per replica in account ring, usually 3) that it
+            # doesn't matter.
+            updates = list(zip(account_hosts, account_devices))
        else:
            updates = []
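A sketch of the Python 3 pitfall the new comment describes: zip() returns a one-shot iterator there, so a second pass sees nothing unless the result is materialized first.

.. code-block:: python

    hosts = ['10.0.0.1:6201', '10.0.0.2:6201']
    devices = ['sdb1', 'sdb2']

    updates = zip(hosts, devices)
    list(updates)               # first pass drains the py3 iterator
    print(list(updates))        # []: nothing left on py3

    updates = list(zip(hosts, devices))
    print(len(updates), updates[0])  # a real list survives reuse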
@@ -1,19 +1,19 @@
# Translations template for swift.
-# Copyright (C) 2014 ORGANIZATION
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n"
+"Project-Id-Version: swift 2.3.1.dev213\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-09-22 06:07+0000\n"
+"POT-Creation-Date: 2015-07-29 06:35+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"

@@ -1,19 +1,19 @@
# Translations template for swift.
-# Copyright (C) 2014 ORGANIZATION
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n"
+"Project-Id-Version: swift 2.3.1.dev213\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-09-22 06:07+0000\n"
+"POT-Creation-Date: 2015-07-29 06:35+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"

@@ -1,19 +1,19 @@
# Translations template for swift.
-# Copyright (C) 2014 ORGANIZATION
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n"
+"Project-Id-Version: swift 2.3.1.dev213\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-09-22 06:07+0000\n"
+"POT-Creation-Date: 2015-07-29 06:35+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"

@@ -1,19 +1,19 @@
# Translations template for swift.
-# Copyright (C) 2014 ORGANIZATION
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n"
+"Project-Id-Version: swift 2.3.1.dev213\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-09-22 06:07+0000\n"
+"POT-Creation-Date: 2015-07-29 06:35+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"

@@ -6,16 +6,16 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.3.1.dev170\n"
+"Project-Id-Version: swift 2.3.1.dev213\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-07-23 06:11+0000\n"
+"POT-Creation-Date: 2015-07-29 06:35+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"

#: swift/account/auditor.py:59
#, python-format
@@ -63,7 +63,7 @@ msgstr ""
msgid "ERROR Could not get account info %s"
msgstr ""

-#: swift/account/reaper.py:134 swift/common/utils.py:2146
+#: swift/account/reaper.py:134 swift/common/utils.py:2147
#: swift/obj/diskfile.py:480 swift/obj/updater.py:88 swift/obj/updater.py:131
#, python-format
msgid "Skipping %s as it is not mounted"
@@ -153,7 +153,7 @@ msgstr ""
msgid "Exception with objects for container %(container)s for account %(account)s"
msgstr ""

-#: swift/account/server.py:275 swift/container/server.py:582
+#: swift/account/server.py:275 swift/container/server.py:586
#: swift/obj/server.py:914
#, python-format
msgid "ERROR __call__ error with %(method)s %(path)s "
@@ -164,13 +164,13 @@ msgstr ""
msgid "Error encoding to UTF-8: %s"
msgstr ""

-#: swift/common/container_sync_realms.py:59
-#: swift/common/container_sync_realms.py:68
+#: swift/common/container_sync_realms.py:60
+#: swift/common/container_sync_realms.py:69
#, python-format
msgid "Could not load %r: %s"
msgstr ""

-#: swift/common/container_sync_realms.py:81
+#: swift/common/container_sync_realms.py:82
#, python-format
msgid "Error in %r with mtime_check_interval: %s"
msgstr ""
@@ -397,110 +397,110 @@ msgstr ""
msgid "ERROR: An error occurred while retrieving segments"
msgstr ""

-#: swift/common/utils.py:390
+#: swift/common/utils.py:391
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr ""

-#: swift/common/utils.py:580
+#: swift/common/utils.py:581
msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""

-#: swift/common/utils.py:664
+#: swift/common/utils.py:665
#, python-format
msgid "Unable to perform fsync() on directory %s: %s"
msgstr ""

-#: swift/common/utils.py:1076
+#: swift/common/utils.py:1077
#, python-format
msgid "%s: Connection reset by peer"
msgstr ""

-#: swift/common/utils.py:1078 swift/common/utils.py:1081
+#: swift/common/utils.py:1079 swift/common/utils.py:1082
#, python-format
msgid "%s: %s"
msgstr ""

-#: swift/common/utils.py:1316
+#: swift/common/utils.py:1317
msgid "Connection refused"
msgstr ""

-#: swift/common/utils.py:1318
+#: swift/common/utils.py:1319
msgid "Host unreachable"
msgstr ""

-#: swift/common/utils.py:1320
+#: swift/common/utils.py:1321
msgid "Connection timeout"
msgstr ""

-#: swift/common/utils.py:1623
+#: swift/common/utils.py:1624
msgid "UNCAUGHT EXCEPTION"
msgstr ""

-#: swift/common/utils.py:1678
+#: swift/common/utils.py:1679
msgid "Error: missing config path argument"
msgstr ""

-#: swift/common/utils.py:1683
+#: swift/common/utils.py:1684
#, python-format
msgid "Error: unable to locate %s"
msgstr ""

-#: swift/common/utils.py:2007
+#: swift/common/utils.py:2008
#, python-format
msgid "Unable to read config from %s"
msgstr ""

-#: swift/common/utils.py:2013
+#: swift/common/utils.py:2014
#, python-format
msgid "Unable to find %s config section in %s"
msgstr ""

-#: swift/common/utils.py:2372
+#: swift/common/utils.py:2373
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr ""

-#: swift/common/utils.py:2377
+#: swift/common/utils.py:2378
#, python-format
msgid "No realm key for %r"
msgstr ""

-#: swift/common/utils.py:2381
+#: swift/common/utils.py:2382
#, python-format
msgid "No cluster endpoint for %r %r"
msgstr ""

-#: swift/common/utils.py:2390
+#: swift/common/utils.py:2391
#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
msgstr ""

-#: swift/common/utils.py:2394
+#: swift/common/utils.py:2395
msgid "Path required in X-Container-Sync-To"
msgstr ""

-#: swift/common/utils.py:2397
+#: swift/common/utils.py:2398
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr ""

-#: swift/common/utils.py:2402
+#: swift/common/utils.py:2403
#, python-format
msgid "Invalid host %r in X-Container-Sync-To"
msgstr ""

-#: swift/common/utils.py:2594
+#: swift/common/utils.py:2595
msgid "Exception dumping recon cache"
msgstr ""

-#: swift/common/wsgi.py:198
+#: swift/common/wsgi.py:199
#, python-format
msgid "Could not bind to %s:%s after trying for %s seconds"
msgstr ""

-#: swift/common/wsgi.py:208
+#: swift/common/wsgi.py:209
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external "
"SSL termination for a production deployment."
@@ -654,14 +654,14 @@ msgid ""
"request: \"%s\" vs \"%s\""
msgstr ""

-#: swift/container/server.py:221
+#: swift/container/server.py:225
#, python-format
msgid ""
"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
"later): Response %(status)s %(reason)s"
msgstr ""

-#: swift/container/server.py:230
+#: swift/container/server.py:234
#, python-format
msgid ""
"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
@@ -732,7 +732,7 @@ msgstr ""
msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr ""

-#: swift/container/updater.py:92 swift/obj/reconstructor.py:797
+#: swift/container/updater.py:92 swift/obj/reconstructor.py:815
#: swift/obj/replicator.py:498 swift/obj/replicator.py:586
#, python-format
msgid "%s is not mounted"
@@ -925,7 +925,7 @@ msgstr ""
msgid "Exception while deleting object %s %s %s"
msgstr ""

-#: swift/obj/reconstructor.py:208 swift/obj/reconstructor.py:478
+#: swift/obj/reconstructor.py:208 swift/obj/reconstructor.py:490
#, python-format
msgid "Invalid response %(resp)s from %(full_path)s"
msgstr ""
@ -940,75 +940,76 @@ msgstr ""
|
|||
msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:344
|
||||
#: swift/obj/reconstructor.py:348
|
||||
#, python-format
|
||||
msgid ""
|
||||
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed"
|
||||
" in %(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
|
||||
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of "
|
||||
"%(device)d/%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in "
|
||||
"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:357 swift/obj/replicator.py:430
|
||||
#: swift/obj/reconstructor.py:369 swift/obj/replicator.py:430
|
||||
#, python-format
|
||||
msgid ""
|
||||
"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
|
||||
"synced"
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:364 swift/obj/replicator.py:437
|
||||
#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:437
|
||||
#, python-format
|
||||
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:372
|
||||
#: swift/obj/reconstructor.py:384
|
||||
#, python-format
|
||||
msgid "Nothing reconstructed for %s seconds."
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:401 swift/obj/replicator.py:474
|
||||
#: swift/obj/reconstructor.py:413 swift/obj/replicator.py:474
|
||||
msgid "Lockup detected.. killing live coros."
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:448
|
||||
#: swift/obj/reconstructor.py:460
|
||||
#, python-format
|
||||
msgid "Trying to sync suffixes with %s"
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:473
|
||||
#: swift/obj/reconstructor.py:485
|
||||
#, python-format
|
||||
msgid "%s responded as unmounted"
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:860 swift/obj/replicator.py:306
|
||||
#: swift/obj/reconstructor.py:886 swift/obj/replicator.py:306
|
||||
#, python-format
|
||||
msgid "Removing partition: %s"
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:876
|
||||
#: swift/obj/reconstructor.py:902
|
||||
msgid "Ring change detected. Aborting current reconstruction pass."
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:895
|
||||
#: swift/obj/reconstructor.py:921
|
||||
msgid "Exception in top-levelreconstruction loop"
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:905
|
||||
#: swift/obj/reconstructor.py:931
|
||||
msgid "Running object reconstructor in script mode."
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:914
|
||||
#: swift/obj/reconstructor.py:940
|
||||
#, python-format
|
||||
msgid "Object reconstruction complete (once). (%.02f minutes)"
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:921
|
||||
#: swift/obj/reconstructor.py:947
|
||||
msgid "Starting object reconstructor in daemon mode."
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:925
|
||||
#: swift/obj/reconstructor.py:951
|
||||
msgid "Starting object reconstruction pass."
|
||||
msgstr ""
|
||||
|
||||
#: swift/obj/reconstructor.py:930
|
||||
#: swift/obj/reconstructor.py:956
|
||||
#, python-format
|
||||
msgid "Object reconstruction complete. (%.02f minutes)"
|
||||
msgstr ""
|
||||
|
|
|
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-07-23 06:11+0000\n"
"PO-Revision-Date: 2015-07-14 01:18+0000\n"
"POT-Creation-Date: 2015-07-29 06:35+0000\n"
"PO-Revision-Date: 2015-07-28 00:33+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/swift/"
"language/zh_CN/)\n"

@@ -17,7 +17,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
"Generated-By: Babel 2.0\n"

msgid ""
"\n"
@@ -79,7 +79,7 @@ class AuditorWorker(object):
        else:
            description = _(' - %s') % device_dir_str
        self.logger.info(_('Begin object audit "%s" mode (%s%s)') %
                        (mode, self.auditor_type, description))
                         (mode, self.auditor_type, description))
        begin = reported = time.time()
        self.total_bytes_processed = 0
        self.total_files_processed = 0
File diff suppressed because it is too large
@@ -19,7 +19,7 @@ import random
import time
import itertools
from collections import defaultdict
import cPickle as pickle
import six.moves.cPickle as pickle
import shutil

from eventlet import (GreenPile, GreenPool, Timeout, sleep, hubs, tpool,

@@ -319,11 +319,11 @@ class ObjectReconstructor(Daemon):
                except (Exception, Timeout):
                    self.logger.exception(
                        _("Error trying to rebuild %(path)s "
                          "policy#%(policy)d frag#%(frag_index)s"), {
                              'path': path,
                              'policy': policy,
                              'frag_index': frag_index,
                          })
                        _("Error trying to rebuild %(path)s "
                          "policy#%(policy)d frag#%(frag_index)s"),
                        {'path': path,
                         'policy': policy,
                         'frag_index': frag_index,
                         })
                    break
            if not all(fragment_payload):
                break

@@ -337,22 +337,34 @@ class ObjectReconstructor(Daemon):
        """
        Logs various stats for the currently running reconstruction pass.
        """
        if self.reconstruction_count:
        if (self.device_count and self.part_count and
                self.reconstruction_device_count):
            elapsed = (time.time() - self.start) or 0.000001
            rate = self.reconstruction_count / elapsed
            rate = self.reconstruction_part_count / elapsed
            total_part_count = (self.part_count *
                                self.device_count /
                                self.reconstruction_device_count)
            self.logger.info(
                _("%(reconstructed)d/%(total)d (%(percentage).2f%%)"
                  " partitions reconstructed in %(time).2fs (%(rate).2f/sec, "
                  "%(remaining)s remaining)"),
                {'reconstructed': self.reconstruction_count,
                 'total': self.job_count,
                  " partitions of %(device)d/%(dtotal)d "
                  "(%(dpercentage).2f%%) devices"
                  " reconstructed in %(time).2fs "
                  "(%(rate).2f/sec, %(remaining)s remaining)"),
                {'reconstructed': self.reconstruction_part_count,
                 'total': self.part_count,
                 'percentage':
                 self.reconstruction_count * 100.0 / self.job_count,
                 self.reconstruction_part_count * 100.0 / self.part_count,
                 'device': self.reconstruction_device_count,
                 'dtotal': self.device_count,
                 'dpercentage':
                 self.reconstruction_device_count * 100.0 / self.device_count,
                 'time': time.time() - self.start, 'rate': rate,
                 'remaining': '%d%s' % compute_eta(self.start,
                                                   self.reconstruction_count,
                                                   self.job_count)})
        if self.suffix_count:
                 'remaining': '%d%s' %
                 compute_eta(self.start,
                             self.reconstruction_part_count,
                             total_part_count)})

        if self.suffix_count and self.partition_times:
            self.logger.info(
                _("%(checked)d suffixes checked - "
                  "%(hashed).2f%% hashed, %(synced).2f%% synced"),

@@ -474,14 +486,11 @@ class ObjectReconstructor(Daemon):
                        self._full_path(node, job['partition'], '',
                                        job['policy']))
                elif resp.status != HTTP_OK:
                    full_path = self._full_path(node, job['partition'], '',
                                                job['policy'])
                    self.logger.error(
                        _("Invalid response %(resp)s "
                          "from %(full_path)s"), {
                              'resp': resp.status,
                              'full_path': self._full_path(
                                  node, job['partition'], '',
                                  job['policy'])
                          })
                        _("Invalid response %(resp)s from %(full_path)s"),
                        {'resp': resp.status, 'full_path': full_path})
                else:
                    remote_suffixes = pickle.loads(resp.read())
            except (Exception, Timeout):

@@ -781,16 +790,22 @@ class ObjectReconstructor(Daemon):
            self._diskfile_mgr = self._df_router[policy]
            self.load_object_ring(policy)
            data_dir = get_data_dir(policy)
            local_devices = itertools.ifilter(
            local_devices = list(itertools.ifilter(
                lambda dev: dev and is_local_device(
                    ips, self.port,
                    dev['replication_ip'], dev['replication_port']),
                policy.object_ring.devs)
                policy.object_ring.devs))

            if override_devices:
                self.device_count = len(override_devices)
            else:
                self.device_count = len(local_devices)

            for local_dev in local_devices:
                if override_devices and (local_dev['device'] not in
                                         override_devices):
                    continue
                self.reconstruction_device_count += 1
                dev_path = self._df_router[policy].get_dev_path(
                    local_dev['device'])
                if not dev_path:

@@ -814,6 +829,8 @@ class ObjectReconstructor(Daemon):
                    self.logger.exception(
                        'Unable to list partitions in %r' % obj_path)
                    continue

                self.part_count += len(partitions)
                for partition in partitions:
                    part_path = join(obj_path, partition)
                    if not (partition.isdigit() and

@@ -821,6 +838,7 @@ class ObjectReconstructor(Daemon):
                        self.logger.warning(
                            'Unexpected entity in data dir: %r' % part_path)
                        remove_file(part_path)
                        self.reconstruction_part_count += 1
                        continue
                    partition = int(partition)
                    if override_partitions and (partition not in

@@ -833,6 +851,7 @@ class ObjectReconstructor(Daemon):
                        'part_path': part_path,
                    }
                    yield part_info
                    self.reconstruction_part_count += 1

    def build_reconstruction_jobs(self, part_info):
        """

@@ -850,10 +869,14 @@ class ObjectReconstructor(Daemon):
    def _reset_stats(self):
        self.start = time.time()
        self.job_count = 0
        self.part_count = 0
        self.device_count = 0
        self.suffix_count = 0
        self.suffix_sync = 0
        self.suffix_hash = 0
        self.reconstruction_count = 0
        self.reconstruction_part_count = 0
        self.reconstruction_device_count = 0
        self.last_reconstruction_count = -1

    def delete_partition(self, path):
@@ -20,7 +20,7 @@ import random
import shutil
import time
import itertools
import cPickle as pickle
import six.moves.cPickle as pickle
from swift import gettext_ as _

import eventlet

@@ -37,8 +37,7 @@ from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.obj import ssync_sender
from swift.obj.diskfile import (DiskFileManager, get_hashes, get_data_dir,
                                get_tmp_dir)
from swift.obj.diskfile import DiskFileManager, get_data_dir, get_tmp_dir
from swift.common.storage_policy import POLICIES, REPL_POLICY


@@ -332,7 +331,7 @@ class ObjectReplicator(Daemon):
        begin = time.time()
        try:
            hashed, local_hash = tpool_reraise(
                get_hashes, job['path'],
                self._diskfile_mgr._get_hashes, job['path'],
                do_listdir=(self.replication_count % 10) == 0,
                reclaim_age=self.reclaim_age)
            self.suffix_hash += hashed

@@ -377,7 +376,7 @@ class ObjectReplicator(Daemon):
            if not suffixes:
                continue
            hashed, recalc_hash = tpool_reraise(
                get_hashes,
                self._diskfile_mgr._get_hashes,
                job['path'], recalculate=suffixes,
                reclaim_age=self.reclaim_age)
            self.logger.update_stats('suffix.hashes', hashed)
@@ -15,7 +15,7 @@

""" Object Server for Swift """

import cPickle as pickle
import six.moves.cPickle as pickle
import json
import os
import multiprocessing

@@ -685,9 +685,6 @@ class ObjectController(BaseStorageServer):
        """Handle HTTP GET requests for the Swift Object Server."""
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        keep_cache = self.keep_cache_private or (
            'X-Auth-Token' not in request.headers and
            'X-Storage-Token' not in request.headers)
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
@@ -319,7 +319,11 @@ class Receiver(object):
                header = header.strip().lower()
                value = value.strip()
                subreq.headers[header] = value
                replication_headers.append(header)
                if header != 'etag':
                    # make sure ssync doesn't cause 'Etag' to be added to
                    # obj metadata in addition to 'ETag' which object server
                    # sets (note capitalization)
                    replication_headers.append(header)
                if header == 'content-length':
                    content_length = int(value)
            # Establish subrequest body, if needed.
@@ -211,8 +211,10 @@ class Sender(object):
            self.job['policy'], self.suffixes,
            frag_index=self.job.get('frag_index'))
        if self.remote_check_objs is not None:
            hash_gen = ifilter(lambda (path, object_hash, timestamp):
                               object_hash in self.remote_check_objs, hash_gen)
            hash_gen = ifilter(
                lambda path_objhash_timestamp:
                path_objhash_timestamp[1] in
                self.remote_check_objs, hash_gen)
        for path, object_hash, timestamp in hash_gen:
            self.available_map[object_hash] = timestamp
        with exceptions.MessageTimeout(
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import cPickle as pickle
import six.moves.cPickle as pickle
import os
import signal
import sys
@@ -569,11 +569,11 @@ class Application(object):
        else:
            log = self.logger.exception
        log(_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s'
              ' re: %(info)s'), {
                  'type': typ, 'ip': node['ip'], 'port':
                  node['port'], 'device': node['device'],
                  'info': additional_info
              }, **kwargs)
              ' re: %(info)s'),
            {'type': typ, 'ip': node['ip'],
             'port': node['port'], 'device': node['device'],
             'info': additional_info},
            **kwargs)

    def modify_wsgi_pipeline(self, pipe):
        """
@@ -3,7 +3,7 @@
# process, which may cause wedges in the gate later.

# Hacking already pins down pep8, pyflakes and flake8
hacking>=0.8.0,<0.9
hacking>=0.10.0,<0.11
coverage
nose
nosexcover
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import httplib
import mock
import os
import sys

@@ -24,15 +23,19 @@ import eventlet
import eventlet.debug
import functools
import random
from ConfigParser import ConfigParser, NoSectionError

from time import time, sleep
from httplib import HTTPException
from urlparse import urlparse
from nose import SkipTest
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp

from six.moves.configparser import ConfigParser, NoSectionError
from six.moves import http_client
from six.moves.http_client import HTTPException

from swift.common.middleware.memcache import MemcacheMiddleware
from swift.common.storage_policy import parse_storage_policies, PolicyError

@@ -53,7 +56,7 @@ from swift.container import server as container_server
from swift.obj import server as object_server, mem_server as mem_object_server
import swift.proxy.controllers.obj

httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT
http_client._MAXHEADERS = constraints.MAX_HEADER_COUNT
DEBUG = True

# In order to get the proper blocking behavior of sockets without using
@@ -14,7 +14,6 @@
# limitations under the License.

import hashlib
import httplib
import os
import random
import socket

@@ -22,11 +21,11 @@ import time
import urllib

import simplejson as json

from nose import SkipTest
from xml.dom import minidom

import six
from six.moves import http_client
from swiftclient import get_auth

from swift.common import constraints

@@ -34,7 +33,7 @@ from swift.common.utils import config_true_value

from test import safe_repr

httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT
http_client._MAXHEADERS = constraints.MAX_HEADER_COUNT


class AuthenticationFailed(Exception):

@@ -166,10 +165,10 @@ class Connection(object):
        x = storage_url.split('/')

        if x[0] == 'http:':
            self.conn_class = httplib.HTTPConnection
            self.conn_class = http_client.HTTPConnection
            self.storage_port = 80
        elif x[0] == 'https:':
            self.conn_class = httplib.HTTPSConnection
            self.conn_class = http_client.HTTPSConnection
            self.storage_port = 443
        else:
            raise ValueError('unexpected protocol %s' % (x[0]))

@@ -209,7 +208,7 @@ class Connection(object):
    def http_connect(self):
        self.connection = self.conn_class(self.storage_host,
                                          port=self.storage_port)
        #self.connection.set_debuglevel(3)
        # self.connection.set_debuglevel(3)

    def make_path(self, path=None, cfg=None):
        if path is None:

@@ -283,7 +282,7 @@ class Connection(object):

            try:
                self.response = try_request()
            except httplib.HTTPException as e:
            except http_client.HTTPException as e:
                fail_messages.append(safe_repr(e))
                continue

@@ -335,7 +334,7 @@ class Connection(object):

        self.connection = self.conn_class(self.storage_host,
                                          port=self.storage_port)
        #self.connection.set_debuglevel(3)
        # self.connection.set_debuglevel(3)
        self.connection.putrequest('PUT', path)
        for key, value in headers.items():
            self.connection.putheader(key, value)
@@ -89,22 +89,22 @@ class TestAccount(unittest.TestCase):
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-account-meta-test'), None)
        resp = retry(get)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-account-meta-test'), None)
        resp = retry(post, 'Value')
        resp.read()
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-account-meta-test'), 'Value')
        resp = retry(get)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-account-meta-test'), 'Value')

    def test_invalid_acls(self):

@@ -204,7 +204,7 @@ class TestAccount(unittest.TestCase):
        # read-only can read account headers
        resp = retry(get, use_account=3)
        resp.read()
        self.assert_(resp.status in (200, 204))
        self.assertIn(resp.status, (200, 204))
        # but not acls
        self.assertEqual(resp.getheader('X-Account-Access-Control'), None)

@@ -221,7 +221,7 @@ class TestAccount(unittest.TestCase):
        self.assertEqual(resp.status, 204)
        resp = retry(get, use_account=3)
        resp.read()
        self.assert_(resp.status in (200, 204))
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('X-Account-Meta-Test'), 'value')

    @requires_acls

@@ -255,7 +255,7 @@ class TestAccount(unittest.TestCase):
        # read-write can read account headers
        resp = retry(get, use_account=3)
        resp.read()
        self.assert_(resp.status in (200, 204))
        self.assertIn(resp.status, (200, 204))
        # but not acls
        self.assertEqual(resp.getheader('X-Account-Access-Control'), None)

@@ -296,7 +296,7 @@ class TestAccount(unittest.TestCase):
        # admin can read account headers
        resp = retry(get, use_account=3)
        resp.read()
        self.assert_(resp.status in (200, 204))
        self.assertIn(resp.status, (200, 204))
        # including acls
        self.assertEqual(resp.getheader('X-Account-Access-Control'),
                         acl_json_str)

@@ -309,7 +309,7 @@ class TestAccount(unittest.TestCase):
        self.assertEqual(resp.status, 204)
        resp = retry(get, use_account=3)
        resp.read()
        self.assert_(resp.status in (200, 204))
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('X-Account-Meta-Test'), value)

        # admin can even revoke their own access

@@ -359,8 +359,9 @@ class TestAccount(unittest.TestCase):
        # read-only tester3 can read account metadata
        resp = retry(get, use_account=3)
        resp.read()
        self.assert_(resp.status in (200, 204),
                     'Expected status in (200, 204), got %s' % resp.status)
        self.assertTrue(
            resp.status in (200, 204),
            'Expected status in (200, 204), got %s' % resp.status)
        self.assertEqual(resp.getheader('X-Account-Meta-Test'), value)
        # but not temp-url-key
        self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None)

@@ -377,8 +378,9 @@ class TestAccount(unittest.TestCase):
        # read-write tester3 can read account metadata
        resp = retry(get, use_account=3)
        resp.read()
        self.assert_(resp.status in (200, 204),
                     'Expected status in (200, 204), got %s' % resp.status)
        self.assertTrue(
            resp.status in (200, 204),
            'Expected status in (200, 204), got %s' % resp.status)
        self.assertEqual(resp.getheader('X-Account-Meta-Test'), value)
        # but not temp-url-key
        self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None)

@@ -395,8 +397,9 @@ class TestAccount(unittest.TestCase):
        # admin tester3 can read account metadata
        resp = retry(get, use_account=3)
        resp.read()
        self.assert_(resp.status in (200, 204),
                     'Expected status in (200, 204), got %s' % resp.status)
        self.assertTrue(
            resp.status in (200, 204),
            'Expected status in (200, 204), got %s' % resp.status)
        self.assertEqual(resp.getheader('X-Account-Meta-Test'), value)
        # including temp-url-key
        self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'),

@@ -412,8 +415,9 @@ class TestAccount(unittest.TestCase):
        self.assertEqual(resp.status, 204)
        resp = retry(get, use_account=3)
        resp.read()
        self.assert_(resp.status in (200, 204),
                     'Expected status in (200, 204), got %s' % resp.status)
        self.assertTrue(
            resp.status in (200, 204),
            'Expected status in (200, 204), got %s' % resp.status)
        self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'),
                         secret)

@@ -689,17 +693,17 @@ class TestAccount(unittest.TestCase):
        if (tf.web_front_end == 'integral'):
            resp = retry(post, uni_key, '1')
            resp.read()
            self.assertTrue(resp.status in (201, 204))
            self.assertIn(resp.status, (201, 204))
            resp = retry(head)
            resp.read()
            self.assert_(resp.status in (200, 204), resp.status)
            self.assertIn(resp.status, (200, 204))
            self.assertEqual(resp.getheader(uni_key.encode('utf-8')), '1')
        resp = retry(post, 'X-Account-Meta-uni', uni_value)
        resp.read()
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('X-Account-Meta-uni'),
                         uni_value.encode('utf-8'))
        if (tf.web_front_end == 'integral'):

@@ -708,7 +712,7 @@ class TestAccount(unittest.TestCase):
            self.assertEqual(resp.status, 204)
            resp = retry(head)
            resp.read()
            self.assert_(resp.status in (200, 204), resp.status)
            self.assertIn(resp.status, (200, 204))
            self.assertEqual(resp.getheader(uni_key.encode('utf-8')),
                             uni_value.encode('utf-8'))

@@ -730,14 +734,14 @@ class TestAccount(unittest.TestCase):
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-account-meta-one'), '1')
        resp = retry(post, 'X-Account-Meta-Two', '2')
        resp.read()
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-account-meta-one'), '1')
        self.assertEqual(resp.getheader('x-account-meta-two'), '2')

@@ -875,7 +879,7 @@ class TestAccountInNonDefaultDomain(unittest.TestCase):
        resp = retry(head, use_account=4)
        resp.read()
        self.assertEqual(resp.status, 204)
        self.assertTrue('X-Account-Project-Domain-Id' in resp.headers)
        self.assertIn('X-Account-Project-Domain-Id', resp.headers)


if __name__ == '__main__':
@@ -72,7 +72,7 @@ class TestContainer(unittest.TestCase):
            body = resp.read()
            if resp.status == 404:
                break
            self.assert_(resp.status // 100 == 2, resp.status)
            self.assertTrue(resp.status // 100 == 2, resp.status)
            objs = json.loads(body)
            if not objs:
                break

@@ -93,7 +93,7 @@ class TestContainer(unittest.TestCase):
        # container may have not been created
        resp = retry(delete, self.container)
        resp.read()
        self.assert_(resp.status in (204, 404))
        self.assertIn(resp.status, (204, 404))

    def test_multi_metadata(self):
        if tf.skip:

@@ -114,14 +114,14 @@ class TestContainer(unittest.TestCase):
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-one'), '1')
        resp = retry(post, 'X-Container-Meta-Two', '2')
        resp.read()
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-one'), '1')
        self.assertEqual(resp.getheader('x-container-meta-two'), '2')

@@ -147,14 +147,14 @@ class TestContainer(unittest.TestCase):
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader(uni_key.encode('utf-8')), '1')
        resp = retry(post, 'X-Container-Meta-uni', uni_value)
        resp.read()
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('X-Container-Meta-uni'),
                         uni_value.encode('utf-8'))
        if (tf.web_front_end == 'integral'):

@@ -163,7 +163,7 @@ class TestContainer(unittest.TestCase):
            self.assertEqual(resp.status, 204)
            resp = retry(head)
            resp.read()
            self.assert_(resp.status in (200, 204), resp.status)
            self.assertIn(resp.status, (200, 204))
            self.assertEqual(resp.getheader(uni_key.encode('utf-8')),
                             uni_value.encode('utf-8'))

@@ -198,11 +198,11 @@ class TestContainer(unittest.TestCase):
        self.assertEqual(resp.status, 201)
        resp = retry(head, name)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
        resp = retry(get, name)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
        resp = retry(delete, name)
        resp.read()

@@ -214,11 +214,11 @@ class TestContainer(unittest.TestCase):
        self.assertEqual(resp.status, 201)
        resp = retry(head, name)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-test'), None)
        resp = retry(get, name)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-test'), None)
        resp = retry(delete, name)
        resp.read()

@@ -246,22 +246,22 @@ class TestContainer(unittest.TestCase):

        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-test'), None)
        resp = retry(get)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-test'), None)
        resp = retry(post, 'Value')
        resp.read()
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
        resp = retry(get)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertIn(resp.status, (200, 204))
        self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')

    def test_PUT_bad_metadata(self):

@@ -484,7 +484,7 @@ class TestContainer(unittest.TestCase):
            resp = retry(get)
            raise Exception('Should not have been able to GET')
        except Exception as err:
            self.assert_(str(err).startswith('No result after '), err)
            self.assertTrue(str(err).startswith('No result after '), err)

        def post(url, token, parsed, conn):
            conn.request('POST', parsed.path + '/' + self.name, '',

@@ -511,7 +511,7 @@ class TestContainer(unittest.TestCase):
            resp = retry(get)
            raise Exception('Should not have been able to GET')
        except Exception as err:
            self.assert_(str(err).startswith('No result after '), err)
            self.assertTrue(str(err).startswith('No result after '), err)

    def test_cross_account_container(self):
        if tf.skip or tf.skip2:

@@ -743,7 +743,7 @@ class TestContainer(unittest.TestCase):
        resp = retry(get, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(self.name in listing)
        self.assertIn(self.name, listing)

        # read-only can not create containers
        new_container_name = str(uuid4())

@@ -758,7 +758,7 @@ class TestContainer(unittest.TestCase):
        resp = retry(get, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(new_container_name in listing)
        self.assertIn(new_container_name, listing)

    @requires_acls
    def test_read_only_acl_metadata(self):

@@ -858,7 +858,7 @@ class TestContainer(unittest.TestCase):
        resp = retry(get, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(self.name in listing)
        self.assertIn(self.name, listing)

        # can create new containers
        new_container_name = str(uuid4())

@@ -868,7 +868,7 @@ class TestContainer(unittest.TestCase):
        resp = retry(get, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(new_container_name in listing)
        self.assertIn(new_container_name, listing)

        # can also delete them
        resp = retry(delete, new_container_name, use_account=3)

@@ -877,7 +877,7 @@ class TestContainer(unittest.TestCase):
        resp = retry(get, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(new_container_name not in listing)
        self.assertNotIn(new_container_name, listing)

        # even if they didn't create them
        empty_container_name = str(uuid4())

@@ -1000,7 +1000,7 @@ class TestContainer(unittest.TestCase):
        resp = retry(get, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(self.name in listing)
        self.assertIn(self.name, listing)

        # can create new containers
        new_container_name = str(uuid4())

@@ -1010,7 +1010,7 @@ class TestContainer(unittest.TestCase):
        resp = retry(get, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(new_container_name in listing)
        self.assertIn(new_container_name, listing)

        # can also delete them
        resp = retry(delete, new_container_name, use_account=3)

@@ -1019,7 +1019,7 @@ class TestContainer(unittest.TestCase):
        resp = retry(get, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(new_container_name not in listing)
        self.assertNotIn(new_container_name, listing)

        # even if they didn't create them
        empty_container_name = str(uuid4())

@@ -1595,7 +1595,7 @@ class BaseTestContainerACLs(unittest.TestCase):
        while True:
            resp = retry(get, use_account=self.account)
            body = resp.read()
            self.assert_(resp.status // 100 == 2, resp.status)
            self.assertTrue(resp.status // 100 == 2, resp.status)
            objs = json.loads(body)
            if not objs:
                break
@@ -89,7 +89,7 @@ class TestObject(unittest.TestCase):
            body = resp.read()
            if resp.status == 404:
                break
            self.assert_(resp.status // 100 == 2, resp.status)
            self.assertTrue(resp.status // 100 == 2, resp.status)
            objs = json.loads(body)
            if not objs:
                break

@@ -107,7 +107,7 @@ class TestObject(unittest.TestCase):
        for container in self.containers:
            resp = retry(delete, container)
            resp.read()
            self.assert_(resp.status in (204, 404))
            self.assertIn(resp.status, (204, 404))

    def test_if_none_match(self):
        def put(url, token, parsed, conn):

@@ -387,7 +387,7 @@ class TestObject(unittest.TestCase):
            resp = retry(get)
            raise Exception('Should not have been able to GET')
        except Exception as err:
            self.assert_(str(err).startswith('No result after '))
            self.assertTrue(str(err).startswith('No result after '))

        def post(url, token, parsed, conn):
            conn.request('POST', parsed.path + '/' + self.container, '',

@@ -412,7 +412,7 @@ class TestObject(unittest.TestCase):
            resp = retry(get)
            raise Exception('Should not have been able to GET')
        except Exception as err:
            self.assert_(str(err).startswith('No result after '))
            self.assertTrue(str(err).startswith('No result after '))

    def test_private_object(self):
        if tf.skip or tf.skip3:

@@ -562,7 +562,7 @@ class TestObject(unittest.TestCase):
        resp = retry(get_listing, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(self.obj in listing)
        self.assertIn(self.obj, listing)

        # can get object
        resp = retry(get, self.obj, use_account=3)

@@ -585,8 +585,8 @@ class TestObject(unittest.TestCase):
        resp = retry(get_listing, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(obj_name not in listing)
        self.assert_(self.obj in listing)
        self.assertNotIn(obj_name, listing)
        self.assertIn(self.obj, listing)

    @requires_acls
    def test_read_write(self):

@@ -643,7 +643,7 @@ class TestObject(unittest.TestCase):
        resp = retry(get_listing, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(self.obj in listing)
        self.assertIn(self.obj, listing)

        # can get object
        resp = retry(get, self.obj, use_account=3)

@@ -666,8 +666,8 @@ class TestObject(unittest.TestCase):
        resp = retry(get_listing, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(obj_name in listing)
        self.assert_(self.obj not in listing)
        self.assertIn(obj_name, listing)
        self.assertNotIn(self.obj, listing)

    @requires_acls
    def test_admin(self):

@@ -724,7 +724,7 @@ class TestObject(unittest.TestCase):
        resp = retry(get_listing, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(self.obj in listing)
        self.assertIn(self.obj, listing)

        # can get object
        resp = retry(get, self.obj, use_account=3)

@@ -747,8 +747,8 @@ class TestObject(unittest.TestCase):
        resp = retry(get_listing, use_account=3)
        listing = resp.read()
        self.assertEquals(resp.status, 200)
        self.assert_(obj_name in listing)
        self.assert_(self.obj not in listing)
        self.assertIn(obj_name, listing)
        self.assertNotIn(self.obj, listing)

    def test_manifest(self):
        if tf.skip:

@@ -1168,7 +1168,7 @@ class TestObject(unittest.TestCase):
        resp.read()
        self.assertEquals(resp.status, 200)
        headers = dict((k.lower(), v) for k, v in resp.getheaders())
        self.assertTrue('access-control-allow-origin' not in headers)
        self.assertNotIn('access-control-allow-origin', headers)

        resp = retry(check_cors,
                     'GET', 'cat', {'Origin': 'http://secret.com'})
File diff suppressed because it is too large
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from httplib import HTTPConnection

import os
from subprocess import Popen, PIPE
import sys

@@ -22,8 +22,9 @@ from collections import defaultdict
import unittest
from nose import SkipTest

from swiftclient import get_auth, head_account
from six.moves.http_client import HTTPConnection

from swiftclient import get_auth, head_account
from swift.obj.diskfile import get_data_dir
from swift.common.ring import Ring
from swift.common.utils import readconf, renamer
@@ -49,8 +49,8 @@ class TestAccountFailures(ReplProbeTest):
                found2 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)
        self.assertTrue(found1)
        self.assertTrue(found2)

        # Create container2/object1
        client.put_object(self.url, self.token, container2, 'object1', '1234')

@@ -71,8 +71,8 @@ class TestAccountFailures(ReplProbeTest):
                found2 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)
        self.assertTrue(found1)
        self.assertTrue(found2)

        # Get to final state
        self.get_to_final_state()

@@ -93,8 +93,8 @@ class TestAccountFailures(ReplProbeTest):
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)
        self.assertTrue(found1)
        self.assertTrue(found2)

        apart, anodes = self.account_ring.get_nodes(self.account)
        kill_nonprimary_server(anodes, self.ipport2server, self.pids)

@@ -123,8 +123,8 @@ class TestAccountFailures(ReplProbeTest):
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(not found1)
        self.assert_(found2)
        self.assertTrue(not found1)
        self.assertTrue(found2)

        # Run container updaters
        Manager(['container-updater']).once()

@@ -143,8 +143,8 @@ class TestAccountFailures(ReplProbeTest):
                found2 = True
                self.assertEquals(container['count'], 2)
                self.assertEquals(container['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)
        self.assertTrue(not found1)
        self.assertTrue(found2)

        # Restart other primary account server
        start_server((anodes[0]['ip'], anodes[0]['port']),

@@ -166,8 +166,8 @@ class TestAccountFailures(ReplProbeTest):
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)
        self.assertTrue(found1)
        self.assertTrue(found2)

        # Get to final state
        self.get_to_final_state()

@@ -187,8 +187,8 @@ class TestAccountFailures(ReplProbeTest):
                found2 = True
                self.assertEquals(container['count'], 2)
                self.assertEquals(container['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)
        self.assertTrue(not found1)
        self.assertTrue(found2)


if __name__ == '__main__':
@@ -14,10 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import httplib
import re
import unittest

from six.moves import http_client
from swiftclient import get_auth
from test.probe.common import ReplProbeTest
from urlparse import urlparse

@@ -49,7 +49,7 @@ class TestAccountGetFakeResponsesMatch(ReplProbeTest):
        host, port = netloc.split(':')
        port = int(port)

        conn = httplib.HTTPConnection(host, port)
        conn = http_client.HTTPConnection(host, port)
        conn.request(method, self._account_path(account), headers=headers)
        resp = conn.getresponse()
        if resp.status // 100 != 2:
@@ -65,9 +65,10 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(len(found_policy_indexes) > 1,
                     'primary nodes did not disagree about policy index %r' %
                     head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:

@@ -102,9 +103,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(len(found_policy_indexes) == 1,
                     'primary nodes disagree about policy index %r' %
                     head_responses)
        self.assertTrue(len(found_policy_indexes) == 1,
                        'primary nodes disagree about policy index %r' %
                        head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)

@@ -165,9 +166,10 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(len(found_policy_indexes) > 1,
                     'primary nodes did not disagree about policy index %r' %
                     head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = ts_policy_index = None
        for policy_index in found_policy_indexes:

@@ -207,11 +209,11 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
        new_found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for node,
                metadata in head_responses)
        self.assert_(len(new_found_policy_indexes) == 1,
                     'primary nodes disagree about policy index %r' %
                     dict((node['port'],
                           metadata['X-Backend-Storage-Policy-Index'])
                          for node, metadata in head_responses))
        self.assertTrue(len(new_found_policy_indexes) == 1,
                        'primary nodes disagree about policy index %r' %
                        dict((node['port'],
                              metadata['X-Backend-Storage-Policy-Index'])
                             for node, metadata in head_responses))
        expected_policy_index = new_found_policy_indexes.pop()
        self.assertEqual(orig_policy_index, expected_policy_index)
        # validate object fully deleted
@@ -54,7 +54,7 @@ class TestObjectAsyncUpdate(ReplProbeTest):
                             self.ipport2server, self.pids)

        # Assert it does not know about container/obj
        self.assert_(not direct_client.direct_get_container(
        self.assertTrue(not direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1])

        # Run the object-updaters

@@ -63,7 +63,7 @@ class TestObjectAsyncUpdate(ReplProbeTest):
        # Assert the other primary server now knows about container/obj
        objs = [o['name'] for o in direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1]]
        self.assert_(obj in objs)
        self.assertTrue(obj in objs)


class TestUpdateOverrides(ReplProbeTest):
@@ -87,7 +87,7 @@ class TestObjectExpirer(ReplProbeTest):
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assert_('x-backend-timestamp' in metadata)
        self.assertTrue('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

@@ -122,9 +122,9 @@ class TestObjectExpirer(ReplProbeTest):
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assert_('x-backend-timestamp' in metadata)
                self.assert_(Timestamp(metadata['x-backend-timestamp']) >
                             create_timestamp)
                self.assertTrue('x-backend-timestamp' in metadata)
                self.assertTrue(Timestamp(metadata['x-backend-timestamp']) >
                                create_timestamp)

if __name__ == "__main__":
    unittest.main()
@@ -37,12 +37,12 @@ from swift.common import swob, utils
from swift.common.ring import Ring, RingData
from hashlib import md5
import logging.handlers
from httplib import HTTPException
from swift.common import constraints
from swift.common import storage_policy

from six.moves.http_client import HTTPException
from swift.common import constraints, storage_policy
from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
import functools
import cPickle as pickle
import six.moves.cPickle as pickle
from gzip import GzipFile
import mock as mocklib
import inspect

@@ -1052,7 +1052,7 @@ def generate_bad_metadata_headers(server_type):
        v = 'v' * constraints.MAX_META_VALUE_LENGTH
        while size < constraints.MAX_META_OVERALL_SIZE:
            k = ('%s%04d%s' %
                (prefix, x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4)))
                 (prefix, x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4)))
            headers[k] = v
            size += chunk
            x += 1

@@ -77,7 +77,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
        # add a row to "local" db
        broker.put_container('/a/c', time.time(), 0, 0, 0,
                             POLICIES.default.idx)
        #replicate
        # replicate
        daemon = replicator.AccountReplicator({})

        def _rsync_file(db_file, remote_file, **kwargs):
@@ -386,6 +386,23 @@ class TestPrintObjFullMeta(TestCliInfoBase):
            print_obj(self.datafile, swift_dir=self.testdir)
        self.assertTrue('/objects-1/' in out.getvalue())

    def test_print_obj_policy_index(self):
        # Check an output of policy index when current directory is in
        # object-* directory
        out = StringIO()
        hash_dir = os.path.dirname(self.datafile)
        file_name = os.path.basename(self.datafile)

        # Change working directory to object hash dir
        cwd = os.getcwd()
        try:
            os.chdir(hash_dir)
            with mock.patch('sys.stdout', out):
                print_obj(file_name, swift_dir=self.testdir)
        finally:
            os.chdir(cwd)
        self.assertTrue('X-Backend-Storage-Policy-Index: 1' in out.getvalue())

    def test_print_obj_meta_and_ts_files(self):
        # verify that print_obj will also read from meta and ts files
        base = os.path.splitext(self.datafile)[0]

@@ -610,3 +627,21 @@ Other Metadata:
No metadata found'''

        self.assertEquals(out.getvalue().strip(), exp_out)


class TestPrintObjWeirdPath(TestPrintObjFullMeta):
    def setUp(self):
        super(TestPrintObjWeirdPath, self).setUp()
        # device name is objects-0 instead of sda, this is weird.
        self.datafile = os.path.join(self.testdir,
                                     'objects-0', 'objects-1',
                                     '1', 'ea8',
                                     'db4449e025aca992307c7c804a67eea8',
                                     '1402017884.18202.data')
        utils.mkdirs(os.path.dirname(self.datafile))
        with open(self.datafile, 'wb') as fp:
            md = {'name': '/AUTH_admin/c/obj',
                  'Content-Type': 'application/octet-stream',
                  'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
                  'Content-Length': 0}
            write_metadata(fp, md)
@@ -387,7 +387,7 @@ class TestReconCommands(unittest.TestCase):
        res_account = 'Invalid: http://127.0.0.1:6012/ is account-server'
        valid = "1/1 hosts ok, 0 error[s] while checking hosts."

        #Test for object server type - default
        # Test for object server type - default
        with nested(*patches):
            self.recon.server_type_check(hosts)

@@ -396,7 +396,7 @@ class TestReconCommands(unittest.TestCase):
        self.assertTrue(res_account in output.splitlines())
        stdout.truncate(0)

        #Test ok for object server type - default
        # Test ok for object server type - default
        with nested(*patches):
            self.recon.server_type_check([hosts[0]])

@@ -404,7 +404,7 @@ class TestReconCommands(unittest.TestCase):
        self.assertTrue(valid in output.splitlines())
        stdout.truncate(0)

        #Test for account server type
        # Test for account server type
        with nested(*patches):
            self.recon.server_type = 'account'
            self.recon.server_type_check(hosts)

@@ -414,7 +414,7 @@ class TestReconCommands(unittest.TestCase):
        self.assertTrue(res_object in output.splitlines())
        stdout.truncate(0)

        #Test ok for account server type
        # Test ok for account server type
        with nested(*patches):
            self.recon.server_type = 'account'
            self.recon.server_type_check([hosts[2]])

@@ -423,7 +423,7 @@ class TestReconCommands(unittest.TestCase):
        self.assertTrue(valid in output.splitlines())
        stdout.truncate(0)

        #Test for container server type
        # Test for container server type
        with nested(*patches):
            self.recon.server_type = 'container'
            self.recon.server_type_check(hosts)

@@ -433,7 +433,7 @@ class TestReconCommands(unittest.TestCase):
        self.assertTrue(res_object in output.splitlines())
        stdout.truncate(0)

        #Test ok for container server type
        # Test ok for container server type
        with nested(*patches):
            self.recon.server_type = 'container'
            self.recon.server_type_check([hosts[1]])
@@ -14,22 +14,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import json
import mock
import unittest
from StringIO import StringIO
from test.unit import with_tempdir

from swift.cli.ring_builder_analyzer import parse_scenario, run_scenario


class TestRunScenario(unittest.TestCase):
    def test_it_runs(self):
    @with_tempdir
    def test_it_runs(self, tempdir):
        builder_path = os.path.join(tempdir, 'test.builder')
        scenario = {
            'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
            'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100],
                        ['add', 'z2-3.4.5.6:7/sda9', 200]],
                       [['set_weight', 0, 150]],
                       [['remove', 1]]]}
                       [['remove', 1]],
                       [['save', builder_path]]]}
        parsed = parse_scenario(json.dumps(scenario))

        fake_stdout = StringIO()

@@ -40,6 +45,7 @@ class TestRunScenario(unittest.TestCase):
        # this doesn't crash and produces output that resembles something
        # useful is good enough.
        self.assertTrue('Rebalance' in fake_stdout.getvalue())
        self.assertTrue(os.path.exists(builder_path))


class TestParseScenario(unittest.TestCase):

@@ -62,8 +68,8 @@ class TestParseScenario(unittest.TestCase):
                     'meta': '',
                     'port': 7,
                     'region': 1,
                     'replication_ip': None,
                     'replication_port': None,
                     'replication_ip': '3.4.5.6',
                     'replication_port': 7,
                     'weight': 100.0,
                     'zone': 2}],
            ['add', {'device': u'sda9',

@@ -71,8 +77,8 @@ class TestParseScenario(unittest.TestCase):
                     'meta': '',
                     'port': 7,
                     'region': 1,
                     'replication_ip': None,
                     'replication_port': None,
                     'replication_ip': '3.4.5.6',
                     'replication_port': 7,
                     'weight': 200.0,
                     'zone': 2}]],
            [['set_weight', 0, 150.0]],

@@ -180,7 +186,14 @@ class TestParseScenario(unittest.TestCase):

        # can't parse
        busted = dict(base, rounds=[[['add', 'not a good value', 100]]])
        self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
        # N.B. the ValueError's coming out of ring.utils.parse_add_value
        # are already pretty good
        expected = "Invalid device specifier (round 0, command 0): " \
            "Invalid add value: not a good value"
        try:
            parse_scenario(json.dumps(busted))
        except ValueError as err:
            self.assertEqual(str(err), expected)

        # negative weight
        busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6000/d7', -1]]])

@@ -216,7 +229,12 @@ class TestParseScenario(unittest.TestCase):

        # bad dev id
        busted = dict(base, rounds=[[['set_weight', 'not an int', 90]]])
        self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
        expected = "Invalid device ID in set_weight (round 0, command 0): " \
            "invalid literal for int() with base 10: 'not an int'"
        try:
            parse_scenario(json.dumps(busted))
        except ValueError as e:
            self.assertEqual(str(e), expected)

        # negative weight
        busted = dict(base, rounds=[[['set_weight', 1, -1]]])

@@ -225,3 +243,11 @@ class TestParseScenario(unittest.TestCase):
        # bogus weight
        busted = dict(base, rounds=[[['set_weight', 1, 'bogus']]])
        self.assertRaises(ValueError, parse_scenario, json.dumps(busted))

    def test_bad_save(self):
        base = {
            'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}

        # no builder name
        busted = dict(base, rounds=[[['save']]])
        self.assertRaises(ValueError, parse_scenario, json.dumps(busted))

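The scenario format exercised above deserves a compact illustration. A minimal
sketch of driving the analyzer outside the test harness, assuming only what
this diff shows: parse_scenario() accepts a JSON string, a ['save', path]
command persists the builder, and run_scenario() (whose exact signature is
inferred from the test's usage) prints per-round 'Rebalance' statistics to
stdout.

    import json
    from swift.cli.ring_builder_analyzer import parse_scenario, run_scenario

    scenario = {
        'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
        'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100],
                    ['add', 'r1z2-3.4.5.7:7/sda9', 100]],
                   [['save', '/tmp/test.builder']]]}
    # parse_scenario validates and normalizes every round/command pair
    parsed = parse_scenario(json.dumps(scenario))
    run_scenario(parsed)  # expect 'Rebalance' lines on stdout
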
@@ -20,6 +20,7 @@ import six
import tempfile
import unittest
import uuid
import shlex

from swift.cli import ringbuilder
from swift.common import exceptions

@@ -29,6 +30,9 @@ from swift.common.ring import RingBuilder
class RunSwiftRingBuilderMixin(object):

    def run_srb(self, *argv):
        if len(argv) == 1 and isinstance(argv[0], basestring):
            # convert a single string to a list
            argv = shlex.split(argv[0])
        mock_stdout = six.StringIO()
        mock_stderr = six.StringIO()

@@ -40,7 +44,10 @@ class RunSwiftRingBuilderMixin(object):
            ringbuilder.main(srb_args)
        except SystemExit as err:
            if err.code not in (0, 1):  # (success, warning)
                raise
            msg = 'Unexpected exit status %s\n' % err.code
            msg += 'STDOUT:\n%s\nSTDERR:\n%s\n' % (
                mock_stdout.getvalue(), mock_stderr.getvalue())
            self.fail(msg)
        return (mock_stdout.getvalue(), mock_stderr.getvalue())


@@ -1741,6 +1748,13 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
            err = exc
        self.assertEquals(err.code, 2)

    def test_dispersion_command(self):
        self.create_sample_ring()
        self.run_srb('rebalance')
        out, err = self.run_srb('dispersion -v')
        self.assertIn('dispersion', out.lower())
        self.assertFalse(err)


class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin):

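run_srb above captures a CLI testing pattern worth calling out:
swift-ring-builder's main() reports its outcome through SystemExit codes
(0 success, 1 warning, 2 error), so the helper traps SystemExit, tolerates
0 and 1, and surfaces the captured output on anything else. A generic,
self-contained sketch of the same pattern (run_cli and cli_main are
illustrative names, not Swift APIs):

    import shlex
    import sys
    import six

    def run_cli(cli_main, command_line):
        # swap in StringIO so assertions can inspect the command's output
        argv = ['prog'] + shlex.split(command_line)
        mock_stdout, mock_stderr = six.StringIO(), six.StringIO()
        real_out, real_err = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = mock_stdout, mock_stderr
        try:
            cli_main(argv)
        except SystemExit as err:
            if err.code not in (0, 1):  # (success, warning)
                raise
        finally:
            sys.stdout, sys.stderr = real_out, real_err
        return mock_stdout.getvalue(), mock_stderr.getvalue()
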
@@ -96,7 +96,7 @@ class TestAccountQuota(unittest.TestCase):
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.cache': cache})
        res = req.get_response(app)
        #Response code of 200 because authentication itself is not done here
        # Response code of 200 because authentication itself is not done here
        self.assertEquals(res.status_int, 200)

    def test_no_quotas(self):

@@ -253,7 +253,7 @@ class TestAccountQuota(unittest.TestCase):
        cache = FakeCache(None)
        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'x-copy-from': '/c2/o2'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 413)

@@ -267,7 +267,7 @@ class TestAccountQuota(unittest.TestCase):
        cache = FakeCache(None)
        req = Request.blank('/v1/a/c2/o2',
                            environ={'REQUEST_METHOD': 'COPY',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'Destination': '/c/o'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 413)

@@ -281,7 +281,7 @@ class TestAccountQuota(unittest.TestCase):
        cache = FakeCache(None)
        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'x-copy-from': '/c2/o2'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 200)

@@ -294,7 +294,7 @@ class TestAccountQuota(unittest.TestCase):
        cache = FakeCache(None)
        req = Request.blank('/v1/a/c2/o2',
                            environ={'REQUEST_METHOD': 'COPY',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'Destination': '/c/o'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 200)

@@ -306,7 +306,7 @@ class TestAccountQuota(unittest.TestCase):
        cache = FakeCache(None)
        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'x-copy-from': '/c2/o3'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 200)

@@ -318,7 +318,7 @@ class TestAccountQuota(unittest.TestCase):
        cache = FakeCache(None)
        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'x-copy-from': 'bad_path'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 412)

@@ -1,4 +1,4 @@
#-*- coding:utf-8 -*-
# coding: utf-8
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");

@@ -105,7 +105,8 @@ class TestListEndpoints(unittest.TestCase):
    def FakeGetInfo(self, env, app, swift_source=None):
        info = {'status': 0, 'sync_key': None, 'meta': {},
                'cors': {'allow_origin': None, 'expose_headers': None,
                         'max_age': None}, 'sysmeta': {}, 'read_acl': None,
                         'max_age': None},
                'sysmeta': {}, 'read_acl': None,
                'object_count': None, 'write_acl': None, 'versions': None,
                'bytes': None}
        info['storage_policy'] = self.policy_to_test

@@ -16,9 +16,9 @@
import os
from textwrap import dedent
import unittest
from ConfigParser import NoSectionError, NoOptionError

import mock
from six.moves.configparser import NoSectionError, NoOptionError

from swift.common.middleware import memcache
from swift.common.memcached import MemcacheRing

@@ -101,8 +101,8 @@ class TestContainerQuotas(unittest.TestCase):

        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                            headers={'x-copy-from': '/c2/o2'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 413)

@@ -114,8 +114,8 @@ class TestContainerQuotas(unittest.TestCase):

        req = Request.blank('/v1/a/c2/o2',
                            environ={'REQUEST_METHOD': 'COPY',
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                            headers={'Destination': '/c/o'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 413)

@@ -136,8 +136,8 @@ class TestContainerQuotas(unittest.TestCase):
        cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                            headers={'x-copy-from': '/c2/o2'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 200)

@@ -147,8 +147,8 @@ class TestContainerQuotas(unittest.TestCase):
        cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
        req = Request.blank('/v1/a/c2/o2',
                            environ={'REQUEST_METHOD': 'COPY',
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                            headers={'Destination': '/c/o'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 200)

@@ -158,8 +158,8 @@ class TestContainerQuotas(unittest.TestCase):
        cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                            headers={'x-copy-from': '/c2/o3'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 200)

@@ -169,7 +169,7 @@ class TestContainerQuotas(unittest.TestCase):
        cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'x-copy-from': 'bad_path'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 412)

@@ -179,8 +179,8 @@ class TestContainerQuotas(unittest.TestCase):
        cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
        req = Request.blank('/v1/a/c2/o3',
                            environ={'REQUEST_METHOD': 'COPY',
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                            headers={'Destination': '/c/o'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 200)

@@ -201,8 +201,8 @@ class TestContainerQuotas(unittest.TestCase):
        cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})
        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                                     'swift.object/a/c2/o2': {'length': 10},
                                     'swift.cache': cache},
                            headers={'x-copy-from': '/c2/o2'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 413)

@@ -213,7 +213,7 @@ class TestContainerQuotas(unittest.TestCase):
        cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})
        req = Request.blank('/v1/a/c2/o2',
                            environ={'REQUEST_METHOD': 'COPY',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'Destination': '/c/o'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 413)

@@ -227,10 +227,10 @@ class TestContainerQuotas(unittest.TestCase):
                      'status': 200, 'object_count': 1}
        req = Request.blank('/v1/a/c2/o2',
                            environ={'REQUEST_METHOD': 'COPY',
                                     'swift.container/a/c': a_c_cache,
                                     'swift.container/a2/c': a2_c_cache},
                                     'swift.container/a/c': a_c_cache,
                                     'swift.container/a2/c': a2_c_cache},
                            headers={'Destination': '/c/o',
                                     'Destination-Account': 'a2'})
                                     'Destination-Account': 'a2'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 413)
        self.assertEquals(res.body, 'Upload exceeds quota.')

@@ -243,10 +243,10 @@ class TestContainerQuotas(unittest.TestCase):
                      'status': 200, 'object_count': 1}
        req = Request.blank('/v1/a2/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.container/a/c': a_c_cache,
                                     'swift.container/a2/c': a2_c_cache},
                                     'swift.container/a/c': a_c_cache,
                                     'swift.container/a2/c': a2_c_cache},
                            headers={'X-Copy-From': '/c2/o2',
                                     'X-Copy-From-Account': 'a'})
                                     'X-Copy-From-Account': 'a'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 413)
        self.assertEquals(res.body, 'Upload exceeds quota.')

@@ -266,7 +266,7 @@ class TestContainerQuotas(unittest.TestCase):
        cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})
        req = Request.blank('/v1/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'x-copy-from': '/c2/o2'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 200)

@@ -276,7 +276,7 @@ class TestContainerQuotas(unittest.TestCase):
        cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})
        req = Request.blank('/v1/a/c2/o2',
                            environ={'REQUEST_METHOD': 'COPY',
                                     'swift.cache': cache},
                                     'swift.cache': cache},
                            headers={'Destination': '/c/o'})
        res = req.get_response(app)
        self.assertEquals(res.status_int, 200)

@@ -774,11 +774,11 @@ class TestReconSuccess(TestCase):
        self.assertEquals(rv, unmounted_resp)

    def test_get_diskusage(self):
        #posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185,
        #                     f_bfree=1113075, f_bavail=1013351,
        #                     f_files=498736,
        #                     f_ffree=397839, f_favail=397839, f_flag=0,
        #                     f_namemax=255)
        # posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185,
        #                      f_bfree=1113075, f_bavail=1013351,
        #                      f_files=498736,
        #                      f_ffree=397839, f_favail=397839, f_flag=0,
        #                      f_namemax=255)
        statvfs_content = (4096, 4096, 1963185, 1113075, 1013351, 498736,
                           397839, 397839, 0, 255)
        du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696,

@@ -815,9 +815,9 @@ class TestReconSuccess(TestCase):
        self.mockos.ismount_output = True

        def fake_lstat(*args, **kwargs):
            #posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4,
            #                   st_uid=5, st_gid=6, st_size=7, st_atime=8,
            #                   st_mtime=9, st_ctime=10)
            # posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4,
            #                    st_uid=5, st_gid=6, st_size=7, st_atime=8,
            #                    st_mtime=9, st_ctime=10)
            return stat_result((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))

        def fake_exists(*args, **kwargs):

@@ -867,7 +867,7 @@ class TestReconMiddleware(unittest.TestCase):
        os.listdir = self.fake_list
        self.app = recon.ReconMiddleware(FakeApp(), {'object_recon': "true"})
        os.listdir = self.real_listdir
        #self.app.object_recon = True
        # self.app.object_recon = True
        self.app.get_mem = self.frecon.fake_mem
        self.app.get_load = self.frecon.fake_load
        self.app.get_async_info = self.frecon.fake_async

@@ -927,21 +927,21 @@ class TestReconMiddleware(unittest.TestCase):

    def test_recon_get_replication_all(self):
        get_replication_resp = ['{"replicationtest": "1"}']
        #test account
        # test account
        req = Request.blank('/recon/replication/account',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_replication_resp)
        self.assertEquals(self.frecon.fake_replication_rtype, 'account')
        self.frecon.fake_replication_rtype = None
        #test container
        # test container
        req = Request.blank('/recon/replication/container',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_replication_resp)
        self.assertEquals(self.frecon.fake_replication_rtype, 'container')
        self.frecon.fake_replication_rtype = None
        #test object
        # test object
        req = Request.blank('/recon/replication/object',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)

@@ -920,10 +920,11 @@ class TestTempURL(unittest.TestCase):
        self.assertTrue('swift.auth_scheme' not in environ)

        # Rejected by TempURL
        environ = {'REQUEST_METHOD': 'PUT',
                   'QUERY_STRING':
                   'temp_url_sig=dummy&temp_url_expires=1234'}
        req = self._make_request('/v1/a/c/o', keys=['abc'],
                                 environ={'REQUEST_METHOD': 'PUT',
                                          'QUERY_STRING':
                                          'temp_url_sig=dummy&temp_url_expires=1234'})
                                 environ=environ)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)

@@ -19,7 +19,7 @@ import mock
import operator
import os
import unittest
import cPickle as pickle
import six.moves.cPickle as pickle
from array import array
from collections import defaultdict
from math import ceil

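This file begins a pattern repeated through the rest of the diff: cPickle
exists only on Python 2, so the import is routed through six, which resolves
to cPickle on Python 2 and the C-accelerated pickle on Python 3. A
self-contained sketch of the idiom:

    import six.moves.cPickle as pickle

    # identical behavior on both interpreters; protocol=2 stays py2-readable
    blob = pickle.dumps({'device': 'sda1', 'weight': 100.0}, protocol=2)
    assert pickle.loads(blob)['weight'] == 100.0
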
@@ -14,7 +14,7 @@
# limitations under the License.

import array
import cPickle as pickle
import six.moves.cPickle as pickle
import os
import sys
import unittest

@@ -313,8 +313,8 @@ class TestRing(TestRingBase):

    def test_reload_old_style_pickled_ring(self):
        devs = [{'id': 0, 'zone': 0,
                 'weight': 1.0, 'ip': '10.1.1.1',
                 'port': 6000},
                 'weight': 1.0, 'ip': '10.1.1.1',
                 'port': 6000},
                {'id': 1, 'zone': 0,
                 'weight': 1.0, 'ip': '10.1.1.1',
                 'port': 6000},

@@ -382,72 +382,72 @@ class TestRing(TestRingBase):
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[0],
                                     self.intended_devs[3]])])
                                     self.intended_devs[3]])])

        part, nodes = self.ring.get_nodes('a1')
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[0],
                                     self.intended_devs[3]])])
                                     self.intended_devs[3]])])

        part, nodes = self.ring.get_nodes('a4')
        self.assertEquals(part, 1)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[1],
                                     self.intended_devs[4]])])
                                     self.intended_devs[4]])])

        part, nodes = self.ring.get_nodes('aa')
        self.assertEquals(part, 1)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[1],
                                     self.intended_devs[4]])])
                                     self.intended_devs[4]])])

        part, nodes = self.ring.get_nodes('a', 'c1')
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[0],
                                     self.intended_devs[3]])])
                                     self.intended_devs[3]])])

        part, nodes = self.ring.get_nodes('a', 'c0')
        self.assertEquals(part, 3)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[1],
                                     self.intended_devs[4]])])
                                     self.intended_devs[4]])])

        part, nodes = self.ring.get_nodes('a', 'c3')
        self.assertEquals(part, 2)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[0],
                                     self.intended_devs[3]])])
                                     self.intended_devs[3]])])

        part, nodes = self.ring.get_nodes('a', 'c2')
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[0],
                                     self.intended_devs[3]])])
                                     self.intended_devs[3]])])

        part, nodes = self.ring.get_nodes('a', 'c', 'o1')
        self.assertEquals(part, 1)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[1],
                                     self.intended_devs[4]])])
                                     self.intended_devs[4]])])

        part, nodes = self.ring.get_nodes('a', 'c', 'o5')
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[0],
                                     self.intended_devs[3]])])
                                     self.intended_devs[3]])])

        part, nodes = self.ring.get_nodes('a', 'c', 'o0')
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[0],
                                     self.intended_devs[3]])])
                                     self.intended_devs[3]])])

        part, nodes = self.ring.get_nodes('a', 'c', 'o2')
        self.assertEquals(part, 2)
        self.assertEquals(nodes, [dict(node, index=i) for i, node in
                          enumerate([self.intended_devs[0],
                                     self.intended_devs[3]])])
                                     self.intended_devs[3]])])

    def add_dev_to_ring(self, new_dev):
        self.ring.devs.append(new_dev)

@@ -21,7 +21,7 @@ import unittest
from tempfile import mkdtemp
from shutil import rmtree, copy
from uuid import uuid4
import cPickle as pickle
import six.moves.cPickle as pickle

import simplejson
import sqlite3

@@ -14,10 +14,11 @@
""" Tests for swift.common.storage_policies """
import six
import unittest
from ConfigParser import ConfigParser
import os
import mock
from functools import partial

from six.moves.configparser import ConfigParser
from tempfile import NamedTemporaryFile
from test.unit import patch_policies, FakeRing, temptree
from swift.common.storage_policy import (

@@ -28,14 +28,15 @@ import os
import mock
import random
import re
from six import StringIO
from six.moves import range
import socket
import stat
import sys
import json
import math

from six import StringIO
from six.moves.queue import Queue, Empty
from six.moves import range
from textwrap import dedent

import tempfile

@@ -47,7 +48,6 @@ import fcntl
import shutil
from contextlib import nested

from Queue import Queue, Empty
from getpass import getuser
from shutil import rmtree
from functools import partial

@@ -1086,7 +1086,7 @@ class TestUtils(unittest.TestCase):
        self.assertTrue(got_exc)
        got_exc = False
        try:
            for line in lfo.xreadlines():
            for line in lfo:
                pass
        except Exception:
            got_exc = True

@@ -4298,7 +4298,7 @@ class TestAuditLocationGenerator(unittest.TestCase):
        else:
            return orig_listdir(path)

        #Check Raise on Bad partition
        # Check Raise on Bad partition
        tmpdir = mkdtemp()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)

@@ -4315,7 +4315,7 @@ class TestAuditLocationGenerator(unittest.TestCase):
        self.assertRaises(OSError, audit)
        rmtree(tmpdir)

        #Check Raise on Bad Suffix
        # Check Raise on Bad Suffix
        tmpdir = mkdtemp()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)

@@ -4334,7 +4334,7 @@ class TestAuditLocationGenerator(unittest.TestCase):
        self.assertRaises(OSError, audit)
        rmtree(tmpdir)

        #Check Raise on Bad Hash
        # Check Raise on Bad Hash
        tmpdir = mkdtemp()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)

@@ -4358,14 +4358,14 @@ class TestAuditLocationGenerator(unittest.TestCase):
        logger = FakeLogger()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)
        #Create a file, that represents a non-dir drive
        # Create a file, that represents a non-dir drive
        open(os.path.join(tmpdir, 'asdf'), 'w')
        locations = utils.audit_location_generator(
            tmpdir, "data", mount_check=False, logger=logger
        )
        self.assertEqual(list(locations), [])
        self.assertEqual(1, len(logger.get_lines_for_level('warning')))
        #Test without the logger
        # Test without the logger
        locations = utils.audit_location_generator(
            tmpdir, "data", mount_check=False
        )

@@ -4376,7 +4376,7 @@ class TestAuditLocationGenerator(unittest.TestCase):
        logger = FakeLogger()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)
        #Create a file, that represents a non-dir drive
        # Create a file, that represents a non-dir drive
        open(os.path.join(tmpdir, 'asdf'), 'w')
        locations = utils.audit_location_generator(
            tmpdir, "data", mount_check=True, logger=logger

@@ -4384,7 +4384,7 @@ class TestAuditLocationGenerator(unittest.TestCase):
        self.assertEqual(list(locations), [])
        self.assertEqual(2, len(logger.get_lines_for_level('warning')))

        #Test without the logger
        # Test without the logger
        locations = utils.audit_location_generator(
            tmpdir, "data", mount_check=True
        )

@@ -4416,7 +4416,7 @@ class TestAuditLocationGenerator(unittest.TestCase):
        logger = FakeLogger()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)
        #Create a file, that represents a non-dir drive
        # Create a file, that represents a non-dir drive
        open(os.path.join(tmpdir, 'asdf'), 'w')
        partition = os.path.join(data, "partition1")
        os.makedirs(partition)

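The lfo change in this file is another py3 cleanup: file-like objects no
longer have an xreadlines() method, and iterating the object directly has
been the idiomatic spelling since Python 2.3. A tiny sketch of the
equivalence:

    import io

    lfo = io.StringIO(u'one\ntwo\n')
    # 'for line in lfo' replaces the removed lfo.xreadlines()
    assert [line for line in lfo] == [u'one\n', u'two\n']
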
@@ -1090,9 +1090,10 @@ class TestWorkersStrategy(unittest.TestCase):
        self.addCleanup(patcher.stop)

    def test_loop_timeout(self):
        # This strategy should block in the green.os.wait() until a worker
        # process exits.
        self.assertEqual(None, self.strategy.loop_timeout())
        # This strategy should sit in the green.os.wait() for a bit (to avoid
        # busy-waiting) but not forever (so the keep-running flag actually
        # gets checked).
        self.assertEqual(0.5, self.strategy.loop_timeout())

    def test_binding(self):
        self.assertEqual(None, self.strategy.bind_ports())

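The loop_timeout change encodes a small design point: a supervisor that
blocks in wait() forever can never notice it has been asked to stop, while
returning 0.5 bounds each wait so the keep-running flag is re-checked twice
a second without busy-waiting. A generic sketch of the idea (supervise,
keep_running and poll_child are illustrative stand-ins, not Swift's APIs):

    import time

    def supervise(keep_running, poll_child, loop_timeout=0.5):
        # wake at least every loop_timeout seconds to re-check the flag
        while keep_running():
            pid = poll_child()  # returns an exited worker pid, or None
            if pid is None:
                time.sleep(loop_timeout)
            else:
                print('worker %s exited; respawning' % pid)
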
@@ -994,8 +994,8 @@ class TestContainerBroker(unittest.TestCase):
            '/snakes', Timestamp(0).internal, 0,
            'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')

        #def list_objects_iter(self, limit, marker, prefix, delimiter,
        #                      path=None, format=None):
        # def list_objects_iter(self, limit, marker, prefix, delimiter,
        #                       path=None, format=None):
        listing = broker.list_objects_iter(100, None, None, '/pets/f', '/')
        self.assertEquals([row[0] for row in listing],
                          ['/pets/fish/', '/pets/fish_info.txt'])

@@ -319,13 +319,13 @@ class TestContainerController(unittest.TestCase):

    def test_PUT(self):
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '1'})
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '2'})
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)

@@ -359,14 +359,14 @@ class TestContainerController(unittest.TestCase):
        with mock.patch("swift.container.server.ContainerBroker",
                        InterceptedCoBr):
            req = Request.blank(
                '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                        'HTTP_X_TIMESTAMP': '1'})
                '/sda1/p/a/c',
                environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'})
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 201)
            state[0] = "race"
            req = Request.blank(
                '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                        'HTTP_X_TIMESTAMP': '1'})
                '/sda1/p/a/c',
                environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'})
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 202)

@@ -940,9 +940,9 @@ class TestContainerController(unittest.TestCase):
        snowman = u'\u2603'
        container_name = snowman.encode('utf-8')
        req = Request.blank(
            '/sda1/p/a/%s' % container_name, environ={
                'REQUEST_METHOD': 'PUT',
                'HTTP_X_TIMESTAMP': '1'})
            '/sda1/p/a/%s' % container_name,
            environ={'REQUEST_METHOD': 'PUT',
                     'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)

@@ -861,9 +861,10 @@ class TestContainerSync(unittest.TestCase):
        def fake_get_object(acct, con, obj, headers, acceptable_statuses):
            self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                             '0')
            return (200, {'other-header': 'other header value',
                          'etag': '"etagvalue"', 'x-timestamp': '1.2',
                          'content-type': 'text/plain; swift_bytes=123'},
            return (200,
                    {'other-header': 'other header value',
                     'etag': '"etagvalue"', 'x-timestamp': '1.2',
                     'content-type': 'text/plain; swift_bytes=123'},
                    iter('contents'))

        cs.swift.get_object = fake_get_object

@@ -881,12 +882,13 @@ class TestContainerSync(unittest.TestCase):
            self.assertEquals(headers['X-Newest'], True)
            self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
                              '0')
            return (200, {'date': 'date value',
                          'last-modified': 'last modified value',
                          'x-timestamp': '1.2',
                          'other-header': 'other header value',
                          'etag': '"etagvalue"',
                          'content-type': 'text/plain; swift_bytes=123'},
            return (200,
                    {'date': 'date value',
                     'last-modified': 'last modified value',
                     'x-timestamp': '1.2',
                     'other-header': 'other header value',
                     'etag': '"etagvalue"',
                     'content-type': 'text/plain; swift_bytes=123'},
                    iter('contents'))

        cs.swift.get_object = fake_get_object

@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import cPickle as pickle
import six.moves.cPickle as pickle
import mock
import os
import unittest

@@ -22,12 +22,11 @@ import string
from shutil import rmtree
from hashlib import md5
from tempfile import mkdtemp
from test.unit import FakeLogger, patch_policies
from test.unit import FakeLogger, patch_policies, make_timestamp_iter
from swift.obj import auditor
from swift.obj.diskfile import DiskFile, write_metadata, invalidate_hash, \
    get_data_dir, DiskFileManager, AuditLocation
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
    storage_directory
from swift.common.utils import mkdirs, normalize_timestamp, Timestamp
from swift.common.storage_policy import StoragePolicy, POLICIES


@@ -432,28 +431,17 @@ class TestAuditor(unittest.TestCase):
            self.auditor.run_audit(**kwargs)
        self.assertTrue(os.path.isdir(quarantine_path))

    def setup_bad_zero_byte(self, with_ts=False):
    def setup_bad_zero_byte(self, timestamp=None):
        if timestamp is None:
            timestamp = Timestamp(time.time())
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        ts_file_path = ''
        if with_ts:
            name_hash = hash_path('a', 'c', 'o')
            dir_path = os.path.join(
                self.devices, 'sda',
                storage_directory(get_data_dir(POLICIES[0]), '0', name_hash))
            ts_file_path = os.path.join(dir_path, '99999.ts')
            if not os.path.exists(dir_path):
                mkdirs(dir_path)
            fp = open(ts_file_path, 'w')
            write_metadata(fp, {'X-Timestamp': '99999', 'name': '/a/c/o'})
            fp.close()

        etag = md5()
        with self.disk_file.create() as writer:
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'X-Timestamp': timestamp.internal,
                'Content-Length': 10,
            }
            writer.put(metadata)

@@ -461,7 +449,6 @@ class TestAuditor(unittest.TestCase):
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(writer._fd, metadata)
        return ts_file_path

    def test_object_run_fast_track_all(self):
        self.setup_bad_zero_byte()

@@ -512,12 +499,36 @@ class TestAuditor(unittest.TestCase):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.assertRaises(SystemExit, self.auditor.fork_child, self)

    def test_with_tombstone(self):
        ts_file_path = self.setup_bad_zero_byte(with_ts=True)
        self.assertTrue(ts_file_path.endswith('ts'))
    def test_with_only_tombstone(self):
        # sanity check that auditor doesn't touch solitary tombstones
        ts_iter = make_timestamp_iter()
        self.setup_bad_zero_byte(timestamp=ts_iter.next())
        self.disk_file.delete(ts_iter.next())
        files = os.listdir(self.disk_file._datadir)
        self.assertEqual(1, len(files))
        self.assertTrue(files[0].endswith('ts'))
        kwargs = {'mode': 'once'}
        self.auditor.run_audit(**kwargs)
        self.assertTrue(os.path.exists(ts_file_path))
        files_after = os.listdir(self.disk_file._datadir)
        self.assertEqual(files, files_after)

    def test_with_tombstone_and_data(self):
        # rsync replication could leave a tombstone and data file in object
        # dir - verify they are both removed during audit
        ts_iter = make_timestamp_iter()
        ts_tomb = ts_iter.next()
        ts_data = ts_iter.next()
        self.setup_bad_zero_byte(timestamp=ts_data)
        tomb_file_path = os.path.join(self.disk_file._datadir,
                                      '%s.ts' % ts_tomb.internal)
        with open(tomb_file_path, 'wb') as fd:
            write_metadata(fd, {'X-Timestamp': ts_tomb.internal})
        files = os.listdir(self.disk_file._datadir)
        self.assertEqual(2, len(files))
        self.assertTrue(os.path.basename(tomb_file_path) in files, files)
        kwargs = {'mode': 'once'}
        self.auditor.run_audit(**kwargs)
        self.assertFalse(os.path.exists(self.disk_file._datadir))

    def test_sleeper(self):
        with mock.patch(

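The rewritten auditor fixtures lean on make_timestamp_iter() from test.unit,
which hands out strictly increasing Timestamps so a tombstone and a data file
get an unambiguous ordering. If that helper weren't available, a minimal
equivalent (assuming only the Timestamp class already imported above) might
look like:

    import itertools
    from swift.common.utils import Timestamp

    def make_timestamp_iter(start=1000000000):
        # one Timestamp per call, each one second newer than the last
        return iter(Timestamp(t) for t in itertools.count(start))

    ts_iter = make_timestamp_iter()
    older, newer = next(ts_iter), next(ts_iter)
    assert newer.internal > older.internal
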
@@ -16,7 +16,7 @@

"""Tests for swift.obj.diskfile"""

import cPickle as pickle
import six.moves.cPickle as pickle
import os
import errno
import itertools

@@ -512,7 +512,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
        chosen = dict((f[1], os.path.join(class_under_test._datadir, f[0]))
                      for f in test if f[1])
        expected = tuple(chosen.get(ext) for ext in returned_ext_order)
        files = list(zip(*test)[0])
        # list(zip(...)) for py3 compatibility (zip is lazy there)
        files = list(list(zip(*test))[0])

        for _order in ('ordered', 'shuffled', 'shuffled'):
            class_under_test = self._get_diskfile(policy, frag_index)
            try:

@@ -531,7 +533,8 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
        # check that expected files are left in hashdir after cleanup
        for test in scenarios:
            class_under_test = self.df_router[policy]
            files = list(zip(*test)[0])
            # list(zip(...)) for py3 compatibility (zip is lazy there)
            files = list(list(zip(*test))[0])
            hashdir = os.path.join(self.testdir, str(uuid.uuid4()))
            os.mkdir(hashdir)
            for fname in files:

@@ -557,7 +560,8 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
        # same scenarios as passed to _test_hash_cleanup_listdir_files
        for test in scenarios:
            class_under_test = self.df_router[policy]
            files = list(zip(*test)[0])
            # list(zip(...)) for py3 compatibility (zip is lazy there)
            files = list(list(zip(*test))[0])
            dev_path = os.path.join(self.testdir, str(uuid.uuid4()))
            hashdir = os.path.join(
                dev_path, diskfile.get_data_dir(policy),

@@ -570,8 +574,8 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
        with mock.patch('swift.obj.diskfile.time') as mock_time:
            # don't reclaim anything
            mock_time.time.return_value = 0.0
            mock_func = 'swift.obj.diskfile.DiskFileManager.get_dev_path'
            with mock.patch(mock_func) as mock_path:
            mocked = 'swift.obj.diskfile.BaseDiskFileManager.get_dev_path'
            with mock.patch(mocked) as mock_path:
                mock_path.return_value = dev_path
                for _ in class_under_test.yield_hashes(
                        'ignored', '0', policy, suffixes=['abc']):

@@ -1015,6 +1019,39 @@ class TestDiskFileManager(DiskFileManagerMixin, unittest.TestCase):
            class_under_test.manager.get_ondisk_files, files,
            self.testdir)

    def test_hash_cleanup_listdir_reclaim(self):
        # Each scenario specifies a list of (filename, extension, [survives])
        # tuples. If extension is set or 'survives' is True, the filename
        # should still be in the dir after cleanup.
        much_older = Timestamp(time() - 2000).internal
        older = Timestamp(time() - 1001).internal
        newer = Timestamp(time() - 900).internal
        scenarios = [[('%s.ts' % older, False, False)],

                     # fresh tombstone is preserved
                     [('%s.ts' % newer, '.ts', True)],

                     # .data files are not reclaimed, ever
                     [('%s.data' % older, '.data', True)],
                     [('%s.data' % newer, '.data', True)],

                     # ... and we could have a mixture of fresh and stale .data
                     [('%s.data' % newer, '.data', True),
                      ('%s.data' % older, False, False)],

                     # tombstone reclaimed despite newer data
                     [('%s.data' % newer, '.data', True),
                      ('%s.data' % older, False, False),
                      ('%s.ts' % much_older, '.ts', False)],

                     # tombstone reclaimed despite junk file
                     [('junk', False, True),
                      ('%s.ts' % much_older, '.ts', False)],
                     ]

        self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default,
                                              reclaim_age=1000)

    def test_yield_hashes(self):
        old_ts = '1383180000.12345'
        fresh_ts = Timestamp(time() - 10).internal

@@ -1279,7 +1316,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase):

                     # ...even when other older files are in dir
                     [('%s.durable' % older, False, False),
                      ('%s.ts' % much_older, False, False)],
                      ('%s.ts' % much_older, False, False)],

                     # isolated .data files are cleaned up when stale
                     [('%s#2.data' % older, False, False),

@@ -1300,16 +1337,12 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase):
                     [('%s#2.data' % newer, False, True),
                      ('%s#4.data' % older, False, False)],

                     # TODO these remaining scenarios exhibit different
                     # behavior than the legacy replication DiskFileManager
                     # behavior...

                     # tombstone reclaimed despite newer non-durable data
                     [('%s#2.data' % newer, False, True),
                      ('%s#4.data' % older, False, False),
                      ('%s.ts' % much_older, '.ts', False)],

                     # tombstone reclaimed despite newer non-durable data
                     # tombstone reclaimed despite much older durable
                     [('%s.ts' % older, '.ts', False),
                      ('%s.durable' % much_older, False, False)],

@@ -1329,11 +1362,11 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase):
             '0000000006.00000.durable'],

            ['0000000007.00000.meta',
             '0000000006.00000#1.data'],
             '0000000006.00000#1.data'],

            ['0000000007.00000.meta',
             '0000000006.00000.durable',
             '0000000005.00000#1.data']
             '0000000006.00000.durable',
             '0000000005.00000#1.data']
        ]
        for files in scenarios:
            class_under_test = self._get_diskfile(POLICIES.default)

@@ -4015,14 +4048,7 @@ class TestSuffixHashes(unittest.TestCase):
        for policy in self.iter_policies():
            file1, file2 = [self.ts().internal + '.meta' for i in range(2)]
            file_list = [file1, file2]
            if policy.policy_type == EC_POLICY:
                # EC policy does tolerate only .meta's in dir when cleaning up
                expected = [file2]
            else:
                # the get_ondisk_files contract validation doesn't allow a
                # directory with only .meta files
                expected = AssertionError()
            self.check_hash_cleanup_listdir(policy, file_list, expected)
            self.check_hash_cleanup_listdir(policy, file_list, [file2])

    def test_hash_cleanup_listdir_ignore_orphaned_ts(self):
        for policy in self.iter_policies():

@@ -4056,13 +4082,7 @@ class TestSuffixHashes(unittest.TestCase):
            file1 = Timestamp(old_float).internal + '.ts'
            file2 = Timestamp(time() + 2).internal + '.meta'
            file_list = [file1, file2]
            if policy.policy_type == EC_POLICY:
                # EC will clean up old .ts despite a .meta
                expected = [file2]
            else:
                # An orphaned .meta will not clean up a very old .ts
                expected = [file2, file1]
            self.check_hash_cleanup_listdir(policy, file_list, expected)
            self.check_hash_cleanup_listdir(policy, file_list, [file2])

    def test_hash_cleanup_listdir_keep_single_old_data(self):
        for policy in self.iter_policies():

@@ -4127,13 +4147,7 @@ class TestSuffixHashes(unittest.TestCase):
            file1 = self._datafilename(Timestamp(1), policy)
            file2 = '0000000002.00000.ts'
            file_list = [file1, file2]
            if policy.policy_type == EC_POLICY:
                # the .ts gets reclaimed up despite failed .data delete
                expected = []
            else:
                # the .ts isn't reclaimed because there were two files in dir
                expected = [file2]
            self.check_hash_cleanup_listdir(policy, file_list, expected)
            self.check_hash_cleanup_listdir(policy, file_list, [])

            # invalidate_hash tests - behavior

@@ -4237,14 +4251,12 @@ class TestSuffixHashes(unittest.TestCase):
            old_time = time() - 1001
            timestamp = Timestamp(old_time)
            df.delete(timestamp.internal)
            tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
            hashes = df_mgr.get_hashes('sda1', '0', [], policy)
            expected = {
                # repl is broken, it doesn't use self.reclaim_age
                REPL_POLICY: tombstone_hash,
                EC_POLICY: {},
                REPL_POLICY: {suffix: EMPTY_ETAG},
                EC_POLICY: {suffix: {}},
            }[policy.policy_type]
            self.assertEqual(hashes, {suffix: expected})
            hashes = df_mgr.get_hashes('sda1', '0', [], policy)
            self.assertEqual(hashes, expected)

    def test_hash_suffix_one_datafile(self):
        for policy in self.iter_policies():

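The doubled list() around zip in this file merits a note: on Python 2, zip
returns a list and zip(*test)[0] can be indexed directly, but on Python 3 zip
returns a lazy iterator, so it must be materialized before indexing. A
self-contained sketch of exactly the difference the comment points at:

    test = [('a.data', '.data', True), ('b.ts', None, False)]
    # zip(*test)[0] is py2-only; wrap in list() first for py3
    files = list(list(zip(*test))[0])
    assert files == ['a.data', 'b.ts']
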
@@ -17,14 +17,14 @@ import unittest
import os
from hashlib import md5
import mock
import cPickle as pickle
import six.moves.cPickle as pickle
import tempfile
import time
import shutil
import re
import random
import struct
from eventlet import Timeout
from eventlet import Timeout, sleep

from contextlib import closing, nested, contextmanager
from gzip import GzipFile

@@ -599,10 +599,74 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
        self.assertFalse(jobs)  # that should be all of them
        check_jobs(part_num)

    def test_run_once(self):
        with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
    def _run_once(self, http_count, extra_devices, override_devices=None):
        ring_devs = list(self.policy.object_ring.devs)
        for device, parts in extra_devices.items():
            device_path = os.path.join(self.devices, device)
            os.mkdir(device_path)
            for part in range(parts):
                os.makedirs(os.path.join(device_path, 'objects-1', str(part)))
            # we update the ring to make is_local happy
            devs = [dict(d) for d in ring_devs]
            for d in devs:
                d['device'] = device
            self.policy.object_ring.devs.extend(devs)
        self.reconstructor.stats_interval = 0
        self.process_job = lambda j: sleep(0)
        with mocked_http_conn(*[200] * http_count, body=pickle.dumps({})):
            with mock_ssync_sender():
                self.reconstructor.run_once()
                self.reconstructor.run_once(devices=override_devices)

    def test_run_once(self):
        # sda1: 3 is done in setup
        extra_devices = {
            'sdb1': 4,
            'sdc1': 1,
            'sdd1': 0,
        }
        self._run_once(18, extra_devices)
        stats_lines = set()
        for line in self.logger.get_lines_for_level('info'):
            if 'devices reconstructed in' not in line:
                continue
            stat_line = line.split('of', 1)[0].strip()
            stats_lines.add(stat_line)
        acceptable = set([
            '0/3 (0.00%) partitions',
            '8/8 (100.00%) partitions',
        ])
        matched = stats_lines & acceptable
        self.assertEqual(matched, acceptable,
                         'missing some expected acceptable:\n%s' % (
                             '\n'.join(sorted(acceptable - matched))))
        self.assertEqual(self.reconstructor.reconstruction_device_count, 4)
        self.assertEqual(self.reconstructor.reconstruction_part_count, 8)
        self.assertEqual(self.reconstructor.part_count, 8)

    def test_run_once_override_devices(self):
        # sda1: 3 is done in setup
        extra_devices = {
            'sdb1': 4,
            'sdc1': 1,
            'sdd1': 0,
        }
        self._run_once(2, extra_devices, 'sdc1')
        stats_lines = set()
        for line in self.logger.get_lines_for_level('info'):
            if 'devices reconstructed in' not in line:
                continue
            stat_line = line.split('of', 1)[0].strip()
            stats_lines.add(stat_line)
        acceptable = set([
            '1/1 (100.00%) partitions',
        ])
        matched = stats_lines & acceptable
        self.assertEqual(matched, acceptable,
                         'missing some expected acceptable:\n%s' % (
                             '\n'.join(sorted(acceptable - matched))))
        self.assertEqual(self.reconstructor.reconstruction_device_count, 1)
        self.assertEqual(self.reconstructor.reconstruction_part_count, 1)
        self.assertEqual(self.reconstructor.part_count, 1)

    def test_get_response(self):
        part = self.part_nums[0]

@@ -621,6 +685,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):

    def test_reconstructor_skips_bogus_partition_dirs(self):
        # A directory in the wrong place shouldn't crash the reconstructor
        self.reconstructor._reset_stats()
        rmtree(self.objects_1)
        os.mkdir(self.objects_1)

@@ -699,6 +764,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
        self.assertEqual(expected_partners, sorted(got_partners))

    def test_collect_parts(self):
        self.reconstructor._reset_stats()
        parts = []
        for part_info in self.reconstructor.collect_parts():
            parts.append(part_info['partition'])

@@ -709,6 +775,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
        def blowup_mkdirs(path):
            raise OSError('Ow!')

        self.reconstructor._reset_stats()
        with mock.patch.object(object_reconstructor, 'mkdirs', blowup_mkdirs):
            rmtree(self.objects_1, ignore_errors=1)
            parts = []

@@ -734,6 +801,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
        # since our collect_parts job is a generator, that yields directly
        # into build_jobs and then spawns it's safe to do the remove_files
        # without making reconstructor startup slow
        self.reconstructor._reset_stats()
        for part_info in self.reconstructor.collect_parts():
            self.assertNotEqual(pol_1_part_1_path, part_info['part_path'])
        self.assertFalse(os.path.exists(pol_1_part_1_path))

@@ -1033,6 +1101,7 @@ class TestObjectReconstructor(unittest.TestCase):
        self.reconstructor.job_count = 1

    def tearDown(self):
        self.reconstructor._reset_stats()
        self.reconstructor.stats_line()
        shutil.rmtree(self.testdir)

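The two new reconstructor tests assert against the periodic stats line by
keeping everything before the first 'of'. A small sketch of that parsing,
run against a made-up log line of the same shape as the ones the tests match
(the exact wording of the real log line is an assumption here):

    line = ('8/8 (100.00%) partitions of 4/4 (100.00%) devices '
            'reconstructed in 1.00s')
    if 'devices reconstructed in' in line:
        stat = line.split('of', 1)[0].strip()
        assert stat == '8/8 (100.00%) partitions'
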
@@ -18,7 +18,7 @@ import os
import mock
from gzip import GzipFile
from shutil import rmtree
import cPickle as pickle
import six.moves.cPickle as pickle
import time
import tempfile
from contextlib import contextmanager, closing

@@ -1282,7 +1282,7 @@ class TestObjectReplicator(unittest.TestCase):
            mount_check='false', timeout='300', stats_interval='1')
        replicator = object_replicator.ObjectReplicator(conf)
        was_connector = object_replicator.http_connect
        was_get_hashes = object_replicator.get_hashes
        was_get_hashes = object_replicator.DiskFileManager._get_hashes
        was_execute = tpool.execute
        self.get_hash_count = 0
        try:

@@ -1300,7 +1300,7 @@ class TestObjectReplicator(unittest.TestCase):
            self.i_failed = False
            object_replicator.http_connect = mock_http_connect(200)
            object_replicator.get_hashes = fake_get_hashes
            object_replicator.DiskFileManager._get_hashes = fake_get_hashes
            replicator.logger.exception = \
                lambda *args, **kwargs: fake_exc(self, *args, **kwargs)
            # Write some files into '1' and run replicate- they should be moved

@@ -1337,7 +1337,7 @@ class TestObjectReplicator(unittest.TestCase):
            self.assertFalse(self.i_failed)
        finally:
            object_replicator.http_connect = was_connector
            object_replicator.get_hashes = was_get_hashes
            object_replicator.DiskFileManager._get_hashes = was_get_hashes
            tpool.execute = was_execute

    def test_run(self):