Merge remote-tracking branch 'gerrit/master' into feature/losf
Change-Id: If9d7c63f3c4c15fbccff31e2b77a6911bb95972a
commit 481f126e6b
.zuul.yaml | 149 lines changed
@@ -6,7 +6,7 @@
       It sets TMPDIR to an XFS mount point created via
       tools/test-setup.sh.
-    timeout: 2400
+    timeout: 3600
     vars:
       tox_environment:
         TMPDIR: '{{ ansible_env.HOME }}/xfstmp'
@@ -139,30 +139,6 @@
     vars:
       tox_envlist: func-ec-py3
 
-- job:
-    name: swift-tox-func-domain-remap-staticweb-py37
-    parent: swift-tox-func-py37
-    description: |
-      Run functional tests for swift under cPython version 3.7.
-
-      Uses tox with the ``func-domain-remap-staticweb-py3`` environment.
-      It sets TMPDIR to an XFS mount point created via
-      tools/test-setup.sh.
-    vars:
-      tox_envlist: func-domain-remap-staticweb-py3
-
-- job:
-    name: swift-tox-func-s3api-py37
-    parent: swift-tox-func-py37
-    description: |
-      Run functional tests for swift under cPython version 3.7.
-
-      Uses tox with the ``func-s3api`` environment.
-      It sets TMPDIR to an XFS mount point created via
-      tools/test-setup.sh.
-    vars:
-      tox_envlist: func-s3api-py3
-
 - job:
     name: swift-tox-func-py27-centos-7
     parent: swift-tox-func-py27
@@ -219,30 +195,6 @@
     parent: swift-tox-func-ec-py27
     nodeset: centos-7
 
-- job:
-    name: swift-tox-func-domain-remap-staticweb-py27
-    parent: swift-tox-base
-    description: |
-      Run functional tests for swift under cPython version 2.7.
-
-      Uses tox with the ``func-domain-remap-staticweb`` environment.
-      It sets TMPDIR to an XFS mount point created via
-      tools/test-setup.sh.
-    vars:
-      tox_envlist: func-domain-remap-staticweb
-
-- job:
-    name: swift-tox-func-s3api-py27
-    parent: swift-tox-base
-    description: |
-      Run functional tests for swift under cPython version 2.7.
-
-      Uses tox with the ``func-s3api`` environment.
-      It sets TMPDIR to an XFS mount point created via
-      tools/test-setup.sh.
-    vars:
-      tox_envlist: func-s3api
-
 - job:
     name: swift-dsvm-functional
     parent: devstack-minimal
@@ -256,16 +208,12 @@
           override-checkout: master
         - name: opendev.org/openstack/devstack
           override-checkout: master
-    timeout: 2700
+    timeout: 3600
     vars:
      tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/openstack/requirements/upper-constraints.txt'
      # This tox env get run twice; once for Keystone and once for tempauth
      tox_envlist: func
      devstack_localrc:
-        # Other services are fine to run py3
-        USE_PYTHON3: true
-        # explicitly state that we want to test swift under py2
-        DISABLED_PYTHON3_PACKAGES: 'swift'
        SWIFT_HASH: changeme
        # We don't need multiple replicas to run purely functional tests.
        # In fact, devstack special cases some things when there's only
@@ -279,25 +227,12 @@
       devstack_services:
         keystone: true
         swift: true
+        s3api: true
       zuul_work_dir: src/opendev.org/openstack/swift
     pre-run: tools/playbooks/dsvm/pre.yaml
     run: tools/playbooks/dsvm/run.yaml
     post-run: tools/playbooks/dsvm/post.yaml
 
-- job:
-    name: swift-dsvm-functional-py3
-    parent: swift-dsvm-functional
-    description: |
-      Setup a Swift/Keystone environment under py3 and run Swift's func tests
-      (also under py3).
-    vars:
-      # This tox env get run twice; once for Keystone and once for tempauth
-      tox_envlist: func-py3
-      devstack_localrc:
-        USE_PYTHON3: true
-        # explicitly clear swift's default-disabled status
-        DISABLED_PYTHON3_PACKAGES: ''
-
 - job:
     name: swift-dsvm-functional-ipv6
     parent: swift-dsvm-functional
@@ -313,7 +248,7 @@
     nodeset: centos-7
     description: |
       Setup a SAIO dev environment and run ceph-s3tests
-    timeout: 2400
+    timeout: 3600
     pre-run:
       - tools/playbooks/common/install_dependencies.yaml
       - tools/playbooks/saio_single_node_setup/setup_saio.yaml
@@ -559,59 +494,39 @@
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
-       - swift-tox-func-encryption-py27:
-           irrelevant-files:
-             - ^(api-ref|doc|releasenotes)/.*$
-             - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
        - swift-tox-func-domain-remap-staticweb-py27:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
-       - swift-tox-func-ec-py27:
-           irrelevant-files:
-             - ^(api-ref|doc|releasenotes)/.*$
-             - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
        - swift-tox-func-s3api-py27:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - swift-tox-func-losf-py27:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
 
        # py3 functional tests
        - swift-tox-func-py37:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
-       - swift-tox-func-encryption-py37:
-           irrelevant-files:
-             - ^(api-ref|doc|releasenotes)/.*$
-             - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
        - swift-tox-func-domain-remap-staticweb-py37:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
-       - swift-tox-func-ec-py37:
-           irrelevant-files:
-             - ^(api-ref|doc|releasenotes)/.*$
-             - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
        - swift-tox-func-s3api-py37:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
 
        # Other tests
        - swift-tox-func-s3api-ceph-s3tests-tempauth:
@@ -621,29 +536,24 @@
              # Also keep doc/s3api -- it holds known failures for these tests
              - ^doc/(requirements.txt|(manpages|source)/.*)$
              - ^test/(unit|probe)/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - swift-probetests-centos-7:
            irrelevant-files:
              - ^(api-ref|releasenotes)/.*$
              # Keep doc/saio -- we use those sample configs in the saio playbooks
              - ^doc/(requirements.txt|(manpages|s3api|source)/.*)$
              - ^test/(unit|functional)/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - swift-dsvm-functional:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
-       - swift-dsvm-functional-py3:
-           irrelevant-files:
-             - ^(api-ref|doc|releasenotes)/.*$
-             - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - swift-dsvm-functional-ipv6:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - swift-tox-lower-constraints:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
@@ -657,24 +567,24 @@
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
            # 2.25.0 had a test issue; see https://review.opendev.org/#/c/721518/
            voting: false
        - tempest-integrated-object-storage:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - tempest-ipv6-only:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - grenade:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
    gate:
      jobs:
        # For gate jobs, err towards running more jobs (so, generally avoid
@@ -689,37 +599,28 @@
        - swift-tox-py38
        - swift-tox-func-py27
-       - swift-tox-func-encryption-py27
        - swift-tox-func-domain-remap-staticweb-py27
-       - swift-tox-func-ec-py27
        - swift-tox-func-s3api-py27
        - swift-tox-func-losf-py27
        - swift-tox-func-py37
-       - swift-tox-func-encryption
        - swift-tox-func-domain-remap-staticweb-py37
-       - swift-tox-func-ec-py37
        - swift-tox-func-s3api-py37
        - swift-probetests-centos-7:
            irrelevant-files:
              - ^(api-ref|releasenotes)/.*$
              # Keep doc/saio -- we use those sample configs in the saio playbooks
              - ^doc/(requirements.txt|(manpages|s3api|source)/.*)$
              - ^test/(unit|functional)/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - swift-dsvm-functional:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
-       - swift-dsvm-functional-py3:
-           irrelevant-files:
-             - ^(api-ref|doc|releasenotes)/.*$
-             - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - swift-dsvm-functional-ipv6:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/probe/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - swift-tox-lower-constraints:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
@@ -733,17 +634,17 @@
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - tempest-ipv6-only:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
        - grenade:
            irrelevant-files:
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/.*$
-             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+             - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
    experimental:
      jobs:
        - swift-tox-py27-centos-7
CHANGELOG | 11 lines changed
|
@ -2,12 +2,6 @@ swift (2.25.0, OpenStack Ussuri)
|
|||
|
||||
* WSGI server processes can now notify systemd when they are ready.
|
||||
|
||||
* Added a new middleware that allows users and operators to configure
|
||||
accounts and containers to use RFC-compliant (i.e., double-quoted)
|
||||
ETags. This may be useful when using Swift as an origin for some
|
||||
content delivery networks. For more information, see
|
||||
https://docs.openstack.org/swift/latest/middleware.html#etag-quoter
|
||||
|
||||
* Added `ttfb` (Time to First Byte) and `pid` (Process ID) to the set
|
||||
of available proxy-server log fields. For more information, see
|
||||
https://docs.openstack.org/swift/latest/logs.html
|
||||
|
@@ -58,8 +52,9 @@ swift (2.24.0)
 * Added support for S3 versioning using the above new mode.
 
 * Added a new middleware to allow accounts and containers to opt-in to
-  RFC-compliant ETags. For more information, see the documentation at
-  https://docs.openstack.org/swift/latest/middleware.html#module-swift.common.middleware.etag_quoter
+  RFC-compliant ETags. This may be useful when using Swift as an origin
+  for some content delivery networks. For more information, see the
+  documentation at https://docs.openstack.org/swift/latest/middleware.html#module-swift.common.middleware.etag_quoter
   Clients should be aware of the fact that ETags may be quoted for RFC
   compliance; this may become the default behavior in some future release.
 
@@ -21,6 +21,16 @@ Swift is nothing without the community behind it. We'd love to welcome you to
 our community. Come find us in #openstack-swift on freenode IRC or on the
 OpenStack dev mailing list.
 
+For general information on contributing to OpenStack, please check out the
+`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+If you want more Swift related project documentation make sure you checkout
+the Swift developer (contributor) documentation at
+https://docs.openstack.org/swift/latest/
+
 Filing a Bug
 ~~~~~~~~~~~~
 
@@ -369,7 +369,7 @@ Endeavor to leave a positive or negative score on every change you review.
 Use your best judgment.
 
 A note on Swift Core Maintainers
-================================
+--------------------------------
 
 Swift Core maintainers may provide positive reviews scores that *look*
 different from your reviews - a "+2" instead of a "+1".
@@ -25,21 +25,18 @@
 
 import datetime
 import os
 from swift import __version__
 import subprocess
 import sys
 import warnings
 
-import openstackdocstheme
-
 html_theme = 'openstackdocs'
-html_theme_path = [openstackdocstheme.get_html_theme_path()]
 html_theme_options = {
     "sidebar_mode": "toc",
 }
 
 extensions = [
     'os_api_ref',
     'openstackdocstheme'
 ]
 
 # If extensions (or modules to document with autodoc) are in another directory,
@@ -68,19 +65,6 @@ master_doc = 'index'
 project = u'Object Storage API Reference'
 copyright = u'2010-present, OpenStack Foundation'
 
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = __version__.rsplit('.', 1)[0]
-# The full version, including alpha/beta/rc tags.
-release = __version__
-
-# html_context allows us to pass arbitrary values into the html template
-html_context = {'bug_tag': 'api-ref',
-                'bug_project': 'swift'}
-
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
 #
@@ -108,7 +92,12 @@ add_module_names = False
 show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
 
+# openstackdocstheme options
+openstackdocs_repo_name = 'openstack/swift'
+openstackdocs_bug_project = 'swift'
+openstackdocs_bug_tag = 'api-ref'
+
 # -- Options for man page output ----------------------------------------------
 
@@ -152,25 +141,6 @@ pygments_style = 'sphinx'
 # so a file named "default.css" will overwrite the builtin "default.css".
 # html_static_path = ['_static']
 
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-if 'SOURCE_DATE_EPOCH' in os.environ:
-    now = float(os.environ.get('SOURCE_DATE_EPOCH'))
-    html_last_updated_fmt = datetime.datetime.utcfromtimestamp(now).isoformat()
-else:
-    git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'",
-               "--date=local", "-n1"]
-    try:
-        html_last_updated_fmt = subprocess.Popen(
-            git_cmd, stdout=subprocess.PIPE).communicate()[0]
-    except OSError:
-        warnings.warn('Cannot get last updated time from git repository. '
-                      'Not setting "html_last_updated_fmt".')
-    else:
-        if not isinstance(html_last_updated_fmt, str):
-            # for py3
-            html_last_updated_fmt = html_last_updated_fmt.decode('ascii')
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
 # html_use_smartypants = True
@@ -52,6 +52,8 @@ if __name__ == '__main__':
                       help='Specify which policy to use')
     parser.add_option('-d', '--swift-dir', default='/etc/swift',
                       dest='swift_dir', help='Path to swift directory')
+    parser.add_option('-Q', '--quoted', action='store_true',
+                      help='Assume swift paths are quoted')
     options, args = parser.parse_args()
 
     if set_swift_dir(options.swift_dir):
@@ -28,6 +28,8 @@ if __name__ == '__main__':
                         dest='swift_dir', help='Path to swift directory')
     parser.add_argument('--devices', default='/srv/node',
                         dest='devices', help='Path to swift device directory')
+    parser.add_argument('--device', default=None, dest='device',
+                        help='Device name to relink (default: all)')
     parser.add_argument('--skip-mount-check', default=False,
                         help='Don\'t test if disk is mounted',
                         action="store_true", dest='skip_mount_check')
@@ -2,10 +2,9 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 # this is required for the docs build jobs
-sphinx>=1.6.2,<2.0.0;python_version=='2.7' # BSD
-sphinx>=1.6.2;python_version>='3.4' # BSD
-openstackdocstheme>=1.30.0 # Apache-2.0
-reno>=1.8.0 # Apache-2.0
+sphinx>=2.0.0,!=2.1.0 # BSD
+openstackdocstheme>=2.2.1 # Apache-2.0
+reno>=3.1.0 # Apache-2.0
 os-api-ref>=1.0.0 # Apache-2.0
 python-keystoneclient!=2.1.0,>=2.0.0 # Apache-2.0
 sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
@@ -80,6 +80,9 @@ use = egg:swift#copy
 [filter:listing_formats]
 use = egg:swift#listing_formats
 
+[filter:domain_remap]
+use = egg:swift#domain_remap
+
 [filter:symlink]
 use = egg:swift#symlink
 
@@ -29,7 +29,6 @@
 import datetime
 import logging
 import os
-from swift import __version__
 import sys
 
 # NOTE(amotoki): Our current doc build job uses an older version of
@@ -80,15 +79,6 @@ else:
     now = datetime.date.today()
     copyright = u'%d, OpenStack Foundation' % now.year
 
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = __version__.rsplit('.', 1)[0]
-# The full version, including alpha/beta/rc tags.
-release = __version__
-
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
 # language = None
@@ -122,7 +112,7 @@ exclude_trees = []
 show_authors = True
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
 
 # A list of ignored prefixes for module index sorting.
 modindex_common_prefix = ['swift.']
@@ -172,11 +162,6 @@ html_theme_options = {
 # robots.txt.
 html_extra_path = ['_extra']
 
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
 # html_use_smartypants = True
@@ -248,6 +233,8 @@ latex_documents = [
 latex_use_xindy = False
 
 # -- Options for openstackdocstheme -------------------------------------------
-repository_name = 'openstack/swift'
-bug_project = 'swift'
-bug_tag = ''
+openstackdocs_repo_name = 'openstack/swift'
+openstackdocs_pdf_link = True
+openstackdocs_auto_name = False
+openstackdocs_bug_project = 'swift'
+openstackdocs_bug_tag = ''
@@ -0,0 +1,79 @@
+.. include:: ../../../CONTRIBUTING.rst
+
+Community
+=========
+
+Communication
+-------------
+IRC
+  People working on the Swift project may be found in the
+  ``#openstack-swift`` channel on Freenode during working hours
+  in their timezone. The channel is logged, so if you ask a question
+  when no one is around, you can check the log to see if it's been
+  answered: http://eavesdrop.openstack.org/irclogs/%23openstack-swift/
+
+weekly meeting
+  This is a Swift team meeting. The discussion in this meeting is about
+  all things related to the Swift project:
+
+  - time: http://eavesdrop.openstack.org/#Swift_Team_Meeting
+  - agenda: https://wiki.openstack.org/wiki/Meetings/Swift
+
+mailing list
+  We use the openstack-discuss@lists.openstack.org mailing list for
+  asynchronous discussions or to communicate with other OpenStack teams.
+  Use the prefix ``[swift]`` in your subject line (it's a high-volume
+  list, so most people use email filters).
+
+  More information about the mailing list, including how to subscribe
+  and read the archives, can be found at:
+  http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+------------------------
+
+The swift-core team is an active group of contributors who are responsible
+for directing and maintaining the Swift project. As a new contributor, your
+interaction with this group will be mostly through code reviews, because
+only members of swift-core can approve a code change to be merged into the
+code repository. But the swift-core team also spend time on IRC so feel
+free to drop in to ask questions or just to meet us.
+
+.. note::
+   Although your contribution will require reviews by members of
+   swift-core, these aren't the only people whose reviews matter.
+   Anyone with a gerrit account can post reviews, so you can ask
+   other developers you know to review your code ... and you can
+   review theirs. (A good way to learn your way around the codebase
+   is to review other people's patches.)
+
+   If you're thinking, "I'm new at this, how can I possibly provide
+   a helpful review?", take a look at `How to Review Changes the
+   OpenStack Way
+   <https://docs.openstack.org/project-team-guide/review-the-openstack-way.html>`_.
+
+   Or for more specifically in a Swift context read :doc:`review_guidelines`
+
+You can learn more about the role of core reviewers in the OpenStack
+governance documentation:
+https://docs.openstack.org/contributors/common/governance.html#core-reviewer
+
+The membership list of swift-core is maintained in gerrit:
+https://review.opendev.org/#/admin/groups/24,members
+
+You can also find the members of the swift-core team at the Swift weekly
+meetings.
+
+Getting Your Patch Merged
+-------------------------
+Understanding how reviewers review and what they look for will help getting
+your code merged. See `Swift Review Guidelines <contributor/review_guidelines>`_
+for how we review code.
+
+Keep in mind that reviewers are also human; if something feels stalled, then
+come and poke us on IRC or add it to our meeting agenda.
+
+Project Team Lead Duties
+------------------------
+All common PTL duties are enumerated in the `PTL guide
+<https://docs.openstack.org/project-team-guide/ptl.html>`_.
@@ -0,0 +1 @@
+.. include:: ../../../REVIEW_GUIDELINES.rst
|
|||
ring_partpower
|
||||
associated_projects
|
||||
|
||||
Contributor Documentation
|
||||
=========================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
contributor/contributing
|
||||
contributor/review_guidelines
|
||||
|
||||
Developer Documentation
|
||||
=======================
|
||||
|
||||
|
|
|
@@ -60,8 +60,6 @@ Amazon S3 operations
 +------------------------------------------------+------------------+--------------+
 | `PUT Bucket acl`_                              | Core-API         | Yes          |
 +------------------------------------------------+------------------+--------------+
-| `Object tagging`_                              | Core-API         | Yes          |
-+------------------------------------------------+------------------+--------------+
 | `Versioning`_                                  | Versioning       | Yes          |
 +------------------------------------------------+------------------+--------------+
 | `Bucket notification`_                         | Notifications    | No           |

@@ -78,6 +76,8 @@ Amazon S3 operations
 +------------------------------------------------+------------------+--------------+
 | `Delete Multiple Objects`_                     | Advanced Feature | Yes          |
 +------------------------------------------------+------------------+--------------+
+| `Object tagging`_                              | Advanced Feature | No           |
++------------------------------------------------+------------------+--------------+
 | `GET Object torrent`_                          | Advanced Feature | No           |
 +------------------------------------------------+------------------+--------------+
 | `Bucket inventory`_                            | Advanced Feature | No           |
@@ -470,7 +470,10 @@ use = egg:swift#s3api
 # With either tempauth or your custom auth:
 #  - Put s3api just before your auth filter(s) in the pipeline
 # With keystone:
-#  - Put s3api and s3token before keystoneauth in the pipeline
+#  - Put s3api and s3token before keystoneauth in the pipeline, but after
+#    auth_token
+# If you have ratelimit enabled for Swift requests, you may want to place a
+# second copy after auth to also ratelimit S3 requests.
 #
 # Swift has no concept of the S3's resource owner; the resources
 # (i.e. containers and objects) created via the Swift API have no owner
@@ -44,8 +44,6 @@ netifaces==0.8
 nose==1.3.7
 nosehtmloutput==0.0.3
 nosexcover==1.0.10
-openstackdocstheme==1.30.0
-os-api-ref==1.0.0
 os-testr==0.8.0
 oslo.config==4.0.0
 oslo.i18n==3.20.0

@@ -68,15 +66,12 @@ python-swiftclient==3.2.0
 python-openstackclient==3.12.0
 pytz==2018.3
 PyYAML==3.12
-reno==1.8.0
 requests==2.14.2
 requests-mock==1.2.0
 rfc3986==1.1.0
 six==1.10.0
 smmap2==2.0.3
 snowballstemmer==1.2.1
-Sphinx==1.6.2
-sphinxcontrib-websupport==1.0.1
 stestr==2.0.0
 stevedore==1.28.0
 testtools==2.3.0
@@ -116,7 +116,7 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
 # show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
 
 # A list of ignored prefixes for module index sorting.
 # modindex_common_prefix = []

@@ -174,11 +174,6 @@ html_theme = 'openstackdocs'
 #
 # html_extra_path = []
 
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
 #

@@ -352,6 +347,7 @@ htmlhelp_basename = 'SwiftReleaseNotesdoc'
 locale_dirs = ['locale/']
 
 # -- Options for openstackdocstheme -------------------------------------------
-repository_name = 'openstack/swift'
-bug_project = 'swift'
-bug_tag = ''
+openstackdocs_repo_name = 'openstack/swift'
+openstackdocs_auto_name = False
+openstackdocs_bug_project = 'swift'
+openstackdocs_bug_tag = ''

(One file diff suppressed because it is too large.)
@@ -2,7 +2,6 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-dnspython>=1.15.0;python_version=='2.7' # http://www.dnspython.org/LICENSE
 eventlet>=0.25.0 # MIT
 greenlet>=0.3.2
 netifaces>=0.8,!=0.10.0,!=0.10.1

@@ -13,6 +12,14 @@ six>=1.10.0
 xattr>=0.4;sys_platform!='win32' # MIT
 PyECLib>=1.3.1 # BSD
 cryptography>=2.0.2 # BSD/Apache-2.0
-ipaddress>=1.0.16;python_version<'3.3' # PSF
 fusepy>=2.0.4
 protobuf>=3.9.1
+
+# For python 2.7, the following requirements are needed; they are not
+# included since the requirments-check check will fail otherwise since
+# global requirements do not support these anymore.
+# Fortunately, these packages come in as dependencies from others and
+# thus the py27 jobs still work.
+#
+# dnspython>=1.15.0;python_version=='2.7' # http://www.dnspython.org/LICENSE
+# ipaddress>=1.0.16;python_version<'3.3' # PSF
@@ -0,0 +1,134 @@
+- name: Set S3 endpoint
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: s3_storage_url
+    value: http://localhost:8080
+  become: true
+
+- name: Create primary S3 user
+  shell: >
+    openstack --os-auth-url http://localhost/identity
+    --os-project-domain-id default --os-project-name admin
+    --os-user-domain-id default --os-username admin
+    --os-password secretadmin
+    credential create --type ec2 --project swiftprojecttest1 swiftusertest1
+    '{"access": "s3-user1", "secret": "s3-secret1"}'
+- name: Add primary S3 user to test.conf
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: s3_access_key
+    value: s3-user1
+  become: true
+- name: Add primary S3 user secret to test.conf
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: s3_secret_key
+    value: s3-secret1
+  become: true
+
+- name: Clear secondary S3 user from test.conf
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: s3_access_key2
+    value: ""
+  become: true
+
+- name: Create restricted S3 user
+  shell: >
+    openstack --os-auth-url http://localhost/identity
+    --os-project-domain-id default --os-project-name admin
+    --os-user-domain-id default --os-username admin
+    --os-password secretadmin
+    credential create --type ec2 --project swiftprojecttest1 swiftusertest3
+    '{"access": "s3-user3", "secret": "s3-secret3"}'
+- name: Add restricted S3 user to test.conf
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: s3_access_key3
+    value: s3-user3
+  become: true
+- name: Add restricted S3 user secret to test.conf
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: s3_secret_key3
+    value: s3-secret3
+  become: true
+
+- name: Create service role
+  shell: >
+    openstack --os-auth-url http://localhost/identity
+    --os-project-domain-id default --os-project-name admin
+    --os-user-domain-id default --os-username admin
+    --os-password secretadmin
+    role create swift_service
+- name: Create service project
+  shell: >
+    openstack --os-auth-url http://localhost/identity
+    --os-project-domain-id default --os-project-name admin
+    --os-user-domain-id default --os-username admin
+    --os-password secretadmin
+    project create swiftprojecttest5
+- name: Create service user
+  shell: >
+    openstack --os-auth-url http://localhost/identity
+    --os-project-domain-id default --os-project-name admin
+    --os-user-domain-id default --os-username admin
+    --os-password secretadmin
+    user create --project swiftprojecttest5 swiftusertest5 --password testing5
+- name: Assign service role
+  shell: >
+    openstack --os-auth-url http://localhost/identity
+    --os-project-domain-id default --os-project-name admin
+    --os-user-domain-id default --os-username admin
+    --os-password secretadmin
+    role add --project swiftprojecttest5 --user swiftusertest5 swift_service
+
+- name: Add service_roles to proxy-server.conf
+  ini_file:
+    path: /etc/swift/proxy-server.conf
+    section: filter:keystoneauth
+    option: SERVICE_KEY_service_roles
+    value: swift_service
+  become: true
+- name: Update reseller prefixes in proxy-server.conf
+  ini_file:
+    path: /etc/swift/proxy-server.conf
+    section: filter:keystoneauth
+    option: reseller_prefix
+    value: AUTH, SERVICE_KEY
+  become: true
+
+- name: Add service account to test.conf
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: account5
+    value: swiftprojecttest5
+  become: true
+- name: Add service user to test.conf
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: username5
+    value: swiftusertest5
+  become: true
+- name: Add service password to test.conf
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: password5
+    value: testing5
+  become: true
+- name: Add service prefix to test.conf
+  ini_file:
+    path: /etc/swift/test.conf
+    section: func_test
+    option: service_prefix
+    value: SERVICE_KEY
+  become: true
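The tasks above drive every edit through Ansible's ini_file module, so their net effect is a handful of key/value pairs in the [func_test] section of /etc/swift/test.conf. A quick way to sanity-check the result on the test node; a sketch only, assuming the paths and option names exactly as configured above:

    from six.moves.configparser import ConfigParser

    conf = ConfigParser()
    conf.read('/etc/swift/test.conf')
    # options written by the playbook above
    for opt in ('s3_storage_url', 's3_access_key', 's3_secret_key',
                's3_access_key3', 's3_secret_key3',
                'account5', 'username5', 'password5', 'service_prefix'):
        print(opt, '=', conf.get('func_test', opt))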
@@ -0,0 +1,47 @@
+- name: Configure service auth prefix for tempauth tests
+  ini_file:
+    path: /etc/swift/proxy-server.conf
+    section: filter:tempauth
+    option: reseller_prefix
+    value: TEMPAUTH, SERVICE_TA
+  become: true
+
+- name: Configure service group for tempauth tests
+  ini_file:
+    path: /etc/swift/proxy-server.conf
+    section: filter:tempauth
+    option: SERVICE_TA_require_group
+    value: service
+  become: true
+
+- name: Configure service account for tempauth tests
+  ini_file:
+    path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+    section: func_test
+    option: account5
+    value: test5
+  become: true
+
+- name: Configure service username for tempauth tests
+  ini_file:
+    path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+    section: func_test
+    option: username5
+    value: tester5
+  become: true
+
+- name: Configure service user password for tempauth tests
+  ini_file:
+    path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+    section: func_test
+    option: password5
+    value: testing5
+  become: true
+
+- name: Configure service prefix for tempauth tests
+  ini_file:
+    path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+    section: func_test
+    option: service_prefix
+    value: SERVICE_TA
+  become: true
@@ -1,8 +1,8 @@
-- name: Add more middlewares to pipeline
+- name: Add domain_remap and etag-quoter to pipeline
   replace:
-   path: "/etc/swift/proxy-server.conf"
-   regexp: "cache listing_formats"
-   replace: "cache domain_remap etag-quoter listing_formats"
+    path: "/etc/swift/proxy-server.conf"
+    regexp: "cache listing_formats"
+    replace: "cache domain_remap etag-quoter listing_formats"
   become: true
 
 - name: Set domain_remap domain

@@ -13,7 +13,7 @@
     value: example.com
   become: true
 
-- name: Set storage_domain in test.conf
+- name: Set storage_domain in test.conf (for Keystone tests)
   ini_file:
     path: /etc/swift/test.conf
     section: func_test

@@ -21,6 +21,14 @@
     value: example.com
   become: true
 
+- name: Set storage_domain in test/sample.conf (for tempauth tests)
+  ini_file:
+    path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+    section: func_test
+    option: storage_domain
+    value: example.com
+  become: true
+
 - name: Enable object versioning
   ini_file:
     path: /etc/swift/proxy-server.conf

@@ -29,6 +37,14 @@
     value: true
   become: true
 
+- name: Configure s3api force_swift_request_proxy_log
+  ini_file:
+    path: /etc/swift/proxy-server.conf
+    section: filter:s3api
+    option: force_swift_request_proxy_log
+    value: true
+  become: true
+
 - name: Copy ring for Policy-1
   copy:
     remote_src: true
@@ -19,6 +19,7 @@ classifier =
     Programming Language :: Python :: 3
     Programming Language :: Python :: 3.6
     Programming Language :: Python :: 3.7
+    Programming Language :: Python :: 3.8
 
 [pbr]
 skip_authors = True
@@ -57,6 +57,8 @@ def parse_get_node_args(options, args):
     else:
         raise InfoSystemExit('Ring file does not exist')
 
+    if options.quoted:
+        args = [urllib.parse.unquote(arg) for arg in args]
     if len(args) == 1:
         args = args[0].strip('/').split('/', 2)
 
@@ -614,15 +616,15 @@ def print_item_locations(ring, ring_name=None, account=None, container=None,
             ring = POLICIES.get_object_ring(policy_index, swift_dir)
             ring_name = (POLICIES.get_by_name(policy_name)).ring_name
 
-    if account is None and (container is not None or obj is not None):
+    if (container or obj) and not account:
         print('No account specified')
         raise InfoSystemExit()
 
-    if container is None and obj is not None:
+    if obj and not container:
         print('No container specified')
         raise InfoSystemExit()
 
-    if account is None and part is None:
+    if not account and not part:
         print('No target specified')
         raise InfoSystemExit()
 

@@ -654,8 +656,11 @@ def print_item_locations(ring, ring_name=None, account=None, container=None,
         print('Warning: account specified ' +
               'but ring not named "account"')
 
-    print('\nAccount \t%s' % account)
-    print('Container\t%s' % container)
-    print('Object  \t%s\n\n' % obj)
+    if account:
+        print('\nAccount \t%s' % urllib.parse.quote(account))
+    if container:
+        print('Container\t%s' % urllib.parse.quote(container))
+    if obj:
+        print('Object  \t%s\n\n' % urllib.parse.quote(obj))
     print_ring_locations(ring, loc, account, container, obj, part, all_nodes,
                          policy_index=policy_index)
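With the new -Q/--quoted option, swift-get-nodes can accept URL-encoded paths, and print_item_locations now re-quotes what it prints so the output can be pasted back into URLs. A small standard-library sketch of the round trip (the path is an arbitrary, hypothetical example):

    from urllib.parse import quote, unquote

    quoted = 'AUTH_test/movies/war%20and%20peace.mov'
    account, container, obj = unquote(quoted).strip('/').split('/', 2)
    print(account, container, obj)  # AUTH_test movies war and peace.mov
    print(quote(obj))               # war%20and%20peace.mov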
@@ -14,8 +14,12 @@
 # limitations under the License.
 
+import errno
+import fcntl
+import json
 import logging
 import os
+from functools import partial
 from swift.common.storage_policy import POLICIES
 from swift.common.exceptions import DiskFileDeleted, DiskFileNotExist, \
     DiskFileQuarantined
@@ -24,10 +28,126 @@ from swift.common.utils import replace_partition_in_path, \
 from swift.obj import diskfile
 
 
+LOCK_FILE = '.relink.{datadir}.lock'
+STATE_FILE = 'relink.{datadir}.json'
+STATE_TMP_FILE = '.relink.{datadir}.json.tmp'
+STEP_RELINK = 'relink'
+STEP_CLEANUP = 'cleanup'
+
+
+def devices_filter(device, _, devices):
+    if device:
+        devices = [d for d in devices if d == device]
+
+    return set(devices)
+
+
+def hook_pre_device(locks, states, datadir, device_path):
+    lock_file = os.path.join(device_path, LOCK_FILE.format(datadir=datadir))
+
+    fd = os.open(lock_file, os.O_CREAT | os.O_WRONLY)
+    fcntl.flock(fd, fcntl.LOCK_EX)
+    locks[0] = fd
+
+    state_file = os.path.join(device_path, STATE_FILE.format(datadir=datadir))
+    states.clear()
+    try:
+        with open(state_file, 'rt') as f:
+            tmp = json.load(f)
+            states.update(tmp)
+    except ValueError:
+        # Invalid JSON: remove the file to restart from scratch
+        os.unlink(state_file)
+    except IOError as err:
+        # Ignore file not found error
+        if err.errno != errno.ENOENT:
+            raise
+
+
+def hook_post_device(locks, _):
+    os.close(locks[0])
+    locks[0] = None
+
+
+def partitions_filter(states, step, part_power, next_part_power,
+                      datadir_path, partitions):
+    # Remove all non partitions first (eg: auditor_status_ALL.json)
+    partitions = [p for p in partitions if p.isdigit()]
+
+    if not (step == STEP_CLEANUP and part_power == next_part_power):
+        # This is not a cleanup after cancel, partitions in the upper half are
+        # new partitions, there is nothing to relink/cleanup from there
+        partitions = [p for p in partitions
+                      if int(p) < 2 ** next_part_power / 2]
+
+    # Format: { 'part': [relinked, cleaned] }
+    if states:
+        missing = list(set(partitions) - set(states.keys()))
+        if missing:
+            # All missing partitions was created after the first run of
+            # relink, so after the new ring was distribued, so they already
+            # are hardlinked in both partitions, but they will need to
+            # cleaned.. Just update the state file.
+            for part in missing:
+                states[part] = [True, False]
+        if step == STEP_RELINK:
+            partitions = [str(p) for p, (r, c) in states.items() if not r]
+        elif step == STEP_CLEANUP:
+            partitions = [str(p) for p, (r, c) in states.items() if not c]
+    else:
+        states.update({str(p): [False, False] for p in partitions})
+
+    # Always scan the partitions in reverse order to minimize the amount of IO
+    # (it actually only matters for relink, not for cleanup).
+    #
+    # Initial situation:
+    #  objects/0/000/00000000000000000000000000000000/12345.data
+    #  -> relinked to objects/1/000/10000000000000000000000000000000/12345.data
+    #
+    # If the relinker then scan partition 1, it will listdir that object while
+    # it's unnecessary. By working in reverse order of partitions, this is
+    # avoided.
+    partitions = sorted(partitions, key=lambda x: int(x), reverse=True)
+
+    return partitions
+
+
+# Save states when a partition is done
+def hook_post_partition(states, step,
+                        partition_path):
+    part = os.path.basename(os.path.abspath(partition_path))
+    datadir_path = os.path.dirname(os.path.abspath(partition_path))
+    device_path = os.path.dirname(os.path.abspath(datadir_path))
+    datadir_name = os.path.basename(os.path.abspath(datadir_path))
+    state_tmp_file = os.path.join(device_path,
+                                  STATE_TMP_FILE.format(datadir=datadir_name))
+    state_file = os.path.join(device_path,
+                              STATE_FILE.format(datadir=datadir_name))
+
+    if step == STEP_RELINK:
+        states[part][0] = True
+    elif step == STEP_CLEANUP:
+        states[part][1] = True
+    with open(state_tmp_file, 'wt') as f:
+        json.dump(states, f)
+        os.fsync(f.fileno())
+    os.rename(state_tmp_file, state_file)
+
+
+def hashes_filter(next_part_power, suff_path, hashes):
+    hashes = list(hashes)
+    for hsh in hashes:
+        fname = os.path.join(suff_path, hsh, 'fake-file-name')
+        if replace_partition_in_path(fname, next_part_power) == fname:
+            hashes.remove(hsh)
+    return hashes
+
+
 def relink(swift_dir='/etc/swift',
            devices='/srv/node',
            skip_mount_check=False,
-           logger=logging.getLogger()):
+           logger=logging.getLogger(),
+           device=None):
     mount_check = not skip_mount_check
     run = False
     relinked = errors = 0
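The state file maintained by hook_pre_device/hook_post_partition is plain JSON mapping each partition to a [relinked, cleaned] pair, which is what lets an interrupted relink or cleanup resume where it left off. A self-contained sketch of that bookkeeping (names mirror the functions above, but nothing here touches real devices):

    import json
    import os
    import tempfile

    STEP_RELINK, STEP_CLEANUP = 'relink', 'cleanup'

    def pending(states, step):
        # same selection partitions_filter makes, highest partition first
        idx = 0 if step == STEP_RELINK else 1
        return sorted((p for p, flags in states.items() if not flags[idx]),
                      key=int, reverse=True)

    states = {'0': [True, False], '1': [False, False], '2': [True, True]}
    assert pending(states, STEP_RELINK) == ['1']
    assert pending(states, STEP_CLEANUP) == ['1', '0']

    # on disk it is just json.dump() to relink.<datadir>.json, renamed into
    # place after an fsync, exactly as hook_post_partition does
    with tempfile.TemporaryDirectory() as d:
        path = os.path.join(d, 'relink.objects.json')
        with open(path, 'wt') as f:
            json.dump(states, f)
        with open(path) as f:
            assert json.load(f) == states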
@@ -41,10 +161,31 @@ def relink(swift_dir='/etc/swift',
         logging.info('Relinking files for policy %s under %s',
                      policy.name, devices)
         run = True
+        datadir = diskfile.get_data_dir(policy)
+
+        locks = [None]
+        states = {}
+        relink_devices_filter = partial(devices_filter, device)
+        relink_hook_pre_device = partial(hook_pre_device, locks, states,
+                                         datadir)
+        relink_hook_post_device = partial(hook_post_device, locks)
+        relink_partition_filter = partial(partitions_filter,
+                                          states, STEP_RELINK,
+                                          part_power, next_part_power)
+        relink_hook_post_partition = partial(hook_post_partition,
+                                             states, STEP_RELINK)
+        relink_hashes_filter = partial(hashes_filter, next_part_power)
+
         locations = audit_location_generator(
             devices,
-            diskfile.get_data_dir(policy),
-            mount_check=mount_check)
+            datadir,
+            mount_check=mount_check,
+            devices_filter=relink_devices_filter,
+            hook_pre_device=relink_hook_pre_device,
+            hook_post_device=relink_hook_post_device,
+            partitions_filter=relink_partition_filter,
+            hook_post_partition=relink_hook_post_partition,
+            hashes_filter=relink_hashes_filter)
         for fname, _, _ in locations:
             newfname = replace_partition_in_path(fname, next_part_power)
             try:
@@ -67,7 +208,8 @@ def relink(swift_dir='/etc/swift',
 def cleanup(swift_dir='/etc/swift',
             devices='/srv/node',
             skip_mount_check=False,
-            logger=logging.getLogger()):
+            logger=logging.getLogger(),
+            device=None):
     mount_check = not skip_mount_check
     conf = {'devices': devices, 'mount_check': mount_check}
     diskfile_router = diskfile.DiskFileRouter(conf, get_logger(conf))
@@ -83,10 +225,31 @@ def cleanup(swift_dir='/etc/swift',
         logging.info('Cleaning up files for policy %s under %s',
                      policy.name, devices)
         run = True
+        datadir = diskfile.get_data_dir(policy)
+
+        locks = [None]
+        states = {}
+        cleanup_devices_filter = partial(devices_filter, device)
+        cleanup_hook_pre_device = partial(hook_pre_device, locks, states,
+                                          datadir)
+        cleanup_hook_post_device = partial(hook_post_device, locks)
+        cleanup_partition_filter = partial(partitions_filter,
+                                           states, STEP_CLEANUP,
+                                           part_power, next_part_power)
+        cleanup_hook_post_partition = partial(hook_post_partition,
+                                              states, STEP_CLEANUP)
+        cleanup_hashes_filter = partial(hashes_filter, next_part_power)
+
         locations = audit_location_generator(
             devices,
-            diskfile.get_data_dir(policy),
-            mount_check=mount_check)
+            datadir,
+            mount_check=mount_check,
+            devices_filter=cleanup_devices_filter,
+            hook_pre_device=cleanup_hook_pre_device,
+            hook_post_device=cleanup_hook_post_device,
+            partitions_filter=cleanup_partition_filter,
+            hook_post_partition=cleanup_hook_post_partition,
+            hashes_filter=cleanup_hashes_filter)
         for fname, device, partition in locations:
             expected_fname = replace_partition_in_path(fname, part_power)
             if fname == expected_fname:
@@ -152,8 +315,10 @@ def main(args):
 
     if args.action == 'relink':
         return relink(
-            args.swift_dir, args.devices, args.skip_mount_check, logger)
+            args.swift_dir, args.devices, args.skip_mount_check, logger,
+            device=args.device)
 
     if args.action == 'cleanup':
         return cleanup(
-            args.swift_dir, args.devices, args.skip_mount_check, logger)
+            args.swift_dir, args.devices, args.skip_mount_check, logger,
+            device=args.device)
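relink() and cleanup() hand all of this machinery to audit_location_generator through keyword hooks (devices_filter, hook_pre_device, hook_post_device, partitions_filter, hook_post_partition, hashes_filter); the matching change to swift/common/utils.py is not part of the excerpt shown here. A minimal sketch of the same hook-driven walk, independent of Swift and explicitly not the real audit_location_generator signature:

    import os

    def walk_with_hooks(root, dirs_filter=None, hook_pre=None,
                        hook_post=None):
        # let the caller prune the top-level listing, then observe
        # entry/exit around each directory, like the relinker's hooks
        entries = sorted(os.listdir(root))
        if dirs_filter:
            entries = dirs_filter(root, entries)
        for entry in entries:
            path = os.path.join(root, entry)
            if hook_pre:
                hook_pre(path)
            for base, _, files in os.walk(path):
                for name in files:
                    yield os.path.join(base, name)
            if hook_post:
                hook_post(path)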
@@ -53,6 +53,9 @@ PICKLE_PROTOCOL = 2
 # records will be merged.
 PENDING_CAP = 131072
 
+SQLITE_ARG_LIMIT = 999
+RECLAIM_PAGE_SIZE = 10000
+
 
 def utf8encode(*args):
     return [(s.encode('utf8') if isinstance(s, six.text_type) else s)
@@ -981,16 +984,48 @@ class DatabaseBroker(object):
             with lock_parent_directory(self.pending_file,
                                        self.pending_timeout):
                 self._commit_puts()
-        with self.get() as conn:
-            self._reclaim(conn, age_timestamp, sync_timestamp)
-            self._reclaim_metadata(conn, age_timestamp)
-            conn.commit()
+        marker = ''
+        finished = False
+        while not finished:
+            with self.get() as conn:
+                marker = self._reclaim(conn, age_timestamp, marker)
+                if not marker:
+                    finished = True
+                    self._reclaim_other_stuff(
+                        conn, age_timestamp, sync_timestamp)
+                conn.commit()
 
-    def _reclaim(self, conn, age_timestamp, sync_timestamp):
-        conn.execute('''
-            DELETE FROM %s WHERE deleted = 1 AND %s < ?
-        ''' % (self.db_contains_type, self.db_reclaim_timestamp),
-            (age_timestamp,))
+    def _reclaim_other_stuff(self, conn, age_timestamp, sync_timestamp):
+        """
+        This is only called once at the end of reclaim after _reclaim has been
+        called for each page.
+        """
+        self._reclaim_sync(conn, sync_timestamp)
+        self._reclaim_metadata(conn, age_timestamp)
+
+    def _reclaim(self, conn, age_timestamp, marker):
+        clean_batch_qry = '''
+            DELETE FROM %s WHERE deleted = 1
+            AND name > ? AND %s < ?
+        ''' % (self.db_contains_type, self.db_reclaim_timestamp)
+        curs = conn.execute('''
+            SELECT name FROM %s WHERE deleted = 1
+            AND name > ?
+            ORDER BY NAME LIMIT 1 OFFSET ?
+        ''' % (self.db_contains_type,), (marker, RECLAIM_PAGE_SIZE))
+        row = curs.fetchone()
+        if row:
+            # do a single book-ended DELETE and bounce out
+            end_marker = row[0]
+            conn.execute(clean_batch_qry + ' AND name <= ?', (
+                marker, age_timestamp, end_marker))
+        else:
+            # delete off the end and reset marker to indicate we're done
+            end_marker = ''
+            conn.execute(clean_batch_qry, (marker, age_timestamp))
+        return end_marker
 
     def _reclaim_sync(self, conn, sync_timestamp):
         try:
             conn.execute('''
                 DELETE FROM outgoing_sync WHERE updated_at < ?
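The paging works by peeking at the row name RECLAIM_PAGE_SIZE entries past the current marker and then issuing a DELETE bounded by that name, so each transaction touches a bounded slice of the table and the broker can commit between pages. A self-contained sqlite3 sketch of the same pattern (the table and column names are illustrative, not Swift's real schema):

    import sqlite3

    PAGE = 3  # Swift uses RECLAIM_PAGE_SIZE = 10000

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE object (name TEXT, deleted INT)')
    conn.executemany('INSERT INTO object VALUES (?, 1)',
                     [('obj%03d' % i,) for i in range(10)])

    marker = ''
    while True:
        row = conn.execute(
            'SELECT name FROM object WHERE deleted = 1 AND name > ? '
            'ORDER BY name LIMIT 1 OFFSET ?', (marker, PAGE)).fetchone()
        if row:
            # bounded batch: rows in (marker, end_marker]
            conn.execute('DELETE FROM object WHERE deleted = 1 '
                         'AND name > ? AND name <= ?', (marker, row[0]))
            marker = row[0]
            conn.commit()  # other writers can interleave between pages
        else:
            conn.execute('DELETE FROM object WHERE deleted = 1 '
                         'AND name > ?', (marker,))
            conn.commit()
            break

    assert conn.execute('SELECT COUNT(*) FROM object').fetchone()[0] == 0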
@@ -160,7 +160,7 @@ class MemcacheRing(object):
     def __init__(self, servers, connect_timeout=CONN_TIMEOUT,
                  io_timeout=IO_TIMEOUT, pool_timeout=POOL_TIMEOUT,
                  tries=TRY_COUNT, allow_pickle=False, allow_unpickle=False,
-                 max_conns=2):
+                 max_conns=2, logger=None):
         self._ring = {}
         self._errors = dict(((serv, []) for serv in servers))
         self._error_limited = dict(((serv, 0) for serv in servers))

@@ -178,18 +178,23 @@ class MemcacheRing(object):
         self._pool_timeout = pool_timeout
         self._allow_pickle = allow_pickle
         self._allow_unpickle = allow_unpickle or allow_pickle
+        if logger is None:
+            self.logger = logging.getLogger()
+        else:
+            self.logger = logger
 
     def _exception_occurred(self, server, e, action='talking',
                             sock=None, fp=None, got_connection=True):
         if isinstance(e, Timeout):
-            logging.error("Timeout %(action)s to memcached: %(server)s",
-                          {'action': action, 'server': server})
-        elif isinstance(e, (socket.error, MemcacheConnectionError)):
-            logging.error("Error %(action)s to memcached: %(server)s: %(err)s",
-                          {'action': action, 'server': server, 'err': e})
-        else:
-            logging.exception("Error %(action)s to memcached: %(server)s",
+            self.logger.error("Timeout %(action)s to memcached: %(server)s",
+                              {'action': action, 'server': server})
+        elif isinstance(e, (socket.error, MemcacheConnectionError)):
+            self.logger.error(
+                "Error %(action)s to memcached: %(server)s: %(err)s",
+                {'action': action, 'server': server, 'err': e})
+        else:
+            self.logger.exception("Error %(action)s to memcached: %(server)s",
                               {'action': action, 'server': server})
         try:
             if fp:
                 fp.close()

@@ -213,7 +218,7 @@ class MemcacheRing(object):
                                        if err > now - ERROR_LIMIT_TIME]
             if len(self._errors[server]) > ERROR_LIMIT_COUNT:
                 self._error_limited[server] = now + ERROR_LIMIT_DURATION
-                logging.error('Error limiting server %s', server)
+                self.logger.error('Error limiting server %s', server)
 
     def _get_conns(self, key):
         """
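The new logger keyword defaults to the root logger for backwards compatibility, but lets callers route memcached errors onto their own log route. A hedged usage sketch (the server address is illustrative; no connection is attempted until the ring is actually used):

    import logging

    from swift.common.memcached import MemcacheRing

    logger = logging.getLogger('memcache')
    cache = MemcacheRing(['127.0.0.1:11211'], logger=logger)
    # timeouts and socket errors talking to memcached are now reported
    # via the 'memcache' logger rather than the root logger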
@@ -19,6 +19,7 @@ from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError
 
 from swift.common.memcached import (MemcacheRing, CONN_TIMEOUT, POOL_TIMEOUT,
                                     IO_TIMEOUT, TRY_COUNT)
+from swift.common.utils import get_logger
 
 
 class MemcacheMiddleware(object):

@@ -28,6 +29,7 @@ class MemcacheMiddleware(object):
 
     def __init__(self, app, conf):
         self.app = app
+        self.logger = get_logger(conf, log_route='memcache')
         self.memcache_servers = conf.get('memcache_servers')
         serialization_format = conf.get('memcache_serialization_support')
         try:

@@ -102,7 +104,8 @@ class MemcacheMiddleware(object):
             io_timeout=io_timeout,
             allow_pickle=(serialization_format == 0),
             allow_unpickle=(serialization_format <= 1),
-            max_conns=max_conns)
+            max_conns=max_conns,
+            logger=self.logger)
 
     def __call__(self, env, start_response):
         env['swift.cache'] = self.memcache
@@ -242,6 +242,10 @@ class RateLimitMiddleware(object):
         if not self.memcache_client:
             return None
 
+        if req.environ.get('swift.ratelimit.handled'):
+            return None
+        req.environ['swift.ratelimit.handled'] = True
+
         try:
             account_info = get_account_info(req.environ, self.app,
                                             swift_source='RL')
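Flagging the environ makes the middleware idempotent per request, so a pipeline that carries two ratelimit filters (for example one before s3api and a second after auth, as the sample proxy config now suggests) only counts each request once. The guard in isolation, as a generic WSGI sketch with hypothetical names:

    class OnceOnly(object):
        """WSGI middleware that does its work at most once per request,
        even if it appears twice in the pipeline."""

        def __init__(self, app, flag='x.example.handled'):
            self.app = app
            self.flag = flag

        def __call__(self, environ, start_response):
            if not environ.get(self.flag):
                environ[self.flag] = True
                # ... do the rate-limit accounting exactly once here ...
            return self.app(environ, start_response)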
@@ -26,7 +26,7 @@ from swift.common.middleware.versioned_writes.object_versioning import \
 from swift.common.middleware.s3api.utils import S3Timestamp, sysmeta_header
 from swift.common.middleware.s3api.controllers.base import Controller
 from swift.common.middleware.s3api.s3response import S3NotImplemented, \
-    InvalidRange, NoSuchKey, InvalidArgument, HTTPNoContent, \
+    InvalidRange, NoSuchKey, NoSuchVersion, InvalidArgument, HTTPNoContent, \
     PreconditionFailed
 

@@ -88,7 +88,15 @@ class ObjectController(Controller):
         if version_id not in ('null', None) and \
                 'object_versioning' not in get_swift_info():
             raise S3NotImplemented()
+
         query = {} if version_id is None else {'version-id': version_id}
+        if version_id not in ('null', None):
+            container_info = req.get_container_info(self.app)
+            if not container_info.get(
+                    'sysmeta', {}).get('versions-container', ''):
+                # Versioning has never been enabled
+                raise NoSuchVersion(object_name, version_id)
+
         resp = req.get_response(self.app, query=query)
 
         if req.method == 'HEAD':

@@ -193,17 +201,25 @@ class ObjectController(Controller):
                 'object_versioning' not in get_swift_info():
             raise S3NotImplemented()
 
+        version_id = req.params.get('versionId')
+        if version_id not in ('null', None):
+            container_info = req.get_container_info(self.app)
+            if not container_info.get(
+                    'sysmeta', {}).get('versions-container', ''):
+                # Versioning has never been enabled
+                return HTTPNoContent(headers={'x-amz-version-id': version_id})
+
         try:
             try:
                 query = req.gen_multipart_manifest_delete_query(
-                    self.app, version=req.params.get('versionId'))
+                    self.app, version=version_id)
             except NoSuchKey:
                 query = {}
 
             req.headers['Content-Type'] = None  # Ignore client content-type
 
-            if 'versionId' in req.params:
-                query['version-id'] = req.params['versionId']
+            if version_id is not None:
+                query['version-id'] = version_id
                 query['symlink'] = 'get'
 
             resp = req.get_response(self.app, query=query)
|
@@ -205,7 +205,8 @@ from swift.common.utils import get_logger, register_swift_info, split_path, \
MD5_OF_EMPTY_STRING, close_if_possible, closing_if_possible, \
config_true_value, drain_and_close
from swift.common.constraints import check_account_format
from swift.common.wsgi import WSGIContext, make_subrequest
from swift.common.wsgi import WSGIContext, make_subrequest, \
make_pre_authed_request
from swift.common.request_helpers import get_sys_meta_prefix, \
check_path_header, get_container_update_override_key, \
update_ignore_range_header
@@ -442,7 +443,9 @@ class SymlinkObjectContext(WSGIContext):
content_type='text/plain')

def _recursive_get_head(self, req, target_etag=None,
follow_softlinks=True):
follow_softlinks=True, orig_req=None):
if not orig_req:
orig_req = req
resp = self._app_call(req.environ)

def build_traversal_req(symlink_target):
@@ -457,9 +460,20 @@ class SymlinkObjectContext(WSGIContext):
'/', version, account,
symlink_target.lstrip('/'))
self._last_target_path = target_path
new_req = make_subrequest(
req.environ, path=target_path, method=req.method,
headers=req.headers, swift_source='SYM')

subreq_headers = dict(req.headers)
if self._response_header_value(ALLOW_RESERVED_NAMES):
# this symlink's sysmeta says it can point to reserved names;
# we're inferring that some piece of middleware had previously
# authorized this request, because users can't access reserved
# names directly
subreq_meth = make_pre_authed_request
subreq_headers['X-Backend-Allow-Reserved-Names'] = 'true'
else:
subreq_meth = make_subrequest
new_req = subreq_meth(orig_req.environ, path=target_path,
method=req.method, headers=subreq_headers,
swift_source='SYM')
new_req.headers.pop('X-Backend-Storage-Policy-Index', None)
return new_req

@@ -484,11 +498,8 @@ class SymlinkObjectContext(WSGIContext):
if not config_true_value(
self._response_header_value(SYMLOOP_EXTEND)):
self._loop_count += 1
if config_true_value(
self._response_header_value(ALLOW_RESERVED_NAMES)):
new_req.headers['X-Backend-Allow-Reserved-Names'] = 'true'

return self._recursive_get_head(new_req, target_etag=resp_etag)
return self._recursive_get_head(new_req, target_etag=resp_etag,
orig_req=req)
else:
final_etag = self._response_header_value('etag')
if final_etag and target_etag and target_etag != final_etag:
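The symlink change boils down to choosing a subrequest factory: when the symlink's own sysmeta says it may point at reserved names, the traversal is built as a pre-authed subrequest (which skips authorize callbacks), otherwise it stays a normal subrequest. A hedged sketch of that selection, assuming it runs where both factories are importable; the wrapper function is illustrative:

    from swift.common.wsgi import make_subrequest, make_pre_authed_request

    def build_traversal_subrequest(env, path, method, headers, pre_authed):
        # pre-authed subrequests bypass authorization entirely, so they can
        # follow a symlink into reserved-name containers users can't reach
        factory = make_pre_authed_request if pre_authed else make_subrequest
        return factory(env, path=path, method=method, headers=headers,
                       swift_source='SYM')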
@@ -152,7 +152,7 @@ from cgi import parse_header
from six.moves.urllib.parse import unquote

from swift.common.constraints import MAX_FILE_SIZE, valid_api_version, \
ACCOUNT_LISTING_LIMIT
ACCOUNT_LISTING_LIMIT, CONTAINER_LISTING_LIMIT
from swift.common.http import is_success, is_client_error, HTTP_NOT_FOUND, \
HTTP_CONFLICT
from swift.common.request_helpers import get_sys_meta_prefix, \
@@ -1191,7 +1191,7 @@ class ContainerContext(ObjectVersioningContext):
'hash': item['hash'],
'last_modified': item['last_modified'],
})
limit = constrain_req_limit(req, ACCOUNT_LISTING_LIMIT)
limit = constrain_req_limit(req, CONTAINER_LISTING_LIMIT)
body = build_listing(
null_listing, subdir_listing, broken_listing,
reverse=config_true_value(params.get('reverse', 'no')),
@@ -1256,7 +1256,7 @@ class ContainerContext(ObjectVersioningContext):
'last_modified': item['last_modified'],
})

limit = constrain_req_limit(req, ACCOUNT_LISTING_LIMIT)
limit = constrain_req_limit(req, CONTAINER_LISTING_LIMIT)
body = build_listing(
null_listing, versions_listing,
subdir_listing, broken_listing,
@@ -43,7 +43,6 @@ from email.utils import parsedate
import re
import random
import functools
import inspect
from io import BytesIO

import six
@@ -1563,23 +1562,15 @@ def wsgify(func):
return a Response object into WSGI callables. Also catches any raised
HTTPExceptions and treats them as a returned Response.
"""
argspec = inspect.getargspec(func)
if argspec.args and argspec.args[0] == 'self':
@functools.wraps(func)
def _wsgify_self(self, env, start_response):
try:
return func(self, Request(env))(env, start_response)
except HTTPException as err_resp:
return err_resp(env, start_response)
return _wsgify_self
else:
@functools.wraps(func)
def _wsgify_bare(env, start_response):
try:
return func(Request(env))(env, start_response)
except HTTPException as err_resp:
return err_resp(env, start_response)
return _wsgify_bare
@functools.wraps(func)
def _wsgify(*args):
env, start_response = args[-2:]
new_args = args[:-2] + (Request(env), )
try:
return func(*new_args)(env, start_response)
except HTTPException as err_resp:
return err_resp(env, start_response)
return _wsgify


class StatusMap(object):
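The wsgify refactor collapses the two getargspec-based branches into a single wrapper: WSGI always supplies (environ, start_response) as the trailing positional arguments, so any leading arguments (such as self) can simply be passed through. A minimal sketch of why that works; wsgify_sketch and the toy apps are illustrative, not Swift's actual code:

    import functools

    def wsgify_sketch(func):
        # same shape as the new _wsgify above: peel the WSGI pair off the
        # end and pass anything before it (e.g. self) straight through
        @functools.wraps(func)
        def _wsgify(*args):
            env, start_response = args[-2:]
            new_args = args[:-2] + (env,)  # Swift builds Request(env) here
            return func(*new_args)(env, start_response)
        return _wsgify

    @wsgify_sketch
    def app(req):                      # bare function: args == (env, sr)
        return lambda env, sr: [b'ok']

    class Controller(object):
        @wsgify_sketch
        def handle(self, req):         # method: args == (self, env, sr)
            return lambda env, sr: [b'ok']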
@@ -3152,11 +3152,26 @@ def remove_directory(path):


def audit_location_generator(devices, datadir, suffix='',
mount_check=True, logger=None):
mount_check=True, logger=None,
devices_filter=None, partitions_filter=None,
suffixes_filter=None, hashes_filter=None,
hook_pre_device=None, hook_post_device=None,
hook_pre_partition=None, hook_post_partition=None,
hook_pre_suffix=None, hook_post_suffix=None,
hook_pre_hash=None, hook_post_hash=None):
"""
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory

(devices|partitions|suffixes|hashes)_filter are meant to modify the list of
elements that will be iterated; e.g., they can be used to exclude some
elements based on a custom condition defined by the caller.

hook_pre_(device|partition|suffix|hash) are called before yielding the
element; hook_post_(device|partition|suffix|hash) are called after the
element was yielded. They are meant to do some pre/post processing,
e.g., saving a progress status.

:param devices: parent directory of the devices to be audited
:param datadir: a directory located under self.devices. This should be
one of the DATADIR constants defined in the account,
@@ -3165,11 +3180,31 @@ def audit_location_generator(devices, datadir, suffix='',
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
:param devices_filter: a callable taking (devices, [list of devices]) as
parameters and returning a [list of devices]
:param partitions_filter: a callable taking (datadir_path, [list of parts])
as parameters and returning a [list of parts]
:param suffixes_filter: a callable taking (part_path, [list of suffixes]) as
parameters and returning a [list of suffixes]
:param hashes_filter: a callable taking (suff_path, [list of hashes]) as
parameters and returning a [list of hashes]
:param hook_pre_device: a callable taking device_path as parameter
:param hook_post_device: a callable taking device_path as parameter
:param hook_pre_partition: a callable taking part_path as parameter
:param hook_post_partition: a callable taking part_path as parameter
:param hook_pre_suffix: a callable taking suff_path as parameter
:param hook_post_suffix: a callable taking suff_path as parameter
:param hook_pre_hash: a callable taking hash_path as parameter
:param hook_post_hash: a callable taking hash_path as parameter
"""
device_dir = listdir(devices)
# randomize devices in case of process restart before sweep completed
shuffle(device_dir)
if devices_filter:
device_dir = devices_filter(devices, device_dir)
for device in device_dir:
if hook_pre_device:
hook_pre_device(os.path.join(devices, device))
if mount_check and not ismount(os.path.join(devices, device)):
if logger:
logger.warning(
@@ -3183,24 +3218,36 @@ def audit_location_generator(devices, datadir, suffix='',
logger.warning(_('Skipping %(datadir)s because %(err)s'),
{'datadir': datadir_path, 'err': e})
continue
if partitions_filter:
partitions = partitions_filter(datadir_path, partitions)
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
if hook_pre_partition:
hook_pre_partition(part_path)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
if suffixes_filter:
suffixes = suffixes_filter(part_path, suffixes)
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
if hook_pre_suffix:
hook_pre_suffix(suff_path)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
if hashes_filter:
hashes = hashes_filter(suff_path, hashes)
for hsh in hashes:
hash_path = os.path.join(suff_path, hsh)
if hook_pre_hash:
hook_pre_hash(hash_path)
try:
files = sorted(listdir(hash_path), reverse=True)
except OSError as e:
@@ -3212,6 +3259,14 @@ def audit_location_generator(devices, datadir, suffix='',
continue
path = os.path.join(hash_path, fname)
yield path, device, partition
if hook_post_hash:
hook_post_hash(hash_path)
if hook_post_suffix:
hook_post_suffix(suff_path)
if hook_post_partition:
hook_post_partition(part_path)
if hook_post_device:
hook_post_device(os.path.join(devices, device))


def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
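The new filter and hook arguments let a caller (for instance, a long-running sweep that checkpoints its progress) skip finished work and record progress as each level completes. A hedged usage sketch; the filter and hook names match the signature above, but load_checkpoint, save_checkpoint, and the per-file work are hypothetical helpers:

    from swift.common.utils import audit_location_generator

    def skip_done_partitions(datadir_path, parts):
        # partitions_filter: drop partitions finished in a prior run;
        # load_checkpoint is a hypothetical helper
        return [p for p in parts if p not in load_checkpoint(datadir_path)]

    def record_partition(part_path):
        # hook_post_partition: persist progress after each partition
        save_checkpoint(part_path)  # hypothetical helper

    locations = audit_location_generator(
        '/srv/node', 'objects', '.data', mount_check=True,
        partitions_filter=skip_done_partitions,
        hook_post_partition=record_partition)
    for path, device, partition in locations:
        pass  # per-file work goes here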
@@ -4814,6 +4869,8 @@ class ShardRange(object):
value.
:param epoch: optional epoch timestamp which represents the time at which
sharding was enabled for a container.
:param reported: optional indicator that this shard and its stats have
been reported to the root container.
"""
FOUND = 10
CREATED = 20
@@ -4864,7 +4921,8 @@ class ShardRange(object):

def __init__(self, name, timestamp, lower=MIN, upper=MAX,
object_count=0, bytes_used=0, meta_timestamp=None,
deleted=False, state=None, state_timestamp=None, epoch=None):
deleted=False, state=None, state_timestamp=None, epoch=None,
reported=False):
self.account = self.container = self._timestamp = \
self._meta_timestamp = self._state_timestamp = self._epoch = None
self._lower = ShardRange.MIN
@@ -4883,6 +4941,7 @@ class ShardRange(object):
self.state = self.FOUND if state is None else state
self.state_timestamp = state_timestamp
self.epoch = epoch
self.reported = reported

@classmethod
def _encode(cls, value):
@@ -5063,8 +5122,14 @@ class ShardRange(object):
cast to an int, or if meta_timestamp is neither None nor can be
cast to a :class:`~swift.common.utils.Timestamp`.
"""
self.object_count = int(object_count)
self.bytes_used = int(bytes_used)
if self.object_count != int(object_count):
self.object_count = int(object_count)
self.reported = False

if self.bytes_used != int(bytes_used):
self.bytes_used = int(bytes_used)
self.reported = False

if meta_timestamp is None:
self.meta_timestamp = Timestamp.now()
else:
@@ -5145,6 +5210,14 @@ class ShardRange(object):
def epoch(self, epoch):
self._epoch = self._to_timestamp(epoch)

@property
def reported(self):
return self._reported

@reported.setter
def reported(self, value):
self._reported = bool(value)

def update_state(self, state, state_timestamp=None):
"""
Set state to the given value and optionally update the state_timestamp
@@ -5161,6 +5234,7 @@ class ShardRange(object):
self.state = state
if state_timestamp is not None:
self.state_timestamp = state_timestamp
self.reported = False
return True

@property
@@ -5283,6 +5357,7 @@ class ShardRange(object):
yield 'state', self.state
yield 'state_timestamp', self.state_timestamp.internal
yield 'epoch', self.epoch.internal if self.epoch is not None else None
yield 'reported', 1 if self.reported else 0

def copy(self, timestamp=None, **kwargs):
"""
@@ -5314,7 +5389,8 @@ class ShardRange(object):
params['name'], params['timestamp'], params['lower'],
params['upper'], params['object_count'], params['bytes_used'],
params['meta_timestamp'], params['deleted'], params['state'],
params['state_timestamp'], params['epoch'])
params['state_timestamp'], params['epoch'],
params.get('reported', 0))


def find_shard_range(item, ranges):
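Taken together, these hunks give ShardRange a "reported" latch: the flag is set once the root container has accepted the shard's stats and cleared by anything that changes those stats or the state, so the sharder can tell at a glance whether an update is still owed. A hedged lifecycle sketch, assuming (as in upstream Swift) that the stats hunk above lives in ShardRange.update_meta:

    from swift.common.utils import ShardRange, Timestamp

    sr = ShardRange('.shards_a/c', Timestamp.now())
    assert sr.reported is False            # new ranges start unreported

    sr.reported = True                     # pretend the root accepted our stats
    sr.update_meta(sr.object_count + 1, sr.bytes_used)
    assert sr.reported is False            # changed stats reset the latch

    sr.reported = True
    sr.update_state(ShardRange.ACTIVE)     # a state change resets it too
    assert sr.reported is False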
@@ -34,9 +34,7 @@ from swift.common.utils import Timestamp, encode_timestamps, \
get_db_files, parse_db_filename, make_db_file_path, split_path, \
RESERVED_BYTE
from swift.common.db import DatabaseBroker, utf8encode, BROKER_TIMEOUT, \
zero_like, DatabaseAlreadyExists

SQLITE_ARG_LIMIT = 999
zero_like, DatabaseAlreadyExists, SQLITE_ARG_LIMIT

DATADIR = 'containers'

@@ -62,7 +60,7 @@ SHARD_UPDATE_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
# tuples and vice-versa
SHARD_RANGE_KEYS = ('name', 'timestamp', 'lower', 'upper', 'object_count',
'bytes_used', 'meta_timestamp', 'deleted', 'state',
'state_timestamp', 'epoch')
'state_timestamp', 'epoch', 'reported')

POLICY_STAT_TABLE_CREATE = '''
CREATE TABLE policy_stat (
@@ -269,6 +267,7 @@ def merge_shards(shard_data, existing):
if existing['timestamp'] < shard_data['timestamp']:
# note that currently we do not roll forward any meta or state from
# an item created at an older time; newer created time trumps
shard_data['reported'] = 0  # reset the latch
return True
elif existing['timestamp'] > shard_data['timestamp']:
return False
@@ -285,6 +284,18 @@ def merge_shards(shard_data, existing):
else:
new_content = True

# We can latch the reported flag
if existing['reported'] and \
existing['object_count'] == shard_data['object_count'] and \
existing['bytes_used'] == shard_data['bytes_used'] and \
existing['state'] == shard_data['state'] and \
existing['epoch'] == shard_data['epoch']:
shard_data['reported'] = 1
else:
shard_data.setdefault('reported', 0)
if shard_data['reported'] and not existing['reported']:
new_content = True

if (existing['state_timestamp'] == shard_data['state_timestamp']
and shard_data['state'] > existing['state']):
new_content = True
@@ -597,7 +608,8 @@ class ContainerBroker(DatabaseBroker):
deleted INTEGER DEFAULT 0,
state INTEGER,
state_timestamp TEXT,
epoch TEXT
epoch TEXT,
reported INTEGER DEFAULT 0
);
""" % SHARD_RANGE_TABLE)
@@ -1430,10 +1442,13 @@ class ContainerBroker(DatabaseBroker):
# sqlite3.OperationalError: cannot start a transaction
# within a transaction
conn.rollback()
if ('no such table: %s' % SHARD_RANGE_TABLE) not in str(err):
raise
self.create_shard_range_table(conn)
return _really_merge_items(conn)
if 'no such column: reported' in str(err):
self._migrate_add_shard_range_reported(conn)
return _really_merge_items(conn)
if ('no such table: %s' % SHARD_RANGE_TABLE) in str(err):
self.create_shard_range_table(conn)
return _really_merge_items(conn)
raise

def get_reconciler_sync(self):
with self.get() as conn:
@@ -1581,9 +1596,20 @@ class ContainerBroker(DatabaseBroker):
CONTAINER_STAT_VIEW_SCRIPT +
'COMMIT;')

def _reclaim(self, conn, age_timestamp, sync_timestamp):
super(ContainerBroker, self)._reclaim(conn, age_timestamp,
sync_timestamp)
def _migrate_add_shard_range_reported(self, conn):
"""
Add the reported column to the 'shard_range' table.
"""
conn.executescript('''
BEGIN;
ALTER TABLE %s
ADD COLUMN reported INTEGER DEFAULT 0;
COMMIT;
''' % SHARD_RANGE_TABLE)

def _reclaim_other_stuff(self, conn, age_timestamp, sync_timestamp):
super(ContainerBroker, self)._reclaim_other_stuff(
conn, age_timestamp, sync_timestamp)
# populate instance cache, but use existing conn to avoid deadlock
# when it has a pending update
self._populate_instance_cache(conn=conn)
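The broker handles the schema change lazily: rather than migrating every database up front, it runs the old query, and only when SQLite complains about the missing column does it ALTER TABLE and retry. A compact sketch of that retry-after-migrate pattern; the table and column names are stand-ins modeled on the code above:

    import sqlite3

    def query_with_lazy_migration(conn):
        try:
            return conn.execute('SELECT reported FROM shard_range').fetchall()
        except sqlite3.OperationalError as err:
            if 'no such column: reported' not in str(err):
                raise
            conn.executescript('''
                BEGIN;
                ALTER TABLE shard_range
                ADD COLUMN reported INTEGER DEFAULT 0;
                COMMIT;
            ''')
            return conn.execute('SELECT reported FROM shard_range').fetchall()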
@@ -1630,7 +1656,7 @@ class ContainerBroker(DatabaseBroker):
elif states is not None:
included_states.add(states)

def do_query(conn):
def do_query(conn, use_reported_column=True):
condition = ''
conditions = []
params = []
@@ -1648,21 +1674,27 @@ class ContainerBroker(DatabaseBroker):
params.append(self.path)
if conditions:
condition = ' WHERE ' + ' AND '.join(conditions)
if use_reported_column:
columns = SHARD_RANGE_KEYS
else:
columns = SHARD_RANGE_KEYS[:-1] + ('0 as reported', )
sql = '''
SELECT %s
FROM %s%s;
''' % (', '.join(SHARD_RANGE_KEYS), SHARD_RANGE_TABLE, condition)
''' % (', '.join(columns), SHARD_RANGE_TABLE, condition)
data = conn.execute(sql, params)
data.row_factory = None
return [row for row in data]

try:
with self.maybe_get(connection) as conn:
with self.maybe_get(connection) as conn:
try:
return do_query(conn)
except sqlite3.OperationalError as err:
if ('no such table: %s' % SHARD_RANGE_TABLE) not in str(err):
except sqlite3.OperationalError as err:
if ('no such table: %s' % SHARD_RANGE_TABLE) in str(err):
return []
if 'no such column: reported' in str(err):
return do_query(conn, use_reported_column=False)
raise
return []

@classmethod
def resolve_shard_range_states(cls, states):
@@ -155,6 +155,8 @@ class ContainerController(BaseStorageServer):
conf['auto_create_account_prefix']
else:
self.auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
self.shards_account_prefix = (
self.auto_create_account_prefix + 'shards_')
if config_true_value(conf.get('allow_versions', 'f')):
self.save_headers.append('x-versions-location')
if 'allow_versions' in conf:
@@ -375,14 +377,12 @@ class ContainerController(BaseStorageServer):
# auto create accounts)
obj_policy_index = self.get_and_validate_policy_index(req) or 0
broker = self._get_container_broker(drive, part, account, container)
if account.startswith(self.auto_create_account_prefix) and obj and \
not os.path.exists(broker.db_file):
try:
broker.initialize(req_timestamp.internal, obj_policy_index)
except DatabaseAlreadyExists:
pass
if not os.path.exists(broker.db_file):
if obj:
self._maybe_autocreate(broker, req_timestamp, account,
obj_policy_index, req)
elif not os.path.exists(broker.db_file):
return HTTPNotFound()

if obj:  # delete object
# redirect if a shard range exists for the object name
redirect = self._redirect_to_shard(req, broker, obj)
@@ -449,11 +449,25 @@ class ContainerController(BaseStorageServer):
broker.update_status_changed_at(timestamp)
return recreated

def _should_autocreate(self, account, req):
auto_create_header = req.headers.get('X-Backend-Auto-Create')
if auto_create_header:
# If the caller included an explicit X-Backend-Auto-Create header,
# assume they know the behavior they want
return config_true_value(auto_create_header)
if account.startswith(self.shards_account_prefix):
# we have to special-case this subset of the
# auto_create_account_prefix because we don't want the updater
# accidentally auto-creating shards; only the sharder creates
# shards and it will explicitly tell the server to do so
return False
return account.startswith(self.auto_create_account_prefix)

def _maybe_autocreate(self, broker, req_timestamp, account,
policy_index):
policy_index, req):
created = False
if account.startswith(self.auto_create_account_prefix) and \
not os.path.exists(broker.db_file):
should_autocreate = self._should_autocreate(account, req)
if should_autocreate and not os.path.exists(broker.db_file):
if policy_index is None:
raise HTTPBadRequest(
'X-Backend-Storage-Policy-Index header is required')
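The decision in _should_autocreate has three tiers: an explicit X-Backend-Auto-Create header always wins, shard accounts are never implicitly auto-created, and everything else falls back to the auto-create prefix check. A condensed sketch; the default prefixes ('.' and '.shards_') are assumptions matching Swift's usual defaults, not spelled out in this hunk:

    from swift.common.utils import config_true_value

    def should_autocreate(account, headers,
                          auto_prefix='.', shards_prefix='.shards_'):
        explicit = headers.get('X-Backend-Auto-Create')
        if explicit:
            # an explicit header always wins, in either direction
            return config_true_value(explicit)
        if account.startswith(shards_prefix):
            # never auto-create shard DBs implicitly; only the sharder may,
            # and it sends X-Backend-Auto-Create: True
            return False
        return account.startswith(auto_prefix)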
@@ -506,8 +520,8 @@ class ContainerController(BaseStorageServer):
# obj put expects the policy_index header, default is for
# legacy support during upgrade.
obj_policy_index = requested_policy_index or 0
self._maybe_autocreate(broker, req_timestamp, account,
obj_policy_index)
self._maybe_autocreate(
broker, req_timestamp, account, obj_policy_index, req)
# redirect if a shard exists for this object name
response = self._redirect_to_shard(req, broker, obj)
if response:
@@ -531,8 +545,8 @@ class ContainerController(BaseStorageServer):
for sr in json.loads(req.body)]
except (ValueError, KeyError, TypeError) as err:
return HTTPBadRequest('Invalid body: %r' % err)
created = self._maybe_autocreate(broker, req_timestamp, account,
requested_policy_index)
created = self._maybe_autocreate(
broker, req_timestamp, account, requested_policy_index, req)
self._update_metadata(req, broker, req_timestamp, 'PUT')
if shard_ranges:
# TODO: consider writing the shard ranges into the pending
@@ -805,7 +819,7 @@ class ContainerController(BaseStorageServer):
requested_policy_index = self.get_and_validate_policy_index(req)
broker = self._get_container_broker(drive, part, account, container)
self._maybe_autocreate(broker, req_timestamp, account,
requested_policy_index)
requested_policy_index, req)
try:
objs = json.load(req.environ['wsgi.input'])
except ValueError as err:
@@ -618,7 +618,8 @@ class ContainerSharder(ContainerReplicator):

def _send_shard_ranges(self, account, container, shard_ranges,
headers=None):
body = json.dumps([dict(sr) for sr in shard_ranges]).encode('ascii')
body = json.dumps([dict(sr, reported=0)
for sr in shard_ranges]).encode('ascii')
part, nodes = self.ring.get_nodes(account, container)
headers = headers or {}
headers.update({'X-Backend-Record-Type': RECORD_TYPE_SHARD,
@@ -1148,7 +1149,8 @@ class ContainerSharder(ContainerReplicator):
'X-Backend-Storage-Policy-Index': broker.storage_policy_index,
'X-Container-Sysmeta-Shard-Quoted-Root': quote(
broker.root_path),
'X-Container-Sysmeta-Sharding': True}
'X-Container-Sysmeta-Sharding': 'True',
'X-Backend-Auto-Create': 'True'}
# NB: we *used* to send along
# 'X-Container-Sysmeta-Shard-Root': broker.root_path
# but that isn't safe for container names with nulls or newlines
@@ -1468,7 +1470,7 @@ class ContainerSharder(ContainerReplicator):

def _update_root_container(self, broker):
own_shard_range = broker.get_own_shard_range(no_default=True)
if not own_shard_range:
if not own_shard_range or own_shard_range.reported:
return

# persist the reported shard metadata
@@ -1478,9 +1480,12 @@ class ContainerSharder(ContainerReplicator):
include_own=True,
include_deleted=True)
# send everything
self._send_shard_ranges(
broker.root_account, broker.root_container,
shard_ranges)
if self._send_shard_ranges(
broker.root_account, broker.root_container, shard_ranges):
# on success, mark ourselves as reported so we don't keep
# hammering the root
own_shard_range.reported = True
broker.merge_shard_ranges(own_shard_range)

def _process_broker(self, broker, node, part):
broker.get_info()  # make sure account/container are populated
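The sharder change closes the loop on the reported latch: each cycle it skips the root update entirely when its own shard range is already marked reported, and only re-latches after a successful send. A hedged paraphrase of that control flow; update_root_once and the send callable are stand-ins for the methods above:

    def update_root_once(broker, send):
        """send(account, container, ranges) -> bool; a stand-in for
        _send_shard_ranges above."""
        own = broker.get_own_shard_range(no_default=True)
        if not own or own.reported:
            return  # nothing new since the root last accepted our stats
        ranges = broker.get_shard_ranges(include_own=True,
                                         include_deleted=True)
        if send(broker.root_account, broker.root_container, ranges):
            own.reported = True        # latch until stats/state change again
            broker.merge_shard_ranges(own)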
@@ -14,12 +14,13 @@
# limitations under the License.

import six.moves.cPickle as pickle
import errno
import os
import signal
import sys
import time
from swift import gettext_ as _
from random import random
from random import random, shuffle

from eventlet import spawn, Timeout

@@ -230,7 +231,9 @@ class ObjectUpdater(Daemon):
'to a valid policy (%(error)s)') % {
'directory': asyncdir, 'error': e})
continue
for prefix in self._listdir(async_pending):
prefix_dirs = self._listdir(async_pending)
shuffle(prefix_dirs)
for prefix in prefix_dirs:
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
@@ -271,7 +274,11 @@ class ObjectUpdater(Daemon):
if obj_hash == last_obj_hash:
self.stats.unlinks += 1
self.logger.increment('unlinks')
os.unlink(update_path)
try:
os.unlink(update_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
last_obj_hash = obj_hash
yield {'device': device, 'policy': policy,
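Two small robustness fixes here: the updater shuffles the async-pending prefix directories so a daemon that keeps restarting does not always re-walk the same prefixes first, and it tolerates ENOENT on unlink because a concurrent worker may already have removed the stale update. The tolerant-unlink idiom in isolation:

    import errno
    import os

    def unlink_if_present(path):
        # removing a file another worker already unlinked is fine;
        # any other OSError is still an error
        try:
            os.unlink(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise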
@@ -322,12 +322,16 @@ def _load_encryption(proxy_conf_file, swift_conf_file, **kwargs):
pipeline = pipeline.replace(
"proxy-logging proxy-server",
"keymaster encryption proxy-logging proxy-server")
pipeline = pipeline.replace(
"cache listing_formats",
"cache etag-quoter listing_formats")
conf.set(section, 'pipeline', pipeline)
root_secret = base64.b64encode(os.urandom(32))
if not six.PY2:
root_secret = root_secret.decode('ascii')
conf.set('filter:keymaster', 'encryption_root_secret', root_secret)
conf.set('filter:versioned_writes', 'allow_object_versioning', 'true')
conf.set('filter:etag-quoter', 'enable_by_default', 'true')
except NoSectionError as err:
msg = 'Error problem with proxy conf file %s: %s' % \
(proxy_conf_file, err)
@@ -512,8 +516,6 @@ def _load_losf_as_default_policy(proxy_conf_file, swift_conf_file, **kwargs):
conf_loaders = {
'encryption': _load_encryption,
'ec': _load_ec_as_default_policy,
'domain_remap_staticweb': _load_domain_remap_staticweb,
's3api': _load_s3api,
'losf': _load_losf_as_default_policy,
}

@@ -552,6 +554,11 @@ def in_process_setup(the_object_server=object_server):
swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
_info('prepared swift.conf: %s' % swift_conf)

# load s3api and staticweb configs
proxy_conf, swift_conf = _load_s3api(proxy_conf, swift_conf)
proxy_conf, swift_conf = _load_domain_remap_staticweb(proxy_conf,
swift_conf)

# Call the associated method for the value of
# 'SWIFT_TEST_IN_PROCESS_CONF_LOADER', if one exists
conf_loader_label = os.environ.get(
@@ -621,6 +628,7 @@ def in_process_setup(the_object_server=object_server):
# Below are values used by the functional test framework, as well as
# by the various in-process swift servers
'auth_uri': 'http://127.0.0.1:%d/auth/v1.0/' % prolis.getsockname()[1],
's3_storage_url': 'http://%s:%d/' % prolis.getsockname(),
# Primary functional test account (needs admin access to the
# account)
'account': 'test',
@@ -902,6 +910,8 @@ def setup_package():
443 if parsed.scheme == 'https' else 80),
'auth_prefix': parsed.path,
})
config.setdefault('s3_storage_url',
urlunsplit(parsed[:2] + ('', None, None)))
elif 'auth_host' in config:
scheme = 'http'
if config_true_value(config.get('auth_ssl', 'no')):
@@ -914,6 +924,8 @@ def setup_package():
auth_prefix += 'v1.0'
config['auth_uri'] = swift_test_auth = urlunsplit(
(scheme, netloc, auth_prefix, None, None))
config.setdefault('s3_storage_url', urlunsplit(
(scheme, netloc, '', None, None)))
# else, neither auth_uri nor auth_host; swift_test_auth will be unset
# and we'll skip everything later
@@ -37,7 +37,11 @@ class S3ApiBase(unittest.TestCase):
if 's3api' not in tf.cluster_info:
raise tf.SkipTest('s3api middleware is not enabled')
try:
self.conn = Connection()
self.conn = Connection(
tf.config['s3_access_key'], tf.config['s3_secret_key'],
user_id='%s:%s' % (tf.config['account'],
tf.config['username']))

self.conn.reset()
except Exception:
message = '%s got an error during initialize process.\n\n%s' % \
@@ -67,7 +71,8 @@ class S3ApiBaseBoto3(S3ApiBase):
if 's3api' not in tf.cluster_info:
raise tf.SkipTest('s3api middleware is not enabled')
try:
self.conn = get_boto3_conn()
self.conn = get_boto3_conn(
tf.config['s3_access_key'], tf.config['s3_secret_key'])
self.endpoint_url = self.conn._endpoint.host
self.access_key = self.conn._request_signer._credentials.access_key
self.region = self.conn._client_config.region_name
@@ -15,6 +15,7 @@

import logging
import os
from six.moves.urllib.parse import urlparse
import test.functional as tf
import boto3
from botocore.exceptions import ClientError
@@ -46,9 +47,9 @@ class Connection(object):
"""
Connection class used for S3 functional testing.
"""
def __init__(self, aws_access_key='test:tester',
aws_secret_key='testing',
user_id='test:tester'):
def __init__(self, aws_access_key,
aws_secret_key,
user_id=None):
"""
Initialize method.

@@ -64,15 +65,16 @@ class Connection(object):
"""
self.aws_access_key = aws_access_key
self.aws_secret_key = aws_secret_key
self.user_id = user_id
# NOTE: auth_host and auth_port can be different from storage location
self.host = tf.config['auth_host']
self.port = int(tf.config['auth_port'])
self.user_id = user_id or aws_access_key
parsed = urlparse(tf.config['s3_storage_url'])
self.host = parsed.hostname
self.port = parsed.port
self.conn = \
S3Connection(aws_access_key, aws_secret_key, is_secure=False,
S3Connection(aws_access_key, aws_secret_key,
is_secure=(parsed.scheme == 'https'),
host=self.host, port=self.port,
calling_format=OrdinaryCallingFormat())
self.conn.auth_region_name = 'us-east-1'
self.conn.auth_region_name = tf.config.get('s3_region', 'us-east-1')

def reset(self):
"""
@@ -140,22 +142,26 @@ class Connection(object):
url = self.conn.generate_url(expires_in, method, bucket, obj)
if os.environ.get('S3_USE_SIGV4') == "True":
# V4 signatures are known-broken in boto, but we can work around it
if url.startswith('https://'):
if url.startswith('https://') and not tf.config[
's3_storage_url'].startswith('https://'):
url = 'http://' + url[8:]
return url, {'Host': '%(host)s:%(port)d:%(port)d' % {
'host': self.host, 'port': self.port}}
if self.port is None:
return url, {}
else:
return url, {'Host': '%(host)s:%(port)d:%(port)d' % {
'host': self.host, 'port': self.port}}
return url, {}


def get_boto3_conn(aws_access_key='test:tester', aws_secret_key='testing'):
host = tf.config['auth_host']
port = int(tf.config['auth_port'])
def get_boto3_conn(aws_access_key, aws_secret_key):
endpoint_url = tf.config['s3_storage_url']
config = boto3.session.Config(s3={'addressing_style': 'path'})
return boto3.client(
's3', aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
config=config, region_name='us-east-1', use_ssl=False,
endpoint_url='http://{}:{}'.format(host, port))
config=config, region_name=tf.config.get('s3_region', 'us-east-1'),
use_ssl=endpoint_url.startswith('https:'),
endpoint_url=endpoint_url)


def tear_down_s3(conn):
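The net effect of these test-client changes is that S3 functional tests are driven by explicit config keys (s3_storage_url, s3_region, s3_access_key, s3_secret_key) instead of hard-coded 'test:tester'/'testing' credentials and the auth host. A hedged usage sketch; the module path is assumed from the file layout, and the config keys are the ones read by the code above:

    import test.functional as tf
    from test.functional.s3api.s3_test_client import Connection, get_boto3_conn

    # assumes tf.config was populated from a test.conf defining the s3_* keys
    conn = Connection(tf.config['s3_access_key'], tf.config['s3_secret_key'],
                      user_id='%s:%s' % (tf.config['account'],
                                         tf.config['username']))
    client = get_boto3_conn(tf.config['s3_access_key'],
                            tf.config['s3_secret_key'])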
@@ -93,7 +93,7 @@ class TestS3Acl(S3ApiBase):

def test_put_bucket_acl_error(self):
req_headers = {'x-amz-acl': 'public-read'}
aws_error_conn = Connection(aws_secret_key='invalid')
aws_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
aws_error_conn.make_request('PUT', self.bucket,
headers=req_headers, query='acl')
@@ -110,7 +110,7 @@ class TestS3Acl(S3ApiBase):
self.assertEqual(get_error_code(body), 'AccessDenied')

def test_get_bucket_acl_error(self):
aws_error_conn = Connection(aws_secret_key='invalid')
aws_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
aws_error_conn.make_request('GET', self.bucket, query='acl')
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -126,7 +126,7 @@ class TestS3Acl(S3ApiBase):
def test_get_object_acl_error(self):
self.conn.make_request('PUT', self.bucket, self.obj)

aws_error_conn = Connection(aws_secret_key='invalid')
aws_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
aws_error_conn.make_request('GET', self.bucket, self.obj,
query='acl')
@@ -42,11 +42,15 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
self.assertIn('ETag', obj)
self.assertIn('Size', obj)
self.assertEqual(obj['StorageClass'], 'STANDARD')
if expect_owner:
if not expect_owner:
self.assertNotIn('Owner', obj)
elif tf.cluster_info['s3api'].get('s3_acl'):
self.assertEqual(obj['Owner']['ID'], self.access_key)
self.assertEqual(obj['Owner']['DisplayName'], self.access_key)
else:
self.assertNotIn('Owner', obj)
self.assertIn('Owner', obj)
self.assertIn('ID', obj['Owner'])
self.assertIn('DisplayName', obj['Owner'])

def test_bucket(self):
bucket = 'bucket'
@@ -128,7 +132,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
self.assertEqual(
ctx.exception.response['Error']['Code'], 'InvalidBucketName')

auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
auth_error_conn = get_boto3_conn(tf.config['s3_access_key'], 'invalid')
with self.assertRaises(botocore.exceptions.ClientError) as ctx:
auth_error_conn.create_bucket(Bucket='bucket')
self.assertEqual(
@@ -201,7 +205,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
self.assertEqual(
ctx.exception.response['Error']['Code'], 'InvalidBucketName')

auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
auth_error_conn = get_boto3_conn(tf.config['s3_access_key'], 'invalid')
with self.assertRaises(botocore.exceptions.ClientError) as ctx:
auth_error_conn.list_objects(Bucket='bucket')
self.assertEqual(
@@ -388,7 +392,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
ctx.exception.response[
'ResponseMetadata']['HTTPHeaders']['content-length'], '0')

auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
auth_error_conn = get_boto3_conn(tf.config['s3_access_key'], 'invalid')
with self.assertRaises(botocore.exceptions.ClientError) as ctx:
auth_error_conn.head_bucket(Bucket='bucket')
self.assertEqual(
@@ -419,7 +423,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
self.assertEqual(
ctx.exception.response['Error']['Code'], 'InvalidBucketName')

auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
auth_error_conn = get_boto3_conn(tf.config['s3_access_key'], 'invalid')
with self.assertRaises(botocore.exceptions.ClientError) as ctx:
auth_error_conn.delete_bucket(Bucket='bucket')
self.assertEqual(
@@ -134,7 +134,7 @@ class TestS3ApiMultiDelete(S3ApiBase):
content_md5 = calculate_md5(xml)
query = 'delete'

auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('POST', bucket, body=xml,
headers={
@@ -304,9 +304,8 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.assertTrue(lines[0].startswith(b'<?xml'), body)
self.assertTrue(lines[0].endswith(b'?>'), body)
elem = fromstring(body, 'CompleteMultipartUploadResult')
# TODO: use tf.config value
self.assertEqual(
'http://%s:%s/bucket/obj1' % (self.conn.host, self.conn.port),
'%s/bucket/obj1' % tf.config['s3_storage_url'].rstrip('/'),
elem.find('Location').text)
self.assertEqual(elem.find('Bucket').text, bucket)
self.assertEqual(elem.find('Key').text, key)
@@ -428,7 +427,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.conn.make_request('PUT', bucket)
query = 'uploads'

auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('POST', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -442,7 +441,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.conn.make_request('PUT', bucket)
query = 'uploads'

auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('GET', bucket, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -462,7 +461,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
upload_id = elem.find('UploadId').text

query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -500,7 +499,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
upload_id = elem.find('UploadId').text

query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', bucket, key,
headers={
@@ -541,7 +540,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
upload_id = elem.find('UploadId').text

query = 'uploadId=%s' % upload_id
auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')

status, headers, body = \
auth_error_conn.make_request('GET', bucket, key, query=query)
@@ -568,7 +567,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
self._upload_part(bucket, key, upload_id)

query = 'uploadId=%s' % upload_id
auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('DELETE', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -612,7 +611,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.assertEqual(get_error_code(body), 'EntityTooSmall')

# invalid credentials
auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('POST', bucket, keys[0], body=xml,
query=query)
@@ -881,6 +880,8 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.assertEqual(headers['content-length'], '0')

def test_object_multi_upload_part_copy_version(self):
if 'object_versioning' not in tf.cluster_info:
self.skipTest('Object Versioning not enabled')
bucket = 'bucket'
keys = ['obj1']
uploads = []
@@ -147,7 +147,7 @@ class TestS3ApiObject(S3ApiBase):
self.assertCommonResponseHeaders(headers)

def test_put_object_error(self):
auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', self.bucket, 'object')
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -166,7 +166,7 @@ class TestS3ApiObject(S3ApiBase):
dst_obj = 'dst_object'

headers = {'x-amz-copy-source': '/%s/%s' % (self.bucket, obj)}
auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', dst_bucket, dst_obj, headers)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -197,7 +197,7 @@ class TestS3ApiObject(S3ApiBase):
obj = 'object'
self.conn.make_request('PUT', self.bucket, obj)

auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('GET', self.bucket, obj)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -216,7 +216,7 @@ class TestS3ApiObject(S3ApiBase):
obj = 'object'
self.conn.make_request('PUT', self.bucket, obj)

auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('HEAD', self.bucket, obj)
self.assertEqual(status, 403)
@@ -239,7 +239,7 @@ class TestS3ApiObject(S3ApiBase):
obj = 'object'
self.conn.make_request('PUT', self.bucket, obj)

auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('DELETE', self.bucket, obj)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -69,7 +69,7 @@ class TestS3ApiService(S3ApiBase):
self.assertTrue(b.find('CreationDate') is not None)

def test_service_error_signature_not_match(self):
auth_error_conn = Connection(aws_secret_key='invalid')
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = auth_error_conn.make_request('GET')
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
self.assertEqual(headers['content-type'], 'application/xml')
@@ -1726,7 +1726,7 @@ class TestObject(unittest.TestCase):
if 'etag_quoter' not in tf.cluster_info:
raise SkipTest("etag-quoter middleware is not enabled")

def do_head(expect_quoted=False):
def do_head(expect_quoted=None):
def head(url, token, parsed, conn):
conn.request('HEAD', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
@@ -1736,6 +1736,11 @@ class TestObject(unittest.TestCase):
resp = retry(head)
resp.read()
self.assertEqual(resp.status, 200)

if expect_quoted is None:
expect_quoted = tf.cluster_info.get('etag_quoter', {}).get(
'enable_by_default', False)

expected_etag = hashlib.md5(b'test').hexdigest()
if expect_quoted:
expected_etag = '"%s"' % expected_etag
@@ -1771,7 +1776,7 @@ class TestObject(unittest.TestCase):
post_container('')
do_head(expect_quoted=True)
post_container('f')
do_head()
do_head(expect_quoted=False)
finally:
# Don't leave a dirty account
post_account('')
@@ -26,6 +26,7 @@ from six.moves.urllib.parse import quote, unquote

import test.functional as tf

from swift.common.swob import normalize_etag
from swift.common.utils import MD5_OF_EMPTY_STRING, config_true_value
from swift.common.middleware.versioned_writes.object_versioning import \
DELETE_MARKER_CONTENT_TYPE
@@ -331,6 +332,80 @@ class TestObjectVersioning(TestObjectVersioningBase):
# listing, though, we'll only ever have the two entries.
self.assertTotalVersions(container, 2)

def test_get_if_match(self):
body = b'data'
oname = Utils.create_name()
obj = self.env.unversioned_container.file(oname)
resp = obj.write(body, return_resp=True)
etag = resp.getheader('etag')
self.assertEqual(md5(body).hexdigest(), normalize_etag(etag))

# un-versioned object is cool with if-match
self.assertEqual(body, obj.read(hdrs={'if-match': etag}))
with self.assertRaises(ResponseError) as cm:
obj.read(hdrs={'if-match': 'not-the-etag'})
self.assertEqual(412, cm.exception.status)

v_obj = self.env.container.file(oname)
resp = v_obj.write(body, return_resp=True)
self.assertEqual(resp.getheader('etag'), etag)

# versioned object is cool with if-match, too
self.assertEqual(body, v_obj.read(hdrs={
'if-match': normalize_etag(etag)}))
# works quoted, too
self.assertEqual(body, v_obj.read(hdrs={
'if-match': '"%s"' % normalize_etag(etag)}))
with self.assertRaises(ResponseError) as cm:
v_obj.read(hdrs={'if-match': 'not-the-etag'})
self.assertEqual(412, cm.exception.status)

def test_container_acls(self):
if tf.skip3:
raise SkipTest('Username3 not set')

obj = self.env.container.file(Utils.create_name())
resp = obj.write(b"data", return_resp=True)
version_id = resp.getheader('x-object-version-id')
self.assertIsNotNone(version_id)

with self.assertRaises(ResponseError) as cm:
obj.read(hdrs={'X-Auth-Token': self.env.conn3.storage_token})
self.assertEqual(403, cm.exception.status)

# Container ACLs work more or less like they always have
self.env.container.update_metadata(
hdrs={'X-Container-Read': self.env.conn3.user_acl})
self.assertEqual(b"data", obj.read(hdrs={
'X-Auth-Token': self.env.conn3.storage_token}))

# But the version-specific GET still requires a swift owner
with self.assertRaises(ResponseError) as cm:
obj.read(hdrs={'X-Auth-Token': self.env.conn3.storage_token},
parms={'version-id': version_id})
self.assertEqual(403, cm.exception.status)

# If it's pointing to a symlink that points elsewhere, that still needs
# to be authed
tgt_name = Utils.create_name()
self.env.unversioned_container.file(tgt_name).write(b'link')
sym_tgt_header = quote(unquote('%s/%s' % (
self.env.unversioned_container.name, tgt_name)))
obj.write(hdrs={'X-Symlink-Target': sym_tgt_header})

# So, user1's good...
self.assertEqual(b'link', obj.read())
# ...but user3 can't
with self.assertRaises(ResponseError) as cm:
obj.read(hdrs={'X-Auth-Token': self.env.conn3.storage_token})
self.assertEqual(403, cm.exception.status)

# unless we add the acl to the unversioned_container
self.env.unversioned_container.update_metadata(
hdrs={'X-Container-Read': self.env.conn3.user_acl})
self.assertEqual(b'link', obj.read(
hdrs={'X-Auth-Token': self.env.conn3.storage_token}))

def _test_overwriting_setup(self, obj_name=None):
# sanity
container = self.env.container
@@ -919,13 +994,13 @@ class TestObjectVersioning(TestObjectVersioningBase):
'Content-Type': 'text/jibberish32'
}, return_resp=True)
v1_version_id = resp.getheader('x-object-version-id')
v1_etag = resp.getheader('etag')
v1_etag = normalize_etag(resp.getheader('etag'))

resp = obj.write(b'version2', hdrs={
'Content-Type': 'text/jibberish33'
}, return_resp=True)
v2_version_id = resp.getheader('x-object-version-id')
v2_etag = resp.getheader('etag')
v2_etag = normalize_etag(resp.getheader('etag'))

# sanity
self.assertEqual(b'version2', obj.read())
@@ -992,7 +1067,7 @@ class TestObjectVersioning(TestObjectVersioningBase):
self.assertEqual(b'version1', obj.read())
obj_info = obj.info()
self.assertEqual('text/jibberish32', obj_info['content_type'])
self.assertEqual(v1_etag, obj_info['etag'])
self.assertEqual(v1_etag, normalize_etag(obj_info['etag']))

def test_delete_with_version_api_old_object(self):
versioned_obj_name = Utils.create_name()
@@ -2308,7 +2383,7 @@ class TestSloWithVersioning(TestObjectVersioningBase):
expected = {
'bytes': file_info['content_length'],
'content_type': 'application/octet-stream',
'hash': manifest_info['etag'],
'hash': normalize_etag(manifest_info['etag']),
'name': 'my-slo-manifest',
'slo_etag': file_info['etag'],
'version_symlink': True,
@@ -2340,7 +2415,7 @@ class TestSloWithVersioning(TestObjectVersioningBase):
expected = {
'bytes': file_info['content_length'],
'content_type': 'application/octet-stream',
'hash': manifest_info['etag'],
'hash': normalize_etag(manifest_info['etag']),
'name': 'my-slo-manifest',
'slo_etag': file_info['etag'],
'version_symlink': True,
@@ -2688,16 +2763,11 @@ class TestVersioningContainerTempurl(TestObjectVersioningBase):
obj.write(b"version2")

# get v2 object (reading from versions container)
# cross container tempurl does not work for container tempurl key
try:
obj.read(parms=get_parms, cfg={'no_auth_token': True})
except ResponseError as e:
self.assertEqual(e.status, 401)
else:
self.fail('request did not error')
try:
obj.info(parms=get_parms, cfg={'no_auth_token': True})
except ResponseError as e:
self.assertEqual(e.status, 401)
else:
self.fail('request did not error')
# versioning symlink allows us to bypass the normal
# container-tempurl-key scoping
contents = obj.read(parms=get_parms, cfg={'no_auth_token': True})
self.assert_status([200])
self.assertEqual(contents, b"version2")
# HEAD works, too
obj.info(parms=get_parms, cfg={'no_auth_token': True})
self.assert_status([200])
@@ -23,6 +23,8 @@ from copy import deepcopy

import six

+from swift.common.swob import normalize_etag
+
import test.functional as tf
from test.functional import cluster_info, SkipTest
from test.functional.tests import Utils, Base, Base2, BaseEnv

@@ -299,8 +301,14 @@ class TestSlo(Base):
# a POST.
file_item.initialize(parms={'multipart-manifest': 'get'})
manifest_etag = file_item.etag
-self.assertFalse(manifest_etag.startswith('"'))
-self.assertFalse(manifest_etag.endswith('"'))
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    self.assertTrue(manifest_etag.startswith('"'))
+    self.assertTrue(manifest_etag.endswith('"'))
+    # ...but in the listing, it'll be stripped
+    manifest_etag = manifest_etag[1:-1]
+else:
+    self.assertFalse(manifest_etag.startswith('"'))
+    self.assertFalse(manifest_etag.endswith('"'))

file_item.initialize()
slo_etag = file_item.etag

@@ -715,6 +723,8 @@ class TestSlo(Base):
source_contents = source.read(parms={'multipart-manifest': 'get'})
source_json = json.loads(source_contents)
manifest_etag = hashlib.md5(source_contents).hexdigest()
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    manifest_etag = '"%s"' % manifest_etag
self.assertEqual(manifest_etag, source.etag)

source.initialize()

@@ -752,14 +762,14 @@ class TestSlo(Base):
actual = names['manifest-abcde']
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
-self.assertEqual(manifest_etag, actual['hash'])
+self.assertEqual(normalize_etag(manifest_etag), actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])

self.assertIn('copied-abcde-manifest-only', names)
actual = names['copied-abcde-manifest-only']
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
-self.assertEqual(manifest_etag, actual['hash'])
+self.assertEqual(normalize_etag(manifest_etag), actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])

# Test copy manifest including data segments

@@ -789,6 +799,8 @@ class TestSlo(Base):
source_contents = source.read(parms={'multipart-manifest': 'get'})
source_json = json.loads(source_contents)
manifest_etag = hashlib.md5(source_contents).hexdigest()
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    manifest_etag = '"%s"' % manifest_etag
self.assertEqual(manifest_etag, source.etag)

source.initialize()

@@ -831,14 +843,14 @@ class TestSlo(Base):
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
# the container listing should have the etag of the manifest contents
-self.assertEqual(manifest_etag, actual['hash'])
+self.assertEqual(normalize_etag(manifest_etag), actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])

self.assertIn('copied-abcde-manifest-only', names)
actual = names['copied-abcde-manifest-only']
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
self.assertEqual('image/jpeg', actual['content_type'])
-self.assertEqual(manifest_etag, actual['hash'])
+self.assertEqual(normalize_etag(manifest_etag), actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])

def test_slo_copy_the_manifest_account(self):

@@ -1098,12 +1110,7 @@ class TestSlo(Base):
manifest = self.env.container.file("manifest-db")
got_body = manifest.read(parms={'multipart-manifest': 'get',
                                'format': 'raw'})
-body_md5 = hashlib.md5(got_body).hexdigest()
-headers = dict(
-    (h.lower(), v)
-    for h, v in manifest.conn.response.getheaders())
-self.assertIn('etag', headers)
-self.assertEqual(headers['etag'], body_md5)
+self.assert_etag(hashlib.md5(got_body).hexdigest())

# raw format should have the actual manifest object content-type
self.assertEqual('application/octet-stream', manifest.content_type)

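Note: the SLO assertions above lean on the newly imported normalize_etag so they hold whether or not the cluster returns quoted ETags. A minimal sketch of the helper, assumed from how these tests use it (the real definition lives in swift.common.swob):

    def normalize_etag(tag):
        # assumption: mirrors swift.common.swob.normalize_etag, i.e.
        # strip one pair of surrounding double quotes if present
        if tag and tag.startswith('"') and tag.endswith('"'):
            tag = tag[1:-1]
        return tag

With that, a quoted and an unquoted form of the same MD5 normalize to the same value, which is exactly what the listing assertions need.
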
@@ -25,6 +25,7 @@ from six.moves import urllib
from uuid import uuid4

from swift.common.http import is_success
+from swift.common.swob import normalize_etag
from swift.common.utils import json, MD5_OF_EMPTY_STRING
from swift.common.middleware.slo import SloGetContext
from test.functional import check_response, retry, requires_acls, \

@@ -1135,7 +1136,7 @@ class TestSymlink(Base):
etag=self.env.tgt_etag)

# overwrite tgt object
-old_tgt_etag = self.env.tgt_etag
+old_tgt_etag = normalize_etag(self.env.tgt_etag)
self.env._create_tgt_object(body='updated target body')

# sanity

@@ -1380,7 +1381,7 @@ class TestSymlink(Base):
object_list[0]['symlink_path'])
obj_info = object_list[0]
self.assertIn('symlink_etag', obj_info)
-self.assertEqual(self.env.tgt_etag,
+self.assertEqual(normalize_etag(self.env.tgt_etag),
                 obj_info['symlink_etag'])
self.assertEqual(int(self.env.tgt_length),
                 obj_info['symlink_bytes'])

@@ -1550,7 +1551,7 @@ class TestSymlinkSlo(Base):
'symlink_path': '/v1/%s/%s/manifest-abcde' % (
    self.account_name, self.env.container2.name),
'symlink_bytes': 4 * 2 ** 20 + 1,
-'symlink_etag': manifest_etag,
+'symlink_etag': normalize_etag(manifest_etag),
})

def test_static_link_target_slo_manifest_wrong_etag(self):

@@ -1740,7 +1741,11 @@ class TestSymlinkToSloSegments(Base):
self.assertEqual(1024 * 1024, f_dict['bytes'])
self.assertEqual('application/octet-stream',
                 f_dict['content_type'])
-self.assertEqual(manifest_etag, f_dict['hash'])
+if tf.cluster_info.get('etag_quoter', {}).get(
+        'enable_by_default'):
+    self.assertEqual(manifest_etag, '"%s"' % f_dict['hash'])
+else:
+    self.assertEqual(manifest_etag, f_dict['hash'])
self.assertEqual(slo_etag, f_dict['slo_etag'])
break
else:

@@ -1759,7 +1764,11 @@ class TestSymlinkToSloSegments(Base):
self.assertEqual(1024 * 1024, f_dict['bytes'])
self.assertEqual(file_item.content_type,
                 f_dict['content_type'])
-self.assertEqual(manifest_etag, f_dict['hash'])
+if tf.cluster_info.get('etag_quoter', {}).get(
+        'enable_by_default'):
+    self.assertEqual(manifest_etag, '"%s"' % f_dict['hash'])
+else:
+    self.assertEqual(manifest_etag, f_dict['hash'])
self.assertEqual(slo_etag, f_dict['slo_etag'])
break
else:

@@ -1778,7 +1787,11 @@ class TestSymlinkToSloSegments(Base):
self.assertEqual(1024 * 1024, f_dict['bytes'])
self.assertEqual(file_item.content_type,
                 f_dict['content_type'])
-self.assertEqual(manifest_etag, f_dict['hash'])
+if tf.cluster_info.get('etag_quoter', {}).get(
+        'enable_by_default'):
+    self.assertEqual(manifest_etag, '"%s"' % f_dict['hash'])
+else:
+    self.assertEqual(manifest_etag, f_dict['hash'])
self.assertEqual(slo_etag, f_dict['slo_etag'])
break
else:

@@ -1811,6 +1824,8 @@ class TestSymlinkToSloSegments(Base):
source_contents = source.read(parms={'multipart-manifest': 'get'})
source_json = json.loads(source_contents)
manifest_etag = hashlib.md5(source_contents).hexdigest()
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    manifest_etag = '"%s"' % manifest_etag

source.initialize()
slo_etag = source.etag

@@ -1857,14 +1872,20 @@ class TestSymlinkToSloSegments(Base):
actual = names['manifest-linkto-ab']
self.assertEqual(2 * 1024 * 1024, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
-self.assertEqual(manifest_etag, actual['hash'])
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    self.assertEqual(manifest_etag, '"%s"' % actual['hash'])
+else:
+    self.assertEqual(manifest_etag, actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])

self.assertIn('copied-ab-manifest-only', names)
actual = names['copied-ab-manifest-only']
self.assertEqual(2 * 1024 * 1024, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
-self.assertEqual(manifest_etag, actual['hash'])
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    self.assertEqual(manifest_etag, '"%s"' % actual['hash'])
+else:
+    self.assertEqual(manifest_etag, actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])

@@ -2000,13 +2021,13 @@ class TestSymlinkTargetObjectComparison(Base):
else:
    self.assertEqual(b'', body)
self.assert_status(200)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

hdrs = {'If-Match': 'bogus'}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(412)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

def testIfMatchMultipleEtags(self):
    for file_item in self.env.files:

@@ -2022,13 +2043,13 @@ class TestSymlinkTargetObjectComparison(Base):
else:
    self.assertEqual(b'', body)
self.assert_status(200)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(412)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

def testIfNoneMatch(self):
    for file_item in self.env.files:

@@ -2044,13 +2065,13 @@ class TestSymlinkTargetObjectComparison(Base):
else:
    self.assertEqual(b'', body)
self.assert_status(200)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

hdrs = {'If-None-Match': md5}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(304)
-self.assert_header('etag', md5)
+self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')

def testIfNoneMatchMultipleEtags(self):

@@ -2067,14 +2088,14 @@ class TestSymlinkTargetObjectComparison(Base):
else:
    self.assertEqual(b'', body)
self.assert_status(200)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

hdrs = {'If-None-Match':
        '"bogus1", "bogus2", "%s"' % md5}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(304)
-self.assert_header('etag', md5)
+self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')

def testIfModifiedSince(self):

@@ -2091,19 +2112,19 @@ class TestSymlinkTargetObjectComparison(Base):
else:
    self.assertEqual(b'', body)
self.assert_status(200)
-self.assert_header('etag', md5)
+self.assert_etag(md5)
self.assertTrue(file_symlink.info(hdrs=hdrs, parms=self.env.parms))

hdrs = {'If-Modified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(304)
-self.assert_header('etag', md5)
+self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')
self.assertRaises(ResponseError, file_symlink.info, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(304)
-self.assert_header('etag', md5)
+self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')

def testIfUnmodifiedSince(self):

@@ -2120,18 +2141,18 @@ class TestSymlinkTargetObjectComparison(Base):
else:
    self.assertEqual(b'', body)
self.assert_status(200)
-self.assert_header('etag', md5)
+self.assert_etag(md5)
self.assertTrue(file_symlink.info(hdrs=hdrs, parms=self.env.parms))

hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(412)
-self.assert_header('etag', md5)
+self.assert_etag(md5)
self.assertRaises(ResponseError, file_symlink.info, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(412)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

def testIfMatchAndUnmodified(self):
    for file_item in self.env.files:

@@ -2148,21 +2169,21 @@ class TestSymlinkTargetObjectComparison(Base):
else:
    self.assertEqual(b'', body)
self.assert_status(200)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

hdrs = {'If-Match': 'bogus',
        'If-Unmodified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(412)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

hdrs = {'If-Match': md5,
        'If-Unmodified-Since': self.env.time_old_f3}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(412)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

def testLastModified(self):
    file_item = self.env.container.file(Utils.create_name())

@@ -2186,7 +2207,7 @@ class TestSymlinkTargetObjectComparison(Base):
hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs)
self.assert_status(304)
-self.assert_header('etag', md5)
+self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')

hdrs = {'If-Unmodified-Since': last_modified}

@@ -2227,20 +2248,20 @@ class TestSymlinkComparison(TestSymlinkTargetObjectComparison):
body = file_symlink.read(hdrs=hdrs, parms=self.env.parms)
self.assertEqual(b'', body)
self.assert_status(200)
-self.assert_header('etag', md5)
+self.assert_etag(md5)

hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
                  parms=self.env.parms)
self.assert_status(304)
-self.assert_header('etag', md5)
+self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')

hdrs = {'If-Unmodified-Since': last_modified}
body = file_symlink.read(hdrs=hdrs, parms=self.env.parms)
self.assertEqual(b'', body)
self.assert_status(200)
-self.assert_header('etag', md5)
+self.assert_etag(md5)


class TestSymlinkAccountTempurl(Base):

@@ -684,7 +684,11 @@ class TestObjectVersioning(Base):
prev_version = versions_container.file(versioned_obj_name)
prev_version_info = prev_version.info(parms={'symlink': 'get'})
self.assertEqual(b"aaaaa", prev_version.read())
-self.assertEqual(MD5_OF_EMPTY_STRING, prev_version_info['etag'])
+symlink_etag = prev_version_info['etag']
+if symlink_etag.startswith('"') and symlink_etag.endswith('"') and \
+        symlink_etag[1:-1]:
+    symlink_etag = symlink_etag[1:-1]
+self.assertEqual(MD5_OF_EMPTY_STRING, symlink_etag)
self.assertEqual(sym_tgt_header,
                 prev_version_info['x_symlink_target'])
return symlink, tgt_a

@@ -698,7 +702,10 @@ class TestObjectVersioning(Base):
symlink.delete()
sym_info = symlink.info(parms={'symlink': 'get'})
self.assertEqual(b"aaaaa", symlink.read())
-self.assertEqual(MD5_OF_EMPTY_STRING, sym_info['etag'])
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    self.assertEqual('"%s"' % MD5_OF_EMPTY_STRING, sym_info['etag'])
+else:
+    self.assertEqual(MD5_OF_EMPTY_STRING, sym_info['etag'])
self.assertEqual(
    quote(unquote('%s/%s' % (self.env.container.name, target.name))),
    sym_info['x_symlink_target'])

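Note: both versioning hunks pin the symlink's own etag to MD5_OF_EMPTY_STRING because a symlink object stores a zero-byte body, so its data etag is the MD5 of b''. A quick way to confirm the constant (standard hashlib, not part of the patch):

    import hashlib
    assert hashlib.md5(b'').hexdigest() == 'd41d8cd98f00b204e9800998ecf8427e'
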
@@ -27,6 +27,7 @@ import uuid
from copy import deepcopy
import eventlet
from swift.common.http import is_success, is_client_error
+from swift.common.swob import normalize_etag
from email.utils import parsedate

if six.PY2:

@@ -131,6 +132,13 @@ class Base(unittest.TestCase):
'Expected header name %r not found in response.' % header_name)
self.assertEqual(expected_value, actual_value)

+def assert_etag(self, unquoted_value):
+    if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+        expected = '"%s"' % unquoted_value
+    else:
+        expected = unquoted_value
+    self.assert_header('etag', expected)
+

class Base2(object):
    @classmethod

@@ -874,7 +882,11 @@ class TestContainer(Base):
for actual in file_list:
    name = actual['name']
    self.assertIn(name, expected)
-    self.assertEqual(expected[name]['etag'], actual['hash'])
+    if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+        self.assertEqual(expected[name]['etag'],
+                         '"%s"' % actual['hash'])
+    else:
+        self.assertEqual(expected[name]['etag'], actual['hash'])
    self.assertEqual(
        expected[name]['content_type'], actual['content_type'])
    self.assertEqual(

@@ -1365,6 +1377,8 @@ class TestFile(Base):
'x-delete-at': mock.ANY,
'x-trans-id': mock.ANY,
'x-openstack-request-id': mock.ANY}
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    expected_headers['etag'] = '"%s"' % expected_headers['etag']
unexpected_headers = ['connection', 'x-delete-after']
do_test(put_headers, {}, expected_headers, unexpected_headers)

@@ -1420,7 +1434,7 @@ class TestFile(Base):
self.fail('Failed to find %s in listing' % dest_filename)

self.assertEqual(file_item.size, obj['bytes'])
-self.assertEqual(file_item.etag, obj['hash'])
+self.assertEqual(normalize_etag(file_item.etag), obj['hash'])
self.assertEqual(file_item.content_type, obj['content_type'])

file_copy = cont.file(dest_filename)

@@ -1470,7 +1484,7 @@ class TestFile(Base):
self.fail('Failed to find %s in listing' % dest_filename)

self.assertEqual(file_item.size, obj['bytes'])
-self.assertEqual(file_item.etag, obj['hash'])
+self.assertEqual(normalize_etag(file_item.etag), obj['hash'])
self.assertEqual(
    'application/test-changed', obj['content_type'])

@@ -1505,7 +1519,7 @@ class TestFile(Base):
self.fail('Failed to find %s in listing' % dest_filename)

self.assertEqual(file_item.size, obj['bytes'])
-self.assertEqual(file_item.etag, obj['hash'])
+self.assertEqual(normalize_etag(file_item.etag), obj['hash'])
self.assertEqual(
    'application/test-updated', obj['content_type'])

@@ -2088,7 +2102,7 @@ class TestFile(Base):
self.assertEqual(file_item.read(hdrs=hdrs), data[-i:])
self.assert_header('content-range', 'bytes %d-%d/%d' % (
    file_length - i, file_length - 1, file_length))
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')

range_string = 'bytes=%d-' % (i)

@@ -2102,7 +2116,7 @@ class TestFile(Base):
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
self.assert_header('content-range', 'bytes */%d' % file_length)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')

range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)

@@ -2416,14 +2430,16 @@ class TestFile(Base):
file_item.content_type = content_type
file_item.write_random(self.env.file_size)

-md5 = file_item.md5
+expected_etag = file_item.md5
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    expected_etag = '"%s"' % expected_etag

file_item = self.env.container.file(file_name)
info = file_item.info()

self.assert_status(200)
self.assertEqual(info['content_length'], self.env.file_size)
-self.assertEqual(info['etag'], md5)
+self.assertEqual(info['etag'], expected_etag)
self.assertEqual(info['content_type'], content_type)
self.assertIn('last_modified', info)

@@ -2612,14 +2628,7 @@ class TestFile(Base):
file_item = self.env.container.file(Utils.create_name())

data = io.BytesIO(file_item.write_random(512))
-etag = File.compute_md5sum(data)
-headers = dict((h.lower(), v)
-               for h, v in self.env.conn.response.getheaders())
-self.assertIn('etag', headers.keys())
-header_etag = headers['etag'].strip('"')
-self.assertEqual(etag, header_etag)
+self.assert_etag(File.compute_md5sum(data))

def testChunkedPut(self):
    if (tf.web_front_end == 'apache2'):

@@ -2645,7 +2654,7 @@ class TestFile(Base):
self.assertEqual(data, file_item.read())

info = file_item.info()
-self.assertEqual(etag, info['etag'])
+self.assertEqual(normalize_etag(info['etag']), etag)

def test_POST(self):
    # verify consistency between object and container listing metadata

@@ -2670,7 +2679,10 @@ class TestFile(Base):
self.fail('Failed to find file %r in listing' % file_name)
self.assertEqual(1024, f_dict['bytes'])
self.assertEqual('text/foobar', f_dict['content_type'])
-self.assertEqual(etag, f_dict['hash'])
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    self.assertEqual(etag, '"%s"' % f_dict['hash'])
+else:
+    self.assertEqual(etag, f_dict['hash'])
put_last_modified = f_dict['last_modified']

# now POST updated content-type to each file

@@ -2697,7 +2709,10 @@ class TestFile(Base):
self.assertEqual(1024, f_dict['bytes'])
self.assertEqual('image/foobarbaz', f_dict['content_type'])
self.assertLess(put_last_modified, f_dict['last_modified'])
-self.assertEqual(etag, f_dict['hash'])
+if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+    self.assertEqual(etag, '"%s"' % f_dict['hash'])
+else:
+    self.assertEqual(etag, f_dict['hash'])


class TestFileUTF8(Base2, TestFile):

@@ -2742,7 +2757,7 @@ class TestFileComparison(Base):
hdrs = {'If-Match': 'bogus'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)

def testIfMatchMultipleEtags(self):
    for file_item in self.env.files:

@@ -2752,7 +2767,7 @@ class TestFileComparison(Base):
hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)

def testIfNoneMatch(self):
    for file_item in self.env.files:

@@ -2762,7 +2777,7 @@ class TestFileComparison(Base):
hdrs = {'If-None-Match': file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')

def testIfNoneMatchMultipleEtags(self):

@@ -2774,7 +2789,7 @@ class TestFileComparison(Base):
'"bogus1", "bogus2", "%s"' % file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')

def testIfModifiedSince(self):

@@ -2786,11 +2801,11 @@ class TestFileComparison(Base):
hdrs = {'If-Modified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(304)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')

def testIfUnmodifiedSince(self):

@@ -2802,10 +2817,10 @@ class TestFileComparison(Base):
hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(412)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)

def testIfMatchAndUnmodified(self):
    for file_item in self.env.files:

@@ -2817,13 +2832,13 @@ class TestFileComparison(Base):
'If-Unmodified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)

hdrs = {'If-Match': file_item.md5,
        'If-Unmodified-Since': self.env.time_old_f3}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
-self.assert_header('etag', file_item.md5)
+self.assert_etag(file_item.md5)

def testLastModified(self):
    file_name = Utils.create_name()

@@ -2844,7 +2859,7 @@ class TestFileComparison(Base):
hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
-self.assert_header('etag', etag)
+self.assert_etag(etag)
self.assert_header('accept-ranges', 'bytes')

hdrs = {'If-Unmodified-Since': last_modified}

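Note: the new Base.assert_etag helper above is what most of these hunks switch to. It asks the cluster, via tf.cluster_info (populated from the proxy's /info endpoint by the functional-test harness), whether the etag_quoter middleware quotes ETags by default, and adjusts the expected header accordingly. A hedged usage sketch inside any test deriving from Base:

    # after any request that produced a response:
    file_item = self.env.container.file(Utils.create_name())
    file_item.write_random(1024)
    file_item.read()
    self.assert_etag(file_item.md5)  # passes with quoting on or off
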
@@ -23,6 +23,7 @@ from swiftclient.exceptions import ClientException

from swift.common import direct_client
from swift.common.manager import Manager
+from swift.common.swob import normalize_etag
from test.probe.common import kill_nonprimary_server, \
    kill_server, ReplProbeTest, start_server, ECProbeTest

@@ -210,7 +211,7 @@ class TestUpdateOverridesEC(ECProbeTest):
self.assertEqual(1, len(listing))
self.assertEqual('o1', listing[0]['name'])
self.assertEqual(len(content), listing[0]['bytes'])
-self.assertEqual(meta['etag'], listing[0]['hash'])
+self.assertEqual(normalize_etag(meta['etag']), listing[0]['hash'])
self.assertEqual('test/ctype', listing[0]['content_type'])

def test_update_during_POST_only(self):

@@ -261,7 +262,7 @@ class TestUpdateOverridesEC(ECProbeTest):
self.assertEqual(1, len(listing))
self.assertEqual('o1', listing[0]['name'])
self.assertEqual(len(content), listing[0]['bytes'])
-self.assertEqual(meta['etag'], listing[0]['hash'])
+self.assertEqual(normalize_etag(meta['etag']), listing[0]['hash'])
self.assertEqual('test/ctype', listing[0]['content_type'])

# Run the object-updaters to send the async pending from the PUT

@@ -328,7 +329,7 @@ class TestUpdateOverridesEC(ECProbeTest):
self.assertEqual(1, len(listing))
self.assertEqual('o1', listing[0]['name'])
self.assertEqual(len(content), listing[0]['bytes'])
-self.assertEqual(meta['etag'], listing[0]['hash'])
+self.assertEqual(normalize_etag(meta['etag']), listing[0]['hash'])
self.assertEqual('test/ctype', listing[0]['content_type'])

@@ -22,6 +22,7 @@ import uuid
from swift.common.direct_client import direct_get_suffix_hashes
from swift.common.exceptions import DiskFileDeleted
from swift.common.internal_client import UnexpectedResponse
+from swift.common.swob import normalize_etag
from swift.container.backend import ContainerBroker
from swift.common import utils
from swiftclient import client

@@ -129,7 +130,7 @@ class Test(ReplProbeTest):

def _assert_object_metadata_matches_listing(self, listing, metadata):
    self.assertEqual(listing['bytes'], int(metadata['content-length']))
-    self.assertEqual(listing['hash'], metadata['etag'])
+    self.assertEqual(listing['hash'], normalize_etag(metadata['etag']))
    self.assertEqual(listing['content_type'], metadata['content-type'])
    modified = Timestamp(metadata['x-timestamp']).isoformat
    self.assertEqual(listing['last_modified'], modified)

@@ -24,6 +24,10 @@ auth_uri = http://127.0.0.1:8080/auth/v1.0
#auth_version = 3
#auth_uri = http://localhost:5000/v3/

+# Used by s3api functional tests, which don't contact auth directly
+#s3_storage_url = http://127.0.0.1:8080/
+#s3_region = us-east-1
+
# Primary functional test account (needs admin access to the account)
account = test
username = tester

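Note: the two commented-out knobs give the s3api functional tests an S3 endpoint to hit without going through auth first. A hedged illustration of exercising such an endpoint; boto3 and the tempauth-style credentials below are assumptions for the example, not values this diff sets:

    import boto3
    s3 = boto3.client(
        's3',
        endpoint_url='http://127.0.0.1:8080',  # s3_storage_url above
        region_name='us-east-1',               # s3_region above
        aws_access_key_id='test:tester',       # assumption: tempauth sample user
        aws_secret_access_key='testing')       # assumption: tempauth sample key
    print(s3.list_buckets()['Buckets'])
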
@@ -180,6 +180,72 @@ class TestAccountBroker(unittest.TestCase):
broker.delete_db(Timestamp.now().internal)
broker.reclaim(Timestamp.now().internal, time())

+def test_batched_reclaim(self):
+    num_of_containers = 60
+    container_specs = []
+    now = time()
+    top_of_the_minute = now - (now % 60)
+    c = itertools.cycle([True, False])
+    for m, is_deleted in six.moves.zip(range(num_of_containers), c):
+        offset = top_of_the_minute - (m * 60)
+        container_specs.append((Timestamp(offset), is_deleted))
+    random.seed(now)
+    random.shuffle(container_specs)
+    policy_indexes = list(p.idx for p in POLICIES)
+    broker = AccountBroker(':memory:', account='test_account')
+    broker.initialize(Timestamp('1').internal)
+    for i, container_spec in enumerate(container_specs):
+        # with container12 before container2 and shuffled ts.internal we
+        # shouldn't be able to accidentally rely on any implicit ordering
+        name = 'container%s' % i
+        pidx = random.choice(policy_indexes)
+        ts, is_deleted = container_spec
+        if is_deleted:
+            broker.put_container(name, 0, ts.internal, 0, 0, pidx)
+        else:
+            broker.put_container(name, ts.internal, 0, 0, 0, pidx)
+
+    def count_reclaimable(conn, reclaim_age):
+        return conn.execute(
+            "SELECT count(*) FROM container "
+            "WHERE deleted = 1 AND delete_timestamp < ?", (reclaim_age,)
+        ).fetchone()[0]
+
+    # This is intended to divide the set of timestamps exactly in half
+    # regardless of the value of now
+    reclaim_age = top_of_the_minute + 1 - (num_of_containers / 2 * 60)
+    with broker.get() as conn:
+        self.assertEqual(count_reclaimable(conn, reclaim_age),
+                         num_of_containers / 4)
+
+    orig__reclaim = broker._reclaim
+    trace = []
+
+    def tracing_reclaim(conn, age_timestamp, marker):
+        trace.append((age_timestamp, marker,
+                      count_reclaimable(conn, age_timestamp)))
+        return orig__reclaim(conn, age_timestamp, marker)
+
+    with mock.patch.object(broker, '_reclaim', new=tracing_reclaim), \
+            mock.patch('swift.common.db.RECLAIM_PAGE_SIZE', 10):
+        broker.reclaim(reclaim_age, reclaim_age)
+    with broker.get() as conn:
+        self.assertEqual(count_reclaimable(conn, reclaim_age), 0)
+    self.assertEqual(3, len(trace), trace)
+    self.assertEqual([age for age, marker, reclaimable in trace],
+                     [reclaim_age] * 3)
+    # markers are in-order
+    self.assertLess(trace[0][1], trace[1][1])
+    self.assertLess(trace[1][1], trace[2][1])
+    # reclaimable count gradually decreases
+    # generally, count1 > count2 > count3, but because of the randomness
+    # we may occasionally have count1 == count2 or count2 == count3
+    self.assertGreaterEqual(trace[0][2], trace[1][2])
+    self.assertGreaterEqual(trace[1][2], trace[2][2])
+    # technically, this might happen occasionally, but *really* rarely
+    self.assertTrue(trace[0][2] > trace[1][2] or
+                    trace[1][2] > trace[2][2])
+
def test_delete_db_status(self):
    start = next(self.ts)
    broker = AccountBroker(':memory:', account='a')

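Note: test_batched_reclaim checks that reclaim walks reclaimable rows in pages of RECLAIM_PAGE_SIZE instead of issuing one unbounded DELETE, so a large backlog cannot monopolize the database lock. With 15 reclaimable containers and a page size of 10, that is two deleting passes plus one terminating empty pass, hence the three traced _reclaim calls. A sketch of the marker-paging pattern being asserted (an approximation, not swift.common.db's exact code):

    def reclaim(conn, age_timestamp, page_size=10):
        marker = ''
        while True:
            # one page of reclaimable rows past the last marker
            rows = conn.execute(
                "SELECT name FROM container"
                " WHERE deleted = 1 AND delete_timestamp < ? AND name > ?"
                " ORDER BY name LIMIT ?",
                (age_timestamp, marker, page_size)).fetchall()
            if not rows:
                break
            conn.execute(
                "DELETE FROM container WHERE name IN (%s)"
                % ','.join('?' * len(rows)), [r[0] for r in rows])
            marker = rows[-1][0]  # resume after this page
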
@@ -497,13 +497,10 @@ Shard Ranges (3):
print_item_locations(None, partition=part, policy_name='zero',
                     swift_dir=self.testdir)
exp_part_msg = 'Partition\t%s' % part
-exp_acct_msg = 'Account \tNone'
-exp_cont_msg = 'Container\tNone'
-exp_obj_msg = 'Object \tNone'
self.assertIn(exp_part_msg, out.getvalue())
-self.assertIn(exp_acct_msg, out.getvalue())
-self.assertIn(exp_cont_msg, out.getvalue())
-self.assertIn(exp_obj_msg, out.getvalue())
+self.assertNotIn('Account', out.getvalue())
+self.assertNotIn('Container', out.getvalue())
+self.assertNotIn('Object', out.getvalue())

def test_print_item_locations_dashed_ring_name_partition(self):
    out = StringIO()

@@ -513,13 +510,10 @@ Shard Ranges (3):
ring_name='foo-bar', partition=part,
swift_dir=self.testdir)
exp_part_msg = 'Partition\t%s' % part
-exp_acct_msg = 'Account \tNone'
-exp_cont_msg = 'Container\tNone'
-exp_obj_msg = 'Object \tNone'
self.assertIn(exp_part_msg, out.getvalue())
-self.assertIn(exp_acct_msg, out.getvalue())
-self.assertIn(exp_cont_msg, out.getvalue())
-self.assertIn(exp_obj_msg, out.getvalue())
+self.assertNotIn('Account', out.getvalue())
+self.assertNotIn('Container', out.getvalue())
+self.assertNotIn('Object', out.getvalue())

def test_print_item_locations_account_with_ring(self):
    out = StringIO()

@@ -533,11 +527,9 @@ Shard Ranges (3):
'but ring not named "account"'
self.assertIn(exp_warning, out.getvalue())
exp_acct_msg = 'Account \t%s' % account
-exp_cont_msg = 'Container\tNone'
-exp_obj_msg = 'Object \tNone'
self.assertIn(exp_acct_msg, out.getvalue())
-self.assertIn(exp_cont_msg, out.getvalue())
-self.assertIn(exp_obj_msg, out.getvalue())
+self.assertNotIn('Container', out.getvalue())
+self.assertNotIn('Object', out.getvalue())

def test_print_item_locations_account_no_ring(self):
    out = StringIO()

@@ -546,11 +538,9 @@ Shard Ranges (3):
print_item_locations(None, account=account,
                     swift_dir=self.testdir)
exp_acct_msg = 'Account \t%s' % account
-exp_cont_msg = 'Container\tNone'
-exp_obj_msg = 'Object \tNone'
self.assertIn(exp_acct_msg, out.getvalue())
-self.assertIn(exp_cont_msg, out.getvalue())
-self.assertIn(exp_obj_msg, out.getvalue())
+self.assertNotIn('Container', out.getvalue())
+self.assertNotIn('Object', out.getvalue())

def test_print_item_locations_account_container_ring(self):
    out = StringIO()

@@ -562,10 +552,9 @@ Shard Ranges (3):
container=container)
exp_acct_msg = 'Account \t%s' % account
exp_cont_msg = 'Container\t%s' % container
-exp_obj_msg = 'Object \tNone'
self.assertIn(exp_acct_msg, out.getvalue())
self.assertIn(exp_cont_msg, out.getvalue())
-self.assertIn(exp_obj_msg, out.getvalue())
+self.assertNotIn('Object', out.getvalue())

def test_print_item_locations_account_container_no_ring(self):
    out = StringIO()

@@ -576,10 +565,9 @@ Shard Ranges (3):
container=container, swift_dir=self.testdir)
exp_acct_msg = 'Account \t%s' % account
exp_cont_msg = 'Container\t%s' % container
-exp_obj_msg = 'Object \tNone'
self.assertIn(exp_acct_msg, out.getvalue())
self.assertIn(exp_cont_msg, out.getvalue())
-self.assertIn(exp_obj_msg, out.getvalue())
+self.assertNotIn('Object', out.getvalue())

def test_print_item_locations_account_container_object_ring(self):
    out = StringIO()

@@ -691,59 +679,59 @@ Shard Ranges (3):
def test_parse_get_node_args(self):
# Capture error messages
# (without any parameters)
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = ''
self.assertRaisesMessage(InfoSystemExit,
                         'Need to specify policy_name or <ring.gz>',
                         parse_get_node_args, options, args.split())
# a
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a'
self.assertRaisesMessage(InfoSystemExit,
                         'Need to specify policy_name or <ring.gz>',
                         parse_get_node_args, options, args.split())
# a c
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a c'
self.assertRaisesMessage(InfoSystemExit,
                         'Need to specify policy_name or <ring.gz>',
                         parse_get_node_args, options, args.split())
# a c o
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a c o'
self.assertRaisesMessage(InfoSystemExit,
                         'Need to specify policy_name or <ring.gz>',
                         parse_get_node_args, options, args.split())

# a/c
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a/c'
self.assertRaisesMessage(InfoSystemExit,
                         'Need to specify policy_name or <ring.gz>',
                         parse_get_node_args, options, args.split())
# a/c/o
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a/c/o'
self.assertRaisesMessage(InfoSystemExit,
                         'Need to specify policy_name or <ring.gz>',
                         parse_get_node_args, options, args.split())

# account container junk/test.ring.gz
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'account container junk/test.ring.gz'
self.assertRaisesMessage(InfoSystemExit,
                         'Need to specify policy_name or <ring.gz>',
                         parse_get_node_args, options, args.split())

# account container object junk/test.ring.gz
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'account container object junk/test.ring.gz'
self.assertRaisesMessage(InfoSystemExit,
                         'Need to specify policy_name or <ring.gz>',
                         parse_get_node_args, options, args.split())

# object.ring.gz(without any arguments i.e. a c o)
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz'
self.assertRaisesMessage(InfoSystemExit,
                         'Ring file does not exist',

@@ -751,55 +739,55 @@ Shard Ranges (3):

# Valid policy
# -P zero
-options = Namespace(policy_name='zero', partition=None)
+options = Namespace(policy_name='zero', partition=None, quoted=None)
args = ''
self.assertRaisesMessage(InfoSystemExit,
                         'No target specified',
                         parse_get_node_args, options, args.split())
# -P one a/c/o
-options = Namespace(policy_name='one', partition=None)
+options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'a/c/o'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['a', 'c', 'o'])
# -P one account container photos/cat.jpg
-options = Namespace(policy_name='one', partition=None)
+options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'account container photos/cat.jpg'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# -P one account/container/photos/cat.jpg
-options = Namespace(policy_name='one', partition=None)
+options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'account/container/photos/cat.jpg'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# -P one account/container/junk/test.ring.gz(object endswith 'ring.gz')
-options = Namespace(policy_name='one', partition=None)
+options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'account/container/junk/test.ring.gz'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['account', 'container', 'junk/test.ring.gz'])
# -P two a c o hooya
-options = Namespace(policy_name='two', partition=None)
+options = Namespace(policy_name='two', partition=None, quoted=None)
args = 'a c o hooya'
self.assertRaisesMessage(InfoSystemExit,
                         'Invalid arguments',
                         parse_get_node_args, options, args.split())
# -P zero -p 1
-options = Namespace(policy_name='zero', partition='1')
+options = Namespace(policy_name='zero', partition='1', quoted=None)
args = ''
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertFalse(args)
# -P one -p 1 a/c/o
-options = Namespace(policy_name='one', partition='1')
+options = Namespace(policy_name='one', partition='1', quoted=None)
args = 'a/c/o'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['a', 'c', 'o'])
# -P two -p 1 a c o hooya
-options = Namespace(policy_name='two', partition='1')
+options = Namespace(policy_name='two', partition='1', quoted=None)
args = 'a c o hooya'
self.assertRaisesMessage(InfoSystemExit,
                         'Invalid arguments',

@@ -853,7 +841,7 @@ Shard Ranges (3):

# Mock tests
# /etc/swift/object.ring.gz(without any arguments i.e. a c o)
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -863,7 +851,7 @@ Shard Ranges (3):
parse_get_node_args, options, args.split())
# Similar ring_path and arguments
# /etc/swift/object.ring.gz /etc/swift/object.ring.gz
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.ring.gz /etc/swift/object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -871,7 +859,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, '/etc/swift/object.ring.gz')
self.assertEqual(args, ['etc', 'swift', 'object.ring.gz'])
# /etc/swift/object.ring.gz a/c/etc/swift/object.ring.gz
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.ring.gz a/c/etc/swift/object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -880,7 +868,7 @@ Shard Ranges (3):
self.assertEqual(args, ['a', 'c', 'etc/swift/object.ring.gz'])
# Invalid path as mentioned in BUG#1539275
# /etc/swift/object.tar.gz account container object
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.tar.gz account container object'
self.assertRaisesMessage(
    InfoSystemExit,

@@ -888,7 +876,7 @@ Shard Ranges (3):
parse_get_node_args, options, args.split())

# object.ring.gz a/
-options = Namespace(policy_name=None)
+options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -896,7 +884,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a'])
# object.ring.gz a/c
-options = Namespace(policy_name=None)
+options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -904,7 +892,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c'])
# object.ring.gz a/c/o
-options = Namespace(policy_name=None)
+options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c/o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -912,7 +900,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz a/c/o/junk/test.ring.gz
-options = Namespace(policy_name=None)
+options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c/o/junk/test.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -920,7 +908,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o/junk/test.ring.gz'])
# object.ring.gz a
-options = Namespace(policy_name=None)
+options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -928,7 +916,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a'])
# object.ring.gz a c
-options = Namespace(policy_name=None)
+options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a c'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -936,7 +924,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c'])
# object.ring.gz a c o
-options = Namespace(policy_name=None)
+options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a c o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -944,7 +932,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz a c o blah blah
-options = Namespace(policy_name=None)
+options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a c o blah blah'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -953,7 +941,7 @@ Shard Ranges (3):
'Invalid arguments',
parse_get_node_args, options, args.split())
# object.ring.gz a/c/o/blah/blah
-options = Namespace(policy_name=None)
+options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c/o/blah/blah'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -962,7 +950,7 @@ Shard Ranges (3):
self.assertEqual(args, ['a', 'c', 'o/blah/blah'])

# object.ring.gz -p 1
-options = Namespace(policy_name=None, partition='1')
+options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -970,7 +958,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertFalse(args)
# object.ring.gz -p 1 a c o
-options = Namespace(policy_name=None, partition='1')
+options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a c o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -978,7 +966,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz -p 1 a c o forth_arg
-options = Namespace(policy_name=None, partition='1')
+options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a c o forth_arg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -987,7 +975,7 @@ Shard Ranges (3):
'Invalid arguments',
parse_get_node_args, options, args.split())
# object.ring.gz -p 1 a/c/o
-options = Namespace(policy_name=None, partition='1')
+options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a/c/o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -995,7 +983,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz -p 1 a/c/junk/test.ring.gz
-options = Namespace(policy_name=None, partition='1')
+options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a/c/junk/test.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -1003,7 +991,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'junk/test.ring.gz'])
# object.ring.gz -p 1 a/c/photos/cat.jpg
-options = Namespace(policy_name=None, partition='1')
+options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a/c/photos/cat.jpg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -1012,7 +1000,7 @@ Shard Ranges (3):
self.assertEqual(args, ['a', 'c', 'photos/cat.jpg'])

# --all object.ring.gz a
-options = Namespace(all=True, policy_name=None)
+options = Namespace(all=True, policy_name=None, quoted=None)
args = 'object.ring.gz a'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -1020,7 +1008,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a'])
# --all object.ring.gz a c
-options = Namespace(all=True, policy_name=None)
+options = Namespace(all=True, policy_name=None, quoted=None)
args = 'object.ring.gz a c'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -1028,7 +1016,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c'])
# --all object.ring.gz a c o
-options = Namespace(all=True, policy_name=None)
+options = Namespace(all=True, policy_name=None, quoted=None)
args = 'object.ring.gz a c o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -1036,7 +1024,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz account container photos/cat.jpg
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz account container photos/cat.jpg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -1044,7 +1032,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# object.ring.gz /account/container/photos/cat.jpg
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz account/container/photos/cat.jpg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -1053,7 +1041,7 @@ Shard Ranges (3):
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# Object name ends with 'ring.gz'
# object.ring.gz /account/container/junk/test.ring.gz
-options = Namespace(policy_name=None, partition=None)
+options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz account/container/junk/test.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
    exists.return_value = True

@@ -1061,6 +1049,32 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'junk/test.ring.gz'])

+# Object name has special characters
+# object.ring.gz /account/container/obj\nwith%0anewline
+options = Namespace(policy_name=None, partition=None, quoted=None)
+args = ['object.ring.gz', 'account/container/obj\nwith%0anewline']
+with mock.patch('swift.cli.info.os.path.exists') as exists:
+    exists.return_value = True
+    ring_path, args = parse_get_node_args(options, args)
+self.assertEqual(ring_path, 'object.ring.gz')
+self.assertEqual(args, ['account', 'container', 'obj\nwith%0anewline'])
+
+options = Namespace(policy_name=None, partition=None, quoted=True)
+args = ['object.ring.gz', 'account/container/obj\nwith%0anewline']
+with mock.patch('swift.cli.info.os.path.exists') as exists:
+    exists.return_value = True
+    ring_path, args = parse_get_node_args(options, args)
+self.assertEqual(ring_path, 'object.ring.gz')
+self.assertEqual(args, ['account', 'container', 'obj\nwith\nnewline'])
+
+options = Namespace(policy_name=None, partition=None, quoted=False)
+args = ['object.ring.gz', 'account/container/obj\nwith%0anewline']
+with mock.patch('swift.cli.info.os.path.exists') as exists:
+    exists.return_value = True
+    ring_path, args = parse_get_node_args(options, args)
+self.assertEqual(ring_path, 'object.ring.gz')
+self.assertEqual(args, ['account', 'container', 'obj\nwith%0anewline'])


class TestPrintObj(TestCliInfoBase):

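Note: every Namespace in these tests gains a quoted attribute because swift-get-nodes grew a --quoted option: with it, percent-encoded names given on the command line are unquoted before the ring lookup. The new special-characters cases pin the behavior down: 'obj\nwith%0anewline' stays raw when quoted is None or False and becomes 'obj\nwith\nnewline' when quoted is True. A sketch of the decode step, an assumption about parse_get_node_args' internals rather than a copy of them:

    from six.moves.urllib.parse import unquote

    def maybe_unquote(values, quoted):
        # assumption: decoding only happens on an explicit --quoted
        return [unquote(v) for v in values] if quoted else values
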
@@ -189,6 +189,7 @@ class TestManageShardRanges(unittest.TestCase):
' "meta_timestamp": "%s",' % now.internal,
' "name": "a/c",',
' "object_count": 0,',
+' "reported": 0,',
' "state": "sharding",',
' "state_timestamp": "%s",' % now.internal,
' "timestamp": "%s",' % now.internal,

@@ -230,6 +231,7 @@ class TestManageShardRanges(unittest.TestCase):
' "meta_timestamp": "%s",' % now.internal,
' "name": "a/c",',
' "object_count": 0,',
+' "reported": 0,',
' "state": "sharding",',
' "state_timestamp": "%s",' % now.internal,
' "timestamp": "%s",' % now.internal,

@@ -12,6 +12,9 @@
# limitations under the License.

+import binascii
+import errno
+import fcntl
import json
import os
import shutil
import struct

@@ -30,6 +33,9 @@ from test.unit import FakeLogger, skip_if_no_xattrs, DEFAULT_TEST_EC_TYPE, \
    patch_policies


+PART_POWER = 8
+
+
class TestRelinker(unittest.TestCase):
    def setUp(self):
        skip_if_no_xattrs()

@@ -40,7 +46,7 @@ class TestRelinker(unittest.TestCase):
os.mkdir(self.testdir)
os.mkdir(self.devices)

-self.rb = ring.RingBuilder(8, 6.0, 1)
+self.rb = ring.RingBuilder(PART_POWER, 6.0, 1)

for i in range(6):
    ip = "127.0.0.%s" % i

@@ -55,10 +61,10 @@ class TestRelinker(unittest.TestCase):
os.mkdir(self.objects)
self._hash = utils.hash_path('a/c/o')
digest = binascii.unhexlify(self._hash)
-part = struct.unpack_from('>I', digest)[0] >> 24
+self.part = struct.unpack_from('>I', digest)[0] >> 24
+self.next_part = struct.unpack_from('>I', digest)[0] >> 23
self.objdir = os.path.join(
-    self.objects, str(part), self._hash[-3:], self._hash)
+    self.objects, str(self.part), self._hash[-3:], self._hash)
os.makedirs(self.objdir)
self.object_fname = "1278553064.00000.data"
self.objname = os.path.join(self.objdir, self.object_fname)

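Note: setUp now records both self.part and self.next_part because raising the part power re-homes every object. A partition is just the top part_power bits of the first four bytes of the path hash, so going from PART_POWER 8 to 9 means shifting by 23 instead of 24, and each old partition splits into two children. A worked check of that relationship (hashlib stands in for utils.hash_path, which also mixes in the cluster's hash prefix/suffix):

    import binascii, hashlib, struct

    _hash = hashlib.md5(b'/a/c/o').hexdigest()  # assumption: stand-in hash
    digest = binascii.unhexlify(_hash)
    part8 = struct.unpack_from('>I', digest)[0] >> (32 - 8)
    part9 = struct.unpack_from('>I', digest)[0] >> (32 - 9)
    assert part9 in (2 * part8, 2 * part8 + 1)  # old part maps to two children
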
@@ -97,6 +103,27 @@ class TestRelinker(unittest.TestCase):
stat_new = os.stat(self.expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)

+def test_relink_device_filter(self):
+    self.rb.prepare_increase_partition_power()
+    self._save_ring()
+    relinker.relink(self.testdir, self.devices, True,
+                    device=self.existing_device)
+
+    self.assertTrue(os.path.isdir(self.expected_dir))
+    self.assertTrue(os.path.isfile(self.expected_file))
+
+    stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
+    stat_new = os.stat(self.expected_file)
+    self.assertEqual(stat_old.st_ino, stat_new.st_ino)
+
+def test_relink_device_filter_invalid(self):
+    self.rb.prepare_increase_partition_power()
+    self._save_ring()
+    relinker.relink(self.testdir, self.devices, True, device='none')
+
+    self.assertFalse(os.path.isdir(self.expected_dir))
+    self.assertFalse(os.path.isfile(self.expected_file))
+
def _common_test_cleanup(self, relink=True):
    # Create a ring that has prev_part_power set
    self.rb.prepare_increase_partition_power()

@@ -121,6 +148,187 @@ class TestRelinker(unittest.TestCase):
self.assertFalse(os.path.isfile(
    os.path.join(self.objdir, self.object_fname)))

+def test_cleanup_device_filter(self):
+    self._common_test_cleanup()
+    self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True,
+                                         device=self.existing_device))
+
+    # Old objectname should be removed, new should still exist
+    self.assertTrue(os.path.isdir(self.expected_dir))
+    self.assertTrue(os.path.isfile(self.expected_file))
+    self.assertFalse(os.path.isfile(
+        os.path.join(self.objdir, self.object_fname)))
+
+def test_cleanup_device_filter_invalid(self):
+    self._common_test_cleanup()
+    self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True,
+                                         device='none'))
+
+    # Old objectname should still exist, new should still exist
+    self.assertTrue(os.path.isdir(self.expected_dir))
+    self.assertTrue(os.path.isfile(self.expected_file))
+    self.assertTrue(os.path.isfile(
+        os.path.join(self.objdir, self.object_fname)))
+
+def test_relink_cleanup(self):
+    state_file = os.path.join(self.devices, self.existing_device,
+                              'relink.objects.json')
+
+    self.rb.prepare_increase_partition_power()
+    self._save_ring()
+    relinker.relink(self.testdir, self.devices, True)
+    with open(state_file, 'rt') as f:
+        self.assertEqual(json.load(f), {str(self.part): [True, False]})
+
+    self.rb.increase_partition_power()
+    self.rb._ring = None  # Force builder to reload ring
+    self._save_ring()
+    relinker.cleanup(self.testdir, self.devices, True)
+    with open(state_file, 'rt') as f:
+        self.assertEqual(json.load(f),
+                         {str(self.part): [True, True],
+                          str(self.next_part): [True, True]})
+
+def test_devices_filter_filtering(self):
+    # With no filtering, returns all devices
+    devices = relinker.devices_filter(None, "", [self.existing_device])
+    self.assertEqual(set([self.existing_device]), devices)
+
+    # With a matching filter, returns what is matching
+    devices = relinker.devices_filter(self.existing_device, "",
+                                      [self.existing_device, 'sda2'])
+    self.assertEqual(set([self.existing_device]), devices)
+
+    # With a non-matching filter, returns nothing
+    devices = relinker.devices_filter('none', "", [self.existing_device])
+    self.assertEqual(set(), devices)
+
+def test_hook_pre_post_device_locking(self):
+    locks = [None]
+    device_path = os.path.join(self.devices, self.existing_device)
+    datadir = 'object'
+    lock_file = os.path.join(device_path, '.relink.%s.lock' % datadir)
+
+    # The first run gets the lock
+    relinker.hook_pre_device(locks, {}, datadir, device_path)
+    self.assertNotEqual([None], locks)
+
+    # A following run would block
+    with self.assertRaises(IOError) as raised:
+        with open(lock_file, 'a') as f:
+            fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+    self.assertEqual(errno.EAGAIN, raised.exception.errno)
+
+    # Another must not get the lock, so it must return an empty list
+    relinker.hook_post_device(locks, "")
+    self.assertEqual([None], locks)
+
+    with open(lock_file, 'a') as f:
+        fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+def test_state_file(self):
+    device_path = os.path.join(self.devices, self.existing_device)
|
||||
datadir = 'objects'
|
||||
datadir_path = os.path.join(device_path, datadir)
|
||||
state_file = os.path.join(device_path, 'relink.%s.json' % datadir)
|
||||
|
||||
def call_partition_filter(step, parts):
|
||||
# Partition 312 will be ignored because it must have been created
|
||||
# by the relinker
|
||||
return relinker.partitions_filter(states, step,
|
||||
PART_POWER, PART_POWER + 1,
|
||||
datadir_path, parts)
|
||||
|
||||
# Start relinking
|
||||
states = {}
|
||||
|
||||
# Load the states: As it starts, it must be empty
|
||||
locks = [None]
|
||||
relinker.hook_pre_device(locks, states, datadir, device_path)
|
||||
self.assertEqual({}, states)
|
||||
os.close(locks[0]) # Release the lock
|
||||
|
||||
# Partition 312 is ignored because it must have been created with the
|
||||
# next_part_power, so it does not need to be relinked
|
||||
# 96 and 227 are reverse ordered
|
||||
# auditor_status_ALL.json is ignored because it's not a partition
|
||||
self.assertEqual(['227', '96'],
|
||||
call_partition_filter(relinker.STEP_RELINK,
|
||||
['96', '227', '312',
|
||||
'auditor_status.json']))
|
||||
self.assertEqual(states, {'96': [False, False], '227': [False, False]})
|
||||
|
||||
# Ack partition 96
|
||||
relinker.hook_post_partition(states, relinker.STEP_RELINK,
|
||||
os.path.join(datadir_path, '96'))
|
||||
self.assertEqual(states, {'96': [True, False], '227': [False, False]})
|
||||
with open(state_file, 'rt') as f:
|
||||
self.assertEqual(json.load(f), {'96': [True, False],
|
||||
'227': [False, False]})
|
||||
|
||||
# Restart relinking after only part 96 was done
|
||||
self.assertEqual(['227'],
|
||||
call_partition_filter(relinker.STEP_RELINK,
|
||||
['96', '227', '312']))
|
||||
self.assertEqual(states, {'96': [True, False], '227': [False, False]})
|
||||
|
||||
# Ack partition 227
|
||||
relinker.hook_post_partition(states, relinker.STEP_RELINK,
|
||||
os.path.join(datadir_path, '227'))
|
||||
self.assertEqual(states, {'96': [True, False], '227': [True, False]})
|
||||
with open(state_file, 'rt') as f:
|
||||
self.assertEqual(json.load(f), {'96': [True, False],
|
||||
'227': [True, False]})
|
||||
|
||||
# If the process restarts, it reload the state
|
||||
locks = [None]
|
||||
states = {}
|
||||
relinker.hook_pre_device(locks, states, datadir, device_path)
|
||||
self.assertEqual(states, {'96': [True, False], '227': [True, False]})
|
||||
os.close(locks[0]) # Release the lock
|
||||
|
||||
# Start cleanup
|
||||
self.assertEqual(['227', '96'],
|
||||
call_partition_filter(relinker.STEP_CLEANUP,
|
||||
['96', '227', '312']))
|
||||
# Ack partition 227
|
||||
relinker.hook_post_partition(states, relinker.STEP_CLEANUP,
|
||||
os.path.join(datadir_path, '227'))
|
||||
self.assertEqual(states, {'96': [True, False], '227': [True, True]})
|
||||
with open(state_file, 'rt') as f:
|
||||
self.assertEqual(json.load(f), {'96': [True, False],
|
||||
'227': [True, True]})
|
||||
|
||||
# Restart cleanup after only part 227 was done
|
||||
self.assertEqual(['96'],
|
||||
call_partition_filter(relinker.STEP_CLEANUP,
|
||||
['96', '227', '312']))
|
||||
self.assertEqual(states, {'96': [True, False], '227': [True, True]})
|
||||
|
||||
# Ack partition 96
|
||||
relinker.hook_post_partition(states, relinker.STEP_CLEANUP,
|
||||
os.path.join(datadir_path, '96'))
|
||||
self.assertEqual(states, {'96': [True, True], '227': [True, True]})
|
||||
with open(state_file, 'rt') as f:
|
||||
self.assertEqual(json.load(f), {'96': [True, True],
|
||||
'227': [True, True]})
|
||||
|
||||
# At the end, the state is still accurate
|
||||
locks = [None]
|
||||
states = {}
|
||||
relinker.hook_pre_device(locks, states, datadir, device_path)
|
||||
self.assertEqual(states, {'96': [True, True], '227': [True, True]})
|
||||
os.close(locks[0]) # Release the lock
|
||||
|
||||
# If the file gets corrupted, restart from scratch
|
||||
with open(state_file, 'wt') as f:
|
||||
f.write('NOT JSON')
|
||||
locks = [None]
|
||||
states = {}
|
||||
relinker.hook_pre_device(locks, states, datadir, device_path)
|
||||
self.assertEqual(states, {})
|
||||
os.close(locks[0]) # Release the lock
|
||||
|
||||
def test_cleanup_not_yet_relinked(self):
|
||||
self._common_test_cleanup(relink=False)
|
||||
self.assertEqual(1, relinker.cleanup(self.testdir, self.devices, True))
|
||||
|
@ -176,3 +384,7 @@ class TestRelinker(unittest.TestCase):
|
|||
|
||||
self.assertIn('failed audit and was quarantined',
|
||||
self.logger.get_lines_for_level('warning')[0])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
|
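The state-file tests above pin down the relinker's resumable bookkeeping. As a rough sketch of the idea (illustrative helper names, not the actual swift.cli.relinker code), each partition maps to a [relinked, cleaned] pair that is persisted as JSON so a restarted run can skip partitions that were already acknowledged:

import json

STEP_RELINK, STEP_CLEANUP = 'relink', 'cleanup'

def pending_partitions(states, step, part_power, parts):
    # Partitions numbered >= 2 ** part_power can only have been created
    # with the next partition power, so they never need relinking.
    pending = []
    for part in parts:
        if not part.isdigit():
            continue  # e.g. auditor status files are not partitions
        if int(part) >= 2 ** part_power:
            continue
        relinked, cleaned = states.setdefault(part, [False, False])
        done = relinked if step == STEP_RELINK else cleaned
        if not done:
            pending.append(part)
    return pending

def ack_partition(states, step, part, state_file):
    # Mark this partition done for the given step and persist, so a
    # crashed or restarted run resumes instead of redoing the work.
    states[part][0 if step == STEP_RELINK else 1] = True
    with open(state_file, 'wt') as f:
        json.dump(states, f)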
@@ -34,7 +34,7 @@ from swift.common.middleware.s3api.subresource import ACL, User, encode_acl, \
from swift.common.middleware.s3api.etree import fromstring
from swift.common.middleware.s3api.utils import mktime, S3Timestamp
from swift.common.middleware.versioned_writes.object_versioning import \
DELETE_MARKER_CONTENT_TYPE
DELETE_MARKER_CONTENT_TYPE, SYSMETA_VERSIONS_CONT, SYSMETA_VERSIONS_ENABLED

class TestS3ApiObj(S3ApiTestCase):

@@ -402,6 +402,10 @@ class TestS3ApiObj(S3ApiTestCase):

@s3acl
def test_object_GET_version_id(self):
self.swift.register(
'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent,
{SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket'}, None)

# GET current version
req = Request.blank('/bucket/object?versionId=null',
environ={'REQUEST_METHOD': 'GET'},

@@ -452,6 +456,28 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '404')

@s3acl(versioning_enabled=False)
def test_object_GET_with_version_id_but_not_enabled(self):
# Version not found
self.swift.register(
'HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, {}, None)
req = Request.blank('/bucket/object?versionId=A',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '404')
elem = fromstring(body, 'Error')
self.assertEqual(elem.find('Code').text, 'NoSuchVersion')
self.assertEqual(elem.find('Key').text, 'object')
self.assertEqual(elem.find('VersionId').text, 'A')
expected_calls = []
if not self.swift.s3_acl:
expected_calls.append(('HEAD', '/v1/AUTH_test/bucket'))
# NB: No actual backend GET!
self.assertEqual(expected_calls, self.swift.calls)

@s3acl
def test_object_PUT_error(self):
code = self._test_method_error('PUT', '/bucket/object',

@@ -1100,6 +1126,9 @@ class TestS3ApiObj(S3ApiTestCase):
def test_object_DELETE_old_version_id(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
self.swift.register(
'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent,
{SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket'}, None)
resp_headers = {'X-Object-Current-Version-Id': '1574360804.34906'}
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293',

@@ -1111,6 +1140,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'

@@ -1118,6 +1148,11 @@ class TestS3ApiObj(S3ApiTestCase):
], self.swift.calls)

def test_object_DELETE_current_version_id(self):
self.swift.register(
'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
SYSMETA_VERSIONS_ENABLED: True},
None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}

@@ -1142,6 +1177,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'

@@ -1152,6 +1188,22 @@ class TestS3ApiObj(S3ApiTestCase):
'?version-id=1574341899.21751'),
], self.swift.calls)

@s3acl(versioning_enabled=False)
def test_object_DELETE_with_version_id_but_not_enabled(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, {}, None)
req = Request.blank('/bucket/object?versionId=1574358170.12293',
method='DELETE', headers={
'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
expected_calls = []
if not self.swift.s3_acl:
expected_calls.append(('HEAD', '/v1/AUTH_test/bucket'))
# NB: No actual backend DELETE!
self.assertEqual(expected_calls, self.swift.calls)

def test_object_DELETE_version_id_not_implemented(self):
req = Request.blank('/bucket/object?versionId=1574358170.12293',
method='DELETE', headers={

@@ -1164,6 +1216,11 @@ class TestS3ApiObj(S3ApiTestCase):
self.assertEqual(status.split()[0], '501', body)

def test_object_DELETE_current_version_id_is_delete_marker(self):
self.swift.register(
'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
SYSMETA_VERSIONS_ENABLED: True},
None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}

@@ -1184,6 +1241,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'

@@ -1193,6 +1251,11 @@ class TestS3ApiObj(S3ApiTestCase):
], self.swift.calls)

def test_object_DELETE_current_version_id_is_missing(self):
self.swift.register(
'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
SYSMETA_VERSIONS_ENABLED: True},
None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}

@@ -1223,6 +1286,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'

@@ -1236,6 +1300,11 @@ class TestS3ApiObj(S3ApiTestCase):
], self.swift.calls)

def test_object_DELETE_current_version_id_GET_error(self):
self.swift.register(
'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
SYSMETA_VERSIONS_ENABLED: True},
None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}

@@ -1251,6 +1320,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual([
('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'

@@ -1260,6 +1330,11 @@ class TestS3ApiObj(S3ApiTestCase):
], self.swift.calls)

def test_object_DELETE_current_version_id_PUT_error(self):
self.swift.register(
'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
SYSMETA_VERSIONS_ENABLED: True},
None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}

@@ -1283,6 +1358,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual([
('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'

@@ -1325,10 +1401,13 @@ class TestS3ApiObj(S3ApiTestCase):
'X-Object-Version-Id': '1574701081.61553'}
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object',
swob.HTTPNoContent, resp_headers, None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, {
'X-Container-Sysmeta-Versions-Enabled': True},
None)
self.swift.register(
'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
SYSMETA_VERSIONS_ENABLED: True},
None)
self.swift.register('HEAD', '/v1/AUTH_test/\x00versions\x00bucket',
swob.HTTPNoContent, {}, None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPNotFound, self.response_headers, None)
req = Request.blank('/bucket/object?versionId=1574701081.61553',

@@ -1338,10 +1417,12 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574701081.61553'),
('HEAD', '/v1/AUTH_test'),
('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/\x00versions\x00bucket'),
('DELETE', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574701081.61553'),
], self.swift.calls)
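A thread running through these GET/DELETE tests: when the bucket has no versions container configured, a versionId-qualified request is answered without any backend object request at all. A hedged sketch of that decision, with hypothetical names rather than s3api's actual control flow:

def handle_versioned_request(method, version_id, container_info):
    # container_info['sysmeta'] comes from a HEAD on the container;
    # the 'versions-container' key is what marks the bucket versioned
    versioned = bool(
        container_info.get('sysmeta', {}).get('versions-container'))
    if version_id and version_id != 'null' and not versioned:
        if method == 'GET':
            return '404 NoSuchVersion'  # NB: no actual backend GET
        if method == 'DELETE':
            return '204 No Content'     # NB: no actual backend DELETE
    return 'proxy the request to the backend'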
@@ -34,13 +34,16 @@ from test.unit.common.middleware.s3api import FakeSwift
XMLNS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'

def s3acl(func=None, s3acl_only=False):
def s3acl(func=None, s3acl_only=False, versioning_enabled=True):
"""
NOTE: the s3acl decorator needs an instance of the s3api testing
framework (i.e. an instance as the first argument is necessary).
"""
if func is None:
return functools.partial(s3acl, s3acl_only=s3acl_only)
return functools.partial(
s3acl,
s3acl_only=s3acl_only,
versioning_enabled=versioning_enabled)

@functools.wraps(func)
def s3acl_decorator(*args, **kwargs):

@@ -57,9 +60,14 @@ def s3acl(func=None, s3acl_only=False):
# @patch(xxx)
# def test_xxxx(self)

fake_info = {'status': 204}
if versioning_enabled:
fake_info['sysmeta'] = {
'versions-container': '\x00versions\x00bucket',
}

with patch('swift.common.middleware.s3api.s3request.'
'get_container_info',
return_value={'status': 204}):
'get_container_info', return_value=fake_info):
func(*args, **kwargs)
except AssertionError:
# Make traceback message to clarify the assertion
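The rewritten s3acl above is the classic optional-keyword decorator pattern: called bare it receives the function directly, called with arguments it returns a functools.partial of itself that receives the function on the second call. In isolation, and independent of s3api:

import functools

def deco(func=None, flag=True):
    if func is None:
        # invoked as @deco(flag=...); defer until we get the function
        return functools.partial(deco, flag=flag)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # flag is closed over and available at call time
        return func(*args, **kwargs)
    return wrapper

@deco              # bare form
def f():
    return 1

@deco(flag=False)  # parametrized form
def g():
    return 2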
@@ -72,12 +72,20 @@ class FakeMemcache(object):

class FakeApp(object):
skip_handled_check = False

def __call__(self, env, start_response):
assert self.skip_handled_check or env.get('swift.ratelimit.handled')
start_response('200 OK', [])
return [b'Some Content']

class FakeReq(object):
def __init__(self, method, env=None):
self.method = method
self.environ = env or {}

def start_response(*args):
pass

@@ -160,36 +168,29 @@ class TestRateLimit(unittest.TestCase):
{'object_count': '5'}
the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
the_app.memcache_client = fake_memcache
req = lambda: None
req.environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
req.method = 'DELETE'
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', None, None)), 0)
req.method = 'PUT'
FakeReq('DELETE', environ), 'a', None, None)), 0)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None)), 1)
req.method = 'DELETE'
FakeReq('PUT', environ), 'a', 'c', None)), 1)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None)), 1)
req.method = 'GET'
FakeReq('DELETE', environ), 'a', 'c', None)), 1)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', 'o')), 0)
req.method = 'PUT'
FakeReq('GET', environ), 'a', 'c', 'o')), 0)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', 'o')), 1)
FakeReq('PUT', environ), 'a', 'c', 'o')), 1)

req.method = 'PUT'
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None, global_ratelimit=10)), 2)
FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)), 2)
self.assertEqual(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None, global_ratelimit=10)[1],
FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)[1],
('ratelimit/global-write/a', 10))

req.method = 'PUT'
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None, global_ratelimit='notafloat')), 1)
FakeReq('PUT', environ), 'a', 'c', None,
global_ratelimit='notafloat')), 1)

def test_memcached_container_info_dict(self):
mdict = headers_to_container_info({'x-container-object-count': '45'})

@@ -204,9 +205,8 @@ class TestRateLimit(unittest.TestCase):
{'container_size': 5}
the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
the_app.memcache_client = fake_memcache
req = lambda: None
req.method = 'PUT'
req.environ = {'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache}
req = FakeReq('PUT', {
'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache})
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
tuples = the_app.get_ratelimitable_key_tuples(req, 'a', 'c', 'o')

@@ -227,8 +227,8 @@ class TestRateLimit(unittest.TestCase):
req = Request.blank('/v1/a%s/c' % meth)
req.method = meth
req.environ['swift.cache'] = FakeMemcache()
make_app_call = lambda: self.test_ratelimit(req.environ,
start_response)
make_app_call = lambda: self.test_ratelimit(
req.environ.copy(), start_response)
begin = time.time()
self._run(make_app_call, num_calls, current_rate,
check_time=bool(exp_time))

@@ -244,7 +244,7 @@ class TestRateLimit(unittest.TestCase):
req.method = 'PUT'
req.environ['swift.cache'] = FakeMemcache()
req.environ['swift.cache'].init_incr_return_neg = True
make_app_call = lambda: self.test_ratelimit(req.environ,
make_app_call = lambda: self.test_ratelimit(req.environ.copy(),
start_response)
begin = time.time()
with mock.patch('swift.common.middleware.ratelimit.get_account_info',

@@ -260,15 +260,15 @@ class TestRateLimit(unittest.TestCase):
'account_whitelist': 'a',
'account_blacklist': 'b'}
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
req = Request.blank('/')
with mock.patch.object(self.test_ratelimit,
'memcache_client', FakeMemcache()):
self.assertEqual(
self.test_ratelimit.handle_ratelimit(req, 'a', 'c', 'o'),
self.test_ratelimit.handle_ratelimit(
Request.blank('/'), 'a', 'c', 'o'),
None)
self.assertEqual(
self.test_ratelimit.handle_ratelimit(
req, 'b', 'c', 'o').status_int,
Request.blank('/'), 'b', 'c', 'o').status_int,
497)

def test_ratelimit_whitelist_sysmeta(self):

@@ -331,7 +331,7 @@ class TestRateLimit(unittest.TestCase):
self.parent = parent

def run(self):
self.result = self.parent.test_ratelimit(req.environ,
self.result = self.parent.test_ratelimit(req.environ.copy(),
start_response)

def get_fake_ratelimit(*args, **kwargs):

@@ -370,18 +370,17 @@ class TestRateLimit(unittest.TestCase):
# simulates 4 requests coming in at the same time, then sleeping
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
print(repr(r))
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Some Content')

def test_ratelimit_max_rate_double_container(self):

@@ -404,17 +403,17 @@ class TestRateLimit(unittest.TestCase):
# simulates 4 requests coming in at the same time, then sleeping
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Some Content')

def test_ratelimit_max_rate_double_container_listing(self):

@@ -437,17 +436,17 @@ class TestRateLimit(unittest.TestCase):
lambda *args, **kwargs: {}):
time_override = [0, 0, 0, 0, None]
# simulates 4 requests coming in at the same time, then sleeping
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Some Content')
mc = self.test_ratelimit.memcache_client
try:

@@ -466,9 +465,6 @@ class TestRateLimit(unittest.TestCase):

the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
the_app.memcache_client = fake_memcache
req = lambda: None
req.method = 'PUT'
req.environ = {}

class rate_caller(threading.Thread):

@@ -478,8 +474,8 @@ class TestRateLimit(unittest.TestCase):

def run(self):
for j in range(num_calls):
self.result = the_app.handle_ratelimit(req, self.myname,
'c', None)
self.result = the_app.handle_ratelimit(
FakeReq('PUT'), self.myname, 'c', None)

with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):

@@ -541,7 +537,9 @@ class TestRateLimit(unittest.TestCase):
current_rate = 13
num_calls = 5
conf_dict = {'account_ratelimit': current_rate}
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
fake_app = FakeApp()
fake_app.skip_handled_check = True
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(fake_app)
req = Request.blank('/v1/a')
req.environ['swift.cache'] = None
make_app_call = lambda: self.test_ratelimit(req.environ,

@@ -551,6 +549,24 @@ class TestRateLimit(unittest.TestCase):
time_took = time.time() - begin
self.assertEqual(round(time_took, 1), 0)  # no memcache, no limiting

def test_already_handled(self):
current_rate = 13
num_calls = 5
conf_dict = {'container_listing_ratelimit_0': current_rate}
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
fake_cache = FakeMemcache()
fake_cache.set(
get_cache_key('a', 'c'),
{'object_count': 1})
req = Request.blank('/v1/a/c', environ={'swift.cache': fake_cache})
req.environ['swift.ratelimit.handled'] = True
make_app_call = lambda: self.test_ratelimit(req.environ,
start_response)
begin = time.time()
self._run(make_app_call, num_calls, current_rate, check_time=False)
time_took = time.time() - begin
self.assertEqual(round(time_took, 1), 0)  # no memcache, no limiting

def test_restarting_memcache(self):
current_rate = 2
num_calls = 5
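FakeApp's assertion above documents the contract: the middleware stamps every WSGI environ it has processed so that a second copy of the middleware in the pipeline, or a resubmitted environ, is not limited twice. A minimal sketch of that flag handling (illustrative, not the middleware's exact internals):

class RateLimitOnce(object):
    def __init__(self, app):
        self.app = app

    def __call__(self, env, start_response):
        if env.get('swift.ratelimit.handled'):
            # someone upstream already applied the limit; pass through
            return self.app(env, start_response)
        env['swift.ratelimit.handled'] = True
        # ... rate-limiting checks would go here ...
        return self.app(env, start_response)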
@@ -24,6 +24,7 @@ from swift.common import swob
from swift.common.middleware import symlink, copy, versioned_writes, \
listing_formats
from swift.common.swob import Request
from swift.common.request_helpers import get_reserved_name
from swift.common.utils import MD5_OF_EMPTY_STRING, get_swift_info
from test.unit.common.middleware.helpers import FakeSwift
from test.unit.common.middleware.test_versioned_writes import FakeCache

@@ -618,6 +619,55 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
self.assertEqual(req_headers, calls[1].headers)
self.assertFalse(calls[2:])

def test_get_symlink_to_reserved_object(self):
cont = get_reserved_name('versioned')
obj = get_reserved_name('symlink', '9999998765.99999')
symlink_target = "%s/%s" % (cont, obj)
version_path = '/v1/a/%s' % symlink_target
self.app.register('GET', '/v1/a/versioned/symlink', swob.HTTPOk, {
symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: symlink_target,
symlink.ALLOW_RESERVED_NAMES: 'true',
'x-object-sysmeta-symlink-target-etag': MD5_OF_EMPTY_STRING,
'x-object-sysmeta-symlink-target-bytes': '0',
})
self.app.register('GET', version_path, swob.HTTPOk, {})
req = Request.blank('/v1/a/versioned/symlink', headers={
'Range': 'foo', 'If-Match': 'bar'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('Content-Location', version_path), headers)
self.assertEqual(len(self.authorized), 1)
self.assertNotIn('X-Backend-Allow-Reserved-Names',
self.app.calls_with_headers[0])
call_headers = self.app.calls_with_headers[1].headers
self.assertEqual('true', call_headers[
'X-Backend-Allow-Reserved-Names'])
self.assertEqual('foo', call_headers['Range'])
self.assertEqual('bar', call_headers['If-Match'])

def test_get_symlink_to_reserved_symlink(self):
cont = get_reserved_name('versioned')
obj = get_reserved_name('symlink', '9999998765.99999')
symlink_target = "%s/%s" % (cont, obj)
version_path = '/v1/a/%s' % symlink_target
self.app.register('GET', '/v1/a/versioned/symlink', swob.HTTPOk, {
symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: symlink_target,
symlink.ALLOW_RESERVED_NAMES: 'true',
'x-object-sysmeta-symlink-target-etag': MD5_OF_EMPTY_STRING,
'x-object-sysmeta-symlink-target-bytes': '0',
})
self.app.register('GET', version_path, swob.HTTPOk, {
symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: 'unversioned/obj',
'ETag': MD5_OF_EMPTY_STRING,
})
self.app.register('GET', '/v1/a/unversioned/obj', swob.HTTPOk, {
})
req = Request.blank('/v1/a/versioned/symlink')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('Content-Location', '/v1/a/unversioned/obj'), headers)
self.assertEqual(len(self.authorized), 2)

def test_symlink_too_deep(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym1'})
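These tests build names with get_reserved_name, which places them in a null-byte-prefixed namespace that clients cannot address directly. Assuming the helper joins its parts with null bytes (which matches the '\x00versions\x00bucket' literals used elsewhere in this commit), the constructed values look like:

from swift.common.request_helpers import get_reserved_name

print(repr(get_reserved_name('versioned')))
# expected: '\x00versioned'
print(repr(get_reserved_name('symlink', '9999998765.99999')))
# expected: '\x00symlink\x009999998765.99999'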
@@ -1154,7 +1154,7 @@ class TestDatabaseBroker(unittest.TestCase):
return broker

# only testing _reclaim_metadata here
@patch.object(DatabaseBroker, '_reclaim')
@patch.object(DatabaseBroker, '_reclaim', return_value='')
def test_metadata(self, mock_reclaim):
# Initializes a good broker for us
broker = self.get_replication_info_tester(metadata=True)
@@ -20,6 +20,7 @@ from collections import defaultdict
import errno
from hashlib import md5
import io
import logging
import six
import socket
import time

@@ -184,9 +185,14 @@ class TestMemcached(unittest.TestCase):

def setUp(self):
self.logger = debug_logger()
patcher = mock.patch('swift.common.memcached.logging', self.logger)
self.addCleanup(patcher.stop)
patcher.start()

def test_logger_kwarg(self):
server_socket = '%s:%s' % ('[::1]', 11211)
client = memcached.MemcacheRing([server_socket])
self.assertIs(client.logger, logging.getLogger())

client = memcached.MemcacheRing([server_socket], logger=self.logger)
self.assertIs(client.logger, self.logger)

def test_get_conns(self):
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

@@ -202,7 +208,8 @@ class TestMemcached(unittest.TestCase):
sock2ipport = '%s:%s' % (sock2ip, memcached.DEFAULT_MEMCACHED_PORT)
# We're deliberately using sock2ip (no port) here to test that the
# default port is used.
memcache_client = memcached.MemcacheRing([sock1ipport, sock2ip])
memcache_client = memcached.MemcacheRing([sock1ipport, sock2ip],
logger=self.logger)
one = two = True
while one or two:  # Run until we match hosts one and two
key = uuid4().hex.encode('ascii')

@@ -230,7 +237,8 @@ class TestMemcached(unittest.TestCase):
sock.listen(1)
sock_addr = sock.getsockname()
server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
memcache_client = memcached.MemcacheRing([server_socket])
memcache_client = memcached.MemcacheRing([server_socket],
logger=self.logger)
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()

@@ -251,7 +259,8 @@ class TestMemcached(unittest.TestCase):
server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
server_host = '[%s]' % sock_addr[0]
memcached.DEFAULT_MEMCACHED_PORT = sock_addr[1]
memcache_client = memcached.MemcacheRing([server_host])
memcache_client = memcached.MemcacheRing([server_host],
logger=self.logger)
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()

@@ -265,7 +274,7 @@ class TestMemcached(unittest.TestCase):
with self.assertRaises(ValueError):
# IPv6 address with missing [] is invalid
server_socket = '%s:%s' % ('::1', 11211)
memcached.MemcacheRing([server_socket])
memcached.MemcacheRing([server_socket], logger=self.logger)

def test_get_conns_hostname(self):
with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:

@@ -279,7 +288,8 @@ class TestMemcached(unittest.TestCase):
addrinfo.return_value = [(socket.AF_INET,
socket.SOCK_STREAM, 0, '',
('127.0.0.1', sock_addr[1]))]
memcache_client = memcached.MemcacheRing([server_socket])
memcache_client = memcached.MemcacheRing([server_socket],
logger=self.logger)
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()

@@ -304,7 +314,8 @@ class TestMemcached(unittest.TestCase):
addrinfo.return_value = [(socket.AF_INET6,
socket.SOCK_STREAM, 0, '',
('::1', sock_addr[1]))]
memcache_client = memcached.MemcacheRing([server_socket])
memcache_client = memcached.MemcacheRing([server_socket],
logger=self.logger)
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()

@@ -317,7 +328,8 @@ class TestMemcached(unittest.TestCase):
sock.close()

def test_set_get_json(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)

@@ -350,7 +362,8 @@ class TestMemcached(unittest.TestCase):
self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)

def test_get_failed_connection_mid_request(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)

@@ -365,18 +378,17 @@ class TestMemcached(unittest.TestCase):
# force the logging through the DebugLogger instead of the nose
# handler. This will use stdout, so we can assert that no stack trace
# is logged.
logger = debug_logger()
with patch("sys.stdout", fake_stdout),\
patch('swift.common.memcached.logging', logger):
with patch("sys.stdout", fake_stdout):
mock.read_return_empty_str = True
self.assertIsNone(memcache_client.get('some_key'))
log_lines = logger.get_lines_for_level('error')
log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Error talking to memcached', log_lines[0])
self.assertFalse(log_lines[1:])
self.assertNotIn("Traceback", fake_stdout.getvalue())

def test_incr(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)

@@ -396,7 +408,8 @@ class TestMemcached(unittest.TestCase):
self.assertTrue(mock.close_called)

def test_incr_failed_connection_mid_request(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)

@@ -411,19 +424,18 @@ class TestMemcached(unittest.TestCase):
# force the logging through the DebugLogger instead of the nose
# handler. This will use stdout, so we can assert that no stack trace
# is logged.
logger = debug_logger()
with patch("sys.stdout", fake_stdout), \
patch('swift.common.memcached.logging', logger):
with patch("sys.stdout", fake_stdout):
mock.read_return_empty_str = True
self.assertRaises(memcached.MemcacheConnectionError,
memcache_client.incr, 'some_key', delta=1)
log_lines = logger.get_lines_for_level('error')
log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Error talking to memcached', log_lines[0])
self.assertFalse(log_lines[1:])
self.assertNotIn('Traceback', fake_stdout.getvalue())

def test_incr_w_timeout(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)

@@ -455,7 +467,8 @@ class TestMemcached(unittest.TestCase):
self.assertEqual(mock.cache, {cache_key: (b'0', b'0', b'10')})

def test_decr(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)

@@ -473,7 +486,7 @@ class TestMemcached(unittest.TestCase):

def test_retry(self):
memcache_client = memcached.MemcacheRing(
['1.2.3.4:11211', '1.2.3.5:11211'])
['1.2.3.4:11211', '1.2.3.5:11211'], logger=self.logger)
mock1 = ExplodingMockMemcached()
mock2 = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(

@@ -500,7 +513,8 @@ class TestMemcached(unittest.TestCase):
[])

def test_delete(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)

@@ -510,7 +524,8 @@ class TestMemcached(unittest.TestCase):
self.assertIsNone(memcache_client.get('some_key'))

def test_multi(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)

@@ -560,7 +575,8 @@ class TestMemcached(unittest.TestCase):

def test_multi_delete(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211',
'1.2.3.5:11211'])
'1.2.3.5:11211'],
logger=self.logger)
mock1 = MockMemcached()
mock2 = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(

@@ -598,7 +614,8 @@ class TestMemcached(unittest.TestCase):

def test_serialization(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
allow_pickle=True)
allow_pickle=True,
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)

@@ -643,7 +660,8 @@ class TestMemcached(unittest.TestCase):
mock_sock.connect = wait_connect

memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
connect_timeout=10)
connect_timeout=10,
logger=self.logger)
# sanity
self.assertEqual(1, len(memcache_client._client_cache))
for server, pool in memcache_client._client_cache.items():

@@ -702,7 +720,8 @@ class TestMemcached(unittest.TestCase):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211',
'1.2.3.5:11211'],
io_timeout=0.5,
pool_timeout=0.1)
pool_timeout=0.1,
logger=self.logger)

# Hand out a couple slow connections to 1.2.3.5, leaving 1.2.3.4
# fast. All ten (10) clients should try to talk to .5 first, and
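Every hunk above makes the same change: thread the test's logger into MemcacheRing instead of leaning on the module-level logging that setUp now patches. The resulting pattern, using the logger kwarg this diff exercises:

from swift.common import memcached

def make_client(logger):
    # errors raised while talking to memcached are reported through the
    # injected logger, so tests can assert on
    # logger.get_lines_for_level('error')
    return memcached.MemcacheRing(['127.0.0.1:11211'], logger=logger)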
@@ -914,6 +914,22 @@ class TestRequest(unittest.TestCase):
self.assertEqual(used_req[0].path, '/hi/there')
self.assertEqual(resp.status_int, 200)

def test_wsgify_method(self):
class _wsgi_class(object):
def __init__(self):
self.used_req = []

@swob.wsgify
def __call__(self, req):
self.used_req.append(req)
return swob.Response(b'200 OK')

req = swob.Request.blank('/hi/there')
handler = _wsgi_class()
resp = req.get_response(handler)
self.assertIs(handler.used_req[0].environ, req.environ)
self.assertEqual(resp.status_int, 200)

def test_wsgify_raise(self):
used_req = []
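test_wsgify_method shows the decorator now works on methods as well as plain functions. A generic sketch of what a wsgify-style decorator does (not swob's exact implementation): wrap a Request -> Response callable as a WSGI app, passing any leading self through untouched:

import functools
from swift.common.swob import Request

def wsgify(func):
    @functools.wraps(func)
    def wsgi_app(*args):
        env, start_response = args[-2:]
        # args[:-2] is () for functions, (self,) for methods
        call_args = args[:-2] + (Request(env),)
        resp = func(*call_args)
        return resp(env, start_response)  # swob Responses are WSGI apps
    return wsgi_app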
@@ -6080,6 +6080,136 @@ class TestAuditLocationGenerator(unittest.TestCase):
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])

def test_hooks(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix1")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash1")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
hook_pre_device = MagicMock()
hook_post_device = MagicMock()
hook_pre_partition = MagicMock()
hook_post_partition = MagicMock()
hook_pre_suffix = MagicMock()
hook_post_suffix = MagicMock()
hook_pre_hash = MagicMock()
hook_post_hash = MagicMock()
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger,
hook_pre_device=hook_pre_device,
hook_post_device=hook_post_device,
hook_pre_partition=hook_pre_partition,
hook_post_partition=hook_post_partition,
hook_pre_suffix=hook_pre_suffix,
hook_post_suffix=hook_post_suffix,
hook_pre_hash=hook_pre_hash,
hook_post_hash=hook_post_hash
)
list(locations)
hook_pre_device.assert_called_once_with(os.path.join(tmpdir,
"drive"))
hook_post_device.assert_called_once_with(os.path.join(tmpdir,
"drive"))
hook_pre_partition.assert_called_once_with(partition)
hook_post_partition.assert_called_once_with(partition)
hook_pre_suffix.assert_called_once_with(suffix)
hook_post_suffix.assert_called_once_with(suffix)
hook_pre_hash.assert_called_once_with(hash_path)
hook_post_hash.assert_called_once_with(hash_path)

def test_filters(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix1")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash1")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass

def audit_location_generator(**kwargs):
return utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger,
**kwargs)

# Return the list of devices

with patch('os.listdir', side_effect=os.listdir) as m_listdir:
# devices_filter
m_listdir.reset_mock()
devices_filter = MagicMock(return_value=["drive"])
list(audit_location_generator(devices_filter=devices_filter))
devices_filter.assert_called_once_with(tmpdir, ["drive"])
self.assertIn(((data,),), m_listdir.call_args_list)

m_listdir.reset_mock()
devices_filter = MagicMock(return_value=[])
list(audit_location_generator(devices_filter=devices_filter))
devices_filter.assert_called_once_with(tmpdir, ["drive"])
self.assertNotIn(((data,),), m_listdir.call_args_list)

# partitions_filter
m_listdir.reset_mock()
partitions_filter = MagicMock(return_value=["partition1"])
list(audit_location_generator(
partitions_filter=partitions_filter))
partitions_filter.assert_called_once_with(data,
["partition1"])
self.assertIn(((partition,),), m_listdir.call_args_list)

m_listdir.reset_mock()
partitions_filter = MagicMock(return_value=[])
list(audit_location_generator(
partitions_filter=partitions_filter))
partitions_filter.assert_called_once_with(data,
["partition1"])
self.assertNotIn(((partition,),), m_listdir.call_args_list)

# suffixes_filter
m_listdir.reset_mock()
suffixes_filter = MagicMock(return_value=["suffix1"])
list(audit_location_generator(suffixes_filter=suffixes_filter))
suffixes_filter.assert_called_once_with(partition, ["suffix1"])
self.assertIn(((suffix,),), m_listdir.call_args_list)

m_listdir.reset_mock()
suffixes_filter = MagicMock(return_value=[])
list(audit_location_generator(suffixes_filter=suffixes_filter))
suffixes_filter.assert_called_once_with(partition, ["suffix1"])
self.assertNotIn(((suffix,),), m_listdir.call_args_list)

# hashes_filter
m_listdir.reset_mock()
hashes_filter = MagicMock(return_value=["hash1"])
list(audit_location_generator(hashes_filter=hashes_filter))
hashes_filter.assert_called_once_with(suffix, ["hash1"])
self.assertIn(((hash_path,),), m_listdir.call_args_list)

m_listdir.reset_mock()
hashes_filter = MagicMock(return_value=[])
list(audit_location_generator(hashes_filter=hashes_filter))
hashes_filter.assert_called_once_with(suffix, ["hash1"])
self.assertNotIn(((hash_path,),), m_listdir.call_args_list)

class TestGreenAsyncPile(unittest.TestCase):
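The two tests above fix the contract for the new hook and filter kwargs on utils.audit_location_generator: each *_filter receives the parent path plus the raw listing and returns the subset to descend into. A simplified sketch of the walk (the real generator also handles mount checks, logging and error paths):

import os

def walk(devices_root, datadir, devices_filter=None, partitions_filter=None):
    device_dirs = os.listdir(devices_root)
    if devices_filter:
        device_dirs = devices_filter(devices_root, device_dirs)
    for device in device_dirs:
        datadir_path = os.path.join(devices_root, device, datadir)
        parts = os.listdir(datadir_path)
        if partitions_filter:
            parts = partitions_filter(datadir_path, parts)
        for part in parts:
            # suffixes_filter and hashes_filter continue the same
            # pattern one and two levels further down the tree
            yield os.path.join(datadir_path, part)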
@@ -7224,7 +7354,8 @@ class TestShardRange(unittest.TestCase):
upper='', object_count=0, bytes_used=0,
meta_timestamp=ts_1.internal, deleted=0,
state=utils.ShardRange.FOUND,
state_timestamp=ts_1.internal, epoch=None)
state_timestamp=ts_1.internal, epoch=None,
reported=0)
assert_initialisation_ok(dict(empty_run, name='a/c', timestamp=ts_1),
expect)
assert_initialisation_ok(dict(name='a/c', timestamp=ts_1), expect)

@@ -7233,11 +7364,13 @@ class TestShardRange(unittest.TestCase):
upper='u', object_count=2, bytes_used=10,
meta_timestamp=ts_2, deleted=0,
state=utils.ShardRange.CREATED,
state_timestamp=ts_3.internal, epoch=ts_4)
state_timestamp=ts_3.internal, epoch=ts_4,
reported=0)
expect.update({'lower': 'l', 'upper': 'u', 'object_count': 2,
'bytes_used': 10, 'meta_timestamp': ts_2.internal,
'state': utils.ShardRange.CREATED,
'state_timestamp': ts_3.internal, 'epoch': ts_4})
'state_timestamp': ts_3.internal, 'epoch': ts_4,
'reported': 0})
assert_initialisation_ok(good_run.copy(), expect)

# obj count and bytes used as int strings

@@ -7255,6 +7388,11 @@ class TestShardRange(unittest.TestCase):
assert_initialisation_ok(good_deleted,
dict(expect, deleted=1))

good_reported = good_run.copy()
good_reported['reported'] = 1
assert_initialisation_ok(good_reported,
dict(expect, reported=1))

assert_initialisation_fails(dict(good_run, timestamp='water balloon'))

assert_initialisation_fails(

@@ -7293,7 +7431,7 @@ class TestShardRange(unittest.TestCase):
'upper': upper, 'object_count': 10, 'bytes_used': 100,
'meta_timestamp': ts_2.internal, 'deleted': 0,
'state': utils.ShardRange.FOUND, 'state_timestamp': ts_3.internal,
'epoch': ts_4}
'epoch': ts_4, 'reported': 0}
self.assertEqual(expected, sr_dict)
self.assertIsInstance(sr_dict['lower'], six.string_types)
self.assertIsInstance(sr_dict['upper'], six.string_types)

@@ -7308,6 +7446,14 @@ class TestShardRange(unittest.TestCase):
for key in sr_dict:
bad_dict = dict(sr_dict)
bad_dict.pop(key)
if key == 'reported':
# This was added after the fact, and we need to be able to eat
# data from old servers
utils.ShardRange.from_dict(bad_dict)
utils.ShardRange(**bad_dict)
continue

# The rest were present from the beginning
with self.assertRaises(KeyError):
utils.ShardRange.from_dict(bad_dict)
# But __init__ still (generally) works!
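The loop above encodes the compatibility rule for the new reported field: deserialization must tolerate dicts from servers that predate it, while the original keys still hard-fail. In miniature (a sketch, not ShardRange's actual implementation):

def shard_range_from_dict(params):
    required = ('name', 'timestamp', 'lower', 'upper', 'object_count',
                'bytes_used', 'meta_timestamp', 'deleted', 'state',
                'state_timestamp', 'epoch')
    missing = [k for k in required if k not in params]
    if missing:
        raise KeyError(missing)  # keys present from the beginning
    # 'reported' arrived later, so it defaults rather than raising
    return dict(params, reported=params.get('reported', 0))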
@ -28,6 +28,7 @@ from contextlib import contextmanager
|
|||
import sqlite3
|
||||
import pickle
|
||||
import json
|
||||
import itertools
|
||||
|
||||
import six
|
||||
|
||||
|
@@ -558,6 +559,98 @@ class TestContainerBroker(unittest.TestCase):
        broker.reclaim(Timestamp.now().internal, time())
        broker.delete_db(Timestamp.now().internal)

    def test_batch_reclaim(self):
        num_of_objects = 60
        obj_specs = []
        now = time()
        top_of_the_minute = now - (now % 60)
        c = itertools.cycle([True, False])
        for m, is_deleted in six.moves.zip(range(num_of_objects), c):
            offset = top_of_the_minute - (m * 60)
            obj_specs.append((Timestamp(offset), is_deleted))
        random.seed(now)
        random.shuffle(obj_specs)
        policy_indexes = list(p.idx for p in POLICIES)
        broker = ContainerBroker(':memory:', account='test_account',
                                 container='test_container')
        broker.initialize(Timestamp('1').internal, 0)
        for i, obj_spec in enumerate(obj_specs):
            # with object12 before object2 and shuffled ts.internal we
            # shouldn't be able to accidentally rely on any implicit ordering
            obj_name = 'object%s' % i
            pidx = random.choice(policy_indexes)
            ts, is_deleted = obj_spec
            if is_deleted:
                broker.delete_object(obj_name, ts.internal, pidx)
            else:
                broker.put_object(obj_name, ts.internal, 0, 'text/plain',
                                  'etag', storage_policy_index=pidx)

        def count_reclaimable(conn, reclaim_age):
            return conn.execute(
                "SELECT count(*) FROM object "
                "WHERE deleted = 1 AND created_at < ?", (reclaim_age,)
            ).fetchone()[0]

        # This is intended to divide the set of timestamps exactly in half
        # regardless of the value of now
        reclaim_age = top_of_the_minute + 1 - (num_of_objects / 2 * 60)
        with broker.get() as conn:
            self.assertEqual(count_reclaimable(conn, reclaim_age),
                             num_of_objects / 4)

        orig__reclaim = broker._reclaim
        trace = []

        def tracing_reclaim(conn, age_timestamp, marker):
            trace.append((age_timestamp, marker,
                          count_reclaimable(conn, age_timestamp)))
            return orig__reclaim(conn, age_timestamp, marker)

        with mock.patch.object(broker, '_reclaim', new=tracing_reclaim), \
                mock.patch('swift.common.db.RECLAIM_PAGE_SIZE', 10):
            broker.reclaim(reclaim_age, reclaim_age)

        with broker.get() as conn:
            self.assertEqual(count_reclaimable(conn, reclaim_age), 0)
        self.assertEqual(3, len(trace), trace)
        self.assertEqual([age for age, marker, reclaimable in trace],
                         [reclaim_age] * 3)
        # markers are in-order
        self.assertLess(trace[0][1], trace[1][1])
        self.assertLess(trace[1][1], trace[2][1])
        # reclaimable count gradually decreases
        # generally, count1 > count2 > count3, but because of the randomness
        # we may occasionally have count1 == count2 or count2 == count3
        self.assertGreaterEqual(trace[0][2], trace[1][2])
        self.assertGreaterEqual(trace[1][2], trace[2][2])
        # technically, this might happen occasionally, but *really* rarely
        self.assertTrue(trace[0][2] > trace[1][2] or
                        trace[1][2] > trace[2][2])

    def test_reclaim_with_duplicate_names(self):
        broker = ContainerBroker(':memory:', account='test_account',
                                 container='test_container')
        broker.initialize(Timestamp('1').internal, 0)
        now = time()
        ages_ago = Timestamp(now - (3 * 7 * 24 * 60 * 60))
        for i in range(10):
            for spidx in range(10):
                obj_name = 'object%s' % i
                broker.delete_object(obj_name, ages_ago.internal, spidx)
        reclaim_age = now - (2 * 7 * 24 * 60 * 60)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM object "
                "WHERE created_at < ?", (reclaim_age,)
            ).fetchone()[0], 100)
        with mock.patch('swift.common.db.RECLAIM_PAGE_SIZE', 10):
            broker.reclaim(reclaim_age, reclaim_age)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM object "
            ).fetchone()[0], 0)

    @with_tempdir
    def test_reclaim_deadlock(self, tempdir):
        db_path = os.path.join(
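For orientation, the paging behavior these tests exercise can be sketched as follows. This is a simplified illustration of batched reclaim under an assumed RECLAIM_PAGE_SIZE, not the actual swift.common.db implementation: reclaimable rows are deleted one page at a time, with the last name seen carried forward as the marker the tracing test inspects.

    RECLAIM_PAGE_SIZE = 10  # assumed page size for this sketch


    def reclaim_in_pages(conn, age_timestamp):
        """Delete reclaimable rows a page at a time (illustrative only)."""
        marker = ''
        while True:
            # Fetch the next page of reclaimable names past the marker.
            names = [row[0] for row in conn.execute(
                "SELECT name FROM object "
                "WHERE deleted = 1 AND created_at < ? AND name > ? "
                "ORDER BY name LIMIT ?",
                (age_timestamp, marker, RECLAIM_PAGE_SIZE))]
            if not names:
                break
            conn.executemany(
                "DELETE FROM object "
                "WHERE deleted = 1 AND created_at < ? AND name = ?",
                [(age_timestamp, name) for name in names])
            marker = names[-1]  # markers advance in order, as asserted

With 30 reclaimable rows and a page size of 10, this loop runs three times, matching the three traced calls in test_batch_reclaim.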
@@ -642,10 +735,12 @@ class TestContainerBroker(unittest.TestCase):
        self.assertEqual(info['put_timestamp'], start.internal)
        self.assertTrue(Timestamp(info['created_at']) >= start)
        self.assertEqual(info['delete_timestamp'], '0')
        if self.__class__ in (TestContainerBrokerBeforeMetadata,
                              TestContainerBrokerBeforeXSync,
                              TestContainerBrokerBeforeSPI,
                              TestContainerBrokerBeforeShardRanges):
        if self.__class__ in (
                TestContainerBrokerBeforeMetadata,
                TestContainerBrokerBeforeXSync,
                TestContainerBrokerBeforeSPI,
                TestContainerBrokerBeforeShardRanges,
                TestContainerBrokerBeforeShardRangeReportedColumn):
            self.assertEqual(info['status_changed_at'], '0')
        else:
            self.assertEqual(info['status_changed_at'],
@@ -932,6 +1027,8 @@ class TestContainerBroker(unittest.TestCase):
                "SELECT object_count FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT bytes_used FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT reported FROM shard_range").fetchone()[0], 0)

        # Reput same event
        broker.merge_shard_ranges(
@@ -957,6 +1054,64 @@ class TestContainerBroker(unittest.TestCase):
                "SELECT object_count FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT bytes_used FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT reported FROM shard_range").fetchone()[0], 0)

        # Mark it as reported
        broker.merge_shard_ranges(
            ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
                       'low', 'up', meta_timestamp=meta_timestamp,
                       reported=True))
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM shard_range").fetchone()[0],
                '"a/{<shardrange \'&\' name>}"')
            self.assertEqual(conn.execute(
                "SELECT timestamp FROM shard_range").fetchone()[0],
                timestamp)
            self.assertEqual(conn.execute(
                "SELECT meta_timestamp FROM shard_range").fetchone()[0],
                meta_timestamp)
            self.assertEqual(conn.execute(
                "SELECT lower FROM shard_range").fetchone()[0], 'low')
            self.assertEqual(conn.execute(
                "SELECT upper FROM shard_range").fetchone()[0], 'up')
            self.assertEqual(conn.execute(
                "SELECT deleted FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT object_count FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT bytes_used FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT reported FROM shard_range").fetchone()[0], 1)

        # Reporting latches it
        broker.merge_shard_ranges(
            ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
                       'low', 'up', meta_timestamp=meta_timestamp,
                       reported=False))
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM shard_range").fetchone()[0],
                '"a/{<shardrange \'&\' name>}"')
            self.assertEqual(conn.execute(
                "SELECT timestamp FROM shard_range").fetchone()[0],
                timestamp)
            self.assertEqual(conn.execute(
                "SELECT meta_timestamp FROM shard_range").fetchone()[0],
                meta_timestamp)
            self.assertEqual(conn.execute(
                "SELECT lower FROM shard_range").fetchone()[0], 'low')
            self.assertEqual(conn.execute(
                "SELECT upper FROM shard_range").fetchone()[0], 'up')
            self.assertEqual(conn.execute(
                "SELECT deleted FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT object_count FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT bytes_used FROM shard_range").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT reported FROM shard_range").fetchone()[0], 1)

        # Put new event
        timestamp = next(self.ts).internal
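The "latch" the test pins down is that re-merging an otherwise-identical record with reported=False does not clear a previously persisted reported=1. A rough sketch of merge logic with that property (hypothetical, not the broker's actual SQL; assumes records older than the stored row are discarded before this rule applies):

    def merge_reported(existing_row, incoming):
        """Hypothetical merge rule for the 'reported' flag."""
        if (incoming['timestamp'] == existing_row['timestamp'] and
                incoming['meta_timestamp'] == existing_row['meta_timestamp']):
            # Nothing new to report: the stored flag may be upgraded to 1
            # but is never downgraded back to 0 -- the latch.
            return max(existing_row['reported'], int(incoming['reported']))
        # A genuinely newer record supersedes the row and must be
        # re-reported unless it arrives already marked as reported.
        return int(incoming['reported'])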
@@ -984,11 +1139,14 @@ class TestContainerBroker(unittest.TestCase):
                "SELECT object_count FROM shard_range").fetchone()[0], 1)
            self.assertEqual(conn.execute(
                "SELECT bytes_used FROM shard_range").fetchone()[0], 2)
            self.assertEqual(conn.execute(
                "SELECT reported FROM shard_range").fetchone()[0], 0)

        # Put old event
        broker.merge_shard_ranges(
            ShardRange('"a/{<shardrange \'&\' name>}"', old_put_timestamp,
                       'lower', 'upper', 1, 2, meta_timestamp=meta_timestamp))
                       'lower', 'upper', 1, 2, meta_timestamp=meta_timestamp,
                       reported=True))
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM shard_range").fetchone()[0],
@@ -1009,6 +1167,8 @@ class TestContainerBroker(unittest.TestCase):
                "SELECT object_count FROM shard_range").fetchone()[0], 1)
            self.assertEqual(conn.execute(
                "SELECT bytes_used FROM shard_range").fetchone()[0], 2)
            self.assertEqual(conn.execute(
                "SELECT reported FROM shard_range").fetchone()[0], 0)

        # Put old delete event
        broker.merge_shard_ranges(
@@ -1885,10 +2045,12 @@ class TestContainerBroker(unittest.TestCase):
        self.assertEqual(info['hash'], '00000000000000000000000000000000')
        self.assertEqual(info['put_timestamp'], Timestamp(1).internal)
        self.assertEqual(info['delete_timestamp'], '0')
        if self.__class__ in (TestContainerBrokerBeforeMetadata,
                              TestContainerBrokerBeforeXSync,
                              TestContainerBrokerBeforeSPI,
                              TestContainerBrokerBeforeShardRanges):
        if self.__class__ in (
                TestContainerBrokerBeforeMetadata,
                TestContainerBrokerBeforeXSync,
                TestContainerBrokerBeforeSPI,
                TestContainerBrokerBeforeShardRanges,
                TestContainerBrokerBeforeShardRangeReportedColumn):
            self.assertEqual(info['status_changed_at'], '0')
        else:
            self.assertEqual(info['status_changed_at'],
@@ -3182,10 +3344,12 @@ class TestContainerBroker(unittest.TestCase):
        self.assertEqual(0, info['storage_policy_index'])  # sanity check
        self.assertEqual(0, info['object_count'])
        self.assertEqual(0, info['bytes_used'])
        if self.__class__ in (TestContainerBrokerBeforeMetadata,
                              TestContainerBrokerBeforeXSync,
                              TestContainerBrokerBeforeSPI,
                              TestContainerBrokerBeforeShardRanges):
        if self.__class__ in (
                TestContainerBrokerBeforeMetadata,
                TestContainerBrokerBeforeXSync,
                TestContainerBrokerBeforeSPI,
                TestContainerBrokerBeforeShardRanges,
                TestContainerBrokerBeforeShardRangeReportedColumn):
            self.assertEqual(info['status_changed_at'], '0')
        else:
            self.assertEqual(timestamp.internal, info['status_changed_at'])
@@ -5222,6 +5386,75 @@ class TestContainerBrokerBeforeShardRanges(ContainerBrokerMigrationMixin,
                                 FROM shard_range''')


def pre_reported_create_shard_range_table(self, conn):
    """
    Copied from ContainerBroker before the
    reported column was added; used for testing with
    TestContainerBrokerBeforeShardRangeReportedColumn.

    Create a shard_range table with no 'reported' column.

    :param conn: DB connection object
    """
    conn.execute("""
        CREATE TABLE shard_range (
            ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT,
            timestamp TEXT,
            lower TEXT,
            upper TEXT,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0,
            meta_timestamp TEXT,
            deleted INTEGER DEFAULT 0,
            state INTEGER,
            state_timestamp TEXT,
            epoch TEXT
        );
    """)

    conn.execute("""
        CREATE TRIGGER shard_range_update BEFORE UPDATE ON shard_range
        BEGIN
            SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
        END;
    """)


class TestContainerBrokerBeforeShardRangeReportedColumn(
        ContainerBrokerMigrationMixin, TestContainerBroker):
    """
    Tests for ContainerBroker against databases created
    before the 'reported' column was added to the shard_range table.
    """
    # *grumble grumble* This should include container_info/policy_stat :-/
    expected_db_tables = {'outgoing_sync', 'incoming_sync', 'object',
                          'sqlite_sequence', 'container_stat', 'shard_range'}

    def setUp(self):
        super(TestContainerBrokerBeforeShardRangeReportedColumn,
              self).setUp()
        ContainerBroker.create_shard_range_table = \
            pre_reported_create_shard_range_table

        broker = ContainerBroker(':memory:', account='a', container='c')
        broker.initialize(Timestamp('1').internal, 0)
        with self.assertRaises(sqlite3.DatabaseError) as raised, \
                broker.get() as conn:
            conn.execute('''SELECT reported
                            FROM shard_range''')
        self.assertIn('no such column: reported', str(raised.exception))

    def tearDown(self):
        super(TestContainerBrokerBeforeShardRangeReportedColumn,
              self).tearDown()
        broker = ContainerBroker(':memory:', account='a', container='c')
        broker.initialize(Timestamp('1').internal, 0)
        with broker.get() as conn:
            conn.execute('''SELECT reported
                            FROM shard_range''')


class TestUpdateNewItemFromExisting(unittest.TestCase):
    # TODO: add test scenarios that have swift_bytes in content_type
    t0 = '1234567890.00000'
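For context, the migration these fixtures exercise boils down to adding the new column with a safe default when an old database is next written to. A hypothetical sketch of such a step (the real broker performs migrations inside its own transaction machinery):

    import sqlite3


    def migrate_add_reported(conn):
        """Add the 'reported' column if this database predates it (sketch)."""
        try:
            conn.execute("SELECT reported FROM shard_range LIMIT 1")
        except sqlite3.OperationalError as err:
            if 'no such column: reported' not in str(err):
                raise
            # Old rows were, by definition, never reported with the new
            # semantics, so 0 is the correct backfill value.
            conn.execute("ALTER TABLE shard_range "
                         "ADD COLUMN reported INTEGER DEFAULT 0")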
@@ -2380,15 +2380,17 @@ class TestContainerController(unittest.TestCase):
                   'X-Container-Sysmeta-Test': 'set',
                   'X-Container-Meta-Test': 'persisted'}

        # PUT shard range to non-existent container with non-autocreate prefix
        req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers,
                            body=json.dumps([dict(shard_range)]))
        # PUT shard range to non-existent container without autocreate flag
        req = Request.blank(
            '/sda1/p/.shards_a/shard_c', method='PUT', headers=headers,
            body=json.dumps([dict(shard_range)]))
        resp = req.get_response(self.controller)
        self.assertEqual(404, resp.status_int)

        # PUT shard range to non-existent container with autocreate prefix,
        # PUT shard range to non-existent container with autocreate flag,
        # missing storage policy
        headers['X-Timestamp'] = next(ts_iter).internal
        headers['X-Backend-Auto-Create'] = 't'
        req = Request.blank(
            '/sda1/p/.shards_a/shard_c', method='PUT', headers=headers,
            body=json.dumps([dict(shard_range)]))
@@ -2397,7 +2399,7 @@ class TestContainerController(unittest.TestCase):
        self.assertIn(b'X-Backend-Storage-Policy-Index header is required',
                      resp.body)

        # PUT shard range to non-existent container with autocreate prefix
        # PUT shard range to non-existent container with autocreate flag
        headers['X-Timestamp'] = next(ts_iter).internal
        policy_index = random.choice(POLICIES).idx
        headers['X-Backend-Storage-Policy-Index'] = str(policy_index)
@@ -2407,7 +2409,7 @@ class TestContainerController(unittest.TestCase):
        resp = req.get_response(self.controller)
        self.assertEqual(201, resp.status_int)

        # repeat PUT of shard range to autocreated container - 204 response
        # repeat PUT of shard range to autocreated container - 202 response
        headers['X-Timestamp'] = next(ts_iter).internal
        headers.pop('X-Backend-Storage-Policy-Index')  # no longer required
        req = Request.blank(
@@ -2416,7 +2418,7 @@ class TestContainerController(unittest.TestCase):
        resp = req.get_response(self.controller)
        self.assertEqual(202, resp.status_int)

        # regular PUT to autocreated container - 204 response
        # regular PUT to autocreated container - 202 response
        headers['X-Timestamp'] = next(ts_iter).internal
        req = Request.blank(
            '/sda1/p/.shards_a/shard_c', method='PUT',
@@ -4649,61 +4651,53 @@ class TestContainerController(unittest.TestCase):
                         "%d on param %s" % (resp.status_int, param))

    def test_put_auto_create(self):
        headers = {'x-timestamp': Timestamp(1).internal,
                   'x-size': '0',
                   'x-content-type': 'text/plain',
                   'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
        def do_test(expected_status, path, extra_headers=None, body=None):
            headers = {'x-timestamp': Timestamp(1).internal,
                       'x-size': '0',
                       'x-content-type': 'text/plain',
                       'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
            if extra_headers:
                headers.update(extra_headers)
            req = Request.blank('/sda1/p/' + path,
                                environ={'REQUEST_METHOD': 'PUT'},
                                headers=headers, body=body)
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, expected_status)

        req = Request.blank('/sda1/p/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        do_test(404, 'a/c/o')
        do_test(404, '.a/c/o', {'X-Backend-Auto-Create': 'no'})
        do_test(201, '.a/c/o')
        do_test(404, 'a/.c/o')
        do_test(404, 'a/c/.o')
        do_test(201, 'a/c/o', {'X-Backend-Auto-Create': 'yes'})

        req = Request.blank('/sda1/p/.a/c/o',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)

        req = Request.blank('/sda1/p/a/.c/o',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)

        req = Request.blank('/sda1/p/a/c/.o',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        do_test(404, '.shards_a/c/o')
        create_shard_headers = {
            'X-Backend-Record-Type': 'shard',
            'X-Backend-Storage-Policy-Index': '0'}
        do_test(404, '.shards_a/c', create_shard_headers, '[]')
        create_shard_headers['X-Backend-Auto-Create'] = 't'
        do_test(201, '.shards_a/c', create_shard_headers, '[]')

    def test_delete_auto_create(self):
        headers = {'x-timestamp': Timestamp(1).internal}
        def do_test(expected_status, path, extra_headers=None):
            headers = {'x-timestamp': Timestamp(1).internal}
            if extra_headers:
                headers.update(extra_headers)
            req = Request.blank('/sda1/p/' + path,
                                environ={'REQUEST_METHOD': 'DELETE'},
                                headers=headers)
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, expected_status)

        req = Request.blank('/sda1/p/a/c/o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)

        req = Request.blank('/sda1/p/.a/c/o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)

        req = Request.blank('/sda1/p/a/.c/o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)

        req = Request.blank('/sda1/p/a/.c/.o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        do_test(404, 'a/c/o')
        do_test(404, '.a/c/o', {'X-Backend-Auto-Create': 'false'})
        do_test(204, '.a/c/o')
        do_test(404, 'a/.c/o')
        do_test(404, 'a/.c/.o')
        do_test(404, '.shards_a/c/o')
        do_test(204, 'a/c/o', {'X-Backend-Auto-Create': 'true'})
        do_test(204, '.shards_a/c/o', {'X-Backend-Auto-Create': 'true'})

    def test_content_type_on_HEAD(self):
        Request.blank('/sda1/p/a/o',
@@ -4189,6 +4189,7 @@ class TestSharder(BaseTestSharder):
        def capture_send(conn, data):
            bodies.append(data)

        self.assertFalse(broker.get_own_shard_range().reported)  # sanity
        with self._mock_sharder() as sharder:
            with mocked_http_conn(204, 204, 204,
                                  give_send=capture_send) as mock_conn:
@@ -4198,6 +4199,7 @@ class TestSharder(BaseTestSharder):
            self.assertEqual('PUT', req['method'])
        self.assertEqual([expected_sent] * 3,
                         [json.loads(b) for b in bodies])
        self.assertTrue(broker.get_own_shard_range().reported)

    def test_update_root_container_own_range(self):
        broker = self._make_broker()
@@ -4230,6 +4232,32 @@ class TestSharder(BaseTestSharder):
            with annotate_failure(state):
                check_only_own_shard_range_sent(state)

    def test_update_root_container_already_reported(self):
        broker = self._make_broker()

        def check_already_reported_not_sent(state):
            own_shard_range = broker.get_own_shard_range()

            own_shard_range.reported = True
            self.assertTrue(own_shard_range.update_state(
                state, state_timestamp=next(self.ts_iter)))
            # Check that updating state clears the flag
            self.assertFalse(own_shard_range.reported)

            # If we claim to have already updated...
            own_shard_range.reported = True
            broker.merge_shard_ranges([own_shard_range])

            # ... then there's nothing to send
            with self._mock_sharder() as sharder:
                with mocked_http_conn() as mock_conn:
                    sharder._update_root_container(broker)
            self.assertFalse(mock_conn.requests)

        for state in ShardRange.STATES:
            with annotate_failure(state):
                check_already_reported_not_sent(state)

    def test_update_root_container_all_ranges(self):
        broker = self._make_broker()
        other_shard_ranges = self._make_shard_ranges((('', 'h'), ('h', '')))
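Taken together, the sharder tests pin down a simple short-circuit: when the broker's own shard range is already marked reported, there is nothing to push to the root container. A hypothetical reduction of that check (the real _update_root_container does more, including gathering the other ranges; send_shard_ranges here stands in for the sharder's internal PUT helper):

    def update_root_container(broker, send_shard_ranges):
        """Push own shard range to the root unless already reported (sketch)."""
        own_shard_range = broker.get_own_shard_range()
        if own_shard_range.reported:
            # Already acknowledged by the root; sending again would be a
            # wasted round trip, so make no requests at all.
            return
        send_shard_ranges(broker.root_account, broker.root_container,
                          [own_shard_range])

Because update_state clears the flag, any real state change still gets reported; only genuinely redundant updates are suppressed.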
@@ -51,7 +51,8 @@ from swift.common.storage_policy import POLICIES, ECDriverError, \
from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
    debug_logger, patch_policies, SlowBody, FakeStatus, \
    DEFAULT_TEST_EC_TYPE, encode_frag_archive_bodies, make_ec_object_stub, \
    fake_ec_node_response, StubResponse, mocked_http_conn
    fake_ec_node_response, StubResponse, mocked_http_conn, \
    quiet_eventlet_exceptions
from test.unit.proxy.test_server import node_error_count
@@ -1617,7 +1618,8 @@ class TestReplicatedObjController(CommonObjectControllerMixin,
        # to the next node rather than hang the request
        headers = [{'X-Backend-Timestamp': 'not-a-timestamp'}, {}]
        codes = [200, 200]
        with set_http_connect(*codes, headers=headers):
        with quiet_eventlet_exceptions(), set_http_connect(
                *codes, headers=headers):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
@@ -7982,7 +7982,7 @@ class TestObjectDisconnectCleanup(unittest.TestCase):
                continue
            device_path = os.path.join(_testdir, dev)
            for datadir in os.listdir(device_path):
                if 'object' not in datadir:
                if any(p in datadir for p in ('account', 'container')):
                    continue
                data_path = os.path.join(device_path, datadir)
                rmtree(data_path, ignore_errors=True)
@@ -14,12 +14,13 @@
# limitations under the License.
- hosts: all
  become: true
  roles:
    - ensure-pip
  tasks:
    - name: installing dependencies
      package: name={{ item }} state=present
      with_items:
        - python-pyeclib
        - python-pip
        - python-nose
        - python-swiftclient
@@ -7,4 +7,6 @@
        bindep_dir: "{{ zuul_work_dir }}"
    - test-setup
    - ensure-tox
    - additional-tempauth-users
    - additional-keystone-users
    - dsvm-additional-middlewares
@@ -179,10 +179,12 @@
      with_items: "{{ find_result.files }}"

    - name: set the options in the proxy config file
      shell:
        cmd: |
          crudini --set /etc/swift/proxy-server.conf app:proxy-server node_timeout 20
        executable: /bin/bash
      ini_file:
        path: /etc/swift/proxy-server.conf
        section: app:proxy-server
        option: node_timeout
        value: 20
        create: no

    - name: copy the SAIO scripts for resetting the environment
      command: cp -r {{ zuul.project.src_dir }}/doc/saio/bin /home/{{ ansible_ssh_user }}/bin creates=/home/{{ ansible_ssh_user }}/bin
tox.ini
@@ -58,24 +58,12 @@ commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
         SWIFT_TEST_IN_PROCESS_CONF_LOADER=ec

[testenv:func-s3api-py3]
basepython = python3
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
         SWIFT_TEST_IN_PROCESS_CONF_LOADER=s3api

[testenv:func-encryption-py3]
basepython = python3
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
         SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption

[testenv:func-domain-remap-staticweb-py3]
basepython = python3
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
         SWIFT_TEST_IN_PROCESS_CONF_LOADER=domain_remap_staticweb

[testenv:func]
basepython = python2.7
deps = {[testenv:py27]deps}

@@ -88,13 +76,6 @@ commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
         SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption

[testenv:func-domain-remap-staticweb]
basepython = python2.7
deps = {[testenv:py27]deps}
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
         SWIFT_TEST_IN_PROCESS_CONF_LOADER=domain_remap_staticweb

[testenv:func-ec]
basepython = python2.7
deps = {[testenv:py27]deps}

@@ -102,13 +83,6 @@ commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
         SWIFT_TEST_IN_PROCESS_CONF_LOADER=ec

[testenv:func-s3api]
basepython = python2.7
deps = {[testenv:py27]deps}
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
         SWIFT_TEST_IN_PROCESS_CONF_LOADER=s3api

[testenv:func-losf]
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1