Merge remote-tracking branch 'remotes/origin/master' into feature/s3api

Change-Id: Ib5461698a1a2f82c2248fd383de599880288f9fb
This commit is contained in:
Kota Tsuyuzaki 2018-03-28 16:49:15 +09:00
commit 8c17f610d4
70 changed files with 3288 additions and 940 deletions

View File

@ -93,19 +93,42 @@
parent: swift-tox-func-ec
nodeset: centos-7
- job:
name: swift-tox-func-domain-remap-staticweb
parent: swift-tox-base
description: |
Run functional tests for swift under cPython version 2.7.
Uses tox with the ``func-domain-remap-staticweb`` environment.
It sets TMPDIR to an XFS mount point created via
tools/test-setup.sh.
vars:
tox_envlist: func-domain-remap-staticweb
# TODO: add experimental env for s3api
- job:
name: swift-tox-func-s3api
parent: swift-tox-base
description: |
Run functional tests for swift under cPython version 2.7.
Uses tox with the ``func-s3api`` environment.
It sets TMPDIR to an XFS mount point created via
tools/test-setup.sh.
vars:
tox_envlist: func-s3api
# TODO: add experimental env for s3api
- job:
name: swift-probetests-centos-7
parent: unittests
nodeset: centos-7
voting: false
description: |
Setup a SAIO dev environment and run Swift's probe tests
pre-run:
- playbooks/saio_single_node_setup/install_dependencies.yaml
- playbooks/saio_single_node_setup/setup_saio.yaml
- playbooks/saio_single_node_setup/make_rings.yaml
run: playbooks/probetests/run.yaml
post-run: playbooks/probetests/post.yaml
- project:
check:
@ -114,14 +137,17 @@
- swift-tox-py35
- swift-tox-func
- swift-tox-func-encryption
- swift-tox-func-domain-remap-staticweb
- swift-tox-func-ec
- swift-tox-func-s3api
- swift-probetests-centos-7
gate:
jobs:
- swift-tox-py27
- swift-tox-py35
- swift-tox-func
- swift-tox-func-encryption
- swift-tox-func-domain-remap-staticweb
- swift-tox-func-ec
- swift-tox-func-s3api
experimental:

View File

@ -2,8 +2,8 @@
Team and repository tags
========================
.. image:: https://governance.openstack.org/badges/swift.svg
:target: https://governance.openstack.org/reference/tags/index.html
.. image:: https://governance.openstack.org/tc/badges/swift.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
.. Change things from this point on

View File

@ -38,6 +38,9 @@ if __name__ == '__main__':
parser.add_option(
'-d', '--swift-dir', default='/etc/swift',
help="Pass location of swift directory")
parser.add_option(
'--drop-prefixes', default=False, action="store_true",
help="When outputting metadata, drop the per-section common prefixes")
options, args = parser.parse_args()

View File

@ -14,10 +14,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import optparse
from swift.account.replicator import AccountReplicator
from swift.common.utils import parse_options
from swift.common.daemon import run_daemon
if __name__ == '__main__':
conf_file, options = parse_options(once=True)
parser = optparse.OptionParser("%prog CONFIG [options]")
parser.add_option('-d', '--devices',
help=('Replicate only given devices. '
'Comma-separated list. '
'Only has effect if --once is used.'))
parser.add_option('-p', '--partitions',
help=('Replicate only given partitions. '
'Comma-separated list. '
'Only has effect if --once is used.'))
conf_file, options = parse_options(parser=parser, once=True)
run_daemon(AccountReplicator, conf_file, **options)

View File

@ -38,6 +38,9 @@ if __name__ == '__main__':
parser.add_option(
'-d', '--swift-dir', default='/etc/swift',
help="Pass location of swift directory")
parser.add_option(
'--drop-prefixes', default=False, action="store_true",
help="When outputting metadata, drop the per-section common prefixes")
options, args = parser.parse_args()

View File

@ -14,10 +14,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import optparse
from swift.container.replicator import ContainerReplicator
from swift.common.utils import parse_options
from swift.common.daemon import run_daemon
if __name__ == '__main__':
conf_file, options = parse_options(once=True)
parser = optparse.OptionParser("%prog CONFIG [options]")
parser.add_option('-d', '--devices',
help=('Replicate only given devices. '
'Comma-separated list. '
'Only has effect if --once is used.'))
parser.add_option('-p', '--partitions',
help=('Replicate only given partitions. '
'Comma-separated list. '
'Only has effect if --once is used.'))
conf_file, options = parse_options(parser=parser, once=True)
run_daemon(ContainerReplicator, conf_file, **options)

View File

@ -14,15 +14,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import sys
from optparse import OptionParser
import six
from swift.common.storage_policy import reload_storage_policies
from swift.common.utils import set_swift_dir
from swift.cli.info import print_obj, InfoSystemExit
if __name__ == '__main__':
if not six.PY2:
# Make stdout able to write escaped bytes
sys.stdout = codecs.getwriter("utf-8")(
sys.stdout.detach(), errors='surrogateescape')
parser = OptionParser('%prog [options] OBJECT_FILE')
parser.add_option(
'-n', '--no-check-etag', default=True,
@ -31,6 +39,9 @@ if __name__ == '__main__':
parser.add_option(
'-d', '--swift-dir', default='/etc/swift', dest='swift_dir',
help="Pass location of swift directory")
parser.add_option(
'--drop-prefixes', default=False, action="store_true",
help="When outputting metadata, drop the per-section common prefixes")
parser.add_option(
'-P', '--policy-name', dest='policy_name',
help="Specify storage policy name")

View File

@ -2,5 +2,4 @@ Object Versioning
=================
.. automodule:: swift.common.middleware.versioned_writes
:members:
:show-inheritance:

View File

@ -163,6 +163,25 @@ use = egg:swift#recon
# Work only with ionice_class.
# ionice_class =
# ionice_priority =
#
# The handoffs_only mode option is for special-case emergency
# situations such as full disks in the cluster. This option SHOULD NOT
# BE ENABLED except in emergencies. When handoffs_only mode is enabled
# the replicator will *only* replicate from handoff nodes to primary
# nodes and will not sync primary nodes with other primary nodes.
#
# This has two main effects: first, the replicator becomes much more
# effective at removing misplaced databases, thereby freeing up disk
# space at a much faster pace than normal. Second, the replicator does
# not sync data between primary nodes, so out-of-sync account and
# container listings will not resolve while handoffs_only is enabled.
#
# This mode is intended to allow operators to temporarily sacrifice
# consistency in order to gain faster rebalancing, such as during a
# capacity addition with nearly-full disks. It is not intended for
# long-term use.
#
# handoffs_only = no
[account-auditor]
# You can override the default log routing for this app here (don't use set!):

View File

@ -172,6 +172,25 @@ use = egg:swift#recon
# Work only with ionice_class.
# ionice_class =
# ionice_priority =
#
# The handoffs_only mode option is for special-case emergency
# situations such as full disks in the cluster. This option SHOULD NOT
# BE ENABLED except in emergencies. When handoffs_only mode is enabled
# the replicator will *only* replicate from handoff nodes to primary
# nodes and will not sync primary nodes with other primary nodes.
#
# This has two main effects: first, the replicator becomes much more
# effective at removing misplaced databases, thereby freeing up disk
# space at a much faster pace than normal. Second, the replicator does
# not sync data between primary nodes, so out-of-sync account and
# container listings will not resolve while handoffs_only is enabled.
#
# This mode is intended to allow operators to temporarily sacrifice
# consistency in order to gain faster rebalancing, such as during a
# capacity addition with nearly-full disks. It is not intended for
# long-term use.
#
# handoffs_only = no
[container-updater]
# You can override the default log routing for this app here (don't use set!):

View File

@ -729,7 +729,7 @@ use = egg:swift#cname_lookup
#
# Specify the nameservers to use to do the CNAME resolution. If unset, the
# system configuration is used. Multiple nameservers can be specified
# separated by a comma. Default port 53 can be overriden. IPv6 is accepted.
# separated by a comma. Default port 53 can be overridden. IPv6 is accepted.
# Example: 127.0.0.1, 127.0.0.2, 127.0.0.3:5353, [::1], [::1]:5353
# nameservers =

View File

@ -0,0 +1,16 @@
- hosts: all
become: true
tasks:
- name: Ensure swift logs are readable before syncing
file:
path: '/var/log/swift'
mode: u=rwX,g=rX,o=rX
state: directory
recurse: yes
- name: Copy swift logs from worker nodes to executor node
synchronize:
src: '/var/log/swift'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true

View File

@ -0,0 +1,26 @@
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: all
tasks:
- name: run probe tests
shell:
cmd: |
source ~/.bashrc
nosetests test/probe/
executable: /bin/bash
chdir: '{{ zuul.project.src_dir }}'

View File

@ -0,0 +1,24 @@
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: all
become: true
tasks:
- name: installing dependencies
yum: name={{ item }} state=present
with_items:
- python-eventlet
- python-pyeclib
- python-nose
- python-swiftclient

View File

@ -0,0 +1,29 @@
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: all
tasks:
- name: install swift
become: true
shell:
cmd: python setup.py develop
executable: /bin/bash
chdir: '{{ zuul.project.src_dir }}'
- name: make rings
shell:
cmd: remakerings
executable: /bin/bash
chdir: '/etc/swift'

View File

@ -0,0 +1,174 @@
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: all
become: true
tasks:
- name: assure /srv directory exists
file: path=/srv state=directory
- name: create loopback device
command: truncate -s 1GB /srv/swift-disk creates=/srv/swift-disk
- name: create filesystem /srv/swift-disk
become: true
filesystem: fstype=xfs dev=/srv/swift-disk
- name: create mount path /mnt/sdb1
file: path=/mnt/sdb1 state=directory
- name: mount /mnt/sdb1
mount: name=/mnt/sdb1 src=/srv/swift-disk fstype=xfs opts="loop,noatime,nodiratime,nobarrier,logbufs=8" dump=0 passno=0 state=mounted
- name: create sub-partitions
file: >
path=/mnt/sdb1/{{ item }}
state=directory
owner={{ ansible_user_id }}
group={{ ansible_user_gid }}
with_items:
- 1
- 2
- 3
- 4
- name: create symlinks
become: true
file: >
src=/mnt/sdb1/{{ item }}
dest=/srv/{{ item }}
owner={{ ansible_user_id }}
group={{ ansible_user_gid }}
state=link
with_items:
- 1
- 2
- 3
- 4
- name: create node partition directories
file: >
path=/srv/{{ item[1] }}/node/sdb{{ item[0] + item[1] }}
owner={{ ansible_user_id }}
group={{ ansible_user_gid }}
state=directory
with_nested:
- [0, 4]
- [1, 2, 3, 4]
- name: create /var/run/swift
file: >
path=/var/run/swift
owner={{ ansible_user_id }}
group={{ ansible_user_gid }}
state=directory
- name: create /var/cache/swift
file: >
path=/var/cache/swift
owner={{ ansible_user_id }}
group={{ ansible_user_gid }}
state=directory
- name: create /var/cache/swift[n]
file: >
path=/var/cache/swift{{ item }}
owner={{ ansible_user_id }}
group={{ ansible_user_gid }}
state=directory
with_items:
- 2
- 3
- 4
- name: create rc.local from template
template: src=rc.local.j2 dest=/etc/rc.d/rc.local owner=root group=root mode=0755
- name: create /etc/rsyncd.conf
command: cp {{ zuul.project.src_dir }}/doc/saio/rsyncd.conf /etc/
- name: update rsyncd.conf with correct username
replace: dest=/etc/rsyncd.conf regexp=<your-user-name> replace={{ ansible_user_id }}
- name: enable rsync
lineinfile: dest=/etc/xinetd.d/rsync line="disable = no" create=yes
- name: set selinux to permissive
selinux: policy=targeted state=disabled
- name: restart rsync
service: name=rsyncd state=restarted enabled=yes
- name: start memcache
service: name=memcached state=started enabled=yes
- name: configure rsyslog
command: cp {{ zuul.project.src_dir }}/doc/saio/rsyslog.d/10-swift.conf /etc/rsyslog.d/
- name: modify /etc/rsyslog.conf
lineinfile: dest=/etc/rsyslog.conf
line="$PrivDropToGroup adm"
create=yes
insertafter="^#### GLOBAL DIRECTIVES"
- name: assure /var/log/swift directory exists
file: path=/var/log/swift
state=directory
owner=root
group=adm
mode="g+w"
- name: restart rsyslog
service: name=rsyslog state=restarted enabled=yes
- name: clean up /etc/swift directory
file: path=/etc/swift state=absent
- name: create clean /etc/swift
command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift /etc/swift
- name: copy the sample configuration files for running tests
command: cp -r {{ zuul.project.src_dir }}/test/sample.conf /etc/swift/test.conf
- name: set correct ownership of /etc/swift
file: path=/etc/swift owner={{ ansible_user_id }} group={{ ansible_user_gid }} recurse=yes
- name: find config files to modify user option
find: paths="/etc/swift" patterns="*.conf" recurse=yes
register: find_result
- name: replace user name
replace: dest={{ item.path }} regexp=<your-user-name> replace={{ ansible_user_id }}
with_items: "{{ find_result.files }}"
- name: copy the SAIO scripts for resetting the environment
command: cp -r {{ zuul.project.src_dir }}/doc/saio/bin /home/{{ ansible_ssh_user }}/bin creates=/home/{{ ansible_ssh_user }}/bin
- name: set the correct file mode for SAIO scripts
file: dest=/home/{{ ansible_ssh_user }}/bin mode=0777 recurse=yes
- name: add new env. variable for loopback device
lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="export SAIO_BLOCK_DEVICE=/srv/swift-disk"
- name: remove line from resetswift
lineinfile: dest=/home/{{ ansible_ssh_user }}/bin/resetswift line="sudo find /var/log/swift -type f -exec rm -f {} \;" state=absent
- name: add new env. variable for running tests
lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf"
- name: make sure PATH includes the bin directory
lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="export PATH=${PATH}:/home/{{ ansible_ssh_user }}/bin"
- name: increase open files limit to run probe tests
lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="ulimit -n 4096"

View File

@ -0,0 +1,8 @@
#!/bin/bash
mkdir -p /var/cache/swift /var/cache/swift2 /var/cache/swift3 /var/cache/swift4
chown {{ ansible_user_id }}:{{ ansible_user_gid }} /var/cache/swift*
mkdir -p /var/run/swift
chown {{ ansible_user_id }}:{{ ansible_user_gid }} /var/run/swift
exit 0

View File

@ -7,6 +7,8 @@
current
queens
pike
ocata

View File

@ -4,15 +4,15 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift Release Notes\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-02-09 02:12+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2018-01-27 01:24+0000\n"
"PO-Revision-Date: 2018-02-16 07:33+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en-GB\n"
"X-Generator: Zanata 3.9.6\n"
"Language: en_GB\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
msgid "2.10.0"
@ -45,6 +45,9 @@ msgstr "2.15.0"
msgid "2.15.1"
msgstr "2.15.1"
msgid "2.16.0"
msgstr "2.16.0"
msgid ""
"A PUT or POST to a container will now update the container's Last-Modified "
"time, and that value will be included in a GET/HEAD response."
@ -65,6 +68,13 @@ msgstr ""
"in distinct regions, which means that the dispersion of replicas between the "
"components can be guaranteed."
msgid ""
"Accept a trade off of dispersion for balance in the ring builder that will "
"result in getting to balanced rings much more quickly in some cases."
msgstr ""
"Accept a trade off of dispersion for balance in the ring builder that will "
"result in getting to balanced rings much more quickly in some cases."
msgid ""
"Account and container databases will now be quarantined if the database "
"schema has been corrupted."
@ -72,12 +82,22 @@ msgstr ""
"Account and container databases will now be quarantined if the database "
"schema has been corrupted."
msgid ""
"Account and container replication stats logs now include ``remote_merges``, "
"the number of times a whole database was sent to another node."
msgstr ""
"Account and container replication stats logs now include ``remote_merges``, "
"the number of times a whole database was sent to another node."
msgid "Add Composite Ring Functionality"
msgstr "Add Composite Ring Functionality"
msgid "Add Vary headers for CORS responses."
msgstr "Add Vary headers for CORS responses."
msgid "Add checksum to object extended attributes."
msgstr "Add checksum to object extended attributes."
msgid ""
"Add support to increase object ring partition power transparently to end "
"users and with no cluster downtime. Increasing the ring part power allows "
@ -105,6 +125,9 @@ msgstr ""
"Added a configurable URL base to staticweb, fixing issues when the "
"accessible endpoint isn't known to the Swift cluster (eg http vs https)."
msgid "Added a configurable URL base to staticweb."
msgstr "Added a configurable URL base to staticweb."
msgid ""
"Added support for per-policy proxy config options. This allows per-policy "
"affinity options to be set for use with duplicated EC policies and composite "
@ -122,6 +145,22 @@ msgstr ""
"``write_affinity``, ``write_affinity_node_count``, and "
"``write_affinity_handoff_delete_count``."
msgid ""
"Added support for retrieving the encryption root secret from an external key "
"management system. In practice, this is currently limited to Barbican."
msgstr ""
"Added support for retrieving the encryption root secret from an external key "
"management system. In practice, this is currently limited to Barbican."
msgid ""
"All 416 responses will now include a Content-Range header with an "
"unsatisfied-range value. This allows the caller to know the valid range "
"request value for an object."
msgstr ""
"All 416 responses will now include a Content-Range header with an "
"unsatisfied-range value. This allows the caller to know the valid range "
"request value for an object."
msgid "Always set Swift processes to use UTC."
msgstr "Always set Swift processes to use UTC."
@ -131,6 +170,22 @@ msgstr "Bug Fixes"
msgid "Cache all answers from nameservers in cname_lookup."
msgstr "Cache all answers from nameservers in cname_lookup."
msgid "Cleaned up logged tracebacks when talking to memcached servers."
msgstr "Cleaned up logged tracebacks when talking to memcached servers."
msgid ""
"Closed a bug where ssync may have written bad fragment data in some "
"circumstances. A check was added to ensure the correct number of bytes is "
"written for a fragment before finalizing the write. Also, erasure coded "
"fragment metadata will now be validated on read requests and, if bad data is "
"found, the fragment will be quarantined."
msgstr ""
"Closed a bug where ssync may have written bad fragment data in some "
"circumstances. A check was added to ensure the correct number of bytes is "
"written for a fragment before finalising the write. Also, erasure coded "
"fragment metadata will now be validated on read requests and, if bad data is "
"found, the fragment will be quarantined."
msgid ""
"Closed a bug where ssync may have written bad fragment data in some "
"circumstances. A check was added to ensure the correct number of bytes is "
@ -171,6 +226,9 @@ msgstr ""
"to be synced before all of the referenced segments. This fixes a bug where "
"container sync would not copy SLO manifests."
msgid "Correctly handle deleted files with if-none-match requests."
msgstr "Correctly handle deleted files with if-none-match requests."
msgid ""
"Correctly send 412 Precondition Failed if a user sends an invalid copy "
"destination. Previously Swift would send a 500 Internal Server Error."
@ -178,6 +236,9 @@ msgstr ""
"Correctly send 412 Precondition Failed if a user sends an invalid copy "
"destination. Previously Swift would send a 500 Internal Server Error."
msgid "Critical Issues"
msgstr "Critical Issues"
msgid "Current (Unreleased) Release Notes"
msgstr "Current (Unreleased) Release Notes"
@ -269,6 +330,15 @@ msgstr ""
"objects with non-ASCII names from being reconstructed and caused the "
"reconstructor process to hang."
msgid ""
"Fixed XML responses (eg on bulk extractions and SLO upload failures) to be "
"more correct. The enclosing \"delete\" tag was removed where it doesn't make "
"sense and replaced with \"extract\" or \"upload\" depending on the context."
msgstr ""
"Fixed XML responses (e.g. on bulk extractions and SLO upload failures) to be "
"more correct. The enclosing \"delete\" tag was removed where it doesn't make "
"sense and replaced with \"extract\" or \"upload\" depending on the context."
msgid "Fixed a bug in domain_remap when obj starts/ends with slash."
msgstr "Fixed a bug in domain_remap when obj starts/ends with slash."
@ -336,9 +406,23 @@ msgid "Fixed a rare infinite loop in `swift-ring-builder` while placing parts."
msgstr ""
"Fixed a rare infinite loop in `swift-ring-builder` while placing parts."
msgid ""
"Fixed a rare issue where multiple backend timeouts could result in bad data "
"being returned to the client."
msgstr ""
"Fixed a rare issue where multiple backend timeouts could result in bad data "
"being returned to the client."
msgid "Fixed a socket leak in copy middleware when a large object was copied."
msgstr "Fixed a socket leak in copy middleware when a large object was copied."
msgid ""
"Fixed an issue where background consistency daemon child processes would "
"deadlock waiting on the same file descriptor."
msgstr ""
"Fixed an issue where background consistency daemon child processes would "
"deadlock waiting on the same file descriptor."
msgid "Fixed deadlock when logging from a tpool thread."
msgstr "Fixed deadlock when logging from a tpool thread."
@ -378,6 +462,17 @@ msgstr ""
"2.7.0 and could cause an increase in rsync replication stats during and "
"after upgrade, due to inconsistent hashing of partition suffixes."
msgid ""
"Fixed regression in consolidate_hashes that occurred when a new file was "
"stored to new suffix to a non-empty partition. This bug was introduced in "
"2.7.0 and could cause an increase in rsync replication stats during and "
"after upgrade, due to inconsistent hashing of partition suffixes."
msgstr ""
"Fixed regression in consolidate_hashes that occurred when a new file was "
"stored to new suffix to a non-empty partition. This bug was introduced in "
"2.7.0 and could cause an increase in rsync replication stats during and "
"after upgrade, due to inconsistent hashing of partition suffixes."
msgid "Fixed some minor test compatibility issues."
msgstr "Fixed some minor test compatibility issues."
@ -387,6 +482,11 @@ msgstr "Fixed the KeyError message when auditor finds an expired object."
msgid "Fixed the stats calculation in the erasure code reconstructor."
msgstr "Fixed the stats calculation in the erasure code reconstructor."
msgid ""
"Fixed using ``swift-ring-builder set_weight`` with more than one device."
msgstr ""
"Fixed using ``swift-ring-builder set_weight`` with more than one device."
msgid ""
"For further information see the `docs <https://docs.openstack.org/swift/"
"latest/overview_ring.html#module-swift.common.ring.composite_builder>`__"
@ -428,6 +528,15 @@ msgstr ""
msgid "Improvements in key parts of the consistency engine"
msgstr "Improvements in key parts of the consistency engine"
msgid ""
"In SLO manifests, the `etag` and `size_bytes` keys are now fully optional "
"and not required. Previously, the keys needed to exist but the values were "
"optional. The only required key is `path`."
msgstr ""
"In SLO manifests, the `etag` and `size_bytes` keys are now fully optional "
"and not required. Previously, the keys needed to exist but the values were "
"optional. The only required key is `path`."
msgid ""
"Include object sysmeta in POST responses. Sysmeta is still stripped from the "
"response before being sent to the client, but this allows middleware to make "
@ -440,6 +549,24 @@ msgstr ""
msgid "Include received fragment index in reconstructor log warnings."
msgstr "Include received fragment index in reconstructor log warnings."
msgid ""
"Instead of using a separate .durable file to indicate the durable status of "
"an EC fragment archive, we rename the .data to include a durable marker in "
"the filename. This saves one inode for every EC .data file. Existing ."
"durable files will not be removed, and they will continue to work just fine."
msgstr ""
"Instead of using a separate .durable file to indicate the durable status of "
"an EC fragment archive, we rename the .data to include a durable marker in "
"the filename. This saves one inode for every EC .data file. Existing ."
"durable files will not be removed, and they will continue to work just fine."
msgid ""
"Let clients request heartbeats during SLO PUTs by including the query "
"parameter ``heartbeat=on``."
msgstr ""
"Let clients request heartbeats during SLO PUTs by including the query "
"parameter ``heartbeat=on``."
msgid ""
"Listing containers in accounts with json or xml now includes a "
"`last_modified` time. This does not change any on-disk data, but simply "
@ -454,6 +581,15 @@ msgstr ""
msgid "Log correct status code for conditional requests."
msgstr "Log correct status code for conditional requests."
msgid ""
"Log deprecation warning for ``allow_versions`` in the container server "
"config. Configure the ``versioned_writes`` middleware in the proxy server "
"instead. This option will be ignored in a future release."
msgstr ""
"Log deprecation warning for ``allow_versions`` in the container server "
"config. Configure the ``versioned_writes`` middleware in the proxy server "
"instead. This option will be ignored in a future release."
msgid "Log the correct request type of a subrequest downstream of copy."
msgstr "Log the correct request type of a sub-request downstream of copy."
@ -464,6 +600,20 @@ msgstr ""
"Make mount_check option usable in containerised environments by adding a "
"check for an \".ismount\" file at the root directory of a device."
msgid "Mirror X-Trans-Id to X-Openstack-Request-Id."
msgstr "Mirror X-Trans-Id to X-Openstack-Request-Id."
msgid ""
"Move listing formatting out to a new proxy middleware named "
"``listing_formats``. ``listing_formats`` should be just right of the first "
"proxy-logging middleware, and left of most other middlewares. If it is not "
"already present, it will be automatically inserted for you."
msgstr ""
"Move listing formatting out to a new proxy middleware named "
"``listing_formats``. ``listing_formats`` should be just right of the first "
"proxy-logging middleware, and left of most other middleware. If it is not "
"already present, it will be automatically inserted for you."
msgid ""
"Moved other-requirements.txt to bindep.txt. bindep.txt lists non-python "
"dependencies of Swift."
@ -490,6 +640,20 @@ msgstr ""
msgid "Newton Series Release Notes"
msgstr "Newton Series Release Notes"
msgid ""
"Note that after writing EC data with Swift 2.11.0 or later, that data will "
"not be accessible to earlier versions of Swift."
msgstr ""
"Note that after writing EC data with Swift 2.11.0 or later, that data will "
"not be accessible to earlier versions of Swift."
msgid ""
"Note: if you have a custom middleware that makes account or container "
"listings, it will only receive listings in JSON format."
msgstr ""
"Note: if you have a custom middleware that makes account or container "
"listings, it will only receive listings in JSON format."
msgid ""
"Now Swift will use ``write_affinity_handoff_delete_count`` to define how "
"many local handoff nodes should swift send request to get more candidates "
@ -573,14 +737,38 @@ msgstr ""
"write affinity configured, users always get 404 when deleting object before "
"it's replicated to appropriate nodes."
msgid ""
"Remove ``swift-temp-url`` script. The functionality has been in swiftclient "
"for a long time and this script has been deprecated since 2.10.0."
msgstr ""
"Remove ``swift-temp-url`` script. The functionality has been in swiftclient "
"for a long time and this script has been deprecated since 2.10.0."
msgid "Remove deprecated ``vm_test_mode`` option."
msgstr "Remove deprecated ``vm_test_mode`` option."
msgid "Remove empty db hash and suffix directories if a db gets quarantined."
msgstr "Remove empty DB hash and suffix directories if a DB gets quarantined."
msgid ""
"Removed \"in-process-\" from func env tox name to work with upstream CI."
msgstr ""
"Removed \"in-process-\" from func env tox name to work with upstream CI."
msgid ""
"Removed a race condition where a POST to an SLO could modify the X-Static-"
"Large-Object metadata."
msgstr ""
"Removed a race condition where a POST to an SLO could modify the X-Static-"
"Large-Object metadata."
msgid ""
"Removed all ``post_as_copy`` related code and configs. The option has been "
"deprecated since 2.13.0."
msgstr ""
"Removed all ``post_as_copy`` related code and configs. The option has been "
"deprecated since 2.13.0."
msgid ""
"Removed per-device reconstruction stats. Now that the reconstructor is "
"shuffling parts before going through them, those stats no longer make sense."
@ -588,9 +776,51 @@ msgstr ""
"Removed per-device reconstruction stats. Now that the reconstructor is "
"shuffling parts before going through them, those stats no longer make sense."
msgid ""
"Replaced ``replication_one_per_device`` by custom count defined by "
"``replication_concurrency_per_device``. The original config value is "
"deprecated, but continues to function for now. If both values are defined, "
"the old ``replication_one_per_device`` is ignored."
msgstr ""
"Replaced ``replication_one_per_device`` by custom count defined by "
"``replication_concurrency_per_device``. The original config value is "
"deprecated, but continues to function for now. If both values are defined, "
"the old ``replication_one_per_device`` is ignored."
msgid "Require that known-bad EC schemes be deprecated"
msgstr "Require that known-bad EC schemes be deprecated"
msgid "Respect server type for --md5 check in swift-recon."
msgstr "Respect server type for --md5 check in swift-recon."
msgid ""
"Respond 400 Bad Request when Accept headers fail to parse instead of "
"returning 406 Not Acceptable."
msgstr ""
"Respond 400 Bad Request when Accept headers fail to parse instead of "
"returning 406 Not Acceptable."
msgid ""
"Ring files now include byteorder information about the endian of the machine "
"used to generate the file, and the values are appropriately byteswapped if "
"deserialized on a machine with a different endianness. Newly created ring "
"files will be byteorder agnostic, but previously generated ring files will "
"still fail on different endian architectures. Regenerating older ring files "
"will cause them to become byteorder agnostic. The regeneration of the ring "
"files will not cause any new data movement. Newer ring files will still be "
"usable by older versions of Swift (on machines with the same endianness--"
"this maintains existing behavior)."
msgstr ""
"Ring files now include byteorder information about the endian of the machine "
"used to generate the file, and the values are appropriately byteswapped if "
"deserialised on a machine with a different endianness. Newly created ring "
"files will be byteorder agnostic, but previously generated ring files will "
"still fail on different endian architectures. Regenerating older ring files "
"will cause them to become byteorder agnostic. The regeneration of the ring "
"files will not cause any new data movement. Newer ring files will still be "
"usable by older versions of Swift (on machines with the same endianness--"
"this maintains existing behaviour)."
msgid ""
"Rings with min_part_hours set to zero will now only move one partition "
"replica per rebalance, thus matching behavior when min_part_hours is greater "
@ -609,6 +839,17 @@ msgstr ""
"header of the MD5 sum of the concatenated MD5 sums of the referenced "
"segments."
msgid ""
"SLO will now concurrently HEAD segments, resulting in much faster manifest "
"validation and object creation. By default, two HEAD requests will be done "
"at a time, but this can be changed by the operator via the new `concurrency` "
"setting in the \"[filter:slo]\" section of the proxy server config."
msgstr ""
"SLO will now concurrently HEAD segments, resulting in much faster manifest "
"validation and object creation. By default, two HEAD requests will be done "
"at a time, but this can be changed by the operator via the new `concurrency` "
"setting in the \"[filter:slo]\" section of the proxy server config."
msgid ""
"Significant improvements to the api-ref doc available at http://developer."
"openstack.org/api-ref/object-storage/."
@ -616,9 +857,43 @@ msgstr ""
"Significant improvements to the api-ref doc available at http://developer."
"openstack.org/api-ref/object-storage/."
msgid ""
"Static Large Object (SLO) manifest may now (again) have zero-byte last "
"segments."
msgstr ""
"Static Large Object (SLO) manifest may now (again) have zero-byte last "
"segments."
msgid "Support multi-range GETs for static large objects."
msgstr "Support multi-range GETs for static large objects."
msgid "Suppress unexpected-file warnings for rsync temp files."
msgstr "Suppress unexpected-file warnings for rsync temp files."
msgid "Suppressed the KeyError message when auditor finds an expired object."
msgstr "Suppressed the KeyError message when auditor finds an expired object."
msgid "Swift Release Notes"
msgstr "Swift Release Notes"
msgid ""
"TempURLs now support a validation against a common prefix. A prefix-based "
"signature grants access to all objects which share the same prefix. This "
"avoids the creation of a large amount of signatures, when a whole container "
"or pseudofolder is shared."
msgstr ""
"TempURLs now support a validation against a common prefix. A prefix-based "
"signature grants access to all objects which share the same prefix. This "
"avoids the creation of a large amount of signatures, when a whole container "
"or pseudofolder is shared."
msgid ""
"TempURLs using the \"inline\" parameter can now also set the \"filename\" "
"parameter. Both are used in the Content-Disposition response header."
msgstr ""
"TempURLs using the \"inline\" parameter can now also set the \"filename\" "
"parameter. Both are used in the Content-Disposition response header."
msgid ""
"Temporary URLs now support one common form of ISO 8601 timestamps in "
"addition to Unix seconds-since-epoch timestamps. The ISO 8601 format "
@ -641,6 +916,19 @@ msgstr ""
"get high concurrency, and this change results in much faster rebalance times "
"on servers with many drives."
msgid ""
"The ``domain_remap`` middleware now supports the ``mangle_client_paths`` "
"option. Its default \"false\" value changes ``domain_remap`` parsing to stop "
"stripping the ``path_root`` value from URL paths. If users depend on this "
"path mangling, operators should set ``mangle_client_paths`` to \"True\" "
"before upgrading."
msgstr ""
"The ``domain_remap`` middleware now supports the ``mangle_client_paths`` "
"option. Its default \"false\" value changes ``domain_remap`` parsing to stop "
"stripping the ``path_root`` value from URL paths. If users depend on this "
"path mangling, operators should set ``mangle_client_paths`` to \"True\" "
"before upgrading."
msgid ""
"The default for `object_post_as_copy` has been changed to False. The option "
"is now deprecated and will be removed in a future release. If your cluster "
@ -686,6 +974,15 @@ msgstr ""
"instead of going disk-by-disk. This eliminates single-disk I/O contention "
"and allows continued scaling as concurrency is increased."
msgid ""
"The improvements to EC reads made in Swift 2.10.0 have also been applied to "
"the reconstructor. This allows fragments to be rebuilt in more "
"circumstances, resulting in faster recovery from failures."
msgstr ""
"The improvements to EC reads made in Swift 2.10.0 have also been applied to "
"the reconstructor. This allows fragments to be rebuilt in more "
"circumstances, resulting in faster recovery from failures."
msgid ""
"The object and container server config option ``slowdown`` has been "
"deprecated in favor of the new ``objects_per_second`` and "
@ -722,6 +1019,13 @@ msgstr ""
"The output of devices from ``swift-ring-builder`` has been reordered by "
"region, zone, ip, and device."
msgid ""
"Throttle update_auditor_status calls so it updates no more than once per "
"minute."
msgstr ""
"Throttle update_auditor_status calls so it updates no more than once per "
"minute."
msgid ""
"Throttle update_auditor_status calls so it updates no more than once per "
"minute. This prevents excessive IO on a new cluster."
@ -739,6 +1043,9 @@ msgstr ""
msgid "Updated docs to reference appropriate ports."
msgstr "Updated docs to reference appropriate ports."
msgid "Updated the PyECLib dependency to 1.3.1."
msgstr "Updated the PyECLib dependency to 1.3.1."
msgid ""
"Updated the `hashes.pkl` file format to include timestamp information for "
"race detection. Also simplified hashing logic to prevent race conditions and "
@ -761,6 +1068,17 @@ msgstr "Upgrade Notes"
msgid "Various other minor bug fixes and improvements."
msgstr "Various other minor bug fixes and improvements."
msgid ""
"WARNING: If you are using the ISA-L library for erasure codes, please "
"upgrade to liberasurecode 1.3.1 (or later) as soon as possible. If you are "
"using isa_l_rs_vand with more than 4 parity, please read https://bugs."
"launchpad.net/swift/+bug/1639691 and take necessary action."
msgstr ""
"WARNING: If you are using the ISA-L library for erasure codes, please "
"upgrade to liberasurecode 1.3.1 (or later) as soon as possible. If you are "
"using isa_l_rs_vand with more than 4 parity, please read https://bugs."
"launchpad.net/swift/+bug/1639691 and take necessary action."
msgid ""
"We do not yet have CLI tools for creating composite rings, but the "
"functionality has been enabled in the ring modules to support this advanced "
@ -770,6 +1088,26 @@ msgstr ""
"functionality has been enabled in the ring modules to support this advanced "
"functionality. CLI tools will be delivered in a subsequent release."
msgid ""
"When requesting objects, return 404 if a tombstone is found and is newer "
"than any data found. Previous behavior was to return stale data."
msgstr ""
"When requesting objects, return 404 if a tombstone is found and is newer "
"than any data found. Previous behaviour was to return stale data."
msgid ""
"With heartbeating turned on, the proxy will start its response immediately "
"with 202 Accepted then send a single whitespace character periodically until "
"the request completes. At that point, a final summary chunk will be sent "
"which includes a \"Response Status\" key indicating success or failure and "
"(if successful) an \"Etag\" key indicating the Etag of the resulting SLO."
msgstr ""
"With heartbeating turned on, the proxy will start its response immediately "
"with 202 Accepted then send a single whitespace character periodically until "
"the request completes. At that point, a final summary chunk will be sent "
"which includes a \"Response Status\" key indicating success or failure and "
"(if successful) an \"Etag\" key indicating the Etag of the resulting SLO."
msgid "Write-affinity aware object deletion"
msgstr "Write-affinity aware object deletion"

View File

@ -4,15 +4,15 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift Release Notes\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-02-09 02:12+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2018-02-08 05:31+0000\n"
"PO-Revision-Date: 2018-02-08 07:28+0000\n"
"Last-Translator: Shu Muto <shu-mutou@rf.jp.nec.com>\n"
"Language-Team: Japanese\n"
"Language: ja\n"
"X-Generator: Zanata 3.9.6\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=1; plural=0\n"
msgid "2.10.0"
@ -45,6 +45,12 @@ msgstr "2.15.0"
msgid "2.15.1"
msgstr "2.15.1"
msgid "2.16.0"
msgstr "2.16.0"
msgid "2.17.0"
msgstr "2.17.0"
msgid ""
"A PUT or POST to a container will now update the container's Last-Modified "
"time, and that value will be included in a GET/HEAD response."
@ -64,6 +70,13 @@ msgstr ""
"は、別個の領域に別個のデバイスを使用して独立して構築されているため、コンポー"
"ネント間のレプリカの分散を保証できます。"
msgid ""
"Accept a trade off of dispersion for balance in the ring builder that will "
"result in getting to balanced rings much more quickly in some cases."
msgstr ""
"リングビルダーのバランスのために、分散のトレードオフを受け入れ、場合によって"
"はバランスされたリングにより早く到達します。"
msgid ""
"Account and container databases will now be quarantined if the database "
"schema has been corrupted."
@ -71,12 +84,22 @@ msgstr ""
"データベーススキーマが壊れていると、アカウントとコンテナーのデータベースが隔"
"離されるようになりました。"
msgid ""
"Account and container replication stats logs now include ``remote_merges``, "
"the number of times a whole database was sent to another node."
msgstr ""
"アカウントとコンテナー複製の統計ログに、データベース全体が別のノードに送信さ"
"れた回数、``remote_merges`` が追加されました。"
msgid "Add Composite Ring Functionality"
msgstr "複合リング機能を追加しました。"
msgid "Add Vary headers for CORS responses."
msgstr "CORS 応答用の Vary ヘッダーを追加しました。"
msgid "Add checksum to object extended attributes."
msgstr "オブジェクトの拡張属性にチェックサムを追加します。"
msgid ""
"Add support to increase object ring partition power transparently to end "
"users and with no cluster downtime. Increasing the ring part power allows "
@ -91,6 +114,13 @@ msgstr ""
" <https://docs.openstack.org/swift/latest/ring_partpower.html>`__ を参照して"
"ください。"
msgid ""
"Added ``--swift-versions`` to ``swift-recon`` CLI to compare installed "
"versions in the cluster."
msgstr ""
"クラスターにインストールされているバージョンを比較するために、``swift-"
"recon`` CLI に ``--swift-versions`` を追加しました。"
msgid ""
"Added a \"user\" option to the drive-audit config file. Its value is used to "
"set the owner of the drive-audit recon cache."
@ -109,6 +139,14 @@ msgstr ""
msgid "Added a configurable URL base to staticweb."
msgstr "静的ウェブに対する設定可能な URL ベースを追加しました。"
msgid "Added container/object listing with prefix to InternalClient."
msgstr ""
"InternalClient のコンテナー/オブジェクトの一覧作成で接頭辞を指定できるように"
"なりました。"
msgid "Added support for inline data segments in SLO manifests."
msgstr "SLO マニフェストにおけるインラインデータセグメントをサポートしました。"
msgid ""
"Added support for per-policy proxy config options. This allows per-policy "
"affinity options to be set for use with duplicated EC policies and composite "
@ -126,6 +164,16 @@ msgstr ""
"``read_affinity``、 ``write_affinity``、 ``write_affinity_node_count``、 "
"``write_affinity_handoff_delete_count`` です。"
msgid ""
"Added support for retrieving the encryption root secret from an external key "
"management system. In practice, this is currently limited to Barbican."
msgstr ""
"外部鍵管理システムからの暗号化ルートシークレットの取得をサポートしました。現"
"在 Barbican に限定されています。"
msgid "Added symlink objects support."
msgstr "シンボリックリンクオブジェクトをサポートしました。"
msgid ""
"All 416 responses will now include a Content-Range header with an "
"unsatisfied-range value. This allows the caller to know the valid range "
@ -135,6 +183,9 @@ msgstr ""
"ようになりました。 これにより、呼び出し元はオブジェクトの有効範囲要求値を知る"
"ことができます。"
msgid "Allow the expirer to gracefully move past updating stale work items."
msgstr "expirer が安全に古い作業項目を移動できるようになりました。"
msgid "Always set Swift processes to use UTC."
msgstr "Swift プロセスがいつも UTC を使うように設定しました。"
@ -144,6 +195,18 @@ msgstr "バグ修正"
msgid "Cache all answers from nameservers in cname_lookup."
msgstr "cname_lookup でネームサーバーからのすべての応答をキャッシュします。"
msgid ""
"Changed where liberasurecode-devel for CentOS 7 is referenced and installed "
"as a dependency."
msgstr ""
"CentOS 7 での、liberasurecode-devel が参照、インストールされる場所を変更しま"
"した。"
msgid "Cleaned up logged tracebacks when talking to memcached servers."
msgstr ""
"memcached サーバーと通信するときのトレースバックログをクリーンアップしまし"
"た。"
msgid ""
"Closed a bug where ssync may have written bad fragment data in some "
"circumstances. A check was added to ensure the correct number of bytes is "
@ -228,6 +291,17 @@ msgid "Daemons using InternalClient can now be properly killed with SIGTERM."
msgstr ""
"InternalClient を使用するデーモンは、 SIGTERM を使用して適切に停止できます。"
msgid ""
"Deleting an expiring object will now cause less work in the system. The "
"number of async pending files written has been reduced for all objects and "
"greatly reduced for erasure-coded objects. This dramatically reduces the "
"burden on container servers."
msgstr ""
"期限切れオブジェクトの削除は、システムでの作業を削減します。非同期で保留され"
"ているファイルの数は、すべてのオブジェクトで削減され、消去コード付きオブジェ"
"クトでは大幅に削減されます。これにより、コンテナーサーバーの負担が劇的に軽減"
"しました。"
msgid ""
"Deprecate swift-temp-url and call python-swiftclient's implementation "
"instead. This adds python-swiftclient as an optional dependency of Swift."
@ -239,6 +313,13 @@ msgstr ""
msgid "Deprecation Notes"
msgstr "廃止予定の機能"
msgid "Disallow X-Delete-At header values equal to the X-Timestamp header."
msgstr ""
"X-Delete-At ヘッダーの値が X-Timestamp ヘッダーと等しいことを禁止します。"
msgid "Display more info on empty rings."
msgstr "空のリングに詳細情報を表示します。"
msgid "Do not follow CNAME when host is in storage_domain."
msgstr "ホストが storage_domain にある場合、CNAME に従わないようにしました。"
@ -307,6 +388,15 @@ msgstr ""
"非 ASCII 名のオブジェクトが再構築されず、再構築プロセスがハングアップする原因"
"となるオブジェクト再構成の UnicodeDecodeError が修正されました。"
msgid ""
"Fixed XML responses (eg on bulk extractions and SLO upload failures) to be "
"more correct. The enclosing \"delete\" tag was removed where it doesn't make "
"sense and replaced with \"extract\" or \"upload\" depending on the context."
msgstr ""
"XML レスポンス(一括抽出や SLO アップロードの失敗など)がより正確になりまし"
"た。意味のない \"delete\" の閉じタグは削除され、コンテキストに応じた "
"\"extract\" あるいは \"upload\" に置き換えられました。"
msgid "Fixed a bug in domain_remap when obj starts/ends with slash."
msgstr ""
"オブジェクトがスラッシュで開始/終了するときの domain_remap のバグを修正しまし"
@ -377,11 +467,25 @@ msgid "Fixed a rare infinite loop in `swift-ring-builder` while placing parts."
msgstr ""
"パーツを置いている間の`swift-ring-builder` のまれな無限ループを修正しました。"
msgid ""
"Fixed a rare issue where multiple backend timeouts could result in bad data "
"being returned to the client."
msgstr ""
"複数のバックエンドのタイムアウトが原因で、クライアントに不正なデータが返され"
"るという稀な問題を修正しました。"
msgid "Fixed a socket leak in copy middleware when a large object was copied."
msgstr ""
"ラージオブジェクトをコピーしたときの copy ミドルウェアのソケットリークを修正"
"しました。"
msgid ""
"Fixed an issue where background consistency daemon child processes would "
"deadlock waiting on the same file descriptor."
msgstr ""
"バックグラウンド一貫性デーモンの子プロセスが同じファイル記述子を待ってデッド"
"ロックする問題を修正しました。"
msgid "Fixed deadlock when logging from a tpool thread."
msgstr "tpool スレッドからのロギング時のデッドロックを修正しました。"
@ -411,6 +515,11 @@ msgstr ""
"パーティションが予想よりもずっと少なく更新される可能性がある hashes.pkl の固"
"定の非確定的なサフィックスの更新を修正しました。"
msgid "Fixed rare socket leak on range requests to erasure-coded objects."
msgstr ""
"消去コード付きオブジェクトへの範囲リクエストでの稀なソケットリークを修正しま"
"した。"
msgid ""
"Fixed regression in consolidate_hashes that occured when a new file was "
"stored to new suffix to a non-empty partition. This bug was introduced in "
@ -446,6 +555,11 @@ msgstr ""
msgid "Fixed the stats calculation in the erasure code reconstructor."
msgstr "消去コード再構成の統計計算を修正しました。"
msgid ""
"Fixed using ``swift-ring-builder set_weight`` with more than one device."
msgstr ""
"複数のデバイスでの``swift-ring-builder set_weight`` の使用を修正しました。"
msgid ""
"For further information see the `docs <https://docs.openstack.org/swift/"
"latest/overview_ring.html#module-swift.common.ring.composite_builder>`__"
@ -453,10 +567,35 @@ msgstr ""
"詳細は `docs <https://docs.openstack.org/swift/latest/overview_ring."
"html#module-swift.common.ring.composite_builder>`__ を参照してください。"
msgid "Fractional replicas are no longer allowed for erasure code policies."
msgstr "断片的な複製は、消去コードポリシーには使用できなくなりました。"
msgid ""
"GET and HEAD requests to a symlink will operate on the referenced object and "
"require appropriate permission in the target container. DELETE and PUT "
"requests will operate on the symlink object itself. POST requests are not "
"forwarded to the referenced object. POST requests sent to a symlink will "
"result in a 307 Temporary Redirect response."
msgstr ""
"シンボリックリンクに対する GET と HEAD リクエストは、参照されたオブジェクトに"
"対して操作が行われ、対象となるコンテナーへの適切な権限を必要とします。DELETE "
"と PUT リクエストは、シンボリックリンクオブジェクト自身に操作が行われます。"
"POST リクエストは参照されているオブジェクトに転送されません。シンボリックリン"
"クに対する POST リクエストの送信は、307 Temporary Redirect レスポンスになりま"
"す。"
msgid "I/O priority is now supported on AArch64 architecture."
msgstr ""
"AArch64 アーキテクチャーで I/O 優先順位がサポートされるようになりました。"
msgid ""
"If a proxy server is configured to autocreate accounts and the account "
"create fails, it will now return a server error (500) instead of Not Found "
"(404)."
msgstr ""
"プロキシサーバーにアカウント自動作成が設定されていて、アカウント作成に失敗す"
"ると、Not Found (404) ではなく、サーバーエラー (500) が返されます。"
msgid ""
"If using erasure coding with ISA-L in rs_vand mode and 5 or more parity "
"fragments, Swift will emit a warning. This is a configuration that is known "
@ -485,11 +624,31 @@ msgstr ""
"openstack-manuals プロジェクトからドキュメントコンテンツをインポートしまし"
"た。"
msgid ""
"Improved ``object-updater`` stats logging. It now tells you all of its stats "
"(successes, failures, quarantines due to bad pickles, unlinks, and errors), "
"and it tells you incremental progress every five minutes. The logging at the "
"end of a pass remains and has been expanded to also include all stats."
msgstr ""
"``object-updater`` 統計ログを改善しました。すべての統計(成功、失敗、悪いピク"
"ルスによる検疫、リンク解除、エラー)を出力し、また、5分毎に進捗状況を出力し"
"ます。成功の最後のログは残り、すべての統計情報も含むように拡張されました。"
msgid ""
"Improved performance by eliminating an unneeded directory structure hash."
msgstr ""
"不要なディレクトリ構造ハッシュを排除してパフォーマンスを向上させました。"
msgid ""
"Improved the granularity of the ring dispersion metric so that small "
"improvements after a rebalance can show changes in the dispersion number. "
"Dispersion in existing and new rings can be recalculated using the new ``--"
"recalculate`` option to ``swift-ring-builder``."
msgstr ""
"再分散後の小さな改善により分散数の変化を示すことができるように、リング分散メ"
"トリックの粒度を改善しました。既存、および新しいリングの分散は、``swift-ring-"
"builder`` の新しい ``--recalculate`` オプションを使うことで再計算されます。"
msgid "Improvements in key parts of the consistency engine"
msgstr "整合性エンジンの重要な部分を改善しました。"
@ -525,6 +684,13 @@ msgstr ""
"す。 これにより、すべてのEC .data ファイルに対して1つの inode が節約されま"
"す。 既存の .durable ファイルは削除されず、正常に動作し続けます。"
msgid ""
"Let clients request heartbeats during SLO PUTs by including the query "
"parameter ``heartbeat=on``."
msgstr ""
"SLO PUT の間、クエリーパラメーター ``heartbeat=on`` を含めることで、クライア"
"ントがハートビートを要求できるようにしました。"
msgid ""
"Listing containers in accounts with json or xml now includes a "
"`last_modified` time. This does not change any on-disk data, but simply "
@ -539,6 +705,15 @@ msgstr ""
msgid "Log correct status code for conditional requests."
msgstr "条件付きリクエストの正しいステータスコードを記録します。"
msgid ""
"Log deprecation warning for ``allow_versions`` in the container server "
"config. Configure the ``versioned_writes`` middleware in the proxy server "
"instead. This option will be ignored in a future release."
msgstr ""
"コンテナーサーバーの設定の ``allow_versions`` のために、非推奨警告ログを出力"
"します。代わりに ``versioned_writes`` ミドルウェアをプロキシサーバーに設定し"
"ます。このオプションは将来のリリースでは無視されます。"
msgid "Log the correct request type of a subrequest downstream of copy."
msgstr "サブリクエストの正しいリクエストタイプをコピーの後ろに記録します。"
@ -552,6 +727,20 @@ msgstr ""
msgid "Mirror X-Trans-Id to X-Openstack-Request-Id."
msgstr "X-Trans-Id を X-Openstack-Request-Id に写します。"
msgid ""
"Move listing formatting out to a new proxy middleware named "
"``listing_formats``. ``listing_formats`` should be just right of the first "
"proxy-logging middleware, and left of most other middlewares. If it is not "
"already present, it will be automatically inserted for you."
msgstr ""
"リストの成型を ``listing_formats`` という新しいプロキシミドルウェアに移動しま"
"した。``listing_formats`` は、最初の proxy-logging ミドルウェアの直ぐ右にあ"
"り、他のミドルウェアの左になければなりません。まだ存在しない場合は、自動的に"
"挿入されます。"
msgid "Moved Zuul v3 tox jobs into the Swift code repo."
msgstr "Zuul v3 の tox ジョブを Swift のリポジトリに移動しました。"
msgid ""
"Moved other-requirements.txt to bindep.txt. bindep.txt lists non-python "
"dependencies of Swift."
@ -585,6 +774,13 @@ msgstr ""
"Swift 2.11.0 以降で EC データを書き込んだ後は、以前のバージョンの Swift では"
"そのデータにアクセスできないことに注意してください。"
msgid ""
"Note: if you have a custom middleware that makes account or container "
"listings, it will only receive listings in JSON format."
msgstr ""
"注意: アカウントやコンテナー一覧を作るカスタムミドルウェアがある場合、受け取"
"る一覧は JSON 形式のみです。"
msgid ""
"Now Swift will use ``write_affinity_handoff_delete_count`` to define how "
"many local handoff nodes should swift send request to get more candidates "
@ -597,6 +793,12 @@ msgstr ""
"きかを定義します。デフォルト値 \"auto\" は、 Swift がレプリカの数と現在のクラ"
"スタートポロジーに基づいて自動的に数を計算することを意味します。"
msgid "Now ``swift-recon-cron`` works with conf.d configs."
msgstr "``swift-recon-cron`` は conf.d の設定で動作するようになりました。"
msgid "Object expiry improvements"
msgstr "オブジェクトの有効期限の改善"
msgid ""
"Object versioning now supports a \"history\" mode in addition to the older "
"\"stack\" mode. The difference is in how DELETE requests are handled. For "
@ -669,6 +871,13 @@ msgstr ""
"クトを削除すると、オブジェクトが適切なノードにレプリケートされる前にオブジェ"
"クトを削除すると常に 404 となりました。"
msgid ""
"Remove ``swift-temp-url`` script. The functionality has been in swiftclient "
"for a long time and this script has been deprecated since 2.10.0."
msgstr ""
"``swift-temp-url`` スクリプトを削除しました。この機能は、長い間 swiftclient "
"にありましたが、2.10.0 から非推奨でした。"
msgid "Remove deprecated ``vm_test_mode`` option."
msgstr "非推奨の ``vm_test_mode`` オプションを削除しました。"
@ -683,6 +892,20 @@ msgstr ""
"上流の CI で動作するように、func env tox 名から \"in-process-\" を削除しまし"
"た。"
msgid ""
"Removed a race condition where a POST to an SLO could modify the X-Static-"
"Large-Object metadata."
msgstr ""
"SLO への POST が X-Static-Large-Object メタデータを変更できる、競合状"
"態を削除しました。"
msgid ""
"Removed all ``post_as_copy`` related code and configs. The option has been "
"deprecated since 2.13.0."
msgstr ""
"``post_as_copy`` に関連するすべてのコードと設定を削除しました。このオプション"
"は、2.13.0 から非推奨でした。"
msgid ""
"Removed per-device reconstruction stats. Now that the reconstructor is "
"shuffling parts before going through them, those stats no longer make sense."
@ -690,12 +913,30 @@ msgstr ""
"デバイスごとの再構成の統計を削除しました。再構成は、それらを通過する前にパー"
"ツをシャッフルするので、それらの統計はもはや意味をなしません。"
msgid ""
"Replaced ``replication_one_per_device`` by custom count defined by "
"``replication_concurrency_per_device``. The original config value is "
"deprecated, but continues to function for now. If both values are defined, "
"the old ``replication_one_per_device`` is ignored."
msgstr ""
"``replication_one_per_device`` を ``replication_concurrency_per_device`` に"
"よって定義されるカスタムカウントに置き換えました。元の設定値は非推奨となりま"
"したが、引き続き機能します。両方の値が定義された場合、古い "
"``replication_one_per_device`` は無視されます。"
msgid "Require that known-bad EC schemes be deprecated"
msgstr "既知の問題のある EC スキームが非推奨にされることを必須にしました。"
msgid "Respect server type for --md5 check in swift-recon."
msgstr "swift-recon での --md5 チェックのサーバー種別を尊重します。"
msgid ""
"Respond 400 Bad Request when Accept headers fail to parse instead of "
"returning 406 Not Acceptable."
msgstr ""
"Accept ヘッダーの解析に失敗した時、406 Not Acceptable の代わりに 400 Bad "
"Request が返されます。"
msgid ""
"Ring files now include byteorder information about the endian of the machine "
"used to generate the file, and the values are appropriately byteswapped if "
@ -746,6 +987,15 @@ msgstr ""
"ストが実行されますが、これはプロキシーサーバーの設定の \"[filter:slo]\" セク"
"ションの新しい `concurrency` 設定によってオペレーターが変更できます。"
msgid ""
"Save the ring when dispersion improves, even if balance doesn't improve."
msgstr ""
"バランスが改善されない場合でも、分散が改善されたときにリングを保存します。"
msgid "Send ETag header in 206 Partial Content responses to SLO reads."
msgstr ""
"SLO 読み込みへの 206 Partial Content 応答で ETag ヘッダーを送信します。"
msgid ""
"Significant improvements to the api-ref doc available at http://developer."
"openstack.org/api-ref/object-storage/."
@ -753,6 +1003,23 @@ msgstr ""
"http://developer.openstack.org/api-ref/object-storage/ の api-ref ドキュメン"
"トに対する重要な改善が行われました。"
msgid ""
"Static Large Object (SLO) manifest may now (again) have zero-byte last "
"segments."
msgstr ""
"Static Large Object (SLO) マニフェストは、0 バイトの最終セグメントを再度持つ"
"ようになりました。"
msgid ""
"Stop logging tracebacks in the ``object-replicator`` when it runs out of "
"handoff locations."
msgstr ""
"``object-replicator`` を実行する場所を使い果たした時のトレースバックのログを"
"停止しました。"
msgid "Stopped logging tracebacks when receiving an unexpected response."
msgstr "想定外の応答を受信した時のトレースバックのログを停止しました。"
msgid "Support multi-range GETs for static large objects."
msgstr "静的ラージオブジェクトの multi-range GET をサポートしました。"
@ -767,6 +1034,19 @@ msgstr ""
msgid "Swift Release Notes"
msgstr "Swift リリースノート"
msgid ""
"Symlink objects reference one other object. They are created by creating an "
"empty object with an X-Symlink-Target header. The value of the header is of "
"the format <container>/<object>, and the target does not need to exist at "
"the time of symlink creation. Cross-account symlinks can be created by "
"including the X-Symlink-Target-Account header."
msgstr ""
"Symlink オブジェクトは他のオブジェクトを参照します。これらは、X-Symlink-"
"Target ヘッダーを持つ空のオブジェクトの作成によって作られます。ヘッダーの値"
"は <container>/<object> 形式であり、シンボリックリンク作成時にターゲットが存"
"在する必要はありません。クロスアカウントのシンボリックリンクは、X-Symlink-"
"Target-Account ヘッダーを含むことによって作成できます。"
msgid ""
"TempURLs now support a validation against a common prefix. A prefix-based "
"signature grants access to all objects which share the same prefix. This "
@ -808,6 +1088,19 @@ msgstr ""
"より、多くのドライブを搭載したサーバーでは大幅に高速なリバランスが行われま"
"す。"
msgid ""
"The ``domain_remap`` middleware now supports the ``mangle_client_paths`` "
"option. Its default \"false\" value changes ``domain_remap`` parsing to stop "
"stripping the ``path_root`` value from URL paths. If users depend on this "
"path mangling, operators should set ``mangle_client_paths`` to \"True\" "
"before upgrading."
msgstr ""
"``domain_remap`` ミドルウェアは、``mangle_client_paths`` オプションをサポート"
"しました。デフォルト値 \"false\" では、``domain_remap`` の解析で URL のパスか"
"ら ``path_root`` 値を取り除かなくなります。このパスの切り取りに依存している場"
"合は、アップグレードする前に、オペレーターは ``mangle_client_paths`` を "
"\"True\" に設定する必要があります。"
msgid ""
"The default for `object_post_as_copy` has been changed to False. The option "
"is now deprecated and will be removed in a future release. If your cluster "
@ -861,6 +1154,16 @@ msgstr ""
"により、より多くの状況でフラグメントを再構築することができ、障害からの迅速な"
"回復が可能になります。"
msgid ""
"The number of container updates on object PUTs (ie to update listings) has "
"been recomputed to be far more efficient while maintaining durability "
"guarantees. Specifically, object PUTs to erasure-coded policies will now "
"normally result in far fewer container updates."
msgstr ""
"オブジェクトの PUT によるコンテナー更新の数(つまり、一覧の更新)は、耐久性の"
"保証を維持しながら、遥かに効率的に再計算されます。具体的には、消去符号化ポリ"
"シーへのオブジェクトの PUT は、通常、コンテナーの更新が大幅に少なくなります。"
msgid ""
"The object and container server config option ``slowdown`` has been "
"deprecated in favor of the new ``objects_per_second`` and "
@ -897,6 +1200,17 @@ msgstr ""
"``swift-ring-builder`` からのデバイスの出力は、リージョン、ゾーン、IP、デバイ"
"スによって、並べ替えられます。"
msgid ""
"The tempurl digest algorithm is now configurable, and Swift added support "
"for both SHA-256 and SHA-512. Supported tempurl digests are exposed to "
"clients in ``/info``. Additionally, tempurl signatures can now be base64 "
"encoded."
msgstr ""
"tempurl のダイジェストアルゴリズムが設定可能になり、Swift は、SHA-256 および "
"SHA-512 の両方のサポートを追加しました。サポートされる tempurl ダイジェスト"
"は、``/info`` にてクライアントに公開されます。さらに、tempurl の署名を "
"base64 でエンコードできるようになりました。"
msgid ""
"Throttle update_auditor_status calls so it updates no more than once per "
"minute."
@ -943,6 +1257,16 @@ msgstr ""
msgid "Upgrade Notes"
msgstr "アップグレード時の注意"
msgid ""
"Upgrade impact -- during a rolling upgrade, an updated proxy server may "
"write a manifest that an out-of-date proxy server will not be able to read. "
"This will resolve itself once the upgrade completes on all nodes."
msgstr ""
"アップグレードの影響 -- ローリングアップグレード中に、更新されたプロキシサー"
"バーは、期限切れのプロキシサーバーが読み込むことができないマニフェストを書き"
"出す可能性があります。これは、すべてのノードでアップグレードが完了すると自ず"
"と解決します。"
msgid "Various other minor bug fixes and improvements."
msgstr "様々な他のマイナーなバグ修正と改善。"
@ -966,9 +1290,47 @@ msgstr ""
"ポートするためにリングモジュールで機能が有効になっています。 CLI ツールは、以"
"降のリリースで提供されます。"
msgid ""
"When requesting objects, return 404 if a tombstone is found and is newer "
"than any data found. Previous behavior was to return stale data."
msgstr ""
"オブジェクトを要求するとき、廃棄済みオブジェクト (tombstone) があり、他のデー"
"タよりも新しい場合には 404 を返します。以前の動作では、古いデータが返されてい"
"ました。"
msgid ""
"When the object auditor examines an object, it will now add any missing "
"metadata checksums."
msgstr ""
"オブジェクト監査がオブジェクトを検査するとき、欠落しているメタデータのチェッ"
"クサムを追加します。"
msgid ""
"With heartbeating turned on, the proxy will start its response immediately "
"with 202 Accepted then send a single whitespace character periodically until "
"the request completes. At that point, a final summary chunk will be sent "
"which includes a \"Response Status\" key indicating success or failure and "
"(if successful) an \"Etag\" key indicating the Etag of the resulting SLO."
msgstr ""
"ハートビートをオンにすると、プロキシは 直ぐに 202 Accepted で応答を開始し、リ"
"クエストが完了するまで一つの空白文字を定期的に送信します。その時点で、成功か"
"失敗かを示す「Response Status 」キーと、成功した場合には SLO の結果として生じ"
"る Etag を示す「Etag」キーを含む最終サマリーチャンクが送信されるようになりま"
"す。"
msgid "Write-affinity aware object deletion"
msgstr "書き込みアフィニティは、オブジェクトの削除を認識します。"
msgid ""
"X-Delete-At computation now uses X-Timestamp instead of system time. This "
"prevents clock skew causing inconsistent expiry data."
msgstr ""
"X-Delete-At の計算に、システム時間の代わりに X-Timestamp を使うようになりまし"
"た。これは、時刻の誤差によって起こる期限データの矛盾を防止します。"
msgid "``swift-ring-builder`` improvements"
msgstr "``swift-ring-builder`` の改善"
msgid ""
"cname_lookup middleware now accepts a ``nameservers`` config variable that, "
"if defined, will be used for DNS lookups instead of the system default."

View File

@ -3,15 +3,15 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift Release Notes\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-02-07 10:45+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-02-07 03:09+0000\n"
"Last-Translator: Sungjin Kang <gang.sungjin@gmail.com>\n"
"Language-Team: Korean (South Korea)\n"
"Language: ko-KR\n"
"X-Generator: Zanata 3.9.6\n"
"Language: ko_KR\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=1; plural=0\n"
msgid "2.10.0"

View File

@ -0,0 +1,6 @@
===================================
Queens Series Release Notes
===================================
.. release-notes::
:branch: stable/queens

View File

@ -17,6 +17,7 @@ Script for generating a form signature for use with FormPost middleware.
"""
from __future__ import print_function
import hmac
import six
from hashlib import sha1
from os.path import basename
from time import time
@ -92,8 +93,14 @@ def main(argv):
print('For example: /v1/account/container')
print(' Or: /v1/account/container/object_prefix')
return 1
sig = hmac.new(key, '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
max_file_count, expires),
data = '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
max_file_count, expires)
if six.PY3:
data = data if isinstance(data, six.binary_type) else \
data.encode('utf8')
key = key if isinstance(key, six.binary_type) else \
key.encode('utf8')
sig = hmac.new(key, data,
sha1).hexdigest()
print(' Expires:', expires)
print('Signature:', sig)

View File

@ -23,7 +23,7 @@ from swift.common.utils import hash_path, storage_directory, \
from swift.common.ring import Ring
from swift.common.request_helpers import is_sys_meta, is_user_meta, \
strip_sys_meta_prefix, strip_user_meta_prefix, \
is_object_transient_sysmeta
is_object_transient_sysmeta, strip_object_transient_sysmeta_prefix
from swift.account.backend import AccountBroker, DATADIR as ABDATADIR
from swift.container.backend import ContainerBroker, DATADIR as CBDATADIR
from swift.obj.diskfile import get_data_dir, read_metadata, DATADIR_BASE, \
@ -191,13 +191,17 @@ def print_ring_locations(ring, datadir, account, container=None, obj=None,
'real value is set in the config file on each storage node.')
def print_db_info_metadata(db_type, info, metadata):
def print_db_info_metadata(db_type, info, metadata, drop_prefixes=False):
"""
print out data base info/metadata based on its type
:param db_type: database type, account or container
:param info: dict of data base info
:param metadata: dict of data base metadata
:param drop_prefixes: if True, strip "X-Account-Meta-",
"X-Container-Meta-", "X-Account-Sysmeta-", and
"X-Container-Sysmeta-" when displaying
User Metadata and System Metadata dicts
"""
if info is None:
raise ValueError('DB info is None')
@ -274,9 +278,13 @@ def print_db_info_metadata(db_type, info, metadata):
sys_metadata = {}
for key, (value, timestamp) in metadata.items():
if is_user_meta(db_type, key):
user_metadata[strip_user_meta_prefix(db_type, key)] = value
if drop_prefixes:
key = strip_user_meta_prefix(db_type, key)
user_metadata[key] = value
elif is_sys_meta(db_type, key):
sys_metadata[strip_sys_meta_prefix(db_type, key)] = value
if drop_prefixes:
key = strip_sys_meta_prefix(db_type, key)
sys_metadata[key] = value
else:
title = key.replace('_', '-').title()
print(' %s: %s' % (title, value))
@ -291,7 +299,7 @@ def print_db_info_metadata(db_type, info, metadata):
print('No user metadata found in db file')
def print_obj_metadata(metadata):
def print_obj_metadata(metadata, drop_prefixes=False):
"""
Print out basic info and metadata from object, as returned from
:func:`swift.obj.diskfile.read_metadata`.
@ -302,6 +310,10 @@ def print_obj_metadata(metadata):
Additional metadata is displayed unmodified.
:param metadata: dict of object metadata
:param drop_prefixes: if True, strip "X-Object-Meta-", "X-Object-Sysmeta-",
and "X-Object-Transient-Sysmeta-" when displaying
User Metadata, System Metadata, and Transient
System Metadata entries
:raises ValueError:
"""
@ -341,10 +353,16 @@ def print_obj_metadata(metadata):
for key, value in metadata.items():
if is_user_meta('Object', key):
if drop_prefixes:
key = strip_user_meta_prefix('Object', key)
user_metadata[key] = value
elif is_sys_meta('Object', key):
if drop_prefixes:
key = strip_sys_meta_prefix('Object', key)
sys_metadata[key] = value
elif is_object_transient_sysmeta(key):
if drop_prefixes:
key = strip_object_transient_sysmeta_prefix(key)
transient_sys_metadata[key] = value
else:
other_metadata[key] = value
@ -352,8 +370,8 @@ def print_obj_metadata(metadata):
def print_metadata(title, items):
print(title)
if items:
for meta_key in sorted(items):
print(' %s: %s' % (meta_key, items[meta_key]))
for key, value in sorted(items.items()):
print(' %s: %s' % (key, value))
else:
print(' No metadata found')
@ -363,7 +381,8 @@ def print_obj_metadata(metadata):
print_metadata('Other Metadata:', other_metadata)
def print_info(db_type, db_file, swift_dir='/etc/swift', stale_reads_ok=False):
def print_info(db_type, db_file, swift_dir='/etc/swift', stale_reads_ok=False,
drop_prefixes=False):
if db_type not in ('account', 'container'):
print("Unrecognized DB type: internal error")
raise InfoSystemExit()
@ -388,7 +407,7 @@ def print_info(db_type, db_file, swift_dir='/etc/swift', stale_reads_ok=False):
raise
account = info['account']
container = info['container'] if db_type == 'container' else None
print_db_info_metadata(db_type, info, broker.metadata)
print_db_info_metadata(db_type, info, broker.metadata, drop_prefixes)
try:
ring = Ring(swift_dir, ring_name=db_type)
except Exception:
@ -398,7 +417,7 @@ def print_info(db_type, db_file, swift_dir='/etc/swift', stale_reads_ok=False):
def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
policy_name=''):
policy_name='', drop_prefixes=False):
"""
Display information about an object read from the datafile.
Optionally verify the datafile content matches the ETag metadata.
@ -409,6 +428,10 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
metadata.
:param swift_dir: the path on disk to rings
:param policy_name: optionally the name to use when finding the ring
:param drop_prefixes: if True, strip "X-Object-Meta-", "X-Object-Sysmeta-",
and "X-Object-Transient-Sysmeta-" when displaying
User Metadata, System Metadata, and Transient
System Metadata entries
"""
if not os.path.exists(datafile):
print("Data file doesn't exist")
@ -458,7 +481,7 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
etag = metadata.pop('ETag', '')
length = metadata.pop('Content-Length', '')
path = metadata.get('name', '')
print_obj_metadata(metadata)
print_obj_metadata(metadata, drop_prefixes)
# Optional integrity check; it's useful, but slow.
file_len = None

View File

@ -56,12 +56,19 @@ def utf8encode(*args):
for s in args]
def utf8encodekeys(metadata):
uni_keys = [k for k in metadata if isinstance(k, six.text_type)]
for k in uni_keys:
sv = metadata[k]
del metadata[k]
metadata[k.encode('utf-8')] = sv
def native_str_keys(metadata):
    """
    Convert the keys of ``metadata`` to the platform's native ``str`` type,
    in place.

    On Python 2, unicode keys are UTF-8 encoded to byte strings; on
    Python 3, byte-string keys are UTF-8 decoded to text.

    :param metadata: dict whose keys are normalized in place
    """
    if six.PY2:
        wrong_type = six.text_type
        fix = lambda k: k.encode('utf-8')
    else:
        wrong_type = six.binary_type
        fix = lambda k: k.decode('utf-8')
    # Snapshot the offending keys first so we never mutate the dict
    # while iterating over it.
    for k in [k for k in metadata if isinstance(k, wrong_type)]:
        metadata[fix(k)] = metadata.pop(k)
def _db_timeout(timeout, db_file, call):
@ -741,7 +748,7 @@ class DatabaseBroker(object):
metadata = self.get_raw_metadata()
if metadata:
metadata = json.loads(metadata)
utf8encodekeys(metadata)
native_str_keys(metadata)
else:
metadata = {}
return metadata
@ -803,7 +810,7 @@ class DatabaseBroker(object):
self.db_type)
md = row[0]
md = json.loads(md) if md else {}
utf8encodekeys(md)
native_str_keys(md)
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise

View File

@ -33,7 +33,7 @@ from swift.common.direct_client import quote
from swift.common.utils import get_logger, whataremyips, storage_directory, \
renamer, mkdirs, lock_parent_directory, config_true_value, \
unlink_older_than, dump_recon_cache, rsync_module_interpolation, \
json, Timestamp
json, Timestamp, parse_overrides, round_robin_iter
from swift.common import ring
from swift.common.ring.utils import is_local_device
from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE
@ -69,6 +69,17 @@ def quarantine_db(object_file, server_type):
renamer(object_dir, quarantine_dir, fsync=False)
def looks_like_partition(dir_name):
    """
    True if the directory name is a valid partition number, False otherwise.

    A valid partition number is a (string) non-negative integer.
    """
    try:
        return int(dir_name) >= 0
    except ValueError:
        # Not numeric at all, e.g. "tmp" or "auditor_status".
        return False
def roundrobin_datadirs(datadirs):
"""
Generator to walk the data dirs in a round robin manner, evenly
@ -76,12 +87,13 @@ def roundrobin_datadirs(datadirs):
found (in their proper places). The partitions within each data
dir are walked randomly, however.
:param datadirs: a list of (path, node_id) to walk
:param datadirs: a list of (path, node_id, partition_filter) to walk
:returns: A generator of (partition, path_to_db_file, node_id)
"""
def walk_datadir(datadir, node_id):
partitions = os.listdir(datadir)
def walk_datadir(datadir, node_id, part_filter):
partitions = [pd for pd in os.listdir(datadir)
if looks_like_partition(pd) and part_filter(pd)]
random.shuffle(partitions)
for partition in partitions:
part_dir = os.path.join(datadir, partition)
@ -113,13 +125,12 @@ def roundrobin_datadirs(datadirs):
if e.errno != errno.ENOTEMPTY:
raise
its = [walk_datadir(datadir, node_id) for datadir, node_id in datadirs]
while its:
for it in its:
try:
yield next(it)
except StopIteration:
its.remove(it)
its = [walk_datadir(datadir, node_id, filt)
for datadir, node_id, filt in datadirs]
rr_its = round_robin_iter(its)
for datadir in rr_its:
yield datadir
class ReplConnection(BufferedHTTPConnection):
@ -194,6 +205,7 @@ class Replicator(Daemon):
self.recon_replicator)
self.extract_device_re = re.compile('%s%s([^%s]+)' % (
self.root, os.path.sep, os.path.sep))
self.handoffs_only = config_true_value(conf.get('handoffs_only', 'no'))
def _zero_stats(self):
"""Zero out the stats."""
@ -434,6 +446,8 @@ class Replicator(Daemon):
elif 200 <= response.status < 300:
rinfo = json.loads(response.data)
local_sync = broker.get_sync(rinfo['id'], incoming=False)
if rinfo.get('metadata', ''):
broker.update_metadata(json.loads(rinfo['metadata']))
if self._in_sync(rinfo, info, broker, local_sync):
return True
# if the difference in rowids between the two differs by
@ -619,17 +633,44 @@ class Replicator(Daemon):
return match.groups()[0]
return "UNKNOWN"
def _partition_dir_filter(self, device_id, partitions_to_replicate):
def filt(partition_dir):
partition = int(partition_dir)
if self.handoffs_only:
primary_node_ids = [
d['id'] for d in self.ring.get_part_nodes(partition)]
if device_id in primary_node_ids:
return False
if partition not in partitions_to_replicate:
return False
return True
return filt
def report_up_to_date(self, full_info):
return True
def run_once(self, *args, **kwargs):
"""Run a replication pass once."""
devices_to_replicate, partitions_to_replicate = parse_overrides(
**kwargs)
self._zero_stats()
dirs = []
ips = whataremyips(self.bind_ip)
if not ips:
self.logger.error(_('ERROR Failed to get my own IPs?'))
return
if self.handoffs_only:
self.logger.warning(
'Starting replication pass with handoffs_only enabled. '
'This mode is not intended for normal '
'operation; use handoffs_only with care.')
self._local_device_ids = set()
found_local = False
for node in self.ring.devs:
@ -646,13 +687,20 @@ class Replicator(Daemon):
self.logger.warning(
_('Skipping %(device)s as it is not mounted') % node)
continue
if node['device'] not in devices_to_replicate:
self.logger.debug(
'Skipping device %s due to given arguments',
node['device'])
continue
unlink_older_than(
os.path.join(self.root, node['device'], 'tmp'),
time.time() - self.reclaim_age)
datadir = os.path.join(self.root, node['device'], self.datadir)
if os.path.isdir(datadir):
self._local_device_ids.add(node['id'])
dirs.append((datadir, node['id']))
part_filt = self._partition_dir_filter(
node['id'], partitions_to_replicate)
dirs.append((datadir, node['id'], part_filt))
if not found_local:
self.logger.error("Can't find itself %s with port %s in ring "
"file, not replicating",
@ -663,6 +711,10 @@ class Replicator(Daemon):
self._replicate_object, part, object_file, node_id)
self.cpool.waitall()
self.logger.info(_('Replication run OVER'))
if self.handoffs_only:
self.logger.warning(
'Finished replication pass with handoffs_only enabled. '
'If handoffs_only is no longer required, disable it.')
self._report_stats()
def run_forever(self, *args, **kwargs):

View File

@ -76,7 +76,9 @@ ERROR_LIMIT_DURATION = 60
def md5hash(key):
    """
    Return the hex MD5 digest of ``key`` as ASCII bytes.

    :param key: value to hash; text is UTF-8 encoded first so str and
                bytes spellings of the same key hash identically
    :returns: 32 hex characters as ``bytes``
    """
    # The stale unconditional "return md5(key).hexdigest()" that preceded
    # the encoding logic has been removed; it made the bytes-normalizing
    # body unreachable.
    if not isinstance(key, bytes):
        key = key.encode('utf-8')
    return md5(key).hexdigest().encode('ascii')
def sanitize_timeout(timeout):
@ -88,7 +90,21 @@ def sanitize_timeout(timeout):
"""
if timeout > (30 * 24 * 60 * 60):
timeout += time.time()
return timeout
return int(timeout)
def set_msg(key, flags, timeout, value):
    """
    Build a memcached ``set`` command as a single bytes payload.

    :param key: memcached key, must already be bytes
    :param flags: integer flags field (pickle/JSON markers)
    :param timeout: expiry time for the entry
    :param value: payload, must already be bytes
    :returns: the full wire-format command, terminated with CRLF
    :raises TypeError: if key or value is not bytes
    """
    if not isinstance(key, bytes):
        raise TypeError('key must be bytes')
    if not isinstance(value, bytes):
        raise TypeError('value must be bytes')
    fields = [str(arg).encode('ascii')
              for arg in (flags, timeout, len(value))]
    header = b' '.join([b'set', key] + fields)
    return header + b'\r\n' + value + b'\r\n'
class MemcacheConnectionError(Exception):
@ -253,13 +269,15 @@ class MemcacheRing(object):
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
elif serialize:
value = json.dumps(value)
value = json.dumps(value).encode('ascii')
flags |= JSON_FLAG
elif not isinstance(value, bytes):
value = str(value).encode('utf-8')
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall('set %s %d %d %s\r\n%s\r\n' %
(key, flags, timeout, len(value), value))
sock.sendall(set_msg(key, flags, timeout, value))
# Wait for the set to complete
fp.readline()
self._return_conn(server, fp, sock)
@ -281,14 +299,14 @@ class MemcacheRing(object):
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall('get %s\r\n' % key)
sock.sendall(b'get ' + key + b'\r\n')
line = fp.readline().strip().split()
while True:
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == 'END':
if line[0].upper() == b'END':
break
if line[0].upper() == 'VALUE' and line[1] == key:
if line[0].upper() == b'VALUE' and line[1] == key:
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
@ -297,7 +315,7 @@ class MemcacheRing(object):
else:
value = None
elif int(line[2]) & JSON_FLAG:
value = json.loads(value)
value = json.loads(value.decode('ascii'))
fp.readline()
line = fp.readline().strip().split()
self._return_conn(server, fp, sock)
@ -323,28 +341,31 @@ class MemcacheRing(object):
:raises MemcacheConnectionError:
"""
key = md5hash(key)
command = 'incr'
command = b'incr'
if delta < 0:
command = 'decr'
delta = str(abs(int(delta)))
command = b'decr'
delta = str(abs(int(delta))).encode('ascii')
timeout = sanitize_timeout(time)
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall('%s %s %s\r\n' % (command, key, delta))
sock.sendall(b' '.join([
command, key, delta]) + b'\r\n')
line = fp.readline().strip().split()
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == 'NOT_FOUND':
if line[0].upper() == b'NOT_FOUND':
add_val = delta
if command == 'decr':
add_val = '0'
sock.sendall('add %s %d %d %s\r\n%s\r\n' %
(key, 0, timeout, len(add_val), add_val))
if command == b'decr':
add_val = b'0'
sock.sendall(b' '.join([
b'add', key, b'0', str(timeout).encode('ascii'),
str(len(add_val)).encode('ascii')
]) + b'\r\n' + add_val + b'\r\n')
line = fp.readline().strip().split()
if line[0].upper() == 'NOT_STORED':
sock.sendall('%s %s %s\r\n' % (command, key,
delta))
if line[0].upper() == b'NOT_STORED':
sock.sendall(b' '.join([
command, key, delta]) + b'\r\n')
line = fp.readline().strip().split()
ret = int(line[0].strip())
else:
@ -382,7 +403,7 @@ class MemcacheRing(object):
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall('delete %s\r\n' % key)
sock.sendall(b'delete ' + key + b'\r\n')
# Wait for the delete to complete
fp.readline()
self._return_conn(server, fp, sock)
@ -409,7 +430,7 @@ class MemcacheRing(object):
"""
server_key = md5hash(server_key)
timeout = sanitize_timeout(time)
msg = ''
msg = []
for key, value in mapping.items():
key = md5hash(key)
flags = 0
@ -417,14 +438,13 @@ class MemcacheRing(object):
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
elif serialize:
value = json.dumps(value)
value = json.dumps(value).encode('ascii')
flags |= JSON_FLAG
msg += ('set %s %d %d %s\r\n%s\r\n' %
(key, flags, timeout, len(value), value))
msg.append(set_msg(key, flags, timeout, value))
for (server, fp, sock) in self._get_conns(server_key):
try:
with Timeout(self._io_timeout):
sock.sendall(msg)
sock.sendall(b''.join(msg))
# Wait for the set to complete
for line in range(len(mapping)):
fp.readline()
@ -447,15 +467,15 @@ class MemcacheRing(object):
for (server, fp, sock) in self._get_conns(server_key):
try:
with Timeout(self._io_timeout):
sock.sendall('get %s\r\n' % ' '.join(keys))
sock.sendall(b'get ' + b' '.join(keys) + b'\r\n')
line = fp.readline().strip().split()
responses = {}
while True:
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == 'END':
if line[0].upper() == b'END':
break
if line[0].upper() == 'VALUE':
if line[0].upper() == b'VALUE':
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
@ -464,7 +484,7 @@ class MemcacheRing(object):
else:
value = None
elif int(line[2]) & JSON_FLAG:
value = json.loads(value)
value = json.loads(value.decode('ascii'))
responses[line[1]] = value
fp.readline()
line = fp.readline().strip().split()

View File

@ -314,7 +314,7 @@ class Bulk(object):
resp = head_cont_req.get_response(self.app)
if resp.is_success:
return False
if resp.status_int == 404:
if resp.status_int == HTTP_NOT_FOUND:
new_env = req.environ.copy()
new_env['PATH_INFO'] = container_path
new_env['swift.source'] = 'EA'

View File

@ -353,7 +353,6 @@ class SegmentedIterable(object):
self.current_resp = None
def _coalesce_requests(self):
start_time = time.time()
pending_req = pending_etag = pending_size = None
try:
for seg_dict in self.listing_iter:
@ -376,11 +375,6 @@ class SegmentedIterable(object):
first_byte = first_byte or 0
go_to_end = last_byte is None or (
seg_size is not None and last_byte == seg_size - 1)
if time.time() - start_time > self.max_get_time:
raise SegmentError(
'While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
# The "multipart-manifest=get" query param ensures that the
# segment is a plain old object, not some flavor of large
# object; therefore, its etag is its MD5sum and hence we can
@ -433,108 +427,119 @@ class SegmentedIterable(object):
except ListingIterError:
e_type, e_value, e_traceback = sys.exc_info()
if time.time() - start_time > self.max_get_time:
raise SegmentError(
'While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
if pending_req:
yield pending_req, pending_etag, pending_size
six.reraise(e_type, e_value, e_traceback)
if time.time() - start_time > self.max_get_time:
raise SegmentError(
'While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
if pending_req:
yield pending_req, pending_etag, pending_size
def _internal_iter(self):
def _requests_to_bytes_iter(self):
# Take the requests out of self._coalesce_requests, actually make
# the requests, and generate the bytes from the responses.
#
# Yields 2-tuples (segment-name, byte-chunk). The segment name is
# used for logging.
for data_or_req, seg_etag, seg_size in self._coalesce_requests():
if isinstance(data_or_req, bytes): # ugly, awful overloading
yield ('data segment', data_or_req)
continue
seg_req = data_or_req
seg_resp = seg_req.get_response(self.app)
if not is_success(seg_resp.status_int):
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'While processing manifest %s, '
'got %d while retrieving %s' %
(self.name, seg_resp.status_int, seg_req.path))
elif ((seg_etag and (seg_resp.etag != seg_etag)) or
(seg_size and (seg_resp.content_length != seg_size) and
not seg_req.range)):
# The content-length check is for security reasons. Seems
# possible that an attacker could upload a >1mb object and
# then replace it with a much smaller object with same
# etag. Then create a big nested SLO that calls that
# object many times which would hammer our obj servers. If
# this is a range request, don't check content-length
# because it won't match.
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Object segment no longer valid: '
'%(path)s etag: %(r_etag)s != %(s_etag)s or '
'%(r_size)s != %(s_size)s.' %
{'path': seg_req.path, 'r_etag': seg_resp.etag,
'r_size': seg_resp.content_length,
's_etag': seg_etag,
's_size': seg_size})
else:
self.current_resp = seg_resp
seg_hash = None
if seg_resp.etag and not seg_req.headers.get('Range'):
# Only calculate the MD5 if we can use it to validate
seg_hash = hashlib.md5()
document_iters = maybe_multipart_byteranges_to_document_iters(
seg_resp.app_iter,
seg_resp.headers['Content-Type'])
for chunk in itertools.chain.from_iterable(document_iters):
if seg_hash:
seg_hash.update(chunk)
yield (seg_req.path, chunk)
close_if_possible(seg_resp.app_iter)
if seg_hash and seg_hash.hexdigest() != seg_resp.etag:
raise SegmentError(
"Bad MD5 checksum in %(name)s for %(seg)s: headers had"
" %(etag)s, but object MD5 was actually %(actual)s" %
{'seg': seg_req.path, 'etag': seg_resp.etag,
'name': self.name, 'actual': seg_hash.hexdigest()})
def _byte_counting_iter(self):
# Checks that we give the client the right number of bytes. Raises
# SegmentError if the number of bytes is wrong.
bytes_left = self.response_body_length
try:
for data_or_req, seg_etag, seg_size in self._coalesce_requests():
if isinstance(data_or_req, bytes):
chunk = data_or_req # ugly, awful overloading
if bytes_left is None:
yield chunk
elif bytes_left >= len(chunk):
yield chunk
bytes_left -= len(chunk)
else:
yield chunk[:bytes_left]
continue
seg_req = data_or_req
seg_resp = seg_req.get_response(self.app)
if not is_success(seg_resp.status_int):
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'While processing manifest %s, '
'got %d while retrieving %s' %
(self.name, seg_resp.status_int, seg_req.path))
elif ((seg_etag and (seg_resp.etag != seg_etag)) or
(seg_size and (seg_resp.content_length != seg_size) and
not seg_req.range)):
# The content-length check is for security reasons. Seems
# possible that an attacker could upload a >1mb object and
# then replace it with a much smaller object with same
# etag. Then create a big nested SLO that calls that
# object many times which would hammer our obj servers. If
# this is a range request, don't check content-length
# because it won't match.
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Object segment no longer valid: '
'%(path)s etag: %(r_etag)s != %(s_etag)s or '
'%(r_size)s != %(s_size)s.' %
{'path': seg_req.path, 'r_etag': seg_resp.etag,
'r_size': seg_resp.content_length,
's_etag': seg_etag,
's_size': seg_size})
else:
self.current_resp = seg_resp
seg_hash = None
if seg_resp.etag and not seg_req.headers.get('Range'):
# Only calculate the MD5 if we can use it to validate
seg_hash = hashlib.md5()
document_iters = maybe_multipart_byteranges_to_document_iters(
seg_resp.app_iter,
seg_resp.headers['Content-Type'])
for chunk in itertools.chain.from_iterable(document_iters):
if seg_hash:
seg_hash.update(chunk)
if bytes_left is None:
yield chunk
elif bytes_left >= len(chunk):
yield chunk
bytes_left -= len(chunk)
else:
yield chunk[:bytes_left]
bytes_left -= len(chunk)
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Too many bytes for %(name)s; truncating in '
'%(seg)s with %(left)d bytes left' %
{'name': self.name, 'seg': seg_req.path,
'left': bytes_left})
close_if_possible(seg_resp.app_iter)
if seg_hash and seg_hash.hexdigest() != seg_resp.etag:
raise SegmentError(
"Bad MD5 checksum in %(name)s for %(seg)s: headers had"
" %(etag)s, but object MD5 was actually %(actual)s" %
{'seg': seg_req.path, 'etag': seg_resp.etag,
'name': self.name, 'actual': seg_hash.hexdigest()})
if bytes_left:
for seg_name, chunk in self._requests_to_bytes_iter():
if bytes_left is None:
yield chunk
elif bytes_left >= len(chunk):
yield chunk
bytes_left -= len(chunk)
else:
yield chunk[:bytes_left]
bytes_left -= len(chunk)
raise SegmentError(
'Not enough bytes for %s; closing connection' % self.name)
'Too many bytes for %(name)s; truncating in '
'%(seg)s with %(left)d bytes left' %
{'name': self.name, 'seg': seg_name,
'left': bytes_left})
if bytes_left:
raise SegmentError(
'Not enough bytes for %s; closing connection' % self.name)
    def _time_limited_iter(self):
        """
        Yield chunks from :meth:`_byte_counting_iter`, enforcing
        ``self.max_get_time`` on the whole large-object GET.

        :raises SegmentError: if processing exceeds ``self.max_get_time``
            seconds measured from the first iteration
        """
        # Makes sure a GET response doesn't take more than self.max_get_time
        # seconds to process. Raises an exception if things take too long.
        start_time = time.time()
        for chunk in self._byte_counting_iter():
            # The clock is sampled *before* yielding, so time the client
            # spends consuming this chunk is not charged to this check --
            # it shows up in the next iteration's sample instead.
            now = time.time()
            yield chunk
            if now - start_time > self.max_get_time:
                raise SegmentError(
                    'While processing manifest %s, '
                    'max LO GET time of %ds exceeded' %
                    (self.name, self.max_get_time))
def _internal_iter(self):
# Top level of our iterator stack: pass bytes through; catch and
# handle exceptions.
try:
for chunk in self._time_limited_iter():
yield chunk
except (ListingIterError, SegmentError) as err:
self.logger.error(err)
if not self.validated_first_segment:

View File

@ -290,8 +290,8 @@ def _resp_status_property():
else:
if isinstance(value, six.text_type):
value = value.encode('utf-8')
self.status_int = int(value.split(' ', 1)[0])
self.explanation = self.title = value.split(' ', 1)[1]
self.status_int = int(value.split(b' ', 1)[0])
self.explanation = self.title = value.split(b' ', 1)[1]
return property(getter, setter,
doc="Retrieve and set the Response status, e.g. '200 OK'")

View File

@ -45,7 +45,7 @@ import ctypes
import ctypes.util
from optparse import OptionParser
from tempfile import mkstemp, NamedTemporaryFile
from tempfile import gettempdir, mkstemp, NamedTemporaryFile
import glob
import itertools
import stat
@ -78,8 +78,7 @@ from six.moves.urllib.parse import urlparse as stdlib_urlparse
from swift import gettext_ as _
import swift.common.exceptions
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \
HTTP_PRECONDITION_FAILED, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE
from swift.common.http import is_server_error
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.linkat import linkat
@ -940,11 +939,16 @@ class Timestamp(object):
:param delta: deca-microsecond difference from the base timestamp
param, an int
"""
if isinstance(timestamp, bytes):
timestamp = timestamp.decode('ascii')
if isinstance(timestamp, six.string_types):
parts = timestamp.split('_', 1)
self.timestamp = float(parts.pop(0))
if parts:
self.offset = int(parts[0], 16)
base, base_offset = timestamp.partition('_')[::2]
self.timestamp = float(base)
if '_' in base_offset:
raise ValueError('invalid literal for int() with base 16: '
'%r' % base_offset)
if base_offset:
self.offset = int(base_offset, 16)
else:
self.offset = 0
else:
@ -1661,24 +1665,6 @@ class StatsdClient(object):
sample_rate)
def server_handled_successfully(status_int):
"""
True for successful responses *or* error codes that are not Swift's fault,
False otherwise. For example, 500 is definitely the server's fault, but
412 is an error code (4xx are all errors) that is due to a header the
client sent.
If one is tracking error rates to monitor server health, one would be
advised to use a function like this one, lest a client cause a flurry of
404s or 416s and make a spurious spike in your errors graph.
"""
return (is_success(status_int) or
is_redirection(status_int) or
status_int == HTTP_NOT_FOUND or
status_int == HTTP_PRECONDITION_FAILED or
status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE)
def timing_stats(**dec_kwargs):
"""
Returns a decorator that logs timing events or errors for public methods in
@ -1691,7 +1677,15 @@ def timing_stats(**dec_kwargs):
def _timing_stats(ctrl, *args, **kwargs):
start_time = time.time()
resp = func(ctrl, *args, **kwargs)
if server_handled_successfully(resp.status_int):
# .timing is for successful responses *or* error codes that are
# not Swift's fault. For example, 500 is definitely the server's
# fault, but 412 is an error code (4xx are all errors) that is
# due to a header the client sent.
#
# .errors.timing is for failures that *are* Swift's fault.
# Examples include 507 for an unmounted drive or 500 for an
# unhandled exception.
if not is_server_error(resp.status_int):
ctrl.logger.timing_since(method + '.timing',
start_time, **dec_kwargs)
else:
@ -3416,6 +3410,15 @@ def get_valid_utf8_str(str_or_unicode):
return valid_unicode_str.encode('utf-8')
class Everything(object):
    """
    A container that contains everything. If "e" is an instance of
    Everything, then "x in e" is true for all x.
    """

    def __contains__(self, candidate):
        # Membership is unconditional: used as the "no override given"
        # sentinel so every device/partition passes the filter.
        return True
def list_from_csv(comma_separated_str):
"""
Splits the str given and returns a properly stripped list of the comma
@ -3426,6 +3429,27 @@ def list_from_csv(comma_separated_str):
return []
def parse_overrides(devices='', partitions='', **kwargs):
    """
    Given daemon kwargs, parse out device and partition overrides, or
    Everything.

    :param devices: comma-separated device names, or '' for no override
    :param partitions: comma-separated partition numbers, or '' for none
    :returns: a tuple of (devices, partitions) which can be used like
              containers to check whether a given partition (integer) or
              device (string) is "in" the collection on which we should
              act; an :class:`Everything` instance when no override was
              supplied
    """
    device_names = list_from_csv(devices)
    partition_ids = [int(part) for part in list_from_csv(partitions)]
    # An empty override list means "act on everything".
    return (device_names or Everything(),
            partition_ids or Everything())
def csv_append(csv_string, item):
"""
Appends an item to a comma-separated string.
@ -4364,6 +4388,43 @@ def modify_priority(conf, logger):
_ioprio_set(io_class, io_priority)
def o_tmpfile_in_path_supported(dirpath):
    """
    Probe whether the O_TMPFILE open flag works for files in ``dirpath``.

    :param dirpath: directory to probe
    :returns: True if O_TMPFILE is usable there, False if the platform or
              filesystem lacks support
    :raises Exception: if the probe fails for any reason other than the
        kernel/filesystem rejecting O_TMPFILE (EINVAL)
    """
    if not hasattr(os, 'O_TMPFILE'):
        # Python only defines os.O_TMPFILE on platforms that have it.
        return False

    testfile = os.path.join(dirpath, ".o_tmpfile.test")

    hasO_TMPFILE = True
    fd = None
    try:
        fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_TMPFILE)
    except OSError as e:
        if e.errno == errno.EINVAL:
            hasO_TMPFILE = False
        else:
            # Fixed: the format string and mapping were previously passed
            # as two separate Exception args, so the message was never
            # interpolated; use % to build the real message.
            raise Exception("Error on '%(path)s' while checking "
                            "O_TMPFILE: '%(ex)s'"
                            % {'path': dirpath, 'ex': e})
    except Exception as e:
        raise Exception("Error on '%(path)s' while checking O_TMPFILE: "
                        "'%(ex)s'" % {'path': dirpath, 'ex': e})
    finally:
        if fd is not None:
            os.close(fd)

        # ensure closing the fd will actually remove the file
        # NOTE(review): a return inside finally suppresses any in-flight
        # exception when the test file lingers -- preserved as-is, but
        # worth confirming that is intended.
        if os.path.isfile(testfile):
            return False

    return hasO_TMPFILE
def o_tmpfile_in_tmpdir_supported():
    """Return True if O_TMPFILE is usable in the system temp directory."""
    return o_tmpfile_in_path_supported(gettempdir())
def o_tmpfile_supported():
"""
Returns True if O_TMPFILE flag is supported.
@ -4581,3 +4642,17 @@ class PipeMutex(object):
class ThreadSafeSysLogHandler(SysLogHandler):
def createLock(self):
self.lock = PipeMutex()
def round_robin_iter(its):
    """
    Takes a list of iterators, yield an element from each in a round-robin
    fashion until all of them are exhausted.

    :param its: list of iterators; it is mutated in place as iterators
                are exhausted and removed
    """
    while its:
        # Iterate over a snapshot: removing an exhausted iterator from
        # ``its`` while looping over it directly shifts the list and
        # silently skips the next iterator for one full round, breaking
        # strict round-robin ordering.
        for it in list(its):
            try:
                yield next(it)
            except StopIteration:
                its.remove(it)

View File

@ -119,6 +119,8 @@ class ConfigString(NamedConfigLoader):
}
self.parser = loadwsgi.NicerConfigParser("string", defaults=defaults)
self.parser.optionxform = str # Don't lower-case keys
# Defaults don't need interpolation (crazy PasteDeploy...)
self.parser.defaults = lambda: dict(self.parser._defaults, **defaults)
self.parser.readfp(self.contents)

View File

@ -38,7 +38,7 @@ from swift.common.constraints import valid_timestamp, check_utf8, check_drive
from swift.common import constraints
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.http import HTTP_NOT_FOUND, is_success
from swift.common.http import HTTP_NO_CONTENT, HTTP_NOT_FOUND, is_success
from swift.common.middleware import listing_formats
from swift.common.storage_policy import POLICIES
from swift.common.base_storage_server import BaseStorageServer
@ -522,7 +522,7 @@ class ContainerController(BaseStorageServer):
content_type=out_content_type, charset='utf-8')
ret.last_modified = math.ceil(float(resp_headers['X-PUT-Timestamp']))
if not ret.body:
ret.status_int = 204
ret.status_int = HTTP_NO_CONTENT
return ret
@public

View File

@ -10,16 +10,16 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-02-09 02:13+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2018-01-27 09:17+0000\n"
"PO-Revision-Date: 2018-02-16 07:31+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language: en-GB\n"
"Language: en_GB\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.9.6\n"
"X-Generator: Zanata 4.3.3\n"
"Language-Team: English (United Kingdom)\n"
msgid ""
@ -672,10 +672,6 @@ msgstr "Exception in top-level replication loop"
msgid "Exception in top-levelreconstruction loop"
msgstr "Exception in top-level reconstruction loop"
#, python-format
msgid "Exception while deleting container %(container)s %(err)s"
msgstr "Exception while deleting container %(container)s %(err)s"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Exception with %(ip)s:%(port)s/%(device)s"
@ -925,14 +921,6 @@ msgstr "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
#, python-format
msgid ""
"Pass beginning; %(containers)s possible containers; %(objects)s possible "
"objects"
msgstr ""
"Pass beginning; %(containers)s possible containers; %(objects)s possible "
"objects"
#, python-format
msgid "Pass completed in %(time)ds; %(objects)d objects expired"
msgstr "Pass completed in %(time)ds; %(objects)d objects expired"
@ -976,6 +964,10 @@ msgstr "Problem with fragment response: %s"
msgid "Profiling Error: %s"
msgstr "Profiling Error: %s"
#, python-format
msgid "Quarantined %(db_dir)s to %(quar_path)s due to %(reason)s"
msgstr "Quarantined %(db_dir)s to %(quar_path)s due to %(reason)s"
#, python-format
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr ""

View File

@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-02-09 02:13+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -18,7 +18,7 @@ msgstr ""
"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.9.6\n"
"X-Generator: Zanata 4.3.3\n"
"Language-Team: Spanish\n"
msgid ""
@ -620,10 +620,6 @@ msgstr "Excepción en el bucle de réplica de nivel superior"
msgid "Exception in top-levelreconstruction loop"
msgstr "Excepción en el bucle de reconstrucción de nivel superior"
#, python-format
msgid "Exception while deleting container %(container)s %(err)s"
msgstr "Excepción al suprimir el contenedor %(container)s %(err)s"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Excepción con %(ip)s:%(port)s/%(device)s"
@ -860,14 +856,6 @@ msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
"Tiempos de partición: máximo %(max).4fs, mínimo %(min).4fs, medio %(med).4fs"
#, python-format
msgid ""
"Pass beginning; %(containers)s possible containers; %(objects)s possible "
"objects"
msgstr ""
"Inicio del paso; %(containers)s posibles contenedores; %(objects)s posibles "
"objetos"
#, python-format
msgid "Pass completed in %(time)ds; %(objects)d objects expired"
msgstr "Paso completado en %(time)ds; %(objects)d objetos caducados"

View File

@ -10,16 +10,16 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-01-31 06:08+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 06:43+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
"Language: ko-KR\n"
"Language: ko_KR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.9.6\n"
"X-Generator: Zanata 4.3.3\n"
"Language-Team: Korean (South Korea)\n"
msgid ""

View File

@ -12,16 +12,16 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-01-31 06:08+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 06:43+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
"Language: pt-BR\n"
"Language: pt_BR\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.9.6\n"
"X-Generator: Zanata 4.3.3\n"
"Language-Team: Portuguese (Brazil)\n"
msgid ""

View File

@ -9,16 +9,16 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-01-31 06:08+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 06:43+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
"Language: tr-TR\n"
"Language: tr_TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.9.6\n"
"X-Generator: Zanata 4.3.3\n"
"Language-Team: Turkish (Turkey)\n"
msgid ""

View File

@ -9,16 +9,16 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-01-31 06:08+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 06:43+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
"Language: zh-CN\n"
"Language: zh_CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.9.6\n"
"X-Generator: Zanata 4.3.3\n"
"Language-Team: Chinese (China)\n"
msgid ""

View File

@ -8,16 +8,16 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-01-31 06:08+0000\n"
"POT-Creation-Date: 2018-02-28 19:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 06:43+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
"Language: zh-TW\n"
"Language: zh_TW\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.9.6\n"
"X-Generator: Zanata 4.3.3\n"
"Language-Team: Chinese (Taiwan)\n"
msgid ""

View File

@ -27,7 +27,7 @@ from eventlet import Timeout
from swift.obj import diskfile, replicator
from swift.common.utils import (
get_logger, ratelimit_sleep, dump_recon_cache, list_from_csv, listdir,
unlink_paths_older_than, readconf, config_auto_int_value)
unlink_paths_older_than, readconf, config_auto_int_value, round_robin_iter)
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist,\
DiskFileDeleted, DiskFileExpired
from swift.common.daemon import Daemon
@ -120,18 +120,17 @@ class AuditorWorker(object):
total_quarantines = 0
total_errors = 0
time_auditing = 0
# TODO: we should move audit-location generation to the storage policy,
# as we may (conceivably) have a different filesystem layout for each.
# We'd still need to generate the policies to audit from the actual
# directories found on-disk, and have appropriate error reporting if we
# find a directory that doesn't correspond to any known policy. This
# will require a sizable refactor, but currently all diskfile managers
# can find all diskfile locations regardless of policy -- so for now
# just use Policy-0's manager.
all_locs = (self.diskfile_router[POLICIES[0]]
# get AuditLocations for each policy
loc_generators = []
for policy in POLICIES:
loc_generators.append(
self.diskfile_router[policy]
.object_audit_location_generator(
device_dirs=device_dirs,
policy, device_dirs=device_dirs,
auditor_type=self.auditor_type))
all_locs = round_robin_iter(loc_generators)
for location in all_locs:
loop_time = time.time()
self.failsafe_object_audit(location)
@ -192,8 +191,11 @@ class AuditorWorker(object):
self.logger.info(
_('Object audit stats: %s') % json.dumps(self.stats_buckets))
# Unset remaining partitions to not skip them in the next run
diskfile.clear_auditor_status(self.devices, self.auditor_type)
for policy in POLICIES:
# Unset remaining partitions to not skip them in the next run
self.diskfile_router[policy].clear_auditor_status(
policy,
self.auditor_type)
def record_stats(self, obj_size):
"""
@ -319,7 +321,8 @@ class ObjectAuditor(Daemon):
zero_byte_only_at_fps=zero_byte_only_at_fps)
worker.audit_all_objects(mode=mode, device_dirs=device_dirs)
def fork_child(self, zero_byte_fps=False, **kwargs):
def fork_child(self, zero_byte_fps=False, sleep_between_zbf_scanner=False,
**kwargs):
"""Child execution"""
pid = os.fork()
if pid:
@ -328,6 +331,8 @@ class ObjectAuditor(Daemon):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
if zero_byte_fps:
kwargs['zero_byte_fps'] = self.conf_zero_byte_fps
if sleep_between_zbf_scanner:
self._sleep()
try:
self.run_audit(**kwargs)
except Exception as e:
@ -391,8 +396,9 @@ class ObjectAuditor(Daemon):
len(pids) > 1 and not once:
kwargs['device_dirs'] = override_devices
# sleep between ZBF scanner forks
self._sleep()
zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
zbf_pid = self.fork_child(zero_byte_fps=True,
sleep_between_zbf_scanner=True,
**kwargs)
pids.add(zbf_pid)
pids.discard(pid)

View File

@ -83,8 +83,8 @@ PICKLE_PROTOCOL = 2
DEFAULT_RECLAIM_AGE = timedelta(weeks=1).total_seconds()
HASH_FILE = 'hashes.pkl'
HASH_INVALIDATIONS_FILE = 'hashes.invalid'
METADATA_KEY = 'user.swift.metadata'
METADATA_CHECKSUM_KEY = 'user.swift.metadata_checksum'
METADATA_KEY = b'user.swift.metadata'
METADATA_CHECKSUM_KEY = b'user.swift.metadata_checksum'
DROP_CACHE_WINDOW = 1024 * 1024
# These are system-set metadata keys that cannot be changed with a POST.
# They should be lowercase.
@ -131,6 +131,26 @@ def _encode_metadata(metadata):
return dict(((encode_str(k), encode_str(v)) for k, v in metadata.items()))
def _decode_metadata(metadata):
    """
    Given a metadata dict from disk, convert keys and values to native
    strings.

    :param metadata: a dict
    """
    if six.PY2:
        # native str on py2 is bytes: encode any unicode as UTF-8
        def to_str(item):
            return item.encode('utf8') \
                if isinstance(item, six.text_type) else item
    else:
        # native str on py3 is text: decode bytes, preserving undecodable
        # byte sequences via surrogateescape
        def to_str(item):
            return item.decode('utf8', 'surrogateescape') \
                if isinstance(item, six.binary_type) else item
    return {to_str(k): to_str(v) for k, v in metadata.items()}
def read_metadata(fd, add_missing_checksum=False):
"""
Helper function to read the pickled metadata from an object file.
@ -144,8 +164,8 @@ def read_metadata(fd, add_missing_checksum=False):
key = 0
try:
while True:
metadata += xattr.getxattr(fd, '%s%s' % (METADATA_KEY,
(key or '')))
metadata += xattr.getxattr(
fd, METADATA_KEY + str(key or '').encode('ascii'))
key += 1
except (IOError, OSError) as e:
if errno.errorcode.get(e.errno) in ('ENOTSUP', 'EOPNOTSUPP'):
@ -173,7 +193,7 @@ def read_metadata(fd, add_missing_checksum=False):
logging.error("Error adding metadata: %s" % e)
if metadata_checksum:
computed_checksum = hashlib.md5(metadata).hexdigest()
computed_checksum = hashlib.md5(metadata).hexdigest().encode('ascii')
if metadata_checksum != computed_checksum:
raise DiskFileBadMetadataChecksum(
"Metadata checksum mismatch for %s: "
@ -183,7 +203,11 @@ def read_metadata(fd, add_missing_checksum=False):
# strings are utf-8 encoded when written, but have not always been
# (see https://bugs.launchpad.net/swift/+bug/1678018) so encode them again
# when read
return _encode_metadata(pickle.loads(metadata))
if six.PY2:
metadata = pickle.loads(metadata)
else:
metadata = pickle.loads(metadata, encoding='bytes')
return _decode_metadata(metadata)
def write_metadata(fd, metadata, xattr_size=65536):
@ -194,11 +218,11 @@ def write_metadata(fd, metadata, xattr_size=65536):
:param metadata: metadata to write
"""
metastr = pickle.dumps(_encode_metadata(metadata), PICKLE_PROTOCOL)
metastr_md5 = hashlib.md5(metastr).hexdigest()
metastr_md5 = hashlib.md5(metastr).hexdigest().encode('ascii')
key = 0
try:
while metastr:
xattr.setxattr(fd, '%s%s' % (METADATA_KEY, key or ''),
xattr.setxattr(fd, METADATA_KEY + str(key or '').encode('ascii'),
metastr[:xattr_size])
metastr = metastr[xattr_size:]
key += 1
@ -368,9 +392,10 @@ def invalidate_hash(suffix_dir):
suffix = basename(suffix_dir)
partition_dir = dirname(suffix_dir)
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
with lock_path(partition_dir):
with open(invalidations_file, 'ab') as inv_fh:
inv_fh.write(suffix + "\n")
if not isinstance(suffix, bytes):
suffix = suffix.encode('utf-8')
with lock_path(partition_dir), open(invalidations_file, 'ab') as inv_fh:
inv_fh.write(suffix + b"\n")
def relink_paths(target_path, new_target_path, check_existing=False):
@ -428,18 +453,20 @@ class AuditLocation(object):
return str(self.path)
def object_audit_location_generator(devices, mount_check=True, logger=None,
device_dirs=None, auditor_type="ALL"):
def object_audit_location_generator(devices, datadir, mount_check=True,
logger=None, device_dirs=None,
auditor_type="ALL"):
"""
Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all
objects stored under that directory if device_dirs isn't set. If
device_dirs is set, only yield AuditLocation for the objects under the
entries in device_dirs. The AuditLocation only knows the path to the hash
directory, not to the .data file therein (if any). This is to avoid a
double listdir(hash_dir); the DiskFile object will always do one, so
we don't.
objects stored under that directory for the given datadir (policy),
if device_dirs isn't set. If device_dirs is set, only yield AuditLocation
for the objects under the entries in device_dirs. The AuditLocation only
knows the path to the hash directory, not to the .data file therein
(if any). This is to avoid a double listdir(hash_dir); the DiskFile object
will always do one, so we don't.
:param devices: parent directory of the devices to be audited
:param datadir: objects directory
:param mount_check: flag to check if a mount check should be performed
on devices
:param logger: a logger object
@ -455,6 +482,7 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
# randomize devices in case of process restart before sweep completed
shuffle(device_dirs)
base, policy = split_policy_string(datadir)
for device in device_dirs:
if not check_drive(devices, device, mount_check):
if logger:
@ -462,55 +490,37 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
'Skipping %s as it is not %s', device,
'mounted' if mount_check else 'a dir')
continue
# loop through object dirs for all policies
device_dir = os.path.join(devices, device)
try:
dirs = os.listdir(device_dir)
except OSError as e:
if logger:
logger.debug(
_('Skipping %(dir)s: %(err)s') % {'dir': device_dir,
'err': e.strerror})
datadir_path = os.path.join(devices, device, datadir)
if not os.path.exists(datadir_path):
continue
for dir_ in dirs:
if not dir_.startswith(DATADIR_BASE):
continue
partitions = get_auditor_status(datadir_path, logger, auditor_type)
for pos, partition in enumerate(partitions):
update_auditor_status(datadir_path, logger,
partitions[pos:], auditor_type)
part_path = os.path.join(datadir_path, partition)
try:
base, policy = split_policy_string(dir_)
except PolicyError as e:
if logger:
logger.warning(_('Directory %(directory)r does not map '
'to a valid policy (%(error)s)') % {
'directory': dir_, 'error': e})
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
datadir_path = os.path.join(devices, device, dir_)
partitions = get_auditor_status(datadir_path, logger, auditor_type)
for pos, partition in enumerate(partitions):
update_auditor_status(datadir_path, logger,
partitions[pos:], auditor_type)
part_path = os.path.join(datadir_path, partition)
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
suffixes = listdir(part_path)
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for hsh in hashes:
hsh_path = os.path.join(suff_path, hsh)
yield AuditLocation(hsh_path, device, partition,
policy)
for hsh in hashes:
hsh_path = os.path.join(suff_path, hsh)
yield AuditLocation(hsh_path, device, partition,
policy)
update_auditor_status(datadir_path, logger, [], auditor_type)
update_auditor_status(datadir_path, logger, [], auditor_type)
def get_auditor_status(datadir_path, logger, auditor_type):
@ -564,15 +574,13 @@ def update_auditor_status(datadir_path, logger, partitions, auditor_type):
{'auditor_status': auditor_status, 'err': e})
def clear_auditor_status(devices, auditor_type="ALL"):
for device in os.listdir(devices):
for dir_ in os.listdir(os.path.join(devices, device)):
if not dir_.startswith("objects"):
continue
datadir_path = os.path.join(devices, device, dir_)
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
remove_file(auditor_status)
def clear_auditor_status(devices, datadir, auditor_type="ALL"):
    """
    Remove the auditor status file for ``datadir`` on every device.

    :param devices: parent directory of the devices
    :param datadir: objects directory for a particular policy
    :param auditor_type: either ALL or ZBF
    """
    for device in listdir(devices):
        status_path = os.path.join(
            devices, device, datadir,
            "auditor_status_%s.json" % auditor_type)
        remove_file(status_path)
def strip_self(f):
@ -608,10 +616,10 @@ class DiskFileRouter(object):
self.policy_to_manager = {}
for policy in POLICIES:
manager_cls = self.policy_type_to_manager_cls[policy.policy_type]
self.policy_to_manager[policy] = manager_cls(*args, **kwargs)
self.policy_to_manager[int(policy)] = manager_cls(*args, **kwargs)
def __getitem__(self, policy):
return self.policy_to_manager[policy]
return self.policy_to_manager[int(policy)]
class BaseDiskFileManager(object):
@ -1315,15 +1323,22 @@ class BaseDiskFileManager(object):
pipe_size=self.pipe_size,
use_linkat=self.use_linkat, **kwargs)
def object_audit_location_generator(self, device_dirs=None,
def clear_auditor_status(self, policy, auditor_type="ALL"):
datadir = get_data_dir(policy)
clear_auditor_status(self.devices, datadir, auditor_type)
def object_audit_location_generator(self, policy, device_dirs=None,
auditor_type="ALL"):
"""
Yield an AuditLocation for all objects stored under device_dirs.
:param policy: the StoragePolicy instance
:param device_dirs: directory of target device
:param auditor_type: either ALL or ZBF
"""
return object_audit_location_generator(self.devices, self.mount_check,
datadir = get_data_dir(policy)
return object_audit_location_generator(self.devices, datadir,
self.mount_check,
self.logger, device_dirs,
auditor_type)

View File

@ -39,8 +39,8 @@ MAX_OBJECTS_TO_CACHE = 100000
class ObjectExpirer(Daemon):
"""
Daemon that queries the internal hidden expiring_objects_account to
discover objects that need to be deleted.
Daemon that queries the internal hidden task accounts to discover objects
that need to be deleted.
:param conf: The daemon configuration.
"""
@ -49,13 +49,9 @@ class ObjectExpirer(Daemon):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-expirer')
self.interval = int(conf.get('interval') or 300)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
request_tries = int(conf.get('request_tries') or 3)
self.swift = swift or InternalClient(
conf_path, 'Swift Object Expirer', request_tries)
self.read_conf_for_queue_access(swift)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
@ -65,13 +61,29 @@ class ObjectExpirer(Daemon):
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
# This option defines how long an un-processable expired object
# marker will be retried before it is abandoned. It is not coupled
# with the tombstone reclaim age in the consistency engine.
self.reclaim_age = int(conf.get('reclaim_age', 604800))
def read_conf_for_queue_access(self, swift):
self.expiring_objects_account = \
(self.conf.get('auto_create_account_prefix') or '.') + \
(self.conf.get('expiring_objects_account_name') or
'expiring_objects')
# This is for common parameter with general task queue in future
self.task_container_prefix = ''
self.ic_conf_path = \
self.conf.get('__file__') or '/etc/swift/object-expirer.conf'
request_tries = int(self.conf.get('request_tries') or 3)
self.swift = swift or InternalClient(
self.ic_conf_path, 'Swift Object Expirer', request_tries)
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
def report(self, final=False):
"""
Emits a log line report of the progress so far, or the final progress
@ -95,6 +107,20 @@ class ObjectExpirer(Daemon):
'time': elapsed, 'objects': self.report_objects})
self.report_last_time = time()
def parse_task_obj(self, task_obj):
    """
    Parse a task object name into its components.

    :param task_obj: a task object name in format of
                     "<timestamp>-<target_account>/<target_container>" +
                     "/<target_obj>"
    :return: 4-tuple of (delete_at_time, target_account, target_container,
             target_obj)
    """
    ts_str, target_path = task_obj.split('-', 1)
    account, container, obj = split_path('/' + target_path, 3, 3, True)
    return Timestamp(ts_str), account, container, obj
def round_robin_order(self, task_iter):
"""
Change order of expiration tasks to avoid deleting objects in a
@ -119,9 +145,10 @@ class ObjectExpirer(Daemon):
target_account, target_container, _junk = \
split_path('/' + delete_task['target_path'], 3, 3, True)
cache_key = '%s/%s' % (target_account, target_container)
# sanity
except ValueError:
self.logger.exception('Unexcepted error handling task %r' %
delete_task)
self.logger.error('Unexcepted error handling task %r' %
delete_task)
continue
obj_cache[cache_key].append(delete_task)
@ -135,45 +162,82 @@ class ObjectExpirer(Daemon):
for task in dump_obj_cache_in_round_robin():
yield task
def iter_task_containers_to_expire(self):
def hash_mod(self, name, divisor):
    """
    Map a task object name onto one of ``divisor`` buckets.

    :param name: a task object name
    :param divisor: a divisor number
    :return: an integer in [0, divisor) deciding which expirer is
             assigned to the task
    """
    # md5 is only used to spread names evenly across buckets (shuffling
    # mod); this is not a security-sensitive use
    digest = hashlib.md5(name).hexdigest()
    return int(digest, 16) % divisor
def iter_task_accounts_to_expire(self):
    """
    Yields (task_account, my_index, divisor).

    my_index and divisor are used to assign each task object to exactly
    one expirer: every expirer computes the assigned index of a task
    (a value in [0, divisor)) and only executes tasks whose assigned
    index equals its own my_index. Because each expirer has a different
    my_index per task_account, a task object is executed by only one
    expirer.
    """
    if self.processes > 0:
        my_index, divisor = self.process, self.processes
    else:
        my_index, divisor = 0, 1
    yield self.expiring_objects_account, my_index, divisor
def delete_at_time_of_task_container(self, task_container):
"""
get delete_at timestamp from task_container name
"""
# task_container name is timestamp
return Timestamp(task_container)
def iter_task_containers_to_expire(self, task_account):
"""
Yields task_container names under the task_account if the delete at
timestamp of task_container is past.
"""
for c in self.swift.iter_containers(task_account,
prefix=self.task_container_prefix):
task_container = str(c['name'])
timestamp = Timestamp(task_container)
timestamp = self.delete_at_time_of_task_container(task_container)
if timestamp > Timestamp.now():
break
yield task_container
def iter_task_to_expire(self, task_containers):
def iter_task_to_expire(self, task_account_container_list,
my_index, divisor):
"""
Yields task expire info dict which consists of task_container,
target_path, timestamp_to_delete, and target_path
Yields task expire info dict which consists of task_account,
task_container, task_object, timestamp_to_delete, and target_path
"""
for task_container in task_containers:
for o in self.swift.iter_objects(self.expiring_objects_account,
task_container):
for task_account, task_container in task_account_container_list:
for o in self.swift.iter_objects(task_account, task_container):
task_object = o['name'].encode('utf8')
delete_timestamp, target_path = task_object.split('-', 1)
delete_timestamp = Timestamp(delete_timestamp)
try:
delete_timestamp, target_account, target_container, \
target_object = self.parse_task_obj(task_object)
except ValueError:
self.logger.exception('Unexcepted error handling task %r' %
task_object)
continue
if delete_timestamp > Timestamp.now():
# we shouldn't yield the object that doesn't reach
# the expiration date yet.
break
if self.processes > 0:
obj_process = int(
hashlib.md5('%s/%s' % (task_container, task_object)).
hexdigest(), 16)
if obj_process % self.processes != self.process:
continue
# Only one expirer daemon assigned for one task
if self.hash_mod('%s/%s' % (task_container, task_object),
divisor) != my_index:
continue
yield {'task_container': task_container,
yield {'task_account': task_account,
'task_container': task_container,
'task_object': task_object,
'target_path': target_path,
'target_path': '/'.join([
target_account, target_container, target_object]),
'delete_timestamp': delete_timestamp}
def run_once(self, *args, **kwargs):
@ -193,36 +257,55 @@ class ObjectExpirer(Daemon):
self.report_objects = 0
try:
self.logger.debug('Run begin')
containers, objects = \
self.swift.get_account_info(self.expiring_objects_account)
self.logger.info(_('Pass beginning; '
'%(containers)s possible containers; '
'%(objects)s possible objects') % {
'containers': containers, 'objects': objects})
task_account_container_list_to_delete = list()
for task_account, my_index, divisor in \
self.iter_task_accounts_to_expire():
container_count, obj_count = \
self.swift.get_account_info(task_account)
task_containers = list(self.iter_task_containers_to_expire())
# the task account is skipped if there are no task container
if not container_count:
continue
# delete_task_iter is a generator to yield a dict of
# task_container, task_object, delete_timestamp, target_path
# to handle delete actual object and pop the task from the queue.
delete_task_iter = self.round_robin_order(
self.iter_task_to_expire(task_containers))
self.logger.info(_(
'Pass beginning for task account %(account)s; '
'%(container_count)s possible containers; '
'%(obj_count)s possible objects') % {
'account': task_account,
'container_count': container_count,
'obj_count': obj_count})
for delete_task in delete_task_iter:
pool.spawn_n(self.delete_object, **delete_task)
task_account_container_list = \
[(task_account, task_container) for task_container in
self.iter_task_containers_to_expire(task_account)]
task_account_container_list_to_delete.extend(
task_account_container_list)
# delete_task_iter is a generator to yield a dict of
# task_account, task_container, task_object, delete_timestamp,
# target_path to handle delete actual object and pop the task
# from the queue.
delete_task_iter = \
self.round_robin_order(self.iter_task_to_expire(
task_account_container_list, my_index, divisor))
for delete_task in delete_task_iter:
pool.spawn_n(self.delete_object, **delete_task)
pool.waitall()
for container in task_containers:
for task_account, task_container in \
task_account_container_list_to_delete:
try:
self.swift.delete_container(
self.expiring_objects_account,
container,
task_account, task_container,
acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
except (Exception, Timeout) as err:
self.logger.exception(
_('Exception while deleting container %(container)s '
'%(err)s') % {'container': container,
'err': str(err)})
_('Exception while deleting container %(account)s '
'%(container)s %(err)s') % {
'account': task_account,
'container': task_container, 'err': str(err)})
self.logger.debug('Run end')
self.report(final=True)
except (Exception, Timeout):
@ -277,7 +360,7 @@ class ObjectExpirer(Daemon):
'process must be less than processes')
def delete_object(self, target_path, delete_timestamp,
task_container, task_object):
task_account, task_container, task_object):
start_time = time()
try:
try:
@ -289,33 +372,34 @@ class ObjectExpirer(Daemon):
if float(delete_timestamp) > time() - self.reclaim_age:
# we'll have to retry the DELETE later
raise
self.pop_queue(task_container, task_object)
self.pop_queue(task_account, task_container, task_object)
self.report_objects += 1
self.logger.increment('objects')
except UnexpectedResponse as err:
self.logger.increment('errors')
self.logger.error(
'Unexpected response while deleting object %(container)s '
'%(obj)s: %(err)s' % {
'container': task_container, 'obj': task_object,
'err': str(err.resp.status_int)})
'Unexpected response while deleting object '
'%(account)s %(container)s %(obj)s: %(err)s' % {
'account': task_account, 'container': task_container,
'obj': task_object, 'err': str(err.resp.status_int)})
self.logger.debug(err.resp.body)
except (Exception, Timeout) as err:
self.logger.increment('errors')
self.logger.exception(
'Exception while deleting object %(container)s %(obj)s'
' %(err)s' % {'container': task_container,
'obj': task_object, 'err': str(err)})
'Exception while deleting object %(account)s %(container)s '
'%(obj)s %(err)s' % {
'account': task_account, 'container': task_container,
'obj': task_object, 'err': str(err)})
self.logger.timing_since('timing', start_time)
self.report()
def pop_queue(self, container, obj):
def pop_queue(self, task_account, task_container, task_object):
"""
Issue a delete object request to the container for the expiring object
queue entry.
Issue a delete object request to the task_container for the expiring
object queue entry.
"""
direct_delete_container_entry(self.swift.container_ring,
self.expiring_objects_account,
container, obj)
direct_delete_container_entry(self.swift.container_ring, task_account,
task_container, task_object)
def delete_actual_object(self, actual_obj, timestamp):
"""

View File

@ -169,7 +169,8 @@ class ObjectReplicator(Daemon):
:returns: return code of rsync process. 0 is successful
"""
start_time = time.time()
ret_val = None
proc = ret_val = None
try:
with Timeout(self.rsync_timeout):
proc = subprocess.Popen(args,
@ -177,9 +178,24 @@ class ObjectReplicator(Daemon):
stderr=subprocess.STDOUT)
results = proc.stdout.read()
ret_val = proc.wait()
except GreenletExit:
self.logger.error(_("Killing by lockup detector"))
if proc:
# Assume rsync is still responsive and give it a chance
# to shut down gracefully
proc.terminate()
# Final good-faith effort to clean up the process table.
# Note that this blocks, but worst-case we wait for the
# lockup detector to come around and kill us. This can
# happen if the process is stuck down in kernel-space
# waiting on I/O or something.
proc.wait()
raise
except Timeout:
self.logger.error(_("Killing long-running rsync: %s"), str(args))
proc.kill()
if proc:
proc.kill()
proc.wait()
return 1 # failure response code
total_time = time.time() - start_time
for result in results.split('\n'):
@ -729,11 +745,6 @@ class ObjectReplicator(Daemon):
override_policies=override_policies)
for job in jobs:
current_nodes = job['nodes']
if override_devices and job['device'] not in override_devices:
continue
if override_partitions and \
job['partition'] not in override_partitions:
continue
dev_path = check_drive(self.devices_dir, job['device'],
self.mount_check)
if not dev_path:

View File

@ -15,6 +15,7 @@
""" Object Server for Swift """
import six
import six.moves.cPickle as pickle
import json
import os
@ -170,7 +171,9 @@ class ObjectController(BaseStorageServer):
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
socket._fileobject.default_bufsize = self.network_chunk_size
if six.PY2:
socket._fileobject.default_bufsize = self.network_chunk_size
# TODO: find a way to enable similar functionality in py3
# Provide further setup specific to an object server implementation.
self.setup(conf)
@ -1060,10 +1063,10 @@ class ObjectController(BaseStorageServer):
else:
response_class = HTTPConflict
response_timestamp = max(orig_timestamp, req_timestamp)
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
orig_delete_at = Timestamp(orig_metadata.get('X-Delete-At') or 0)
try:
req_if_delete_at_val = request.headers['x-if-delete-at']
req_if_delete_at = int(req_if_delete_at_val)
req_if_delete_at = Timestamp(req_if_delete_at_val)
except KeyError:
pass
except ValueError:

View File

@ -150,11 +150,11 @@ class ObjectControllerRouter(object):
def __init__(self):
self.policy_to_controller_cls = {}
for policy in POLICIES:
self.policy_to_controller_cls[policy] = \
self.policy_to_controller_cls[int(policy)] = \
self.policy_type_to_controller_map[policy.policy_type]
def __getitem__(self, policy):
return self.policy_to_controller_cls[policy]
return self.policy_to_controller_cls[int(policy)]
class BaseObjectController(Controller):

View File

@ -276,7 +276,10 @@ class Application(object):
#
# ** Because it affects the client as well, currently, we use the
# client chunk size as the govenor and not the object chunk size.
socket._fileobject.default_bufsize = self.client_chunk_size
if sys.version_info < (3,):
socket._fileobject.default_bufsize = self.client_chunk_size
# TODO: find a way to enable similar functionality in py3
self.expose_info = config_true_value(
conf.get('expose_info', 'yes'))
self.disallowed_sections = list_from_csv(

View File

@ -372,14 +372,12 @@ def _load_ec_as_default_policy(proxy_conf_file, swift_conf_file, **kwargs):
def _load_s3api(proxy_conf_file, swift_conf_file, **kwargs):
"""
Load s3api configuration and override proxy-server.conf contents.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid
"""
_debug('Setting configuration for s3api')
# The global conf dict cannot be used to modify the pipeline.
# The pipeline loader requires the pipeline to be set in the local_conf.
# If pipeline is set in the global conf dict (which in turn populates the
@ -409,6 +407,49 @@ def _load_s3api(proxy_conf_file, swift_conf_file, **kwargs):
return test_conf_file, swift_conf_file
def _load_domain_remap_staticweb(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Load domain_remap and staticweb into proxy server pipeline.

    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    :raises InProcessException: raised if proxy conf contents are invalid
    """
    _debug('Setting configuration for domain_remap')

    # The pipeline cannot be changed through the global conf dict: the
    # pipeline loader only honours a pipeline set in the local_conf, and a
    # value placed in the global dict (which populates the DEFAULTS options)
    # would prevent the pipeline being loaded during wsgi load_app.
    # Therefore we must rewrite the [pipeline:main] section directly.
    parser = ConfigParser()
    parser.read(proxy_conf_file)
    section = 'pipeline:main'
    try:
        original_pipeline = parser.get(section, 'pipeline')
    except NoSectionError as err:
        raise InProcessException(
            'Error problem with proxy conf file %s: %s'
            % (proxy_conf_file, err))

    new_pipeline = original_pipeline.replace(
        "tempauth",
        "domain_remap tempauth staticweb")
    if new_pipeline == original_pipeline:
        # nothing was substituted -- tempauth was absent from the pipeline
        raise InProcessException(
            "Failed to insert domain_remap and staticweb into pipeline: %s"
            % original_pipeline)
    parser.set(section, 'pipeline', new_pipeline)

    test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
    with open(test_conf_file, 'w') as fp:
        parser.write(fp)

    return test_conf_file, swift_conf_file
# Mapping from possible values of the variable
# SWIFT_TEST_IN_PROCESS_CONF_LOADER
# to the method to call for loading the associated configuration
@ -417,6 +458,7 @@ def _load_s3api(proxy_conf_file, swift_conf_file, **kwargs):
conf_loaders = {
'encryption': _load_encryption,
'ec': _load_ec_as_default_policy,
'domain_remap_staticweb': _load_domain_remap_staticweb,
's3api': _load_s3api,
}

View File

@ -543,7 +543,7 @@ class Container(Base):
def delete_files(self):
for f in listing_items(self.files):
file_item = self.file(f)
if not file_item.delete():
if not file_item.delete(tolerate_missing=True):
return False
return listing_empty(self.files)
@ -764,14 +764,19 @@ class File(Base):
self.conn.make_path(self.path))
return True
def delete(self, hdrs=None, parms=None, cfg=None):
def delete(self, hdrs=None, parms=None, cfg=None, tolerate_missing=False):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if self.conn.make_request('DELETE', self.path, hdrs=hdrs,
cfg=cfg, parms=parms) != 204:
if tolerate_missing:
allowed_statuses = (204, 404)
else:
allowed_statuses = (204,)
if self.conn.make_request(
'DELETE', self.path, hdrs=hdrs, cfg=cfg,
parms=parms) not in allowed_statuses:
raise ResponseError(self.conn.response, 'DELETE',
self.conn.make_path(self.path))

View File

@ -0,0 +1,400 @@
#!/usr/bin/python -u
# Copyright (c) 2010-2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from unittest2 import SkipTest
import test.functional as tf
from test.functional import cluster_info
from test.functional.tests import Utils, Base, BaseEnv
from test.functional.swift_test_client import Account, Connection, \
ResponseError
def setUpModule():
    # Standard functional-test module hook: delegates to the test.functional
    # package setup (presumably establishing cluster config/connections --
    # definition not visible here) before any test in this module runs.
    tf.setup_package()
def tearDownModule():
    # Counterpart of setUpModule: hands cleanup back to the test.functional
    # package teardown once every test in this module has finished.
    tf.teardown_package()
def requires_domain_remap(func):
    """Decorator: skip the wrapped test unless domain_remap is enabled.

    The check is deferred to call time so the cluster_info mapping is
    consulted when the test actually runs, not at import time.
    """
    @functools.wraps(func)
    def skip_unless_enabled(*args, **kwargs):
        if 'domain_remap' in cluster_info:
            return func(*args, **kwargs)
        raise SkipTest('Domain Remap is not enabled')
    return skip_unless_enabled
class TestStaticWebEnv(BaseEnv):
    # Shared fixture for the staticweb functional tests: one world-readable
    # container populated with an index page, an error page, a stylesheet and
    # a small pseudo-directory tree.
    static_web_enabled = None  # tri-state: None initially, then True/False

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()

        # Detect middleware availability once; later runs reuse the cached
        # True/False answer.
        if cls.static_web_enabled is None:
            cls.static_web_enabled = 'staticweb' in cluster_info
        if not cls.static_web_enabled:
            return

        cls.account = Account(
            cls.conn, tf.config.get('account', tf.config['username']))
        cls.account.delete_containers()

        cls.container = cls.account.container(Utils.create_name())

        # '.r:*,.rlistings' makes the container anonymously readable and
        # listable, which staticweb requires for the anon test variants.
        if not cls.container.create(
                hdrs={'X-Container-Read': '.r:*,.rlistings'}):
            raise ResponseError(cls.conn.response)

        # Logical layout; the '/'-suffixed entries become pseudo-directory
        # marker objects (Content-Type: application/directory).
        objects = ['index',
                   'error',
                   'listings_css',
                   'dir/',
                   'dir/obj',
                   'dir/subdir/',
                   'dir/subdir/obj']

        cls.objects = {}
        # sorted() guarantees every parent pseudo-dir ('dir/', 'dir/subdir/')
        # is created before its children, so the parent lookup below is safe.
        for item in sorted(objects):
            parent = None

            if '/' in item.rstrip('/'):
                # nested item: prefix its randomized name with the parent
                # pseudo-dir's already-created name
                parent, _ = item.rstrip('/').rsplit('/', 1)
                path = '%s/%s' % (cls.objects[parent + '/'].name,
                                  Utils.create_name())
            else:
                path = Utils.create_name()

            if item[-1] == '/':
                cls.objects[item] = cls.container.file(path)
                cls.objects[item].write(hdrs={
                    'Content-Type': 'application/directory'})
            else:
                cls.objects[item] = cls.container.file(path)
                cls.objects[item].write('%s contents' % item)
class TestStaticWeb(Base):
    """
    Functional tests for the staticweb middleware: trailing-slash redirects,
    directory listings and index documents, exercised both via direct
    storage-URL paths and -- when domain_remap is available -- via
    account-level and container-level remapped hostnames.
    """
    env = TestStaticWebEnv
    set_up = False

    def setUp(self):
        super(TestStaticWeb, self).setUp()
        if self.env.static_web_enabled is False:
            raise SkipTest("Static Web not enabled")
        elif self.env.static_web_enabled is not True:
            # just some sanity checking
            raise Exception(
                "Expected static_web_enabled to be True/False, got %r" %
                (self.env.static_web_enabled,))
        # storage_url looks like '/v1/AUTH_xxx'; the last component is the
        # account used to build the domain_remap hostnames.
        _, _, acct = self.env.account.conn.storage_url.split('/')

        self.domain_remap_acct = '%s.example.com' % acct

        self.domain_remap_cont = '%s.%s.example.com' % (
            self.env.container.name, acct)

    def _set_staticweb_headers(self, index=False, listings=False,
                               listings_css=False, error=False):
        # Enable exactly the requested staticweb features on the test
        # container; explicitly remove the others so state set by a previous
        # test cannot leak into this one.
        objects = self.env.objects
        headers = {}
        if index:
            headers['X-Container-Meta-Web-Index'] = objects['index'].name
        else:
            headers['X-Remove-Container-Meta-Web-Index'] = 'true'

        if listings:
            headers['X-Container-Meta-Web-Listings'] = 'true'
        else:
            headers['X-Remove-Container-Meta-Web-Listings'] = 'true'

        if listings_css:
            headers['X-Container-Meta-Web-Listings-Css'] = \
                objects['listings_css'].name
        else:
            headers['X-Remove-Container-Meta-Web-Listings-Css'] = 'true'

        if error:
            headers['X-Container-Meta-Web-Error'] = objects['error'].name
        else:
            headers['X-Remove-Container-Meta-Web-Error'] = 'true'

        self.assertTrue(self.env.container.update_metadata(hdrs=headers))

    def _test_redirect_with_slash(self, host, path, anonymous=False):
        # A GET on a listing-enabled container/pseudo-dir without a trailing
        # slash must 301-redirect to the slash-terminated URL.
        self._set_staticweb_headers(listings=True)
        self.env.account.conn.make_request('GET', path,
                                           hdrs={'X-Web-Mode': not anonymous,
                                                 'Host': host},
                                           cfg={'no_auth_token': anonymous,
                                                'absolute_path': True})
        self.assert_status(301)
        self.assertRegexpMatches(self.env.conn.response.getheader('location'),
                                 'http[s]?://%s%s/' % (host, path))

    def _test_redirect_slash_direct(self, anonymous):
        host = self.env.account.conn.storage_netloc
        path = '%s/%s' % (self.env.account.conn.storage_url,
                          self.env.container.name)
        self._test_redirect_with_slash(host, path, anonymous=anonymous)

        path = '%s/%s/%s' % (self.env.account.conn.storage_url,
                             self.env.container.name,
                             self.env.objects['dir/'].name)
        self._test_redirect_with_slash(host, path, anonymous=anonymous)

    def test_redirect_slash_auth_direct(self):
        self._test_redirect_slash_direct(False)

    def test_redirect_slash_anon_direct(self):
        self._test_redirect_slash_direct(True)

    @requires_domain_remap
    def _test_redirect_slash_remap_acct(self, anonymous):
        host = self.domain_remap_acct
        path = '/%s' % self.env.container.name
        self._test_redirect_with_slash(host, path, anonymous=anonymous)

        path = '/%s/%s' % (self.env.container.name,
                           self.env.objects['dir/'].name)
        self._test_redirect_with_slash(host, path, anonymous=anonymous)

    def test_redirect_slash_auth_remap_acct(self):
        self._test_redirect_slash_remap_acct(False)

    def test_redirect_slash_anon_remap_acct(self):
        self._test_redirect_slash_remap_acct(True)

    @requires_domain_remap
    def _test_redirect_slash_remap_cont(self, anonymous):
        host = self.domain_remap_cont
        path = '/%s' % self.env.objects['dir/'].name
        self._test_redirect_with_slash(host, path, anonymous=anonymous)

    def test_redirect_slash_auth_remap_cont(self):
        self._test_redirect_slash_remap_cont(False)

    def test_redirect_slash_anon_remap_cont(self):
        self._test_redirect_slash_remap_cont(True)

    def _test_get_path(self, host, path, anonymous=False, expected_status=200,
                       expected_in=None, expected_not_in=None):
        # GET ``path`` with the given Host header and assert on status and
        # body substrings.
        # NOTE: defaults changed from mutable [] literals to None sentinels
        # (behavior unchanged; avoids the shared-mutable-default pitfall).
        expected_in = [] if expected_in is None else expected_in
        expected_not_in = [] if expected_not_in is None else expected_not_in
        self.env.account.conn.make_request('GET', path,
                                           hdrs={'X-Web-Mode': not anonymous,
                                                 'Host': host},
                                           cfg={'no_auth_token': anonymous,
                                                'absolute_path': True})
        self.assert_status(expected_status)
        body = self.env.account.conn.response.read()
        for string in expected_in:
            self.assertIn(string, body)
        for string in expected_not_in:
            self.assertNotIn(string, body)

    def _test_listing(self, host, path, title=None, links=None, notins=None,
                      css=None, anonymous=False):
        # Assert that GET ``path`` renders a staticweb listing page with the
        # given title, anchor links and (optionally) stylesheet link, and
        # that none of ``notins`` leak into the body.
        links = [] if links is None else links
        notins = [] if notins is None else notins
        self._set_staticweb_headers(listings=True,
                                    listings_css=(css is not None))
        if title is None:
            title = path
        expected_in = ['Listing of %s' % title] + [
            '<a href="{0}">{0}</a>'.format(link) for link in links]
        expected_not_in = notins
        if css:
            expected_in.append('<link rel="stylesheet" type="text/css" '
                               'href="%s" />' % css)
        self._test_get_path(host, path, anonymous=anonymous,
                            expected_in=expected_in,
                            expected_not_in=expected_not_in)

    def _test_listing_direct(self, anonymous, listings_css):
        objects = self.env.objects
        host = self.env.account.conn.storage_netloc
        path = '%s/%s/' % (self.env.account.conn.storage_url,
                           self.env.container.name)
        css = objects['listings_css'].name if listings_css else None
        # BUG FIX: this call previously hard-coded anonymous=True, silently
        # ignoring the method's ``anonymous`` argument for the container
        # listing (so the "auth" variants actually ran anonymously).
        self._test_listing(host, path, anonymous=anonymous, css=css,
                           links=[objects['index'].name,
                                  objects['dir/'].name + '/'],
                           notins=[objects['dir/obj'].name])

        path = '%s/%s/%s/' % (self.env.account.conn.storage_url,
                              self.env.container.name,
                              objects['dir/'].name)
        css = '../%s' % objects['listings_css'].name if listings_css else None
        self._test_listing(host, path, anonymous=anonymous, css=css,
                           links=[objects['dir/obj'].name.split('/')[-1],
                                  objects['dir/subdir/'].name.split('/')[-1]
                                  + '/'],
                           notins=[objects['index'].name,
                                   objects['dir/subdir/obj'].name])

    def test_listing_auth_direct_without_css(self):
        self._test_listing_direct(False, False)

    def test_listing_anon_direct_without_css(self):
        self._test_listing_direct(True, False)

    def test_listing_auth_direct_with_css(self):
        self._test_listing_direct(False, True)

    def test_listing_anon_direct_with_css(self):
        self._test_listing_direct(True, True)

    @requires_domain_remap
    def _test_listing_remap_acct(self, anonymous, listings_css):
        objects = self.env.objects
        host = self.domain_remap_acct
        path = '/%s/' % self.env.container.name
        css = objects['listings_css'].name if listings_css else None
        title = '%s/%s/' % (self.env.account.conn.storage_url,
                            self.env.container.name)
        self._test_listing(host, path, title=title, anonymous=anonymous,
                           css=css,
                           links=[objects['index'].name,
                                  objects['dir/'].name + '/'],
                           notins=[objects['dir/obj'].name])

        path = '/%s/%s/' % (self.env.container.name, objects['dir/'].name)
        css = '../%s' % objects['listings_css'].name if listings_css else None
        # BUG FIX: the title previously interpolated the File object itself
        # (objects['dir/']) rather than its name, producing a repr in the
        # expected listing title; use .name as the direct variant does.
        title = '%s/%s/%s/' % (self.env.account.conn.storage_url,
                               self.env.container.name,
                               objects['dir/'].name)
        self._test_listing(host, path, title=title, anonymous=anonymous,
                           css=css,
                           links=[objects['dir/obj'].name.split('/')[-1],
                                  objects['dir/subdir/'].name.split('/')[-1]
                                  + '/'],
                           notins=[objects['index'].name,
                                   objects['dir/subdir/obj'].name])

    def test_listing_auth_remap_acct_without_css(self):
        self._test_listing_remap_acct(False, False)

    def test_listing_anon_remap_acct_without_css(self):
        self._test_listing_remap_acct(True, False)

    def test_listing_auth_remap_acct_with_css(self):
        self._test_listing_remap_acct(False, True)

    def test_listing_anon_remap_acct_with_css(self):
        self._test_listing_remap_acct(True, True)

    @requires_domain_remap
    def _test_listing_remap_cont(self, anonymous, listings_css):
        objects = self.env.objects
        host = self.domain_remap_cont
        path = '/'
        css = objects['listings_css'].name if listings_css else None
        title = '%s/%s/' % (self.env.account.conn.storage_url,
                            self.env.container.name)
        self._test_listing(host, path, title=title, anonymous=anonymous,
                           css=css,
                           links=[objects['index'].name,
                                  objects['dir/'].name + '/'],
                           notins=[objects['dir/obj'].name])

        path = '/%s/' % objects['dir/'].name
        css = '../%s' % objects['listings_css'].name if listings_css else None
        # BUG FIX: use .name for the title (see _test_listing_remap_acct).
        title = '%s/%s/%s/' % (self.env.account.conn.storage_url,
                               self.env.container.name,
                               objects['dir/'].name)
        self._test_listing(host, path, title=title, anonymous=anonymous,
                           css=css,
                           links=[objects['dir/obj'].name.split('/')[-1],
                                  objects['dir/subdir/'].name.split('/')[-1]
                                  + '/'],
                           notins=[objects['index'].name,
                                   objects['dir/subdir/obj'].name])

    def test_listing_auth_remap_cont_without_css(self):
        self._test_listing_remap_cont(False, False)

    def test_listing_anon_remap_cont_without_css(self):
        self._test_listing_remap_cont(True, False)

    def test_listing_auth_remap_cont_with_css(self):
        self._test_listing_remap_cont(False, True)

    def test_listing_anon_remap_cont_with_css(self):
        self._test_listing_remap_cont(True, True)

    def _test_index(self, host, path, anonymous=False, expected_status=200):
        # With an index document configured, GET of the directory URL serves
        # the index object instead of a listing.
        self._set_staticweb_headers(index=True)
        if expected_status == 200:
            expected_in = ['index contents']
            expected_not_in = ['Listing']
        else:
            expected_in = []
            expected_not_in = []
        self._test_get_path(host, path, anonymous=anonymous,
                            expected_status=expected_status,
                            expected_in=expected_in,
                            expected_not_in=expected_not_in)

    def _test_index_direct(self, anonymous):
        objects = self.env.objects
        host = self.env.account.conn.storage_netloc
        path = '%s/%s/' % (self.env.account.conn.storage_url,
                           self.env.container.name)
        self._test_index(host, path, anonymous=anonymous)

        # the pseudo-dir has no index object of its own, so expect 404
        path = '%s/%s/%s/' % (self.env.account.conn.storage_url,
                              self.env.container.name,
                              objects['dir/'].name)
        self._test_index(host, path, anonymous=anonymous, expected_status=404)

    def test_index_auth_direct(self):
        self._test_index_direct(False)

    def test_index_anon_direct(self):
        self._test_index_direct(True)

    @requires_domain_remap
    def _test_index_remap_acct(self, anonymous):
        objects = self.env.objects
        host = self.domain_remap_acct
        path = '/%s/' % self.env.container.name
        self._test_index(host, path, anonymous=anonymous)

        path = '/%s/%s/' % (self.env.container.name, objects['dir/'].name)
        self._test_index(host, path, anonymous=anonymous, expected_status=404)

    def test_index_auth_remap_acct(self):
        self._test_index_remap_acct(False)

    def test_index_anon_remap_acct(self):
        self._test_index_remap_acct(True)

    @requires_domain_remap
    def _test_index_remap_cont(self, anonymous):
        objects = self.env.objects
        host = self.domain_remap_cont
        path = '/'
        self._test_index(host, path, anonymous=anonymous)

        path = '/%s/' % objects['dir/'].name
        self._test_index(host, path, anonymous=anonymous, expected_status=404)

    def test_index_auth_remap_cont(self):
        self._test_index_remap_cont(False)

    def test_index_anon_remap_cont(self):
        self._test_index_remap_cont(True)

View File

@ -587,10 +587,8 @@ class TestContainerTempurl(Base):
def test_tempurl_keys_hidden_from_acl_readonly(self):
if not tf.cluster_info.get('tempauth'):
raise SkipTest('TEMP AUTH SPECIFIC TEST')
original_token = self.env.container.conn.storage_token
self.env.container.conn.storage_token = self.env.conn2.storage_token
metadata = self.env.container.info()
self.env.container.conn.storage_token = original_token
metadata = self.env.container.info(cfg={
'use_token': self.env.conn2.storage_token})
self.assertNotIn(
'tempurl_key', metadata,

View File

@ -90,12 +90,13 @@ class TestDbUsyncReplicator(ReplProbeTest):
expected_meta = {
'x-container-meta-a': '2',
'x-container-meta-b': '2',
'x-container-meta-b': '3',
'x-container-meta-c': '1',
'x-container-meta-d': '2',
'x-container-meta-e': '3',
}
# node that got the object updates still doesn't have the meta
# node that got the object updates now has the meta
resp_headers = direct_client.direct_head_container(
cnode, cpart, self.account, container)
for header, value in expected_meta.items():
@ -104,14 +105,6 @@ class TestDbUsyncReplicator(ReplProbeTest):
self.assertNotIn(resp_headers.get('x-container-object-count'),
(None, '0', 0))
expected_meta = {
'x-container-meta-a': '2',
'x-container-meta-b': '3',
'x-container-meta-c': '1',
'x-container-meta-d': '2',
'x-container-meta-e': '3',
}
# other nodes still have the meta, as well as objects
for node in cnodes:
resp_headers = direct_client.direct_head_container(

View File

@ -1080,6 +1080,15 @@ class Timeout(object):
raise TimeoutException
def requires_o_tmpfile_support_in_tmp(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not utils.o_tmpfile_in_tmpdir_supported():
raise SkipTest('Requires O_TMPFILE support in TMPDIR')
return func(*args, **kwargs)
return wrapper
def requires_o_tmpfile_support(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
@ -1292,7 +1301,7 @@ def xattr_supported_check():
# assume the worst -- xattrs aren't supported
supports_xattr_cached_val = False
big_val = 'x' * (4096 + 1) # more than 4k of metadata
big_val = b'x' * (4096 + 1) # more than 4k of metadata
try:
fd, tmppath = mkstemp()
xattr.setxattr(fd, 'user.swift.testing_key', big_val)

View File

@ -17,7 +17,7 @@
import hashlib
import hmac
import mock
from six import StringIO
import six
import unittest
from swift.cli import form_signature
@ -33,14 +33,19 @@ class TestFormSignature(unittest.TestCase):
max_file_size = str(int(1024 * 1024 * 1024 * 3.14159)) # π GiB
max_file_count = '3'
expected_signature = hmac.new(
key,
"\n".join((
path, redirect, max_file_size, max_file_count,
str(int(the_time + expires)))),
hashlib.sha1).hexdigest()
data = "\n".join((
path, redirect, max_file_size, max_file_count,
str(int(the_time + expires))))
out = StringIO()
if six.PY3:
data = data if isinstance(data, six.binary_type) else \
data.encode('utf8')
key = key if isinstance(key, six.binary_type) else \
key.encode('utf8')
expected_signature = hmac.new(key, data, hashlib.sha1).hexdigest()
out = six.StringIO()
with mock.patch('swift.cli.form_signature.time', lambda: the_time):
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
@ -59,7 +64,7 @@ class TestFormSignature(unittest.TestCase):
self.assertIn(sig_input, out.getvalue())
def test_too_few_args(self):
out = StringIO()
out = six.StringIO()
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
'/path/to/swift-form-signature',
@ -70,7 +75,7 @@ class TestFormSignature(unittest.TestCase):
self.assertIn(usage, out.getvalue())
def test_invalid_filesize_arg(self):
out = StringIO()
out = six.StringIO()
key = 'secret squirrel'
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
@ -79,7 +84,7 @@ class TestFormSignature(unittest.TestCase):
self.assertNotEqual(exitcode, 0)
def test_invalid_filecount_arg(self):
out = StringIO()
out = six.StringIO()
key = 'secret squirrel'
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
@ -88,7 +93,7 @@ class TestFormSignature(unittest.TestCase):
self.assertNotEqual(exitcode, 0)
def test_invalid_path_arg(self):
out = StringIO()
out = six.StringIO()
key = 'secret squirrel'
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
@ -97,7 +102,7 @@ class TestFormSignature(unittest.TestCase):
self.assertNotEqual(exitcode, 0)
def test_invalid_seconds_arg(self):
out = StringIO()
out = six.StringIO()
key = 'secret squirrel'
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([

View File

@ -42,8 +42,8 @@ class TestCliInfoBase(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
self.orig_hp = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
utils.HASH_PATH_PREFIX = 'info'
utils.HASH_PATH_SUFFIX = 'info'
utils.HASH_PATH_PREFIX = b'info'
utils.HASH_PATH_SUFFIX = b'info'
self.testdir = os.path.join(mkdtemp(), 'tmp_test_cli_info')
utils.mkdirs(self.testdir)
rmtree(self.testdir)
@ -134,7 +134,7 @@ Metadata:
UUID: abadf100d0ddba11
X-Other-Something: boo
No system metadata found in db file
User Metadata: {'mydata': 'swift'}'''
User Metadata: {'x-account-meta-mydata': 'swift'}'''
self.assertEqual(sorted(out.getvalue().strip().split('\n')),
sorted(exp_out.split('\n')))
@ -160,7 +160,7 @@ No system metadata found in db file
md = {'x-container-sysmeta-mydata': ('swift', '0000000000.00000')}
out = StringIO()
with mock.patch('sys.stdout', out):
print_db_info_metadata('container', info, md)
print_db_info_metadata('container', info, md, True)
exp_out = '''Path: /acct/cont
Account: acct
Container: cont
@ -875,7 +875,7 @@ class TestPrintObj(TestCliInfoBase):
self.assertRaises(InfoSystemExit, print_obj, datafile)
with open(datafile, 'wb') as fp:
fp.write('1234')
fp.write(b'1234')
out = StringIO()
with mock.patch('sys.stdout', out):
@ -1129,7 +1129,7 @@ Other Metadata:
})
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata)
print_obj_metadata(metadata, True)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
@ -1138,8 +1138,8 @@ Other Metadata:
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
X-Object-Sysmeta-Mtime: 107.3
X-Object-Sysmeta-Name: Obj name
Mtime: 107.3
Name: Obj name
Transient System Metadata:
No metadata found
User Metadata:
@ -1209,7 +1209,7 @@ Other Metadata:
del metadata['name']
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata)
print_obj_metadata(metadata, True)
exp_out = '''Path: Not found in metadata
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
@ -1218,7 +1218,7 @@ System Metadata:
Transient System Metadata:
No metadata found
User Metadata:
X-Object-Meta-Mtime: 107.3
Mtime: 107.3
Other Metadata:
No metadata found''' % (
utils.Timestamp(106.3).internal)
@ -1253,7 +1253,7 @@ Other Metadata:
del metadata['X-Timestamp']
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata)
print_obj_metadata(metadata, True)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
@ -1266,7 +1266,7 @@ System Metadata:
Transient System Metadata:
No metadata found
User Metadata:
X-Object-Meta-Mtime: 107.3
Mtime: 107.3
Other Metadata:
No metadata found'''
@ -1300,6 +1300,34 @@ Other Metadata:
self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({
'X-Object-Meta-Mtime': '107.3',
'X-Object-Sysmeta-Mtime': '106.3',
'X-Object-Transient-Sysmeta-Mtime': '105.3',
'X-Object-Mtime': '104.3',
})
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata, True)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
Object: dummy
Object hash: 128fdf98bddd1b1e8695f4340e67a67a
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
Mtime: 106.3
Transient System Metadata:
Mtime: 105.3
User Metadata:
Mtime: 107.3
Other Metadata:
X-Object-Mtime: 104.3''' % (
utils.Timestamp(106.3).internal)
self.assertEqual(out.getvalue().strip(), exp_out)
class TestPrintObjWeirdPath(TestPrintObjFullMeta):
def setUp(self):

View File

@ -511,7 +511,7 @@ aliases = %s
self.recon_instance.umount_check(hosts)
output = stdout.getvalue()
r = re.compile("\Not mounted:|Device errors: .*")
r = re.compile("^Not mounted:|Device errors: .*")
lines = output.splitlines()
self.assertTrue(lines)
for line in lines:

View File

@ -62,7 +62,7 @@ class TestRelinker(unittest.TestCase):
self.object_fname = "1278553064.00000.data"
self.objname = os.path.join(self.objdir, self.object_fname)
with open(self.objname, "wb") as dummy:
dummy.write("Hello World!")
dummy.write(b"Hello World!")
write_metadata(dummy, {'name': '/a/c/o', 'Content-Length': '12'})
test_policies = [StoragePolicy(0, 'platin', True)]
@ -164,7 +164,7 @@ class TestRelinker(unittest.TestCase):
self._common_test_cleanup()
# Pretend the object in the new place got corrupted
with open(self.expected_file, "wb") as obj:
obj.write('trash')
obj.write(b'trash')
self.assertEqual(
1, relinker.cleanup(self.testdir, self.devices, True, self.logger))

View File

@ -3040,14 +3040,9 @@ class TestSloGetManifest(SloTestCase):
def test_download_takes_too_long(self, mock_time):
mock_time.time.side_effect = [
0, # start time
1, # just building the first segment request; purely local
2, # build the second segment request object, too, so we know we
# can't coalesce and should instead go fetch the first segment
7 * 3600, # that takes a while, but gets serviced; we build the
# third request and service the second
21 * 3600, # which takes *even longer* (ostensibly something to
# do with submanifests), but we build the fourth...
28 * 3600, # and before we go to service it we time out
10 * 3600, # a_5
20 * 3600, # b_10
30 * 3600, # c_15, but then we time out
]
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',

View File

@ -1220,7 +1220,8 @@ class TestDBReplicator(unittest.TestCase):
self.assertTrue(os.path.isdir(dirpath))
node_id = 1
results = list(db_replicator.roundrobin_datadirs([(datadir, node_id)]))
results = list(db_replicator.roundrobin_datadirs(
[(datadir, node_id, lambda p: True)]))
expected = [
('450', os.path.join(datadir, db_path), node_id),
]
@ -1241,12 +1242,14 @@ class TestDBReplicator(unittest.TestCase):
self.assertEqual({'18', '1054', '1060', '450'},
set(os.listdir(datadir)))
results = list(db_replicator.roundrobin_datadirs([(datadir, node_id)]))
results = list(db_replicator.roundrobin_datadirs(
[(datadir, node_id, lambda p: True)]))
self.assertEqual(results, expected)
self.assertEqual({'1054', '1060', '450'},
set(os.listdir(datadir)))
results = list(db_replicator.roundrobin_datadirs([(datadir, node_id)]))
results = list(db_replicator.roundrobin_datadirs(
[(datadir, node_id, lambda p: True)]))
self.assertEqual(results, expected)
# non db file in '1060' dir is not deleted and exception is handled
self.assertEqual({'1060', '450'},
@ -1266,9 +1269,11 @@ class TestDBReplicator(unittest.TestCase):
return []
path = path[len('/srv/node/sdx/containers'):]
if path == '':
return ['123', '456', '789', '9999']
return ['123', '456', '789', '9999', "-5", "not-a-partition"]
# 456 will pretend to be a file
# 9999 will be an empty partition with no contents
# -5 and not-a-partition were created by something outside
# Swift
elif path == '/123':
return ['abc', 'def.db'] # def.db will pretend to be a file
elif path == '/123/abc':
@ -1292,6 +1297,10 @@ class TestDBReplicator(unittest.TestCase):
'weird2'] # weird2 will pretend to be a dir, if asked
elif path == '9999':
return []
elif path == 'not-a-partition':
raise Exception("shouldn't look in not-a-partition")
elif path == '-5':
raise Exception("shouldn't look in -5")
return []
def _isdir(path):
@ -1327,8 +1336,8 @@ class TestDBReplicator(unittest.TestCase):
mock.patch(base + 'random.shuffle', _shuffle), \
mock.patch(base + 'os.rmdir', _rmdir):
datadirs = [('/srv/node/sda/containers', 1),
('/srv/node/sdb/containers', 2)]
datadirs = [('/srv/node/sda/containers', 1, lambda p: True),
('/srv/node/sdb/containers', 2, lambda p: True)]
results = list(db_replicator.roundrobin_datadirs(datadirs))
# The results show that the .db files are returned, the devices
# interleaved.
@ -1432,6 +1441,215 @@ class TestDBReplicator(unittest.TestCase):
replicator.logger)])
class TestHandoffsOnly(unittest.TestCase):
    """Tests for the replicator's handoffs_only mode and run_once overrides.

    A fake three-node, six-disk ring is paired with a small on-disk tree of
    container DBs so we can observe exactly which partitions the replicator
    chooses to replicate.
    """

    class FakeRing3Nodes(object):
        _replicas = 3

        # Three nodes, two disks each
        devs = [
            dict(id=0, region=1, zone=1,
                 meta='', weight=500.0, ip='10.0.0.1', port=6201,
                 replication_ip='10.0.0.1', replication_port=6201,
                 device='sdp'),
            dict(id=1, region=1, zone=1,
                 meta='', weight=500.0, ip='10.0.0.1', port=6201,
                 replication_ip='10.0.0.1', replication_port=6201,
                 device='sdq'),
            dict(id=2, region=1, zone=1,
                 meta='', weight=500.0, ip='10.0.0.2', port=6201,
                 replication_ip='10.0.0.2', replication_port=6201,
                 device='sdp'),
            dict(id=3, region=1, zone=1,
                 meta='', weight=500.0, ip='10.0.0.2', port=6201,
                 replication_ip='10.0.0.2', replication_port=6201,
                 device='sdq'),
            dict(id=4, region=1, zone=1,
                 meta='', weight=500.0, ip='10.0.0.3', port=6201,
                 replication_ip='10.0.0.3', replication_port=6201,
                 device='sdp'),
            dict(id=5, region=1, zone=1,
                 meta='', weight=500.0, ip='10.0.0.3', port=6201,
                 replication_ip='10.0.0.3', replication_port=6201,
                 device='sdq'),
        ]

        def __init__(self, *a, **kw):
            pass

        def get_part(self, account, container=None, obj=None):
            # Every name maps to partition 0; the tests lay out partitions
            # on disk directly, so the mapping is irrelevant here.
            return 0

        def get_part_nodes(self, part):
            # Primary nodes for a partition are _replicas consecutive devs
            # starting at index `part` (wrapping around).
            nodes = []
            for offset in range(self._replicas):
                i = (part + offset) % len(self.devs)
                nodes.append(self.devs[i])
            return nodes

        def get_more_nodes(self, part):
            # Handoff nodes: all remaining devs, continuing in ring order.
            for offset in range(self._replicas, len(self.devs)):
                i = (part + offset) % len(self.devs)
                yield self.devs[i]

    def _make_fake_db(self, disk, partition, db_hash):
        """Create an empty container DB file at the standard layout:

        <root>/<disk>/containers/<partition>/<hash suffix>/<hash>/<hash>.db
        """
        directories = [
            os.path.join(self.root, disk),
            os.path.join(self.root, disk, 'containers'),
            os.path.join(self.root, disk, 'containers', str(partition)),
            os.path.join(self.root, disk, 'containers', str(partition),
                         db_hash[-3:]),
            os.path.join(self.root, disk, 'containers', str(partition),
                         db_hash[-3:], db_hash)]
        for d in directories:
            try:
                os.mkdir(d)
            except OSError as err:
                # The leading path components may already exist from a
                # previous _make_fake_db call; only EEXIST is tolerable.
                if err.errno != errno.EEXIST:
                    raise
        file_path = os.path.join(directories[-1], db_hash + ".db")
        with open(file_path, 'w'):
            pass

    def setUp(self):
        self.root = mkdtemp()

        # object disks; they're just here to make sure they don't trip us up
        os.mkdir(os.path.join(self.root, 'sdc'))
        os.mkdir(os.path.join(self.root, 'sdc', 'objects'))
        os.mkdir(os.path.join(self.root, 'sdd'))
        os.mkdir(os.path.join(self.root, 'sdd', 'objects'))

        # part 0 belongs on sdp
        self._make_fake_db('sdp', 0, '010101013cf2b7979af9eaa71cb67220')

        # part 1 does not belong on sdp
        self._make_fake_db('sdp', 1, 'abababab2b5368158355e799323b498d')

        # part 1 belongs on sdq
        self._make_fake_db('sdq', 1, '02020202e30f696a3cfa63d434a3c94e')

        # part 2 does not belong on sdq
        self._make_fake_db('sdq', 2, 'bcbcbcbc15d3835053d568c57e2c83b5')

    def tearDown(self):
        # Was previously misnamed "cleanUp", which unittest never calls,
        # so the temporary tree leaked after every test.
        rmtree(self.root, ignore_errors=True)

    def test_scary_warnings(self):
        # handoffs_only must produce prominent warnings at the start and
        # end of the replication pass.
        logger = unit.FakeLogger()
        replicator = TestReplicator({
            'handoffs_only': 'yes',
            'devices': self.root,
            'bind_port': 6201,
            'mount_check': 'no',
        }, logger=logger)

        with patch.object(db_replicator, 'whataremyips',
                          return_value=['10.0.0.1']), \
                patch.object(replicator, '_replicate_object'), \
                patch.object(replicator, 'ring', self.FakeRing3Nodes()):
            replicator.run_once()

        self.assertEqual(
            logger.get_lines_for_level('warning'),
            [('Starting replication pass with handoffs_only enabled. This '
              'mode is not intended for normal operation; use '
              'handoffs_only with care.'),
             ('Finished replication pass with handoffs_only enabled. '
              'If handoffs_only is no longer required, disable it.')])

    def test_skips_primary_partitions(self):
        # With handoffs_only, only the misplaced (handoff) partitions are
        # replicated; partitions on their primary device are skipped.
        replicator = TestReplicator({
            'handoffs_only': 'yes',
            'devices': self.root,
            'bind_port': 6201,
            'mount_check': 'no',
        })

        with patch.object(db_replicator, 'whataremyips',
                          return_value=['10.0.0.1']), \
                patch.object(replicator, '_replicate_object') as mock_repl, \
                patch.object(replicator, 'ring', self.FakeRing3Nodes()):
            replicator.run_once()

        self.assertEqual(sorted(mock_repl.mock_calls), [
            mock.call('1', os.path.join(
                self.root, 'sdp', 'containers', '1', '98d',
                'abababab2b5368158355e799323b498d',
                'abababab2b5368158355e799323b498d.db'), 0),
            mock.call('2', os.path.join(
                self.root, 'sdq', 'containers', '2', '3b5',
                'bcbcbcbc15d3835053d568c57e2c83b5',
                'bcbcbcbc15d3835053d568c57e2c83b5.db'), 1)])

    def test_override_partitions(self):
        # run_once(partitions=...) limits replication to those partitions.
        replicator = TestReplicator({
            'devices': self.root,
            'bind_port': 6201,
            'mount_check': 'no',
        })

        with patch.object(db_replicator, 'whataremyips',
                          return_value=['10.0.0.1']), \
                patch.object(replicator, '_replicate_object') as mock_repl, \
                patch.object(replicator, 'ring', self.FakeRing3Nodes()):
            replicator.run_once(partitions="0,2")

        self.assertEqual(sorted(mock_repl.mock_calls), [
            mock.call('0', os.path.join(
                self.root, 'sdp', 'containers', '0', '220',
                '010101013cf2b7979af9eaa71cb67220',
                '010101013cf2b7979af9eaa71cb67220.db'), 0),
            mock.call('2', os.path.join(
                self.root, 'sdq', 'containers', '2', '3b5',
                'bcbcbcbc15d3835053d568c57e2c83b5',
                'bcbcbcbc15d3835053d568c57e2c83b5.db'), 1)])

    def test_override_devices(self):
        # run_once(devices=...) limits replication to those devices.
        replicator = TestReplicator({
            'devices': self.root,
            'bind_port': 6201,
            'mount_check': 'no',
        })

        with patch.object(db_replicator, 'whataremyips',
                          return_value=['10.0.0.1']), \
                patch.object(replicator, '_replicate_object') as mock_repl, \
                patch.object(replicator, 'ring', self.FakeRing3Nodes()):
            replicator.run_once(devices="sdp")

        self.assertEqual(sorted(mock_repl.mock_calls), [
            mock.call('0', os.path.join(
                self.root, 'sdp', 'containers', '0', '220',
                '010101013cf2b7979af9eaa71cb67220',
                '010101013cf2b7979af9eaa71cb67220.db'), 0),
            mock.call('1', os.path.join(
                self.root, 'sdp', 'containers', '1', '98d',
                'abababab2b5368158355e799323b498d',
                'abababab2b5368158355e799323b498d.db'), 0)])

    def test_override_devices_and_partitions(self):
        # Both overrides together intersect: only partition 0 on sdp.
        replicator = TestReplicator({
            'devices': self.root,
            'bind_port': 6201,
            'mount_check': 'no',
        })

        with patch.object(db_replicator, 'whataremyips',
                          return_value=['10.0.0.1']), \
                patch.object(replicator, '_replicate_object') as mock_repl, \
                patch.object(replicator, 'ring', self.FakeRing3Nodes()):
            replicator.run_once(partitions="0,2", devices="sdp")

        self.assertEqual(sorted(mock_repl.mock_calls), [
            mock.call('0', os.path.join(
                self.root, 'sdp', 'containers', '0', '220',
                '010101013cf2b7979af9eaa71cb67220',
                '010101013cf2b7979af9eaa71cb67220.db'), 0)])
class TestReplToNode(unittest.TestCase):
def setUp(self):
db_replicator.ring = FakeRing()
@ -1497,7 +1715,9 @@ class TestReplToNode(unittest.TestCase):
self.assertEqual(self.replicator._repl_to_node(
self.fake_node, self.broker, '0', self.fake_info), True)
metadata = self.broker.metadata
self.assertEqual({}, metadata)
self.assertIn("X-Container-Sysmeta-Test", metadata)
self.assertEqual("XYZ", metadata["X-Container-Sysmeta-Test"][0])
self.assertEqual(now, metadata["X-Container-Sysmeta-Test"][1])
def test_repl_to_node_not_found(self):
self.http = ReplHttp('{"id": 3, "point": -1}', set_status=404)

View File

@ -25,7 +25,7 @@ from tempfile import gettempdir
from swift.common.linkat import linkat
from swift.common.utils import O_TMPFILE
from test.unit import requires_o_tmpfile_support
from test.unit import requires_o_tmpfile_support_in_tmp
class TestLinkat(unittest.TestCase):
@ -38,7 +38,7 @@ class TestLinkat(unittest.TestCase):
def test_available(self):
self.assertFalse(linkat.available)
@requires_o_tmpfile_support
@requires_o_tmpfile_support_in_tmp
def test_errno(self):
with open('/dev/null', 'r') as fd:
self.assertRaises(IOError, linkat,
@ -77,7 +77,7 @@ class TestLinkat(unittest.TestCase):
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.linkat_retrieved)
@requires_o_tmpfile_support
@requires_o_tmpfile_support_in_tmp
def test_linkat_success(self):
fd = None

View File

@ -71,8 +71,8 @@ class MockMemcached(object):
# In particular, the "Storage commands" section may be interesting.
def __init__(self):
self.inbuf = ''
self.outbuf = ''
self.inbuf = b''
self.outbuf = b''
self.cache = {}
self.down = False
self.exc_on_delete = False
@ -84,81 +84,86 @@ class MockMemcached(object):
if self.down:
raise Exception('mock is down')
self.inbuf += string
while '\n' in self.inbuf:
cmd, self.inbuf = self.inbuf.split('\n', 1)
while b'\n' in self.inbuf:
cmd, self.inbuf = self.inbuf.split(b'\n', 1)
parts = cmd.split()
handler = getattr(self, 'handle_%s' % parts[0].lower(), None)
cmd_name = parts[0].decode('ascii').lower()
handler = getattr(self, 'handle_%s' % cmd_name, None)
if handler:
handler(*parts[1:])
else:
raise ValueError('Unhandled command: %s' % parts[0])
def handle_set(self, key, flags, exptime, num_bytes, noreply=''):
def handle_set(self, key, flags, exptime, num_bytes, noreply=b''):
self.cache[key] = flags, exptime, self.inbuf[:int(num_bytes)]
self.inbuf = self.inbuf[int(num_bytes) + 2:]
if noreply != 'noreply':
self.outbuf += 'STORED\r\n'
if noreply != b'noreply':
self.outbuf += b'STORED\r\n'
def handle_add(self, key, flags, exptime, num_bytes, noreply=''):
def handle_add(self, key, flags, exptime, num_bytes, noreply=b''):
value = self.inbuf[:int(num_bytes)]
self.inbuf = self.inbuf[int(num_bytes) + 2:]
if key in self.cache:
if noreply != 'noreply':
self.outbuf += 'NOT_STORED\r\n'
if noreply != b'noreply':
self.outbuf += b'NOT_STORED\r\n'
else:
self.cache[key] = flags, exptime, value
if noreply != 'noreply':
self.outbuf += 'STORED\r\n'
if noreply != b'noreply':
self.outbuf += b'STORED\r\n'
def handle_delete(self, key, noreply=''):
def handle_delete(self, key, noreply=b''):
if self.exc_on_delete:
raise Exception('mock is has exc_on_delete set')
if key in self.cache:
del self.cache[key]
if noreply != 'noreply':
self.outbuf += 'DELETED\r\n'
elif noreply != 'noreply':
self.outbuf += 'NOT_FOUND\r\n'
if noreply != b'noreply':
self.outbuf += b'DELETED\r\n'
elif noreply != b'noreply':
self.outbuf += b'NOT_FOUND\r\n'
def handle_get(self, *keys):
for key in keys:
if key in self.cache:
val = self.cache[key]
self.outbuf += 'VALUE %s %s %s\r\n' % (
key, val[0], len(val[2]))
self.outbuf += val[2] + '\r\n'
self.outbuf += 'END\r\n'
self.outbuf += b' '.join([
b'VALUE',
key,
val[0],
str(len(val[2])).encode('ascii')
]) + b'\r\n'
self.outbuf += val[2] + b'\r\n'
self.outbuf += b'END\r\n'
def handle_incr(self, key, value, noreply=''):
def handle_incr(self, key, value, noreply=b''):
if key in self.cache:
current = self.cache[key][2]
new_val = str(int(current) + int(value))
new_val = str(int(current) + int(value)).encode('ascii')
self.cache[key] = self.cache[key][:2] + (new_val, )
self.outbuf += str(new_val) + '\r\n'
self.outbuf += new_val + b'\r\n'
else:
self.outbuf += 'NOT_FOUND\r\n'
self.outbuf += b'NOT_FOUND\r\n'
def handle_decr(self, key, value, noreply=''):
def handle_decr(self, key, value, noreply=b''):
if key in self.cache:
current = self.cache[key][2]
new_val = str(int(current) - int(value))
if new_val[0] == '-': # ie, val is negative
new_val = '0'
new_val = str(int(current) - int(value)).encode('ascii')
if new_val[:1] == b'-': # ie, val is negative
new_val = b'0'
self.cache[key] = self.cache[key][:2] + (new_val, )
self.outbuf += str(new_val) + '\r\n'
self.outbuf += new_val + b'\r\n'
else:
self.outbuf += 'NOT_FOUND\r\n'
self.outbuf += b'NOT_FOUND\r\n'
def readline(self):
if self.read_return_empty_str:
return ''
return b''
if self.read_return_none:
return None
if self.down:
raise Exception('mock is down')
if '\n' in self.outbuf:
response, self.outbuf = self.outbuf.split('\n', 1)
return response + '\n'
if b'\n' in self.outbuf:
response, self.outbuf = self.outbuf.split(b'\n', 1)
return response + b'\n'
def read(self, size):
if self.down:
@ -199,7 +204,7 @@ class TestMemcached(unittest.TestCase):
memcache_client = memcached.MemcacheRing([sock1ipport, sock2ip])
one = two = True
while one or two: # Run until we match hosts one and two
key = uuid4().hex
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peeripport = '%s:%s' % conn[2].getpeername()
self.assertTrue(peeripport in (sock1ipport, sock2ipport))
@ -222,7 +227,7 @@ class TestMemcached(unittest.TestCase):
sock_addr = sock.getsockname()
server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
memcache_client = memcached.MemcacheRing([server_socket])
key = uuid4().hex
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
@ -243,7 +248,7 @@ class TestMemcached(unittest.TestCase):
server_host = '[%s]' % sock_addr[0]
memcached.DEFAULT_MEMCACHED_PORT = sock_addr[1]
memcache_client = memcached.MemcacheRing([server_host])
key = uuid4().hex
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
@ -271,7 +276,7 @@ class TestMemcached(unittest.TestCase):
socket.SOCK_STREAM, 0, '',
('127.0.0.1', sock_addr[1]))]
memcache_client = memcached.MemcacheRing([server_socket])
key = uuid4().hex
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '%s:%s' % (peer_sockaddr[0],
@ -296,7 +301,7 @@ class TestMemcached(unittest.TestCase):
socket.SOCK_STREAM, 0, '',
('::1', sock_addr[1]))]
memcache_client = memcached.MemcacheRing([server_socket])
key = uuid4().hex
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0],
@ -312,16 +317,16 @@ class TestMemcached(unittest.TestCase):
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
cache_key = md5('some_key').hexdigest()
cache_key = md5(b'some_key').hexdigest().encode('ascii')
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
# See JSON_FLAG
self.assertEqual(mock.cache, {cache_key: ('2', '0', '[1, 2, 3]')})
self.assertEqual(mock.cache, {cache_key: (b'2', b'0', b'[1, 2, 3]')})
memcache_client.set('some_key', [4, 5, 6])
self.assertEqual(memcache_client.get('some_key'), [4, 5, 6])
self.assertEqual(mock.cache, {cache_key: ('2', '0', '[4, 5, 6]')})
self.assertEqual(mock.cache, {cache_key: (b'2', b'0', b'[4, 5, 6]')})
memcache_client.set('some_key', ['simple str', 'utf8 str éà'])
# As per http://wiki.openstack.org/encoding,
@ -329,10 +334,10 @@ class TestMemcached(unittest.TestCase):
self.assertEqual(
memcache_client.get('some_key'), ['simple str', u'utf8 str éà'])
self.assertEqual(mock.cache, {cache_key: (
'2', '0', '["simple str", "utf8 str \\u00e9\\u00e0"]')})
b'2', b'0', b'["simple str", "utf8 str \\u00e9\\u00e0"]')})
memcache_client.set('some_key', [1, 2, 3], time=20)
self.assertEqual(mock.cache, {cache_key: ('2', '20', '[1, 2, 3]')})
self.assertEqual(mock.cache, {cache_key: (b'2', b'20', b'[1, 2, 3]')})
sixtydays = 60 * 24 * 60 * 60
esttimeout = time.time() + sixtydays
@ -347,7 +352,8 @@ class TestMemcached(unittest.TestCase):
[(mock, mock)] * 2)
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
self.assertEqual(mock.cache.values()[0][1], '0')
self.assertEqual(list(mock.cache.values()),
[(b'2', b'0', b'[1, 2, 3]')])
# Now lets return an empty string, and make sure we aren't logging
# the error.
@ -371,15 +377,15 @@ class TestMemcached(unittest.TestCase):
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
self.assertEqual(memcache_client.incr('some_key', delta=5), 5)
self.assertEqual(memcache_client.get('some_key'), '5')
self.assertEqual(memcache_client.get('some_key'), b'5')
self.assertEqual(memcache_client.incr('some_key', delta=5), 10)
self.assertEqual(memcache_client.get('some_key'), '10')
self.assertEqual(memcache_client.get('some_key'), b'10')
self.assertEqual(memcache_client.incr('some_key', delta=1), 11)
self.assertEqual(memcache_client.get('some_key'), '11')
self.assertEqual(memcache_client.get('some_key'), b'11')
self.assertEqual(memcache_client.incr('some_key', delta=-5), 6)
self.assertEqual(memcache_client.get('some_key'), '6')
self.assertEqual(memcache_client.get('some_key'), b'6')
self.assertEqual(memcache_client.incr('some_key', delta=-15), 0)
self.assertEqual(memcache_client.get('some_key'), '0')
self.assertEqual(memcache_client.get('some_key'), b'0')
mock.read_return_none = True
self.assertRaises(memcached.MemcacheConnectionError,
memcache_client.incr, 'some_key', delta=-15)
@ -391,9 +397,9 @@ class TestMemcached(unittest.TestCase):
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
self.assertEqual(memcache_client.incr('some_key', delta=5), 5)
self.assertEqual(memcache_client.get('some_key'), '5')
self.assertEqual(memcache_client.get('some_key'), b'5')
self.assertEqual(memcache_client.incr('some_key', delta=5), 10)
self.assertEqual(memcache_client.get('some_key'), '10')
self.assertEqual(memcache_client.get('some_key'), b'10')
# Now lets return an empty string, and make sure we aren't logging
# the error.
@ -417,11 +423,11 @@ class TestMemcached(unittest.TestCase):
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
cache_key = md5('some_key').hexdigest()
cache_key = md5(b'some_key').hexdigest().encode('ascii')
memcache_client.incr('some_key', delta=5, time=55)
self.assertEqual(memcache_client.get('some_key'), '5')
self.assertEqual(mock.cache, {cache_key: ('0', '55', '5')})
self.assertEqual(memcache_client.get('some_key'), b'5')
self.assertEqual(mock.cache, {cache_key: (b'0', b'55', b'5')})
memcache_client.delete('some_key')
self.assertIsNone(memcache_client.get('some_key'))
@ -429,7 +435,7 @@ class TestMemcached(unittest.TestCase):
fiftydays = 50 * 24 * 60 * 60
esttimeout = time.time() + fiftydays
memcache_client.incr('some_key', delta=5, time=fiftydays)
self.assertEqual(memcache_client.get('some_key'), '5')
self.assertEqual(memcache_client.get('some_key'), b'5')
_junk, cache_timeout, _junk = mock.cache[cache_key]
self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
@ -437,12 +443,12 @@ class TestMemcached(unittest.TestCase):
self.assertIsNone(memcache_client.get('some_key'))
memcache_client.incr('some_key', delta=5)
self.assertEqual(memcache_client.get('some_key'), '5')
self.assertEqual(mock.cache, {cache_key: ('0', '0', '5')})
self.assertEqual(memcache_client.get('some_key'), b'5')
self.assertEqual(mock.cache, {cache_key: (b'0', b'0', b'5')})
memcache_client.incr('some_key', delta=5, time=55)
self.assertEqual(memcache_client.get('some_key'), '10')
self.assertEqual(mock.cache, {cache_key: ('0', '0', '10')})
self.assertEqual(memcache_client.get('some_key'), b'10')
self.assertEqual(mock.cache, {cache_key: (b'0', b'0', b'10')})
def test_decr(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
@ -450,13 +456,13 @@ class TestMemcached(unittest.TestCase):
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
self.assertEqual(memcache_client.decr('some_key', delta=5), 0)
self.assertEqual(memcache_client.get('some_key'), '0')
self.assertEqual(memcache_client.get('some_key'), b'0')
self.assertEqual(memcache_client.incr('some_key', delta=15), 15)
self.assertEqual(memcache_client.get('some_key'), '15')
self.assertEqual(memcache_client.get('some_key'), b'15')
self.assertEqual(memcache_client.decr('some_key', delta=4), 11)
self.assertEqual(memcache_client.get('some_key'), '11')
self.assertEqual(memcache_client.get('some_key'), b'11')
self.assertEqual(memcache_client.decr('some_key', delta=15), 0)
self.assertEqual(memcache_client.get('some_key'), '0')
self.assertEqual(memcache_client.get('some_key'), b'0')
mock.read_return_none = True
self.assertRaises(memcached.MemcacheConnectionError,
memcache_client.decr, 'some_key', delta=15)
@ -510,27 +516,27 @@ class TestMemcached(unittest.TestCase):
self.assertEqual(
memcache_client.get_multi(('some_key2', 'some_key1'), 'multi_key'),
[[4, 5, 6], [1, 2, 3]])
for key in ('some_key1', 'some_key2'):
key = md5(key).hexdigest()
for key in (b'some_key1', b'some_key2'):
key = md5(key).hexdigest().encode('ascii')
self.assertIn(key, mock.cache)
_junk, cache_timeout, _junk = mock.cache[key]
self.assertEqual(cache_timeout, '0')
self.assertEqual(cache_timeout, b'0')
memcache_client.set_multi(
{'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
time=20)
for key in ('some_key1', 'some_key2'):
key = md5(key).hexdigest()
for key in (b'some_key1', b'some_key2'):
key = md5(key).hexdigest().encode('ascii')
_junk, cache_timeout, _junk = mock.cache[key]
self.assertEqual(cache_timeout, '20')
self.assertEqual(cache_timeout, b'20')
fortydays = 50 * 24 * 60 * 60
esttimeout = time.time() + fortydays
memcache_client.set_multi(
{'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
time=fortydays)
for key in ('some_key1', 'some_key2'):
key = md5(key).hexdigest()
for key in (b'some_key1', b'some_key2'):
key = md5(key).hexdigest().encode('ascii')
_junk, cache_timeout, _junk = mock.cache[key]
self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
self.assertEqual(memcache_client.get_multi(

View File

@ -72,7 +72,7 @@ from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import POLICIES, reload_storage_policies
from swift.common.swob import Request, Response
from test.unit import FakeLogger, requires_o_tmpfile_support, \
quiet_eventlet_exceptions
requires_o_tmpfile_support_in_tmp, quiet_eventlet_exceptions
threading = eventlet.patcher.original('threading')
@ -183,6 +183,7 @@ class TestTimestamp(unittest.TestCase):
def test_invalid_input(self):
self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
self.assertRaises(ValueError, utils.Timestamp, '123.456_78_90')
def test_invalid_string_conversion(self):
t = utils.Timestamp.now()
@ -390,6 +391,8 @@ class TestTimestamp(unittest.TestCase):
expected = '1402436408.91203_00000000000000f0'
test_values = (
'1402436408.91203_000000f0',
u'1402436408.91203_000000f0',
b'1402436408.91203_000000f0',
'1402436408.912030000_0000000000f0',
'1402436408.912029_000000f0',
'1402436408.91202999999_0000000000f0',
@ -621,16 +624,7 @@ class TestTimestamp(unittest.TestCase):
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%f_00000000' % now,
)
def _test_greater_with_offset(self, now, test_values):
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
@ -655,6 +649,43 @@ class TestTimestamp(unittest.TestCase):
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
# Part 1: use the natural time of the Python. This is deliciously
# unpredictable, but completely legitimate and realistic. Finds bugs!
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, now,
)
self._test_greater_with_offset(now, test_values)
# Part 2: Same as above, but with fixed time values that reproduce
# specific corner cases.
now = 1519830570.6949348
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, now,
)
self._test_greater_with_offset(now, test_values)
# Part 3: The '%f' problem. Timestamps cannot be converted to %f
# strings, then back to timestamps, then compared with originals.
# You can only "import" a floating point representation once.
now = 1519830570.6949348
now = float('%f' % now)
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%s_00000000' % now,
)
self._test_greater_with_offset(now, test_values)
def test_smaller_no_offset(self):
now = time.time()
newer = now + 1
@ -3518,6 +3549,22 @@ cluster_dfw1 = http://dfw1.host/v1/
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
def test_parse_overrides(self):
devices, partitions = utils.parse_overrides(devices='sdb1,sdb2')
self.assertIn('sdb1', devices)
self.assertIn('sdb2', devices)
self.assertNotIn('sdb3', devices)
self.assertIn(1, partitions)
self.assertIn('1', partitions) # matches because of Everything
self.assertIn(None, partitions) # matches because of Everything
devices, partitions = utils.parse_overrides(partitions='1,2,3')
self.assertIn('sdb1', devices)
self.assertIn('1', devices) # matches because of Everything
self.assertIn(None, devices) # matches because of Everything
self.assertIn(1, partitions)
self.assertNotIn('1', partitions)
self.assertNotIn(None, partitions)
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
@ -3807,7 +3854,7 @@ cluster_dfw1 = http://dfw1.host/v1/
patch('platform.architecture', return_value=('64bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
@requires_o_tmpfile_support
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_linkat_success(self):
tempdir = mkdtemp()
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
@ -3827,7 +3874,7 @@ cluster_dfw1 = http://dfw1.host/v1/
os.close(fd)
shutil.rmtree(tempdir)
@requires_o_tmpfile_support
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_target_exists(self):
tempdir = mkdtemp()
# Create and write to a file
@ -3862,7 +3909,7 @@ cluster_dfw1 = http://dfw1.host/v1/
self.fail("Expecting IOError exception")
self.assertTrue(_m_linkat.called)
@requires_o_tmpfile_support
@requires_o_tmpfile_support_in_tmp
def test_linkat_race_dir_not_exists(self):
tempdir = mkdtemp()
target_dir = os.path.join(tempdir, uuid4().hex)
@ -3980,6 +4027,19 @@ cluster_dfw1 = http://dfw1.host/v1/
self.assertEqual(utils.replace_partition_in_path(old, 10), old)
self.assertEqual(utils.replace_partition_in_path(new, 11), new)
def test_round_robin_iter(self):
it1 = iter([1, 2, 3])
it2 = iter([4, 5])
it3 = iter([6, 7, 8, 9])
it4 = iter([])
rr_its = utils.round_robin_iter([it1, it2, it3, it4])
got = list(rr_its)
# Expect that items get fetched in a round-robin fashion from the
# iterators
self.assertListEqual([1, 4, 6, 2, 5, 7, 3, 8, 9], got)
class ResellerConfReader(unittest.TestCase):
@ -4823,6 +4883,13 @@ class TestStatsdLogging(unittest.TestCase):
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(400)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
@ -4844,7 +4911,14 @@ class TestStatsdLogging(unittest.TestCase):
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(401)
mock_controller = MockController(500)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(507)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')

View File

@ -203,7 +203,7 @@ class TestWSGI(unittest.TestCase):
conf_file = os.path.join(tempdir, 'file.conf')
def _write_and_load_conf_file(conf):
with open(conf_file, 'wb') as fd:
with open(conf_file, 'wt') as fd:
fd.write(dedent(conf))
return wsgi.load_app_config(conf_file)
@ -659,12 +659,12 @@ class TestWSGI(unittest.TestCase):
oldenv = {}
newenv = wsgi.make_pre_authed_env(oldenv)
self.assertTrue('wsgi.input' in newenv)
self.assertEqual(newenv['wsgi.input'].read(), '')
self.assertEqual(newenv['wsgi.input'].read(), b'')
oldenv = {'wsgi.input': BytesIO(b'original wsgi.input')}
newenv = wsgi.make_pre_authed_env(oldenv)
self.assertTrue('wsgi.input' in newenv)
self.assertEqual(newenv['wsgi.input'].read(), '')
self.assertEqual(newenv['wsgi.input'].read(), b'')
oldenv = {'swift.source': 'UT'}
newenv = wsgi.make_pre_authed_env(oldenv)
@ -677,7 +677,7 @@ class TestWSGI(unittest.TestCase):
def test_pre_auth_req(self):
class FakeReq(object):
@classmethod
def fake_blank(cls, path, environ=None, body='', headers=None):
def fake_blank(cls, path, environ=None, body=b'', headers=None):
if environ is None:
environ = {}
if headers is None:
@ -687,7 +687,7 @@ class TestWSGI(unittest.TestCase):
was_blank = Request.blank
Request.blank = FakeReq.fake_blank
wsgi.make_pre_authed_request({'HTTP_X_TRANS_ID': '1234'},
'PUT', '/', body='tester', headers={})
'PUT', '/', body=b'tester', headers={})
wsgi.make_pre_authed_request({'HTTP_X_TRANS_ID': '1234'},
'PUT', '/', headers={})
Request.blank = was_blank
@ -695,7 +695,7 @@ class TestWSGI(unittest.TestCase):
def test_pre_auth_req_with_quoted_path(self):
r = wsgi.make_pre_authed_request(
{'HTTP_X_TRANS_ID': '1234'}, 'PUT', path=quote('/a space'),
body='tester', headers={})
body=b'tester', headers={})
self.assertEqual(r.path, quote('/a space'))
def test_pre_auth_req_drops_query(self):
@ -711,8 +711,8 @@ class TestWSGI(unittest.TestCase):
def test_pre_auth_req_with_body(self):
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path', 'the body')
self.assertEqual(r.body, 'the body')
{'QUERY_STRING': 'original'}, 'GET', 'path', b'the body')
self.assertEqual(r.body, b'the body')
def test_pre_auth_creates_script_name(self):
e = wsgi.make_pre_authed_env({})
@ -730,9 +730,9 @@ class TestWSGI(unittest.TestCase):
def test_pre_auth_req_swift_source(self):
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path', 'the body',
{'QUERY_STRING': 'original'}, 'GET', 'path', b'the body',
swift_source='UT')
self.assertEqual(r.body, 'the body')
self.assertEqual(r.body, b'the body')
self.assertEqual(r.environ['swift.source'], 'UT')
def test_run_server_global_conf_callback(self):
@ -1363,7 +1363,8 @@ class TestWSGIContext(unittest.TestCase):
self.assertEqual('aaaaa', next(iterator))
self.assertEqual('bbbbb', next(iterator))
iterable.close()
self.assertRaises(StopIteration, iterator.next)
with self.assertRaises(StopIteration):
next(iterator)
def test_update_content_length(self):
statuses = ['200 Ok']

View File

@ -17,6 +17,8 @@ import json
import unittest
import mock
import os
import sys
import signal
import time
import string
import xattr
@ -850,7 +852,7 @@ class TestAuditor(unittest.TestCase):
self.auditor.run_audit(**kwargs)
self.assertFalse(os.path.isdir(quarantine_path))
del(kwargs['zero_byte_fps'])
clear_auditor_status(self.devices)
clear_auditor_status(self.devices, 'objects')
self.auditor.run_audit(**kwargs)
self.assertTrue(os.path.isdir(quarantine_path))
@ -1252,6 +1254,12 @@ class TestAuditor(unittest.TestCase):
self.wait_called += 1
return (self.wait_called, 0)
def mock_signal(self, sig, action):
pass
def mock_exit(self):
pass
for i in string.ascii_letters[2:26]:
mkdirs(os.path.join(self.devices, 'sd%s' % i))
@ -1267,8 +1275,12 @@ class TestAuditor(unittest.TestCase):
my_auditor.run_audit = mocker.mock_run
was_fork = os.fork
was_wait = os.wait
was_signal = signal.signal
was_exit = sys.exit
os.fork = mocker.mock_fork
os.wait = mocker.mock_wait
signal.signal = mocker.mock_signal
sys.exit = mocker.mock_exit
try:
my_auditor._sleep = mocker.mock_sleep_stop
my_auditor.run_once(zero_byte_fps=50)
@ -1280,6 +1292,12 @@ class TestAuditor(unittest.TestCase):
'ERROR auditing: %s', loop_error)
my_auditor.audit_loop = real_audit_loop
# sleep between ZBF scanner forks
self.assertRaises(StopForever, my_auditor.fork_child, True, True)
mocker.fork_called = 0
signal.signal = was_signal
sys.exit = was_exit
self.assertRaises(StopForever,
my_auditor.run_forever, zero_byte_fps=50)
self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 50)
@ -1306,11 +1324,11 @@ class TestAuditor(unittest.TestCase):
mocker.fork_called = 0
self.assertRaises(StopForever, my_auditor.run_forever)
# Fork is called 2 times since the zbf process is forked just
# once before self._sleep() is called and StopForever is raised
# Also wait is called just once before StopForever is raised
self.assertEqual(mocker.fork_called, 2)
self.assertEqual(mocker.wait_called, 1)
# Fork or Wait are called greater than or equal to 2 times in the
# main process. 2 times if zbf run once and 3 times if zbf run
# again
self.assertGreaterEqual(mocker.fork_called, 2)
self.assertGreaterEqual(mocker.wait_called, 2)
my_auditor._sleep = mocker.mock_sleep_continue
my_auditor.audit_loop = works_only_once(my_auditor.audit_loop,
@ -1320,13 +1338,13 @@ class TestAuditor(unittest.TestCase):
mocker.fork_called = 0
mocker.wait_called = 0
self.assertRaises(LetMeOut, my_auditor.run_forever)
# Fork is called no. of devices + (no. of devices)/2 + 1 times
# since zbf process is forked (no.of devices)/2 + 1 times
# Fork or Wait are called greater than or equal to
# no. of devices + (no. of devices)/2 + 1 times in main process
no_devices = len(os.listdir(self.devices))
self.assertEqual(mocker.fork_called, no_devices + no_devices / 2
+ 1)
self.assertEqual(mocker.wait_called, no_devices + no_devices / 2
+ 1)
self.assertGreaterEqual(mocker.fork_called, no_devices +
no_devices / 2 + 1)
self.assertGreaterEqual(mocker.wait_called, no_devices +
no_devices / 2 + 1)
finally:
os.fork = was_fork

View File

@ -326,7 +326,7 @@ class TestDiskFileModuleMethods(unittest.TestCase):
check_metadata()
# simulate a legacy diskfile that might have persisted unicode metadata
with mock.patch.object(diskfile, '_encode_metadata', lambda x: x):
with mock.patch.object(diskfile, '_decode_metadata', lambda x: x):
with open(path, 'wb') as fd:
diskfile.write_metadata(fd, metadata)
# sanity check, while still mocked, that we did persist unicode
@ -334,8 +334,8 @@ class TestDiskFileModuleMethods(unittest.TestCase):
actual = diskfile.read_metadata(fd)
for k, v in actual.items():
if k == u'X-Object-Meta-Strange':
self.assertIsInstance(k, six.text_type)
self.assertIsInstance(v, six.text_type)
self.assertIsInstance(k, str)
self.assertIsInstance(v, str)
break
else:
self.fail('Did not find X-Object-Meta-Strange')
@ -375,17 +375,6 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
"fcd938702024c25fef6c32fef05298eb"))
os.makedirs(os.path.join(tmpdir, "sdp", "objects-1", "9970", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"))
os.makedirs(os.path.join(tmpdir, "sdq", "objects-2", "9971", "8eb",
"fcd938702024c25fef6c32fef05298eb"))
os.makedirs(os.path.join(tmpdir, "sdq", "objects-99", "9972",
"8eb",
"fcd938702024c25fef6c32fef05298eb"))
# the bad
os.makedirs(os.path.join(tmpdir, "sdq", "objects-", "1135",
"6c3",
"fcd938702024c25fef6c32fef05298eb"))
os.makedirs(os.path.join(tmpdir, "sdq", "objects-fud", "foo"))
os.makedirs(os.path.join(tmpdir, "sdq", "objects-+1", "foo"))
self._make_file(os.path.join(tmpdir, "sdp", "objects", "1519",
"fed"))
@ -404,26 +393,18 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
"4f9eee668b66c6f0250bfa3c7ab9e51e"))
logger = debug_logger()
locations = [(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=False,
logger=logger)]
locations.sort()
loc_generators = []
datadirs = ["objects", "objects-1"]
for datadir in datadirs:
loc_generators.append(
diskfile.object_audit_location_generator(
devices=tmpdir, datadir=datadir, mount_check=False,
logger=logger))
# expect some warnings about those bad dirs
warnings = logger.get_lines_for_level('warning')
self.assertEqual(set(warnings), set([
("Directory 'objects-' does not map to a valid policy "
"(Unknown policy, for index '')"),
("Directory 'objects-2' does not map to a valid policy "
"(Unknown policy, for index '2')"),
("Directory 'objects-99' does not map to a valid policy "
"(Unknown policy, for index '99')"),
("Directory 'objects-fud' does not map to a valid policy "
"(Unknown policy, for index 'fud')"),
("Directory 'objects-+1' does not map to a valid policy "
"(Unknown policy, for index '+1')"),
]))
all_locs = itertools.chain(*loc_generators)
locations = [(loc.path, loc.device, loc.partition, loc.policy) for
loc in all_locs]
locations.sort()
expected = \
[(os.path.join(tmpdir, "sdp", "objects-1", "9970", "ca5",
@ -448,12 +429,19 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
self.assertEqual(locations, expected)
# Reset status file for next run
diskfile.clear_auditor_status(tmpdir)
for datadir in datadirs:
diskfile.clear_auditor_status(tmpdir, datadir)
# now without a logger
locations = [(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=False)]
for datadir in datadirs:
loc_generators.append(
diskfile.object_audit_location_generator(
devices=tmpdir, datadir=datadir, mount_check=False,
logger=logger))
all_locs = itertools.chain(*loc_generators)
locations = [(loc.path, loc.device, loc.partition, loc.policy) for
loc in all_locs]
locations.sort()
self.assertEqual(locations, expected)
@ -470,7 +458,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=True)]
devices=tmpdir, datadir="objects", mount_check=True)]
locations.sort()
self.assertEqual(
@ -485,7 +473,8 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=True, logger=logger)]
devices=tmpdir, datadir="objects", mount_check=True,
logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping sdq as it is not mounted',
@ -502,7 +491,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=False)]
devices=tmpdir, datadir="objects", mount_check=False)]
self.assertEqual(
locations,
@ -516,30 +505,22 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=False, logger=logger)]
devices=tmpdir, datadir="objects", mount_check=False,
logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping garbage as it is not a dir',
], debug_lines)
logger.clear()
with mock_check_drive(isdir=True):
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=False, logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping %s: Not a directory' % os.path.join(
tmpdir, "garbage"),
], debug_lines)
logger.clear()
with mock_check_drive() as mocks:
mocks['ismount'].side_effect = lambda path: (
False if path.endswith('garbage') else True)
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=True, logger=logger)]
devices=tmpdir, datadir="objects", mount_check=True,
logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping garbage as it is not mounted',
@ -550,10 +531,10 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
# so that errors get logged and a human can see what's going wrong;
# only normal FS corruption should be skipped over silently.
def list_locations(dirname):
def list_locations(dirname, datadir):
return [(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=dirname, mount_check=False)]
devices=dirname, datadir=datadir, mount_check=False)]
real_listdir = os.listdir
@ -570,30 +551,34 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
"2607", "b54",
"fe450ec990a88cc4b252b181bab04b54"))
with mock.patch('os.listdir', splode_if_endswith("sdf/objects")):
self.assertRaises(OSError, list_locations, tmpdir)
self.assertRaises(OSError, list_locations, tmpdir, "objects")
with mock.patch('os.listdir', splode_if_endswith("2607")):
self.assertRaises(OSError, list_locations, tmpdir)
self.assertRaises(OSError, list_locations, tmpdir, "objects")
with mock.patch('os.listdir', splode_if_endswith("b54")):
self.assertRaises(OSError, list_locations, tmpdir)
self.assertRaises(OSError, list_locations, tmpdir, "objects")
def test_auditor_status(self):
with temptree([]) as tmpdir:
os.makedirs(os.path.join(tmpdir, "sdf", "objects", "1", "a", "b"))
os.makedirs(os.path.join(tmpdir, "sdf", "objects", "2", "a", "b"))
datadir = "objects"
# Pretend that some time passed between each partition
with mock.patch('os.stat') as mock_stat, \
mock_check_drive(isdir=True):
mock_stat.return_value.st_mtime = time() - 60
# Auditor starts, there are two partitions to check
gen = diskfile.object_audit_location_generator(tmpdir, False)
gen = diskfile.object_audit_location_generator(tmpdir,
datadir,
False)
gen.next()
gen.next()
# Auditor stopped for some reason without raising StopIterator in
# the generator and restarts There is now only one remaining
# partition to check
gen = diskfile.object_audit_location_generator(tmpdir, False)
gen = diskfile.object_audit_location_generator(tmpdir, datadir,
False)
with mock_check_drive(isdir=True):
gen.next()
@ -602,17 +587,19 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
# There are no partitions to check if the auditor restarts another
# time and the status files have not been cleared
gen = diskfile.object_audit_location_generator(tmpdir, False)
gen = diskfile.object_audit_location_generator(tmpdir, datadir,
False)
with mock_check_drive(isdir=True):
self.assertRaises(StopIteration, gen.next)
# Reset status file
diskfile.clear_auditor_status(tmpdir)
diskfile.clear_auditor_status(tmpdir, datadir)
# If the auditor restarts another time, we expect to
# check two partitions again, because the remaining
# partitions were empty and a new listdir was executed
gen = diskfile.object_audit_location_generator(tmpdir, False)
gen = diskfile.object_audit_location_generator(tmpdir, datadir,
False)
with mock_check_drive(isdir=True):
gen.next()
gen.next()
@ -985,7 +972,8 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
self.df_mgr.logger.increment.assert_called_with('async_pendings')
def test_object_audit_location_generator(self):
locations = list(self.df_mgr.object_audit_location_generator())
locations = list(
self.df_mgr.object_audit_location_generator(POLICIES[0]))
self.assertEqual(locations, [])
def test_replication_one_per_device_deprecation(self):

View File

@ -19,6 +19,7 @@ from test.unit import FakeRing, mocked_http_conn, debug_logger
from tempfile import mkdtemp
from shutil import rmtree
from collections import defaultdict
from copy import deepcopy
import mock
import six
@ -68,8 +69,8 @@ class FakeInternalClient(object):
def iter_containers(self, account, prefix=''):
acc_dict = self.aco_dict[account]
return [{'name': six.text_type(container)} for container in
acc_dict if container.startswith(prefix)]
return sorted([{'name': six.text_type(container)} for container in
acc_dict if container.startswith(prefix)])
def delete_container(*a, **kw):
pass
@ -100,6 +101,41 @@ class TestObjectExpirer(TestCase):
self.conf = {'recon_cache_path': self.rcache}
self.logger = debug_logger('test-expirer')
self.past_time = str(int(time() - 86400))
self.future_time = str(int(time() + 86400))
# Dummy task queue for test
self.fake_swift = FakeInternalClient({
'.expiring_objects': {
# this task container will be checked
self.past_time: [
# tasks ready for execution
self.past_time + '-a0/c0/o0',
self.past_time + '-a1/c1/o1',
self.past_time + '-a2/c2/o2',
self.past_time + '-a3/c3/o3',
self.past_time + '-a4/c4/o4',
self.past_time + '-a5/c5/o5',
self.past_time + '-a6/c6/o6',
self.past_time + '-a7/c7/o7',
# task objects for unicode test
self.past_time + u'-a8/c8/o8\u2661',
self.past_time + u'-a9/c9/o9\xf8',
# this task will be skipped
self.future_time + '-a10/c10/o10'],
# this task container will be skipped
self.future_time: [
self.future_time + '-a11/c11/o11']}
})
self.expirer = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
# target object paths which should be expirerd now
self.expired_target_path_list = [
'a0/c0/o0', 'a1/c1/o1', 'a2/c2/o2', 'a3/c3/o3', 'a4/c4/o4',
'a5/c5/o5', 'a6/c6/o6', 'a7/c7/o7',
'a8/c8/o8\xe2\x99\xa1', 'a9/c9/o9\xc3\xb8',
]
def tearDown(self):
rmtree(self.rcache)
internal_client.sleep = self.old_sleep
@ -215,21 +251,12 @@ class TestObjectExpirer(TestCase):
self.deleted_objects = {}
def delete_object(self, target_path, delete_timestamp,
task_container, task_object):
task_account, task_container, task_object):
if task_container not in self.deleted_objects:
self.deleted_objects[task_container] = set()
self.deleted_objects[task_container].add(task_object)
aco_dict = {
'.expiring_objects': {
'0': set('1-a/c/one 2-a/c/two 3-a/c/three'.split()),
'1': set('2-a/c/two 3-a/c/three 4-a/c/four'.split()),
'2': set('5-a/c/five 6-a/c/six'.split()),
'3': set(u'7-a/c/seven\u2661'.split()),
},
}
fake_swift = FakeInternalClient(aco_dict)
x = ObjectExpirer(self.conf, swift=fake_swift)
x = ObjectExpirer(self.conf, swift=self.fake_swift)
deleted_objects = defaultdict(set)
for i in range(3):
@ -240,15 +267,22 @@ class TestObjectExpirer(TestCase):
for task_container, deleted in x.deleted_objects.items():
self.assertFalse(deleted_objects[task_container] & deleted)
deleted_objects[task_container] |= deleted
self.assertEqual(aco_dict['.expiring_objects']['3'].pop(),
deleted_objects['3'].pop().decode('utf8'))
self.assertEqual(aco_dict['.expiring_objects'], deleted_objects)
# sort for comparison
deleted_objects = {
con: sorted(o_set) for con, o_set in deleted_objects.items()}
expected = {
self.past_time: [
self.past_time + '-' + target_path
for target_path in self.expired_target_path_list]}
self.assertEqual(deleted_objects, expected)
def test_delete_object(self):
x = expirer.ObjectExpirer({}, logger=self.logger)
actual_obj = 'actual_obj'
timestamp = int(time())
reclaim_ts = timestamp - x.reclaim_age
account = 'account'
container = 'container'
obj = 'obj'
@ -266,12 +300,12 @@ class TestObjectExpirer(TestCase):
with mock.patch.object(x, 'delete_actual_object',
side_effect=exc) as delete_actual:
with mock.patch.object(x, 'pop_queue') as pop_queue:
x.delete_object(actual_obj, ts, container, obj)
x.delete_object(actual_obj, ts, account, container, obj)
delete_actual.assert_called_once_with(actual_obj, ts)
log_lines = x.logger.get_lines_for_level('error')
if should_pop:
pop_queue.assert_called_once_with(container, obj)
pop_queue.assert_called_once_with(account, container, obj)
self.assertEqual(start_reports + 1, x.report_objects)
self.assertFalse(log_lines)
else:
@ -281,11 +315,12 @@ class TestObjectExpirer(TestCase):
if isinstance(exc, internal_client.UnexpectedResponse):
self.assertEqual(
log_lines[0],
'Unexpected response while deleting object container '
'obj: %s' % exc.resp.status_int)
'Unexpected response while deleting object '
'account container obj: %s' % exc.resp.status_int)
else:
self.assertTrue(log_lines[0].startswith(
'Exception while deleting object container obj'))
'Exception while deleting object '
'account container obj'))
# verify pop_queue logic on exceptions
for exc, ts, should_pop in [(None, timestamp, True),
@ -322,65 +357,83 @@ class TestObjectExpirer(TestCase):
self.assertTrue(
'so far' in str(x.logger.get_lines_for_level('info')))
def test_round_robin_order(self):
def make_task(delete_at, target):
return {
'task_container': delete_at,
'task_object': delete_at + '-' + target,
'delete_timestamp': Timestamp(delete_at),
'target_path': target,
}
def test_parse_task_obj(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
def assert_parse_task_obj(task_obj, expected_delete_at,
expected_account, expected_container,
expected_obj):
delete_at, account, container, obj = x.parse_task_obj(task_obj)
self.assertEqual(delete_at, expected_delete_at)
self.assertEqual(account, expected_account)
self.assertEqual(container, expected_container)
self.assertEqual(obj, expected_obj)
assert_parse_task_obj('0000-a/c/o', 0, 'a', 'c', 'o')
assert_parse_task_obj('0001-a/c/o', 1, 'a', 'c', 'o')
assert_parse_task_obj('1000-a/c/o', 1000, 'a', 'c', 'o')
assert_parse_task_obj('0000-acc/con/obj', 0, 'acc', 'con', 'obj')
def make_task(self, delete_at, target):
return {
'task_account': '.expiring_objects',
'task_container': delete_at,
'task_object': delete_at + '-' + target,
'delete_timestamp': Timestamp(delete_at),
'target_path': target,
}
def test_round_robin_order(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
task_con_obj_list = [
# objects in 0000 timestamp container
make_task('0000', 'a/c0/o0'),
make_task('0000', 'a/c0/o1'),
self.make_task('0000', 'a/c0/o0'),
self.make_task('0000', 'a/c0/o1'),
# objects in 0001 timestamp container
make_task('0001', 'a/c1/o0'),
make_task('0001', 'a/c1/o1'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0001', 'a/c1/o1'),
# objects in 0002 timestamp container
make_task('0002', 'a/c2/o0'),
make_task('0002', 'a/c2/o1'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0002', 'a/c2/o1'),
]
result = list(x.round_robin_order(task_con_obj_list))
# sorted by popping one object to delete for each target_container
expected = [
make_task('0000', 'a/c0/o0'),
make_task('0001', 'a/c1/o0'),
make_task('0002', 'a/c2/o0'),
make_task('0000', 'a/c0/o1'),
make_task('0001', 'a/c1/o1'),
make_task('0002', 'a/c2/o1'),
self.make_task('0000', 'a/c0/o0'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0001', 'a/c1/o1'),
self.make_task('0002', 'a/c2/o1'),
]
self.assertEqual(expected, result)
# task containers have some task objects with invalid target paths
task_con_obj_list = [
# objects in 0000 timestamp container
make_task('0000', 'invalid0'),
make_task('0000', 'a/c0/o0'),
make_task('0000', 'a/c0/o1'),
self.make_task('0000', 'invalid0'),
self.make_task('0000', 'a/c0/o0'),
self.make_task('0000', 'a/c0/o1'),
# objects in 0001 timestamp container
make_task('0001', 'a/c1/o0'),
make_task('0001', 'invalid1'),
make_task('0001', 'a/c1/o1'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0001', 'invalid1'),
self.make_task('0001', 'a/c1/o1'),
# objects in 0002 timestamp container
make_task('0002', 'a/c2/o0'),
make_task('0002', 'a/c2/o1'),
make_task('0002', 'invalid2'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0002', 'a/c2/o1'),
self.make_task('0002', 'invalid2'),
]
result = list(x.round_robin_order(task_con_obj_list))
# the invalid task objects are ignored
expected = [
make_task('0000', 'a/c0/o0'),
make_task('0001', 'a/c1/o0'),
make_task('0002', 'a/c2/o0'),
make_task('0000', 'a/c0/o1'),
make_task('0001', 'a/c1/o1'),
make_task('0002', 'a/c2/o1'),
self.make_task('0000', 'a/c0/o0'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0001', 'a/c1/o1'),
self.make_task('0002', 'a/c2/o1'),
]
self.assertEqual(expected, result)
@ -388,57 +441,87 @@ class TestObjectExpirer(TestCase):
# the same timestamp container
task_con_obj_list = [
# objects in 0000 timestamp container
make_task('0000', 'a/c0/o0'),
make_task('0000', 'a/c0/o1'),
make_task('0000', 'a/c2/o2'),
make_task('0000', 'a/c2/o3'),
self.make_task('0000', 'a/c0/o0'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0000', 'a/c2/o2'),
self.make_task('0000', 'a/c2/o3'),
# objects in 0001 timestamp container
make_task('0001', 'a/c0/o2'),
make_task('0001', 'a/c0/o3'),
make_task('0001', 'a/c1/o0'),
make_task('0001', 'a/c1/o1'),
self.make_task('0001', 'a/c0/o2'),
self.make_task('0001', 'a/c0/o3'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0001', 'a/c1/o1'),
# objects in 0002 timestamp container
make_task('0002', 'a/c2/o0'),
make_task('0002', 'a/c2/o1'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0002', 'a/c2/o1'),
]
result = list(x.round_robin_order(task_con_obj_list))
# so we go around popping by *target* container, not *task* container
expected = [
make_task('0000', 'a/c0/o0'),
make_task('0001', 'a/c1/o0'),
make_task('0000', 'a/c2/o2'),
make_task('0000', 'a/c0/o1'),
make_task('0001', 'a/c1/o1'),
make_task('0000', 'a/c2/o3'),
make_task('0001', 'a/c0/o2'),
make_task('0002', 'a/c2/o0'),
make_task('0001', 'a/c0/o3'),
make_task('0002', 'a/c2/o1'),
self.make_task('0000', 'a/c0/o0'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0000', 'a/c2/o2'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0001', 'a/c1/o1'),
self.make_task('0000', 'a/c2/o3'),
self.make_task('0001', 'a/c0/o2'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0001', 'a/c0/o3'),
self.make_task('0002', 'a/c2/o1'),
]
self.assertEqual(expected, result)
# all of the work to be done could be for different target containers
task_con_obj_list = [
# objects in 0000 timestamp container
make_task('0000', 'a/c0/o'),
make_task('0000', 'a/c1/o'),
make_task('0000', 'a/c2/o'),
make_task('0000', 'a/c3/o'),
self.make_task('0000', 'a/c0/o'),
self.make_task('0000', 'a/c1/o'),
self.make_task('0000', 'a/c2/o'),
self.make_task('0000', 'a/c3/o'),
# objects in 0001 timestamp container
make_task('0001', 'a/c4/o'),
make_task('0001', 'a/c5/o'),
make_task('0001', 'a/c6/o'),
make_task('0001', 'a/c7/o'),
self.make_task('0001', 'a/c4/o'),
self.make_task('0001', 'a/c5/o'),
self.make_task('0001', 'a/c6/o'),
self.make_task('0001', 'a/c7/o'),
# objects in 0002 timestamp container
make_task('0002', 'a/c8/o'),
make_task('0002', 'a/c9/o'),
self.make_task('0002', 'a/c8/o'),
self.make_task('0002', 'a/c9/o'),
]
result = list(x.round_robin_order(task_con_obj_list))
# in which case, we kind of hammer the task containers
self.assertEqual(task_con_obj_list, result)
def test_hash_mod(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
mod_count = [0, 0, 0]
for i in range(1000):
name = 'obj%d' % i
mod = x.hash_mod(name, 3)
mod_count[mod] += 1
# 1000 names are well shuffled
self.assertGreater(mod_count[0], 300)
self.assertGreater(mod_count[1], 300)
self.assertGreater(mod_count[2], 300)
def test_iter_task_accounts_to_expire(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
results = [_ for _ in x.iter_task_accounts_to_expire()]
self.assertEqual(results, [('.expiring_objects', 0, 1)])
self.conf['processes'] = '2'
self.conf['process'] = '1'
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
results = [_ for _ in x.iter_task_accounts_to_expire()]
self.assertEqual(results, [('.expiring_objects', 1, 2)])
def test_delete_at_time_of_task_container(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
self.assertEqual(x.delete_at_time_of_task_container('0000'), 0)
self.assertEqual(x.delete_at_time_of_task_container('0001'), 1)
self.assertEqual(x.delete_at_time_of_task_container('1000'), 1000)
def test_run_once_nothing_to_do(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = 'throw error because a string does not have needed methods'
@ -450,165 +533,128 @@ class TestObjectExpirer(TestCase):
"'str' object has no attribute 'get_account_info'")
def test_run_once_calls_report(self):
fake_swift = FakeInternalClient({})
with mock.patch.object(self.expirer, 'pop_queue',
lambda a, c, o: None):
self.expirer.run_once()
self.assertEqual(
self.expirer.logger.get_lines_for_level('info'), [
'Pass beginning for task account .expiring_objects; '
'2 possible containers; 12 possible objects',
'Pass completed in 0s; 10 objects expired',
])
def test_skip_task_account_without_task_container(self):
fake_swift = FakeInternalClient({
# task account has no containers
'.expiring_objects': dict()
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 0 possible containers; 0 possible objects',
'Pass completed in 0s; 0 objects expired',
])
def test_run_once_unicode_problem(self):
fake_swift = FakeInternalClient({
'.expiring_objects': {u'1234': [u'1234-a/c/troms\xf8']}
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
def test_iter_task_to_expire(self):
# In this test, all tasks are assigned to the tested expirer
my_index = 0
divisor = 1
task_account_container_list = [('.expiring_objects', self.past_time)]
expected = [
self.make_task(self.past_time, target_path)
for target_path in self.expired_target_path_list]
self.assertEqual(
list(self.expirer.iter_task_to_expire(
task_account_container_list, my_index, divisor)),
expected)
# the task queue has invalid task object
invalid_aco_dict = deepcopy(self.fake_swift.aco_dict)
invalid_aco_dict['.expiring_objects'][self.past_time].insert(
0, self.past_time + '-invalid0')
invalid_aco_dict['.expiring_objects'][self.past_time].insert(
5, self.past_time + '-invalid1')
invalid_fake_swift = FakeInternalClient(invalid_aco_dict)
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=invalid_fake_swift)
# but the invalid tasks are skipped
self.assertEqual(
list(x.iter_task_to_expire(
task_account_container_list, my_index, divisor)),
expected)
def test_run_once_unicode_problem(self):
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
with mocked_http_conn(200, 200, 200, give_connect=capture_requests):
x.run_once()
self.assertEqual(len(requests), 3)
# 3 DELETE requests for each 10 executed task objects to pop_queue
code_list = [200] * 3 * 10
with mocked_http_conn(*code_list, give_connect=capture_requests):
self.expirer.run_once()
self.assertEqual(len(requests), 30)
def test_container_timestamp_break(self):
def fail_to_iter_objects(*a, **kw):
raise Exception('This should not have been called')
with mock.patch.object(self.fake_swift, 'iter_objects') as mock_method:
self.expirer.run_once()
fake_swift = FakeInternalClient({
'.expiring_objects': {str(int(time() + 86400)): []}
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
with mock.patch.object(fake_swift, 'iter_objects',
fail_to_iter_objects):
x.run_once()
logs = x.logger.all_log_lines()
self.assertEqual(logs['info'], [
'Pass beginning; 1 possible containers; 0 possible objects',
'Pass completed in 0s; 0 objects expired',
])
self.assertNotIn('error', logs)
# Reverse test to be sure it still would blow up the way expected.
fake_swift = FakeInternalClient({
'.expiring_objects': {str(int(time() - 86400)): []}
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
with mock.patch.object(fake_swift, 'iter_objects',
fail_to_iter_objects):
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('error'), ['Unhandled exception: '])
log_args, log_kwargs = x.logger.log_dict['error'][-1]
self.assertEqual(str(log_kwargs['exc_info'][1]),
'This should not have been called')
# iter_objects is called only for past_time, not future_time
self.assertEqual(mock_method.call_args_list,
[mock.call('.expiring_objects', self.past_time)])
def test_object_timestamp_break(self):
def should_not_be_called(*a, **kw):
raise Exception('This should not have been called')
with mock.patch.object(self.expirer, 'delete_actual_object') \
as mock_method, \
mock.patch.object(self.expirer, 'pop_queue'):
self.expirer.run_once()
fake_swift = FakeInternalClient({
'.expiring_objects': {
str(int(time() - 86400)): [
'%d-a/c/actual-obj' % int(time() + 86400)],
},
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertNotIn('error', x.logger.all_log_lines())
self.assertEqual(x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 1 possible objects',
'Pass completed in 0s; 0 objects expired',
])
# Reverse test to be sure it still would blow up the way expected.
ts = int(time() - 86400)
fake_swift = FakeInternalClient({
'.expiring_objects': {
str(int(time() - 86400)): ['%d-a/c/actual-obj' % ts],
},
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = should_not_be_called
x.run_once()
# executed tasks are with past time
self.assertEqual(
x.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-a/c/actual-obj '
'This should not have been called: ' % (ts, ts)])
mock_method.call_args_list,
[mock.call(target_path, self.past_time)
for target_path in self.expired_target_path_list])
def test_failed_delete_keeps_entry(self):
def deliberately_blow_up(actual_obj, timestamp):
raise Exception('failed to delete actual object')
def should_not_get_called(container, obj):
raise Exception('This should not have been called')
# any tasks are not done
with mock.patch.object(self.expirer, 'delete_actual_object',
deliberately_blow_up), \
mock.patch.object(self.expirer, 'pop_queue') as mock_method:
self.expirer.run_once()
ts = int(time() - 86400)
fake_swift = FakeInternalClient({
'.expiring_objects': {
str(int(time() - 86400)): ['%d-a/c/actual-obj' % ts],
},
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = deliberately_blow_up
x.pop_queue = should_not_get_called
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-a/c/actual-obj '
'failed to delete actual object: ' % (ts, ts)])
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 1 possible objects',
'Pass completed in 0s; 0 objects expired',
])
# no tasks are popped from the queue
self.assertEqual(mock_method.call_args_list, [])
# Reverse test to be sure it still would blow up the way expected.
ts = int(time() - 86400)
fake_swift = FakeInternalClient({
'.expiring_objects': {
str(int(time() - 86400)): ['%d-a/c/actual-obj' % ts],
},
})
self.logger._clear()
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = should_not_get_called
x.run_once()
# all tasks are done
with mock.patch.object(self.expirer, 'delete_actual_object',
lambda o, t: None), \
mock.patch.object(self.expirer, 'pop_queue') as mock_method:
self.expirer.run_once()
# all tasks are popped from the queue
self.assertEqual(
self.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-a/c/actual-obj This '
'should not have been called: ' % (ts, ts)])
mock_method.call_args_list,
[mock.call('.expiring_objects', self.past_time,
self.past_time + '-' + target_path)
for target_path in self.expired_target_path_list])
def test_success_gets_counted(self):
fake_swift = FakeInternalClient({
'.expiring_objects': {
str(int(time() - 86400)): [
'%d-acc/c/actual-obj' % int(time() - 86400)],
},
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0):
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.get_lines_for_level('info'),
['Pass beginning; 1 possible containers; 1 possible objects',
'Pass completed in 0s; 1 objects expired'])
self.assertEqual(self.expirer.report_objects, 0)
with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0), \
mock.patch.object(self.expirer, 'delete_actual_object',
lambda o, t: None), \
mock.patch.object(self.expirer, 'pop_queue',
lambda a, c, o: None):
self.expirer.run_once()
self.assertEqual(self.expirer.report_objects, 10)
def test_delete_actual_object_does_not_get_unicode(self):
got_unicode = [False]
@ -617,24 +663,15 @@ class TestObjectExpirer(TestCase):
if isinstance(actual_obj, six.text_type):
got_unicode[0] = True
fake_swift = FakeInternalClient({
'.expiring_objects': {
str(int(time() - 86400)): [
'%d-a/c/actual-obj' % int(time() - 86400)],
},
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = delete_actual_object_test_for_unicode
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 1 possible objects',
'Pass completed in 0s; 1 objects expired',
])
self.assertEqual(self.expirer.report_objects, 0)
with mock.patch.object(self.expirer, 'delete_actual_object',
delete_actual_object_test_for_unicode), \
mock.patch.object(self.expirer, 'pop_queue',
lambda a, c, o: None):
self.expirer.run_once()
self.assertEqual(self.expirer.report_objects, 10)
self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
@ -644,39 +681,26 @@ class TestObjectExpirer(TestCase):
def fail_delete_actual_object(actual_obj, timestamp):
raise Exception('failed to delete actual object')
cts = int(time() - 86400)
ots = int(time() - 86400)
with mock.patch.object(self.fake_swift, 'delete_container',
fail_delete_container), \
mock.patch.object(self.expirer, 'delete_actual_object',
fail_delete_actual_object):
self.expirer.run_once()
fake_swift = FakeInternalClient({
'.expiring_objects': {
str(cts): [
'%d-a/c/actual-obj' % ots, '%d-a/c/next-obj' % ots],
str(cts + 1): [
'%d-a/c/actual-obj' % ots, '%d-a/c/next-obj' % ots],
},
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = fail_delete_actual_object
with mock.patch.object(fake_swift, 'delete_container',
fail_delete_container):
x.run_once()
error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(sorted(error_lines), sorted([
'Exception while deleting object %d %d-a/c/actual-obj failed to '
'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-a/c/next-obj failed to '
'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-a/c/actual-obj failed to '
'delete actual object: ' % (cts + 1, ots),
'Exception while deleting object %d %d-a/c/next-obj failed to '
'delete actual object: ' % (cts + 1, ots),
'Exception while deleting container %d failed to delete '
'container: ' % (cts,),
'Exception while deleting container %d failed to delete '
'container: ' % (cts + 1,)]))
self.assertEqual(x.logger.get_lines_for_level('info'), [
'Pass beginning; 2 possible containers; 4 possible objects',
error_lines = self.expirer.logger.get_lines_for_level('error')
self.assertEqual(error_lines, [
'Exception while deleting object %s %s %s '
'failed to delete actual object: ' % (
'.expiring_objects', self.past_time,
self.past_time + '-' + target_path)
for target_path in self.expired_target_path_list] + [
'Exception while deleting container %s %s '
'failed to delete container: ' % (
'.expiring_objects', self.past_time)])
self.assertEqual(self.expirer.logger.get_lines_for_level('info'), [
'Pass beginning for task account .expiring_objects; '
'2 possible containers; 12 possible objects',
'Pass completed in 0s; 0 objects expired',
])
@ -848,13 +872,13 @@ class TestObjectExpirer(TestCase):
requests.append((method, path))
with mocked_http_conn(
200, 200, 200, give_connect=capture_requests) as fake_conn:
x.pop_queue('c', 'o')
x.pop_queue('a', 'c', 'o')
self.assertRaises(StopIteration, fake_conn.code_iter.next)
for method, path in requests:
self.assertEqual(method, 'DELETE')
device, part, account, container, obj = utils.split_path(
path, 5, 5, True)
self.assertEqual(account, '.expiring_objects')
self.assertEqual(account, 'a')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o')

View File

@ -27,10 +27,11 @@ from collections import defaultdict
from errno import ENOENT, ENOTEMPTY, ENOTDIR
from eventlet.green import subprocess
from eventlet import Timeout
from eventlet import Timeout, sleep
from test.unit import (debug_logger, patch_policies, make_timestamp_iter,
mocked_http_conn, mock_check_drive, skip_if_no_xattrs)
mocked_http_conn, mock_check_drive, skip_if_no_xattrs,
SkipTest)
from swift.common import utils
from swift.common.utils import (hash_path, mkdirs, normalize_timestamp,
storage_directory)
@ -132,6 +133,32 @@ def _mock_process(ret):
object_replicator.subprocess.Popen = orig_process
class MockHungProcess(object):
    """Stand-in for a subprocess.Popen handle that never exits on its own.

    While in the 'running' state, ``wait()`` blocks (via eventlet sleep) so
    the caller's lockup detector or rsync timeout must fire; every call to
    ``wait``/``terminate``/``kill`` is recorded in ``_calls`` together with
    the state it was made in, so tests can assert the exact shutdown
    sequence the replicator performed.
    """

    def __init__(self, *args, **kwargs):
        class MockStdout(object):
            def read(self):
                pass

        self.stdout = MockStdout()
        self._state = 'running'
        self._calls = []

    def wait(self):
        state = self._state
        self._calls.append(('wait', state))
        if state != 'running':
            return
        # Sleep so we trip either the lockup detector or the rsync timeout
        sleep(1)
        raise BaseException('You need to mock out some timeouts')

    def terminate(self):
        state = self._state
        self._calls.append(('terminate', state))
        if state == 'running':
            self._state = 'terminating'

    def kill(self):
        self._calls.append(('kill', self._state))
        self._state = 'killed'
def _create_test_rings(path, devs=None, next_part_power=None):
testgz = os.path.join(path, 'object.ring.gz')
intended_replica2part2dev_id = [
@ -2009,6 +2036,68 @@ class TestObjectReplicator(unittest.TestCase):
self.assertIn(
"next_part_power set in policy 'one'. Skipping", warnings)
def test_replicate_lockup_detector(self):
    # Intended to verify that the replicator's lockup detector
    # (``lockup_timeout``) terminates a hung rsync subprocess with the
    # sequence wait -> terminate -> wait.  The test proved flaky, so it is
    # short-circuited; the body below is unreachable but kept for when the
    # flakiness is fixed.
    raise SkipTest("this is not a reliable test and must be fixed")
    cur_part = '0'
    df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
                                  policy=POLICIES[0])
    mkdirs(df._datadir)
    # Seed the partition with a single .data file so replication has work.
    f = open(os.path.join(df._datadir,
                          normalize_timestamp(time.time()) + '.data'),
             'wb')
    f.write('1234567890')
    f.close()

    mock_procs = []

    # Every Popen the replicator spawns becomes a MockHungProcess that we
    # can interrogate afterwards.
    def new_mock(*a, **kw):
        proc = MockHungProcess()
        mock_procs.append(proc)
        return proc

    with mock.patch('swift.obj.replicator.http_connect',
                    mock_http_connect(200)), \
            mock.patch.object(self.replicator, 'lockup_timeout', 0.01), \
            mock.patch('eventlet.green.subprocess.Popen', new_mock):
        self.replicator.replicate()
    # Lockup detector path: the process is terminate()d (not kill()ed),
    # then reaped with a final wait().
    for proc in mock_procs:
        self.assertEqual(proc._calls, [
            ('wait', 'running'),
            ('terminate', 'running'),
            ('wait', 'terminating'),
        ])
    self.assertEqual(len(mock_procs), 1)
def test_replicate_rsync_timeout(self):
    """Replication must kill an rsync subprocess exceeding rsync_timeout.

    A hung ``Popen`` stand-in replaces the real rsync; once the (tiny)
    ``rsync_timeout`` fires, the replicator is expected to ``kill()`` the
    process and then reap it with a final ``wait()``.
    """
    cur_part = '0'
    df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
                                  policy=POLICIES[0])
    mkdirs(df._datadir)
    # Seed the partition with one .data file so there is something to
    # rsync.  Use a context manager so the handle is closed even if the
    # write fails (the original leaked the handle on error).
    with open(os.path.join(df._datadir,
                           normalize_timestamp(time.time()) + '.data'),
              'wb') as f:
        f.write('1234567890')

    mock_procs = []

    # Every Popen the replicator spawns becomes a MockHungProcess that we
    # can interrogate afterwards.
    def new_mock(*a, **kw):
        proc = MockHungProcess()
        mock_procs.append(proc)
        return proc

    with mock.patch('swift.obj.replicator.http_connect',
                    mock_http_connect(200)), \
            mock.patch.object(self.replicator, 'rsync_timeout', 0.01), \
            mock.patch('eventlet.green.subprocess.Popen', new_mock):
        self.replicator.replicate()
    # Timeout path: the process is kill()ed outright (contrast with the
    # lockup detector, which terminate()s first), then reaped.
    for proc in mock_procs:
        self.assertEqual(proc._calls, [
            ('wait', 'running'),
            ('kill', 'running'),
            ('wait', 'killed'),
        ])
    self.assertEqual(len(mock_procs), 2)
if __name__ == '__main__':
unittest.main()

View File

@ -6064,6 +6064,68 @@ class TestObjectController(unittest.TestCase):
except OSError as err:
self.assertEqual(err.errno, errno.ENOENT)
def test_x_if_delete_at_formats(self):
    """Exercise the accepted/rejected formats of the X-If-Delete-At header.

    PUTs an object with an X-Delete-At, then issues a DELETE carrying
    various X-If-Delete-At spellings and checks the resulting status:
    204 for an exactly-matching normal timestamp, 400 for malformed
    values, 412 for well-formed but non-matching timestamps.
    """
    policy = POLICIES.get_by_index(0)
    test_time = time()
    put_time = test_time
    delete_time = test_time + 1
    delete_at_timestamp = int(test_time + 10000)
    # Round the timestamp down to its expiring-objects container.  Use
    # floor division: with true division (Python 3 semantics, or a future
    # import) ``/`` would yield a float and produce a wrong container
    # name; ``//`` is identical for ints under Python 2.
    delete_at_container = str(
        delete_at_timestamp //
        self.object_controller.expiring_objects_container_divisor *
        self.object_controller.expiring_objects_container_divisor)

    def do_test(if_delete_at, expected_status):
        # PUT the object fresh for each case so every DELETE sees the
        # same starting state.
        req = Request.blank(
            '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(put_time),
                     'X-Delete-At': str(delete_at_timestamp),
                     'X-Delete-At-Container': delete_at_container,
                     'Content-Length': '4',
                     'Content-Type': 'application/octet-stream'})
        req.body = 'TEST'
        # Mock out async_update so we don't get any async_pending files.
        with mock.patch.object(self.object_controller, 'async_update'):
            resp = req.get_response(self.object_controller)
        self.assertEqual(resp.status_int, 201)

        req = Request.blank(
            '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
            headers={'X-Timestamp': normalize_timestamp(delete_time),
                     'X-Backend-Clean-Expiring-Object-Queue': 'false',
                     'X-If-Delete-At': if_delete_at})
        # Again, we don't care about async_pending files (for this test)
        with mock.patch.object(self.object_controller, 'async_update'):
            resp = req.get_response(self.object_controller)
        self.assertEqual(resp.status_int, expected_status)

        # Clean up the tombstone (on success) or data file (on failure)
        # so the next case starts from an empty datadir.
        objfile = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
                                           policy=policy)
        files = os.listdir(objfile._datadir)
        self.assertEqual(len(files), 1,
                         'Expected to find one file, got %r' % files)
        if expected_status == 204:
            self.assertTrue(files[0].endswith('.ts'),
                            'Expected a tombstone, found %r' % files[0])
        else:
            self.assertTrue(files[0].endswith('.data'),
                            'Expected a data file, found %r' % files[0])
        os.unlink(os.path.join(objfile._datadir, files[0]))

    # More as a reminder than anything else
    self.assertIsInstance(delete_at_timestamp, int)

    do_test(str(delete_at_timestamp), 204)
    do_test(str(delete_at_timestamp) + ':', 400)
    do_test(Timestamp(delete_at_timestamp).isoformat, 400)
    do_test(Timestamp(delete_at_timestamp).normal, 204)
    do_test(Timestamp(delete_at_timestamp, delta=1).normal, 412)
    do_test(Timestamp(delete_at_timestamp, delta=-1).normal, 412)
    do_test(Timestamp(delete_at_timestamp, offset=1).internal, 412)
    do_test(Timestamp(delete_at_timestamp, offset=15).internal, 412)
def test_DELETE_but_expired(self):
test_time = time() + 10000
delete_at_timestamp = int(test_time + 100)

View File

@ -25,6 +25,6 @@ function is_rhel7 {
if is_rhel7; then
# Install CentOS OpenStack repos so that we have access to some extra
# packages.
sudo yum install -y centos-release-openstack-pike
sudo yum install -y centos-release-openstack-queens
sudo yum install -y liberasurecode-devel
fi

13
tox.ini
View File

@ -29,6 +29,10 @@ setenv = VIRTUAL_ENV={envdir}
[testenv:py34]
commands =
nosetests \
test/unit/cli/test_dispersion_report.py \
test/unit/cli/test_form_signature.py \
test/unit/cli/test_info.py \
test/unit/cli/test_relinker.py \
test/unit/cli/test_ring_builder_analyzer.py \
test/unit/cli/test_ringbuilder.py \
test/unit/common/ring \
@ -36,10 +40,12 @@ commands =
test/unit/common/test_exceptions.py \
test/unit/common/test_header_key_dict.py \
test/unit/common/test_linkat.py \
test/unit/common/test_memcached.py \
test/unit/common/test_manager.py \
test/unit/common/test_splice.py \
test/unit/common/test_storage_policy.py \
test/unit/common/test_utils.py
test/unit/common/test_utils.py \
test/unit/common/test_wsgi.py
[testenv:py35]
commands = {[testenv:py34]commands}
@ -71,6 +77,11 @@ commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption
[testenv:func-domain-remap-staticweb]
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=domain_remap_staticweb
[testenv:func-ec]
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1