Merge 'remotes/origin/master' into feature/crypto

Change-Id: I4bf6297bcade7db29c9a8eb8eeaeb8f2101b1666
This commit is contained in:
Alistair Coles 2015-03-16 13:44:22 +00:00
commit 555ff2631e
25 changed files with 616 additions and 169 deletions

View File

@ -89,11 +89,11 @@ Developer Tools
---------------
* `vagrant-swift-all-in-one
<https://github.com/swiftstack/vagrant-swift-all-in-one>`_ - Quickly setup a
standard development using Vagrant and chef cookbooks in an Ubuntu virtual
machine.
standard development environment using Vagrant and Chef cookbooks in an
Ubuntu virtual machine.
* `SAIO Ansible playbook <https://github.com/thiagodasilva/swift-aio>`_ -
Quickly setup a standard develop enviornment using Vagrant and ansible in a
Fedora virtual machine (with built-in `Swift-on-File
Quickly setup a standard development environment using Vagrant and Ansible in
a Fedora virtual machine (with built-in `Swift-on-File
<https://github.com/stackforge/swiftonfile>`_ support).
Other

View File

@ -2,9 +2,28 @@
Instructions for a Multiple Server Swift Installation
=====================================================
Please refer to the installation guides at:
http://docs.openstack.org, including
* Debian 7.0: http://docs.openstack.org/icehouse/install-guide/install/apt-debian/content/ch_swift.html
* openSUSE and SUSE Linux Enterprise Server: http://docs.openstack.org/icehouse/install-guide/install/zypper/content/ch_swift.html
* Red Hat Enterprise Linux, CentOS, and Fedora: http://docs.openstack.org/icehouse/install-guide/install/yum/content/ch_swift.html
* Ubuntu 12.04/14.04: http://docs.openstack.org/trunk/install-guide/install/apt/content/ch_swift.html
Please refer to the latest official
`OpenStack Installation Guides <http://docs.openstack.org/#install-guides>`_
for the most up-to-date documentation.
Object Storage installation guide for OpenStack Juno
----------------------------------------------------
* `openSUSE 13.1 and SUSE Linux Enterprise Server 11 <http://docs.openstack.org/juno/install-guide/install/zypper/content/ch_swift.html>`_
* `RHEL 7, CentOS 7, and Fedora 20 <http://docs.openstack.org/juno/install-guide/install/yum/content/ch_swift.html>`_
* `Ubuntu 14.04 <http://docs.openstack.org/juno/install-guide/install/apt/content/ch_swift.html>`_
Object Storage installation guide for OpenStack Icehouse
--------------------------------------------------------
* `openSUSE and SUSE Linux Enterprise Server <http://docs.openstack.org/icehouse/install-guide/install/zypper/content/ch_swift.html>`_
* `Red Hat Enterprise Linux, CentOS, and Fedora <http://docs.openstack.org/icehouse/install-guide/install/yum/content/ch_swift.html>`_
* `Ubuntu 12.04/14.04 (LTS) <http://docs.openstack.org/icehouse/install-guide/install/apt/content/ch_swift.html>`_
Object Storage installation guide for OpenStack Havana
------------------------------------------------------
* `Debian 7.0 <http://docs.openstack.org/havana/install-guide/install/apt-debian/content/ch_swift.html>`_
* `openSUSE and SUSE Linux Enterprise Server <http://docs.openstack.org/havana/install-guide/install/zypper/content/ch_swift.html>`_
* `Red Hat Enterprise Linux, CentOS, and Fedora <http://docs.openstack.org/havana/install-guide/install/yum/content/ch_swift.html>`_
* `Ubuntu 12.04 (LTS) <http://docs.openstack.org/havana/install-guide/install/apt/content/ch_swift.html>`_

View File

@ -109,6 +109,34 @@ class Scout(object):
url, content, status = self.scout_host(base_url, self.recon_type)
return url, content, status
def scout_server_type(self, host):
    """
    Obtain the Server header of a host by issuing an OPTIONS request.

    :param host: host to check, as a (hostname_or_ip, port) pair
    :returns: tuple of (url, content, status) where content is the server
        type string on success or the caught exception on failure, and
        status is 200 on success, the HTTP error code on HTTPError, or
        -1 when the host could not be reached at all
    """
    try:
        url = "http://%s:%s/" % (host[0], host[1])
        req = urllib2.Request(url)
        # urllib2 has no direct support for OPTIONS; override the
        # method resolution hook to force it
        req.get_method = lambda: 'OPTIONS'
        conn = urllib2.urlopen(req)
        header = conn.info().getheader('Server')
        # Server header is of the form "<type>/<version>"; keep the type
        server_header = header.split('/')
        content = server_header[0]
        status = 200
    except urllib2.HTTPError as err:
        if not self.suppress_errors or self.verbose:
            print("-> %s: %s" % (url, err))
        content = err
        status = err.code
    except urllib2.URLError as err:
        # host unreachable / connection-level failure
        if not self.suppress_errors or self.verbose:
            print("-> %s: %s" % (url, err))
        content = err
        status = -1
    return url, content, status
class SwiftRecon(object):
"""
@ -334,6 +362,29 @@ class SwiftRecon(object):
print("Device errors: %s on %s" % (entry, node))
print("=" * 79)
def server_type_check(self, hosts):
    """
    Verify that every host on the ring reports the expected server type.

    :param hosts: set of hosts to check. in the format of:
        set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
    """
    mismatches = {}
    scout = Scout("server_type_check", self.verbose, self.suppress_errors,
                  self.timeout)
    print("[%s] Validating server type '%s' on %s hosts..." %
          (self._ptime(), self.server_type, len(hosts)))
    expected = self.server_type + '-server'
    for url, response, status in self.pool.imap(
            scout.scout_server_type, hosts):
        # only a 200 carries a usable Server header; anything wrong
        # with the header itself is recorded as a mismatch
        if status == 200 and response != expected:
            mismatches[url] = response
    print("%s/%s hosts ok, %s error[s] while checking hosts." % (
        len(hosts) - len(mismatches), len(hosts), len(mismatches)))
    for bad_url, found_type in mismatches.items():
        print("Invalid: %s is %s" % (bad_url, found_type))
    print("=" * 79)
def expirer_check(self, hosts):
"""
Obtain and print expirer statistics
@ -872,6 +923,8 @@ class SwiftRecon(object):
help="Get cluster load average stats")
args.add_option('--quarantined', '-q', action="store_true",
help="Get cluster quarantine stats")
args.add_option('--validate-servers', action="store_true",
help="Validate servers on the ring")
args.add_option('--md5', action="store_true",
help="Get md5sum of servers ring and compare to "
"local copy")
@ -938,6 +991,7 @@ class SwiftRecon(object):
self.get_ringmd5(hosts, swift_dir)
self.quarantine_check(hosts)
self.socket_usage(hosts)
self.server_type_check(hosts)
else:
if options.async:
if self.server_type == 'object':
@ -966,6 +1020,8 @@ class SwiftRecon(object):
self.expirer_check(hosts)
else:
print("Error: Can't check expired on non object servers.")
if options.validate_servers:
self.server_type_check(hosts)
if options.loadstats:
self.load_check(hosts)
if options.diskusage:

View File

@ -338,12 +338,12 @@ class DatabaseBroker(object):
self.db_type + 's',
os.path.basename(self.db_dir))
try:
renamer(self.db_dir, quar_path)
renamer(self.db_dir, quar_path, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quar_path = "%s-%s" % (quar_path, uuid4().hex)
renamer(self.db_dir, quar_path)
renamer(self.db_dir, quar_path, fsync=False)
detail = _('Quarantined %s to %s due to %s database') % \
(self.db_dir, quar_path, exc_hint)
self.logger.error(detail)

View File

@ -59,12 +59,12 @@ def quarantine_db(object_file, server_type):
os.path.join(object_dir, '..', '..', '..', '..', 'quarantined',
server_type + 's', os.path.basename(object_dir)))
try:
renamer(object_dir, quarantine_dir)
renamer(object_dir, quarantine_dir, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quarantine_dir = "%s-%s" % (quarantine_dir, uuid.uuid4().hex)
renamer(object_dir, quarantine_dir)
renamer(object_dir, quarantine_dir, fsync=False)
def roundrobin_datadirs(datadirs):

View File

@ -32,7 +32,7 @@ from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.middleware.acl import (
clean_acl, parse_acl, referrer_allowed, acls_from_account_info)
from swift.common.utils import cache_from_env, get_logger, \
split_path, config_true_value
split_path, config_true_value, register_swift_info
from swift.common.utils import config_read_reseller_options
from swift.proxy.controllers.base import get_account_info
@ -769,6 +769,7 @@ def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info('tempauth', account_acls=True)
def auth_filter(app):
return TempAuth(app, conf)

View File

@ -851,10 +851,8 @@ class RingBuilder(object):
# Only allow a part to be gathered if there are wanted
# parts on other tiers.
if (rep_at_tier > max_allowed_replicas[tier] and
self._last_part_moves[part] >=
self.min_part_hours and
available_parts_for_tier > 0):
if (self._last_part_moves[part] >= self.min_part_hours
and available_parts_for_tier > 0):
self._last_part_moves[part] = 0
spread_out_parts[part].append(replica)
dev['parts_wanted'] += 1

View File

@ -645,6 +645,27 @@ def fdatasync(fd):
fsync(fd)
def fsync_dir(dirpath):
    """
    Sync directory entries to disk.

    Opens the directory read-only with O_DIRECTORY and fsyncs the
    resulting descriptor so that metadata changes (renames, creates)
    within it are durable.

    :param dirpath: Path to the directory to be synced.
    :raises OSError: if dirpath exists but is not a directory (ENOTDIR);
        any other OSError is logged as a warning and suppressed.
    """
    dirfd = None
    try:
        dirfd = os.open(dirpath, os.O_DIRECTORY | os.O_RDONLY)
        fsync(dirfd)
    except OSError as err:
        if err.errno == errno.ENOTDIR:
            # Raise error if someone calls fsync_dir on a non-directory
            raise
        logging.warn(_("Unable to perform fsync() on directory %s: %s"),
                     dirpath, os.strerror(err.errno))
    finally:
        # Compare against None rather than relying on truthiness: file
        # descriptor 0 is valid but falsy and would otherwise be leaked.
        if dirfd is not None:
            os.close(dirfd)
def drop_buffer_cache(fd, offset, length):
"""
Drop 'buffer' cache for the given range of the given file.
@ -856,20 +877,66 @@ def mkdirs(path):
raise
def renamer(old, new):
def makedirs_count(path, count=0):
    """
    Same as os.makedirs() except that this method returns the number of
    new directories that had to be created.

    Also, this does not raise an error if target directory already exists.
    This behaviour is similar to Python 3.x's os.makedirs() called with
    exist_ok=True. Also similar to swift.common.utils.mkdirs()

    https://hg.python.org/cpython/file/v3.4.2/Lib/os.py#l212

    :param path: directory path to create
    :param count: number of directories already created (used internally
                  by the recursion)
    :returns: total number of new directories created
    """
    head, tail = os.path.split(path)
    if not tail:
        head, tail = os.path.split(head)
    if head and tail and not os.path.exists(head):
        count = makedirs_count(head, count)
        if tail == os.path.curdir:
            # 'xxx/newdir/.' exists if 'xxx/newdir' exists; return the
            # accumulated count rather than a bare None so that callers
            # (e.g. renamer) can rely on an integer result.
            return count
    try:
        os.mkdir(path)
    except OSError as e:
        # EEXIST may also be raised if path exists as a file
        # Do not let that pass.
        if e.errno != errno.EEXIST or not os.path.isdir(path):
            raise
    else:
        count += 1
    return count
def renamer(old, new, fsync=True):
    """
    Attempt to fix / hide race conditions like empty object directories
    being removed by backend processes during uploads, by retrying.

    The containing directory of 'new' and of all newly created directories are
    fsync'd by default. This _will_ come at a performance penalty. In cases
    where these additional fsyncs are not necessary, it is expected that the
    caller of renamer() turn it off explicitly.

    :param old: old path to be renamed
    :param new: new path to be renamed to
    :param fsync: fsync on containing directory of new and also all
                  the newly created directories.
    """
    target_dir = os.path.dirname(new)
    # One retry: a backend process may remove the freshly created
    # directory between makedirs and rename; recreate and try again,
    # letting a second failure propagate.
    for attempt in (0, 1):
        try:
            created = makedirs_count(target_dir)
            os.rename(old, new)
            break
        except OSError:
            if attempt:
                raise
    if fsync:
        # Even when created == 0 the leaf directory must be fsync'd after
        # os.rename(); when created > 0, walk upward fsyncing the parent
        # of every directory makedirs_count() created.
        for _junk in range(created + 1):
            fsync_dir(target_dir)
            target_dir = os.path.dirname(target_dir)
def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
@ -2490,7 +2557,7 @@ def dump_recon_cache(cache_dict, cache_file, logger, lock_timeout=2):
with NamedTemporaryFile(dir=os.path.dirname(cache_file),
delete=False) as tf:
tf.write(json.dumps(cache_entry) + '\n')
os.rename(tf.name, cache_file)
renamer(tf.name, cache_file, fsync=False)
finally:
try:
os.unlink(tf.name)

View File

@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: swift 2.2.2.post63\n"
"Project-Id-Version: swift 2.2.2.post96\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-02-16 06:30+0000\n"
"POT-Creation-Date: 2015-02-27 06:14+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@ -63,7 +63,7 @@ msgstr ""
msgid "ERROR Could not get account info %s"
msgstr ""
#: swift/account/reaper.py:133 swift/common/utils.py:1992
#: swift/account/reaper.py:133 swift/common/utils.py:2058
#: swift/obj/diskfile.py:468 swift/obj/updater.py:87 swift/obj/updater.py:130
#, python-format
msgid "Skipping %s as it is not mounted"
@ -142,19 +142,19 @@ msgstr ""
msgid "Account %s has not been reaped since %s"
msgstr ""
#: swift/account/reaper.py:348 swift/account/reaper.py:392
#: swift/account/reaper.py:454 swift/container/updater.py:306
#: swift/account/reaper.py:348 swift/account/reaper.py:396
#: swift/account/reaper.py:463 swift/container/updater.py:306
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr ""
#: swift/account/reaper.py:364
#: swift/account/reaper.py:368
#, python-format
msgid "Exception with objects for container %(container)s for account %(account)s"
msgstr ""
#: swift/account/server.py:275 swift/container/server.py:582
#: swift/obj/server.py:723
#: swift/obj/server.py:730
#, python-format
msgid "ERROR __call__ error with %(method)s %(path)s "
msgstr ""
@ -377,95 +377,95 @@ msgstr ""
msgid "ERROR: An error occurred while retrieving segments"
msgstr ""
#: swift/common/utils.py:322
#: swift/common/utils.py:388
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr ""
#: swift/common/utils.py:512
#: swift/common/utils.py:578
msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
#: swift/common/utils.py:939
#: swift/common/utils.py:1005
msgid "STDOUT: Connection reset by peer"
msgstr ""
#: swift/common/utils.py:941 swift/common/utils.py:944
#: swift/common/utils.py:1007 swift/common/utils.py:1010
#, python-format
msgid "STDOUT: %s"
msgstr ""
#: swift/common/utils.py:1179
#: swift/common/utils.py:1245
msgid "Connection refused"
msgstr ""
#: swift/common/utils.py:1181
#: swift/common/utils.py:1247
msgid "Host unreachable"
msgstr ""
#: swift/common/utils.py:1183
#: swift/common/utils.py:1249
msgid "Connection timeout"
msgstr ""
#: swift/common/utils.py:1485
#: swift/common/utils.py:1551
msgid "UNCAUGHT EXCEPTION"
msgstr ""
#: swift/common/utils.py:1540
#: swift/common/utils.py:1606
msgid "Error: missing config path argument"
msgstr ""
#: swift/common/utils.py:1545
#: swift/common/utils.py:1611
#, python-format
msgid "Error: unable to locate %s"
msgstr ""
#: swift/common/utils.py:1853
#: swift/common/utils.py:1919
#, python-format
msgid "Unable to read config from %s"
msgstr ""
#: swift/common/utils.py:1859
#: swift/common/utils.py:1925
#, python-format
msgid "Unable to find %s config section in %s"
msgstr ""
#: swift/common/utils.py:2213
#: swift/common/utils.py:2279
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr ""
#: swift/common/utils.py:2218
#: swift/common/utils.py:2284
#, python-format
msgid "No realm key for %r"
msgstr ""
#: swift/common/utils.py:2222
#: swift/common/utils.py:2288
#, python-format
msgid "No cluster endpoint for %r %r"
msgstr ""
#: swift/common/utils.py:2231
#: swift/common/utils.py:2297
#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
msgstr ""
#: swift/common/utils.py:2235
#: swift/common/utils.py:2301
msgid "Path required in X-Container-Sync-To"
msgstr ""
#: swift/common/utils.py:2238
#: swift/common/utils.py:2304
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr ""
#: swift/common/utils.py:2243
#: swift/common/utils.py:2309
#, python-format
msgid "Invalid host %r in X-Container-Sync-To"
msgstr ""
#: swift/common/utils.py:2435
#: swift/common/utils.py:2501
msgid "Exception dumping recon cache"
msgstr ""
@ -697,8 +697,8 @@ msgstr ""
msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr ""
#: swift/container/updater.py:91 swift/obj/replicator.py:479
#: swift/obj/replicator.py:565
#: swift/container/updater.py:91 swift/obj/replicator.py:483
#: swift/obj/replicator.py:569
#, python-format
msgid "%s is not mounted"
msgstr ""
@ -907,88 +907,88 @@ msgstr ""
msgid "Removing %s objects"
msgstr ""
#: swift/obj/replicator.py:281
#, python-format
msgid "Removing partition: %s"
msgstr ""
#: swift/obj/replicator.py:285
msgid "Error syncing handoff partition"
msgstr ""
#: swift/obj/replicator.py:342
#: swift/obj/replicator.py:291
#, python-format
msgid "Removing partition: %s"
msgstr ""
#: swift/obj/replicator.py:346
#, python-format
msgid "%(ip)s/%(device)s responded as unmounted"
msgstr ""
#: swift/obj/replicator.py:347
#: swift/obj/replicator.py:351
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr ""
#: swift/obj/replicator.py:382
#: swift/obj/replicator.py:386
#, python-format
msgid "Error syncing with node: %s"
msgstr ""
#: swift/obj/replicator.py:386
#: swift/obj/replicator.py:390
msgid "Error syncing partition"
msgstr ""
#: swift/obj/replicator.py:399
#: swift/obj/replicator.py:403
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
#: swift/obj/replicator.py:410
#: swift/obj/replicator.py:414
#, python-format
msgid ""
"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
"synced"
msgstr ""
#: swift/obj/replicator.py:417
#: swift/obj/replicator.py:421
#, python-format
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
#: swift/obj/replicator.py:425
#: swift/obj/replicator.py:429
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr ""
#: swift/obj/replicator.py:454
#: swift/obj/replicator.py:458
msgid "Lockup detected.. killing live coros."
msgstr ""
#: swift/obj/replicator.py:568
#: swift/obj/replicator.py:572
msgid "Ring change detected. Aborting current replication pass."
msgstr ""
#: swift/obj/replicator.py:589
#: swift/obj/replicator.py:593
msgid "Exception in top-level replication loop"
msgstr ""
#: swift/obj/replicator.py:598
#: swift/obj/replicator.py:602
msgid "Running object replicator in script mode."
msgstr ""
#: swift/obj/replicator.py:616
#: swift/obj/replicator.py:620
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr ""
#: swift/obj/replicator.py:623
#: swift/obj/replicator.py:627
msgid "Starting object replicator in daemon mode."
msgstr ""
#: swift/obj/replicator.py:627
#: swift/obj/replicator.py:631
msgid "Starting object replication pass."
msgstr ""
#: swift/obj/replicator.py:632
#: swift/obj/replicator.py:636
#, python-format
msgid "Object replication complete. (%.02f minutes)"
msgstr ""
@ -1137,7 +1137,7 @@ msgstr ""
msgid "%(type)s returning 503 for %(statuses)s"
msgstr ""
#: swift/proxy/controllers/container.py:95 swift/proxy/controllers/obj.py:117
#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:117
msgid "Container"
msgstr ""

View File

@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-02-16 06:30+0000\n"
"PO-Revision-Date: 2015-02-13 19:15+0000\n"
"POT-Creation-Date: 2015-02-27 06:14+0000\n"
"PO-Revision-Date: 2015-02-25 18:23+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: Chinese (China) "
"(http://www.transifex.com/projects/p/swift/language/zh_CN/)\n"
@ -65,7 +65,7 @@ msgstr "审计失败%s: %s"
msgid "ERROR Could not get account info %s"
msgstr "错误:无法获取账号信息%s"
#: swift/account/reaper.py:133 swift/common/utils.py:1992
#: swift/account/reaper.py:133 swift/common/utils.py:2058
#: swift/obj/diskfile.py:468 swift/obj/updater.py:87 swift/obj/updater.py:130
#, python-format
msgid "Skipping %s as it is not mounted"
@ -144,19 +144,19 @@ msgstr ",耗时:%.02fs"
msgid "Account %s has not been reaped since %s"
msgstr "账号%s自%s起未被reaped"
#: swift/account/reaper.py:348 swift/account/reaper.py:392
#: swift/account/reaper.py:454 swift/container/updater.py:306
#: swift/account/reaper.py:348 swift/account/reaper.py:396
#: swift/account/reaper.py:463 swift/container/updater.py:306
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s出现异常"
#: swift/account/reaper.py:364
#: swift/account/reaper.py:368
#, python-format
msgid "Exception with objects for container %(container)s for account %(account)s"
msgstr "账号%(account)s容器%(container)s的对象出现异常"
#: swift/account/server.py:275 swift/container/server.py:582
#: swift/obj/server.py:723
#: swift/obj/server.py:730
#, python-format
msgid "ERROR __call__ error with %(method)s %(path)s "
msgstr "%(method)s %(path)s出现错误__call__ error"
@ -381,95 +381,95 @@ msgstr "服务器出现错误%s "
msgid "ERROR: An error occurred while retrieving segments"
msgstr ""
#: swift/common/utils.py:322
#: swift/common/utils.py:388
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "无法查询到%s 保留为no-op"
#: swift/common/utils.py:512
#: swift/common/utils.py:578
msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr "无法查询到fallocate posix_fallocate。保存为no-op"
#: swift/common/utils.py:939
#: swift/common/utils.py:1005
msgid "STDOUT: Connection reset by peer"
msgstr "STDOUT连接被peer重新设置"
#: swift/common/utils.py:941 swift/common/utils.py:944
#: swift/common/utils.py:1007 swift/common/utils.py:1010
#, python-format
msgid "STDOUT: %s"
msgstr "STDOUT: %s"
#: swift/common/utils.py:1179
#: swift/common/utils.py:1245
msgid "Connection refused"
msgstr "连接被拒绝"
#: swift/common/utils.py:1181
#: swift/common/utils.py:1247
msgid "Host unreachable"
msgstr "无法连接到主机"
#: swift/common/utils.py:1183
#: swift/common/utils.py:1249
msgid "Connection timeout"
msgstr "连接超时"
#: swift/common/utils.py:1485
#: swift/common/utils.py:1551
msgid "UNCAUGHT EXCEPTION"
msgstr "未捕获的异常"
#: swift/common/utils.py:1540
#: swift/common/utils.py:1606
msgid "Error: missing config path argument"
msgstr "错误:设置路径信息丢失"
#: swift/common/utils.py:1545
#: swift/common/utils.py:1611
#, python-format
msgid "Error: unable to locate %s"
msgstr "错误:无法查询到 %s"
#: swift/common/utils.py:1853
#: swift/common/utils.py:1919
#, python-format
msgid "Unable to read config from %s"
msgstr "无法从%s读取设置"
#: swift/common/utils.py:1859
#: swift/common/utils.py:1925
#, python-format
msgid "Unable to find %s config section in %s"
msgstr "无法在%s中查找到%s设置部分"
#: swift/common/utils.py:2213
#: swift/common/utils.py:2279
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "无效的X-Container-Sync-To格式%r"
#: swift/common/utils.py:2218
#: swift/common/utils.py:2284
#, python-format
msgid "No realm key for %r"
msgstr "%r权限key不存在"
#: swift/common/utils.py:2222
#: swift/common/utils.py:2288
#, python-format
msgid "No cluster endpoint for %r %r"
msgstr "%r %r的集群节点不存在"
#: swift/common/utils.py:2231
#: swift/common/utils.py:2297
#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
msgstr "在X-Container-Sync-To中%r是无效的方案须为\"//\", \"http\", or \"https\"。"
#: swift/common/utils.py:2235
#: swift/common/utils.py:2301
msgid "Path required in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中路径是必须的"
#: swift/common/utils.py:2238
#: swift/common/utils.py:2304
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中变量查询和碎片不被允许"
#: swift/common/utils.py:2243
#: swift/common/utils.py:2309
#, python-format
msgid "Invalid host %r in X-Container-Sync-To"
msgstr "X-Container-Sync-To中无效主机%r"
#: swift/common/utils.py:2435
#: swift/common/utils.py:2501
msgid "Exception dumping recon cache"
msgstr "执行dump recon的时候出现异常"
@ -705,8 +705,8 @@ msgstr "同步错误 %(db_file)s %(row)s"
msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr "%s未挂载"
#: swift/container/updater.py:91 swift/obj/replicator.py:479
#: swift/obj/replicator.py:565
#: swift/container/updater.py:91 swift/obj/replicator.py:483
#: swift/obj/replicator.py:569
#, python-format
msgid "%s is not mounted"
msgstr "%s未挂载"
@ -925,35 +925,35 @@ msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)"
msgid "Removing %s objects"
msgstr ""
#: swift/obj/replicator.py:281
#, python-format
msgid "Removing partition: %s"
msgstr "移除分区:%s"
#: swift/obj/replicator.py:285
msgid "Error syncing handoff partition"
msgstr "执行同步切换分区时发生错误"
#: swift/obj/replicator.py:342
#: swift/obj/replicator.py:291
#, python-format
msgid "Removing partition: %s"
msgstr "移除分区:%s"
#: swift/obj/replicator.py:346
#, python-format
msgid "%(ip)s/%(device)s responded as unmounted"
msgstr "%(ip)s/%(device)s的回应为未挂载"
#: swift/obj/replicator.py:347
#: swift/obj/replicator.py:351
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "无效的回应%(resp)s来自%(ip)s"
#: swift/obj/replicator.py:382
#: swift/obj/replicator.py:386
#, python-format
msgid "Error syncing with node: %s"
msgstr "执行同步时节点%s发生错误"
#: swift/obj/replicator.py:386
#: swift/obj/replicator.py:390
msgid "Error syncing partition"
msgstr "执行同步分区时发生错误"
#: swift/obj/replicator.py:399
#: swift/obj/replicator.py:403
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -962,53 +962,53 @@ msgstr ""
"%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n"
"\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
#: swift/obj/replicator.py:410
#: swift/obj/replicator.py:414
#, python-format
msgid ""
"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
"synced"
msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced"
#: swift/obj/replicator.py:417
#: swift/obj/replicator.py:421
#, python-format
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs"
#: swift/obj/replicator.py:425
#: swift/obj/replicator.py:429
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "%s秒无复制"
#: swift/obj/replicator.py:454
#: swift/obj/replicator.py:458
msgid "Lockup detected.. killing live coros."
msgstr "检测到lockup。终止正在执行的coros"
#: swift/obj/replicator.py:568
#: swift/obj/replicator.py:572
msgid "Ring change detected. Aborting current replication pass."
msgstr "Ring改变被检测到。退出现有的复制通过"
#: swift/obj/replicator.py:589
#: swift/obj/replicator.py:593
msgid "Exception in top-level replication loop"
msgstr "top-level复制圈出现异常"
#: swift/obj/replicator.py:598
#: swift/obj/replicator.py:602
msgid "Running object replicator in script mode."
msgstr "在加密模式下执行对象复制"
#: swift/obj/replicator.py:616
#: swift/obj/replicator.py:620
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "对象复制完成(一次)。(%.02f minutes)"
#: swift/obj/replicator.py:623
#: swift/obj/replicator.py:627
msgid "Starting object replicator in daemon mode."
msgstr "在守护模式下开始对象复制"
#: swift/obj/replicator.py:627
#: swift/obj/replicator.py:631
msgid "Starting object replication pass."
msgstr "开始通过对象复制"
#: swift/obj/replicator.py:632
#: swift/obj/replicator.py:636
#, python-format
msgid "Object replication complete. (%.02f minutes)"
msgstr "对象复制完成。(%.02f minutes)"
@ -1157,7 +1157,7 @@ msgstr ""
msgid "%(type)s returning 503 for %(statuses)s"
msgstr "%(type)s 返回 503 在 %(statuses)s"
#: swift/proxy/controllers/container.py:95 swift/proxy/controllers/obj.py:117
#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:117
msgid "Container"
msgstr "容器"

View File

@ -203,12 +203,12 @@ def quarantine_renamer(device_path, corrupted_file_path):
basename(from_dir))
invalidate_hash(dirname(from_dir))
try:
renamer(from_dir, to_dir)
renamer(from_dir, to_dir, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
to_dir = "%s-%s" % (to_dir, uuid.uuid4().hex)
renamer(from_dir, to_dir)
renamer(from_dir, to_dir, fsync=False)
return to_dir
@ -345,6 +345,8 @@ def invalidate_hash(suffix_dir):
suffix = basename(suffix_dir)
partition_dir = dirname(suffix_dir)
hashes_file = join(partition_dir, HASH_FILE)
if not os.path.exists(hashes_file):
return
with lock_path(partition_dir):
try:
with open(hashes_file, 'rb') as fp:

View File

@ -242,7 +242,7 @@ class ObjectReplicator(Daemon):
for node in job['nodes']:
kwargs = {}
if node['region'] in synced_remote_regions and \
self.conf.get('sync_method') == 'ssync':
self.conf.get('sync_method', 'rsync') == 'ssync':
kwargs['remote_check_objs'] = \
synced_remote_regions[node['region']]
# cand_objs is a list of objects for deletion
@ -273,11 +273,12 @@ class ObjectReplicator(Daemon):
delete_handoff = len(responses) == len(job['nodes']) and \
all(responses)
if delete_handoff:
if delete_objs:
if (self.conf.get('sync_method', 'rsync') == 'ssync' and
delete_objs is not None):
self.logger.info(_("Removing %s objects"),
len(delete_objs))
self.delete_handoff_objs(job, delete_objs)
elif self.conf.get('sync_method') == 'rsync':
else:
self.delete_partition(job['path'])
elif not suffixes:
self.delete_partition(job['path'])

View File

@ -216,9 +216,9 @@ class ObjectUpdater(Daemon):
self.logger.exception(
_('ERROR Pickle problem, quarantining %s'), update_path)
self.logger.increment('quarantines')
renamer(update_path, os.path.join(
device, 'quarantined', 'objects',
os.path.basename(update_path)))
target_path = os.path.join(device, 'quarantined', 'objects',
os.path.basename(update_path))
renamer(update_path, target_path, fsync=False)
return
successes = update.get('successes', [])
part, nodes = self.get_container_ring().get_nodes(

View File

@ -1009,8 +1009,7 @@ class Controller(object):
:param method: the method to send to the backend
:param path: the path to send to the backend
(full path ends up being /<$device>/<$part>/<$path>)
:param headers: a list of dicts, where each dict represents one
backend request that should be made.
:param headers: dictionary of headers
:param query: query string to send to the backend.
:param logger_thread_locals: The thread local values to be set on the
self.app.logger to retain transaction

View File

@ -625,6 +625,7 @@ def retry(func, *args, **kwargs):
os_options = {'user_domain_name': swift_test_domain[use_account],
'project_domain_name': swift_test_domain[use_account]}
while attempts <= retries:
auth_failure = False
attempts += 1
try:
if not url[use_account] or not token[use_account]:
@ -654,13 +655,15 @@ def retry(func, *args, **kwargs):
if service_user:
service_token[service_user] = None
except AuthError:
auth_failure = True
url[use_account] = token[use_account] = None
if service_user:
service_token[service_user] = None
except InternalServerError:
pass
if attempts <= retries:
sleep(backoff)
if not auth_failure:
sleep(backoff)
backoff *= 2
raise Exception('No result after %s retries.' % retries)

View File

@ -107,13 +107,13 @@ class TestContainerFailures(ReplProbeTest):
# because the one node that knew about the delete replicated to the
# others.)
for cnode in cnodes:
exc = None
try:
direct_client.direct_get_container(cnode, cpart, self.account,
container1)
except ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
self.assertEquals(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
# Assert account level also indicates container1 is gone
headers, containers = client.get_account(self.url, self.token)
@ -150,12 +150,12 @@ class TestContainerFailures(ReplProbeTest):
db_conn.execute('begin exclusive transaction')
db_conns.append(db_conn)
if catch_503:
exc = None
try:
client.delete_container(self.url, self.token, container)
except client.ClientException as err:
exc = err
self.assertEquals(exc.http_status, 503)
self.assertEquals(err.http_status, 503)
else:
self.fail("Expected ClientException but didn't get it")
else:
client.delete_container(self.url, self.token, container)

View File

@ -124,15 +124,15 @@ class TestEmptyDevice(ReplProbeTest):
# Assert that it doesn't have container/obj yet
self.assertFalse(os.path.exists(obj_dir))
exc = None
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
self.assertFalse(os.path.exists(obj_dir))
self.assertEquals(err.http_status, 404)
self.assertFalse(os.path.exists(obj_dir))
else:
self.fail("Expected ClientException but didn't get it")
try:
port_num = onode['replication_port']
@ -160,14 +160,14 @@ class TestEmptyDevice(ReplProbeTest):
'it returned: %s' % repr(odata))
# Assert the handoff server no longer has container/obj
exc = None
try:
direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
self.assertEquals(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
if __name__ == '__main__':
main()

View File

@ -91,14 +91,14 @@ class TestObjectHandoff(ReplProbeTest):
start_server(onode['port'], self.port2server, self.pids)
# Assert that it doesn't have container/obj yet
exc = None
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
self.assertEquals(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
# Run object replication, ensuring we run the handoff node last so it
# will remove its extra handoff partition
@ -125,14 +125,14 @@ class TestObjectHandoff(ReplProbeTest):
'it returned: %s' % repr(odata))
# Assert the handoff server no longer has container/obj
exc = None
try:
direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
self.assertEquals(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
# Kill the first container/obj primary server again (we have two
# primaries and the handoff up now)
@ -150,12 +150,12 @@ class TestObjectHandoff(ReplProbeTest):
self.assertEqual(503, err.http_status)
# Assert we can't head container/obj
exc = None
try:
client.head_object(self.url, self.token, container, obj)
except client.ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
self.assertEquals(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
# Assert container/obj is not in the container listing, both indirectly
# and directly
@ -193,14 +193,14 @@ class TestObjectHandoff(ReplProbeTest):
Manager(['object-replicator']).once(number=another_node_id)
# Assert primary node no longer has container/obj
exc = None
try:
direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
self.assertEquals(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
if __name__ == '__main__':

View File

@ -39,7 +39,7 @@ password3 = testing3
# the primary test account. The user must have a group/role that is unique
# and not given to the primary tester and is specified in the options
# <prefix>_require_group (tempauth) or <prefix>_service_roles (keystoneauth).
#account5 = service
#account5 = test5
#username5 = tester5
#password5 = testing5

View File

@ -56,6 +56,7 @@ class TestScout(unittest.TestCase):
def setUp(self, *_args, **_kwargs):
self.scout_instance = recon.Scout("type", suppress_errors=True)
self.url = 'http://127.0.0.1:8080/recon/type'
self.server_type_url = 'http://127.0.0.1:8080/'
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_ok(self, mock_urlopen):
@ -85,6 +86,37 @@ class TestScout(unittest.TestCase):
self.assertTrue(isinstance(content, urllib2.HTTPError))
self.assertEqual(status, 404)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_server_type_ok(self, mock_urlopen):
def getheader(name):
d = {'Server': 'server-type'}
return d.get(name)
mock_urlopen.return_value.info.return_value.getheader = getheader
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertEqual(url, self.server_type_url)
self.assertEqual(content, 'server-type')
self.assertEqual(status, 200)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_server_type_url_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.URLError("")
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertTrue(isinstance(content, urllib2.URLError))
self.assertEqual(url, self.server_type_url)
self.assertEqual(status, -1)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_server_type_http_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.HTTPError(
self.server_type_url, 404, "Internal error", None, None)
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertEqual(url, self.server_type_url)
self.assertTrue(isinstance(content, urllib2.HTTPError))
self.assertEqual(status, 404)
class TestRecon(unittest.TestCase):
def setUp(self, *_args, **_kwargs):
@ -289,6 +321,86 @@ class TestReconCommands(unittest.TestCase):
return mock.patch('eventlet.green.urllib2.urlopen', fake_urlopen)
def test_server_type_check(self):
hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6011),
('127.0.0.1', 6012)]
# sample json response from http://<host>:<port>/
responses = {6010: 'object-server', 6011: 'container-server',
6012: 'account-server'}
def mock_scout_server_type(app, host):
url = 'http://%s:%s/' % (host[0], host[1])
response = responses[host[1]]
status = 200
return url, response, status
stdout = StringIO()
patches = [
mock.patch('swift.cli.recon.Scout.scout_server_type',
mock_scout_server_type),
mock.patch('sys.stdout', new=stdout),
]
res_object = 'Invalid: http://127.0.0.1:6010/ is object-server'
res_container = 'Invalid: http://127.0.0.1:6011/ is container-server'
res_account = 'Invalid: http://127.0.0.1:6012/ is account-server'
valid = "1/1 hosts ok, 0 error[s] while checking hosts."
#Test for object server type - default
with nested(*patches):
self.recon.server_type_check(hosts)
output = stdout.getvalue()
self.assertTrue(res_container in output.splitlines())
self.assertTrue(res_account in output.splitlines())
stdout.truncate(0)
#Test ok for object server type - default
with nested(*patches):
self.recon.server_type_check([hosts[0]])
output = stdout.getvalue()
self.assertTrue(valid in output.splitlines())
stdout.truncate(0)
#Test for account server type
with nested(*patches):
self.recon.server_type = 'account'
self.recon.server_type_check(hosts)
output = stdout.getvalue()
self.assertTrue(res_container in output.splitlines())
self.assertTrue(res_object in output.splitlines())
stdout.truncate(0)
#Test ok for account server type
with nested(*patches):
self.recon.server_type = 'account'
self.recon.server_type_check([hosts[2]])
output = stdout.getvalue()
self.assertTrue(valid in output.splitlines())
stdout.truncate(0)
#Test for container server type
with nested(*patches):
self.recon.server_type = 'container'
self.recon.server_type_check(hosts)
output = stdout.getvalue()
self.assertTrue(res_account in output.splitlines())
self.assertTrue(res_object in output.splitlines())
stdout.truncate(0)
#Test ok for container server type
with nested(*patches):
self.recon.server_type = 'container'
self.recon.server_type_check([hosts[1]])
output = stdout.getvalue()
self.assertTrue(valid in output.splitlines())
def test_get_swiftconfmd5(self):
hosts = set([('10.1.1.1', 10000),
('10.2.2.2', 10000)])

View File

@ -21,7 +21,7 @@ from time import time
from swift.common.middleware import tempauth as auth
from swift.common.middleware.acl import format_acl
from swift.common.swob import Request, Response
from swift.common.utils import split_path
from swift.common.utils import split_path, get_swift_info
NO_CONTENT_RESP = (('204 No Content', {}, ''),) # mock server response
@ -110,6 +110,10 @@ class TestAuth(unittest.TestCase):
def setUp(self):
self.test_auth = auth.filter_factory({})(FakeApp())
def test_swift_info(self):
info = get_swift_info()
self.assertTrue(info['tempauth']['account_acls'])
def _make_request(self, path, **kwargs):
req = Request.blank(path, **kwargs)
req.environ['swift.cache'] = FakeMemcache()

View File

@ -738,7 +738,8 @@ class TestDatabaseBroker(unittest.TestCase):
dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
mkdirs(dbpath)
qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'tests', 'db')
with patch('swift.common.db.renamer', lambda a, b: b):
with patch('swift.common.db.renamer', lambda a, b,
fsync: b):
# Test malformed database
copy(os.path.join(os.path.dirname(__file__),
'malformed_example.db'),

View File

@ -563,7 +563,7 @@ class TestDBReplicator(unittest.TestCase):
self._patch(patch.object, replicator.brokerclass,
'get_repl_missing_table', True)
def mock_renamer(was, new, cause_colision=False):
def mock_renamer(was, new, fsync=False, cause_colision=False):
if cause_colision and '-' not in new:
raise OSError(errno.EEXIST, "File already exists")
self.assertEquals('/a/b/c/d/e', was)
@ -573,8 +573,8 @@ class TestDBReplicator(unittest.TestCase):
else:
self.assertEquals('/a/quarantined/containers/e', new)
def mock_renamer_error(was, new):
return mock_renamer(was, new, cause_colision=True)
def mock_renamer_error(was, new, fsync):
return mock_renamer(was, new, fsync, cause_colision=True)
with patch.object(db_replicator, 'renamer', mock_renamer):
replicator._replicate_object('0', 'file', 'node_id')
# try the double quarantine

View File

@ -2805,6 +2805,113 @@ cluster_dfw1 = http://dfw1.host/v1/
self.assertEqual(None, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = FakeLogger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp(dir='/tmp')
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir on parents of all newly create dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
class ResellerConfReader(unittest.TestCase):

View File

@ -117,14 +117,14 @@ def _mock_process(ret):
object_replicator.subprocess.Popen = orig_process
def _create_test_rings(path):
def _create_test_rings(path, devs=None):
testgz = os.path.join(path, 'object.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2, 3, 4, 5, 6],
[1, 2, 3, 0, 5, 6, 4],
[2, 3, 0, 1, 6, 4, 5],
]
intended_devs = [
intended_devs = devs or [
{'id': 0, 'device': 'sda', 'zone': 0,
'region': 1, 'ip': '127.0.0.0', 'port': 6000},
{'id': 1, 'device': 'sda', 'zone': 1,
@ -153,6 +153,8 @@ def _create_test_rings(path):
ring.RingData(intended_replica2part2dev_id,
intended_devs, intended_part_shift),
f)
for policy in POLICIES:
policy.object_ring = None # force reload
return
@ -418,6 +420,81 @@ class TestObjectReplicator(unittest.TestCase):
self.replicator.replicate()
self.assertFalse(os.access(part_path, os.F_OK))
def test_delete_partition_default_sync_method(self):
self.replicator.conf.pop('sync_method')
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write('1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
ring = self.replicator.get_object_ring(0)
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
process_arg_checker = []
for node in nodes:
rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
process_arg_checker.append(
(0, '', ['rsync', whole_path_from, rsync_mod]))
with _mock_process(process_arg_checker):
self.replicator.replicate()
self.assertFalse(os.access(part_path, os.F_OK))
def test_delete_partition_ssync_single_region(self):
devs = [
{'id': 0, 'device': 'sda', 'zone': 0,
'region': 1, 'ip': '127.0.0.0', 'port': 6000},
{'id': 1, 'device': 'sda', 'zone': 1,
'region': 1, 'ip': '127.0.0.1', 'port': 6000},
{'id': 2, 'device': 'sda', 'zone': 2,
'region': 1, 'ip': '127.0.0.2', 'port': 6000},
{'id': 3, 'device': 'sda', 'zone': 4,
'region': 1, 'ip': '127.0.0.3', 'port': 6000},
{'id': 4, 'device': 'sda', 'zone': 5,
'region': 1, 'ip': '127.0.0.4', 'port': 6000},
{'id': 5, 'device': 'sda', 'zone': 6,
'region': 1, 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6000},
{'id': 6, 'device': 'sda', 'zone': 7, 'region': 1,
'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6000},
]
_create_test_rings(self.testdir, devs=devs)
self.conf['sync_method'] = 'ssync'
self.replicator = object_replicator.ObjectReplicator(self.conf)
self.replicator.logger = debug_logger()
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write('1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
whole_path_from = storage_directory(self.objects, 1, ohash)
suffix_dir_path = os.path.dirname(whole_path_from)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
def _fake_ssync(node, job, suffixes, **kwargs):
return True, set([ohash])
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertFalse(os.access(suffix_dir_path, os.F_OK))
self.assertFalse(os.access(part_path, os.F_OK))
def test_delete_partition_1(self):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):