Merge master to feature/ec

Conflicts:
	requirements.txt
	swift/common/storage_policy.py
	swift/proxy/controllers/obj.py
	test/unit/common/test_storage_policy.py
	test/unit/proxy/test_server.py

Change-Id: I280015c82af13f04dc0741540dbb7c1fc3f51761
This commit is contained in:
paul luse 2015-03-24 16:04:23 -07:00
commit 5b11780a10
26 changed files with 1014 additions and 508 deletions

View File

@ -176,6 +176,7 @@ if __name__ == '__main__':
if not devices:
logger.error("Error: No devices found!")
recon_errors = {}
total_errors = 0
for device in devices:
recon_errors[device['mount_point']] = 0
errors = get_errors(error_re, log_file_pattern, minutes, logger)
@ -198,8 +199,10 @@ if __name__ == '__main__':
comment_fstab(mount_point)
unmounts += 1
recon_errors[mount_point] = count
total_errors += count
recon_file = recon_cache_path + "/drive.recon"
dump_recon_cache(recon_errors, recon_file, logger)
dump_recon_cache({'drive_audit_errors': total_errors}, recon_file, logger)
if unmounts == 0:
logger.info("No drives were unmounted")

View File

@ -70,6 +70,35 @@ When using the 'in-process test' mode, the optional in-memory
object server may be selected by setting the environment variable
``SWIFT_TEST_IN_MEMORY_OBJ`` to a true value.
The 'in-process test' mode searches for ``proxy-server.conf`` and
``swift.conf`` config files from which it copies config options and overrides
some options to suit in process testing. The search will first look for config
files in a ``<custom_conf_source_dir>`` that may optionally be specified using
the environment variable::
SWIFT_TEST_IN_PROCESS_CONF_DIR=<custom_conf_source_dir>
If ``SWIFT_TEST_IN_PROCESS_CONF_DIR`` is not set, or if a config file is not
found in ``<custom_conf_source_dir>``, the search will then look in the
``etc/`` directory in the source tree. If the config file is still not found,
the corresponding sample config file from ``etc/`` is used (e.g.
``proxy-server.conf-sample`` or ``swift.conf-sample``).
The environment variable ``SWIFT_TEST_POLICY`` may be set to specify
a particular storage policy *name* that will be used for testing. When set,
this policy must exist in the ``swift.conf`` file and its corresponding ring
file must exist in ``<custom_conf_source_dir>`` (if specified) or ``etc/``. The
test setup will set the specified policy to be the default and use its ring
file properties for constructing the test object ring. This allows in-process
testing to be run against various policy types and ring files.
For example, this command would run the in-process mode functional tests
using config files found in ``$HOME/my_tests`` and policy 'silver'::
SWIFT_TEST_IN_PROCESS=1 SWIFT_TEST_IN_PROCESS_CONF_DIR=$HOME/my_tests \
SWIFT_TEST_POLICY=silver tox -e func
------------
Coding Style
------------

View File

@ -9,4 +9,4 @@ netifaces>=0.5,!=0.10.0,!=0.10.1
pastedeploy>=1.3.3
simplejson>=2.0.9
xattr>=0.4
pyeclib>=0.9.2
PyECLib>=1.0.3

View File

@ -330,7 +330,7 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
:param swift_dir: the path on disk to rings
:param policy_name: optionally the name to use when finding the ring
"""
if not os.path.exists(datafile) or not datafile.endswith('.data'):
if not os.path.exists(datafile):
print "Data file doesn't exist"
raise InfoSystemExit()
if not datafile.startswith(('/', './')):

View File

@ -330,6 +330,27 @@ class SwiftRecon(object):
print("[async_pending] - No hosts returned valid data.")
print("=" * 79)
def driveaudit_check(self, hosts):
    """
    Obtain and print drive audit error statistics.

    :param hosts: set of hosts to check, e.g.
                  set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
    """
    # Scout fetches the 'driveaudit' recon endpoint from each host.
    scout = Scout("driveaudit", self.verbose, self.suppress_errors,
                  self.timeout)
    print("[%s] Checking drive-audit errors" % self._ptime())
    errors_by_host = {}
    for url, response, status in self.pool.imap(scout.scout, hosts):
        if status == 200:
            errors_by_host[url] = response['drive_audit_errors']
    # Aggregate per-host counts into min/max/avg-style stats for display.
    stats = self._gen_stats(errors_by_host.values(), 'drive_audit_errors')
    if stats['reported'] > 0:
        self._print_stats(stats)
    else:
        print("[drive_audit_errors] - No hosts returned valid data.")
    print("=" * 79)
def umount_check(self, hosts):
"""
Check for and print unmounted drives
@ -930,6 +951,8 @@ class SwiftRecon(object):
"local copy")
args.add_option('--sockstat', action="store_true",
help="Get cluster socket usage stats")
args.add_option('--driveaudit', action="store_true",
help="Get drive audit error stats")
args.add_option('--top', type='int', metavar='COUNT', default=0,
help='Also show the top COUNT entries in rank order.')
args.add_option('--all', action="store_true",
@ -992,6 +1015,7 @@ class SwiftRecon(object):
self.quarantine_check(hosts)
self.socket_usage(hosts)
self.server_type_check(hosts)
self.driveaudit_check(hosts)
else:
if options.async:
if self.server_type == 'object':
@ -1033,6 +1057,8 @@ class SwiftRecon(object):
self.quarantine_check(hosts)
if options.sockstat:
self.socket_usage(hosts)
if options.driveaudit:
self.driveaudit_check(hosts)
def main():

View File

@ -53,6 +53,8 @@ class ReconMiddleware(object):
'container.recon')
self.account_recon_cache = os.path.join(self.recon_cache_path,
'account.recon')
self.drive_recon_cache = os.path.join(self.recon_cache_path,
'drive.recon')
self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
self.rings = [self.account_ring_path, self.container_ring_path]
@ -124,6 +126,11 @@ class ReconMiddleware(object):
return self._from_recon_cache(['async_pending'],
self.object_recon_cache)
def get_driveaudit_error(self):
    """Return the drive audit error count from the drive recon cache."""
    # Only one key is published by swift-drive-audit into drive.recon.
    keys = ['drive_audit_errors']
    return self._from_recon_cache(keys, self.drive_recon_cache)
def get_replication_info(self, recon_type):
"""get replication info"""
if recon_type == 'account':
@ -359,6 +366,8 @@ class ReconMiddleware(object):
content = self.get_socket_info()
elif rcheck == "version":
content = self.get_version()
elif rcheck == "driveaudit":
content = self.get_driveaudit_error()
else:
content = "Invalid path: %s" % req.path
return Response(request=req, status="404 Not Found",

View File

@ -399,7 +399,7 @@ class TempAuth(object):
s = base64.encodestring(hmac.new(key, msg, sha1).digest()).strip()
if s != sign:
return None
groups = self._get_user_groups(account, account_user)
groups = self._get_user_groups(account, account_user, account_id)
return groups

View File

@ -185,6 +185,12 @@ class StoragePolicy(object):
raise NotImplementedError("quorum_size is undefined for base "
"StoragePolicy class ")
def get_options(self):
    """Return the valid conf file options for this policy."""
    # Map conf-file option names to the policy's current attribute values.
    options = {}
    options['name'] = self.name
    options['default'] = self.is_default
    options['deprecated'] = self.is_deprecated
    return options
@property
def stores_objects_verbatim(self):
    # Abstract flag: concrete policy subclasses must declare whether
    # client object bodies are stored as-is (replication-style) or
    # transformed before storage (e.g. erasure-coded fragments).
    raise NotImplementedError

View File

@ -936,6 +936,10 @@ class Request(object):
if entity_path is not None:
return '/' + entity_path
@property
def is_chunked(self):
    """True if the Transfer-Encoding header indicates a chunked body."""
    # Header lookup is case-insensitive via the request's headers mapping;
    # an absent header is treated as an empty encoding list.
    encoding = self.headers.get('transfer-encoding', '')
    return 'chunked' in encoding
@property
def url(self):
"Provides the full url of the request"

View File

@ -1062,19 +1062,21 @@ class NullLogger(object):
class LoggerFileObject(object):
def __init__(self, logger):
def __init__(self, logger, log_type='STDOUT'):
self.logger = logger
self.log_type = log_type
def write(self, value):
value = value.strip()
if value:
if 'Connection reset by peer' in value:
self.logger.error(_('STDOUT: Connection reset by peer'))
self.logger.error(
_('%s: Connection reset by peer'), self.log_type)
else:
self.logger.error(_('STDOUT: %s'), value)
self.logger.error(_('%s: %s'), self.log_type, value)
def writelines(self, values):
self.logger.error(_('STDOUT: %s'), '#012'.join(values))
self.logger.error(_('%s: %s'), self.log_type, '#012'.join(values))
def close(self):
pass
@ -1527,11 +1529,11 @@ def get_logger(conf, name=None, log_to_console=False, log_route=None,
logger_hook(conf, name, log_to_console, log_route, fmt,
logger, adapted_logger)
except (AttributeError, ImportError):
print(
'Error calling custom handler [%s]' % hook,
file=sys.stderr)
print('Error calling custom handler [%s]' % hook,
file=sys.stderr)
except ValueError:
print('Invalid custom handler format [%s]' % hook, sys.stderr)
print('Invalid custom handler format [%s]' % hook,
file=sys.stderr)
# Python 2.6 has the undesirable property of keeping references to all log
# handlers around forever in logging._handlers and logging._handlerList.
@ -1641,7 +1643,7 @@ def capture_stdio(logger, **kwargs):
if kwargs.pop('capture_stdout', True):
sys.stdout = LoggerFileObject(logger)
if kwargs.pop('capture_stderr', True):
sys.stderr = LoggerFileObject(logger)
sys.stderr = LoggerFileObject(logger, 'STDERR')
def parse_options(parser=None, once=False, test_args=None):

View File

@ -158,6 +158,8 @@ class ContainerBroker(DatabaseBroker):
if not self.container:
raise ValueError(
'Attempting to create a new database with no container set')
if storage_policy_index is None:
storage_policy_index = 0
self.create_object_table(conn)
self.create_policy_stat_table(conn, storage_policy_index)
self.create_container_info_table(conn, put_timestamp,

View File

@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: swift 2.2.2.post96\n"
"Project-Id-Version: swift 2.2.2.post136\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-02-27 06:14+0000\n"
"POT-Creation-Date: 2015-03-24 06:06+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@ -63,8 +63,8 @@ msgstr ""
msgid "ERROR Could not get account info %s"
msgstr ""
#: swift/account/reaper.py:133 swift/common/utils.py:2058
#: swift/obj/diskfile.py:468 swift/obj/updater.py:87 swift/obj/updater.py:130
#: swift/account/reaper.py:133 swift/common/utils.py:2127
#: swift/obj/diskfile.py:470 swift/obj/updater.py:87 swift/obj/updater.py:130
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr ""
@ -386,86 +386,92 @@ msgstr ""
msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
#: swift/common/utils.py:1005
msgid "STDOUT: Connection reset by peer"
msgstr ""
#: swift/common/utils.py:1007 swift/common/utils.py:1010
#: swift/common/utils.py:662
#, python-format
msgid "STDOUT: %s"
msgid "Unable to perform fsync() on directory %s: %s"
msgstr ""
#: swift/common/utils.py:1245
#: swift/common/utils.py:1074
#, python-format
msgid "%s: Connection reset by peer"
msgstr ""
#: swift/common/utils.py:1076 swift/common/utils.py:1079
#, python-format
msgid "%s: %s"
msgstr ""
#: swift/common/utils.py:1314
msgid "Connection refused"
msgstr ""
#: swift/common/utils.py:1247
#: swift/common/utils.py:1316
msgid "Host unreachable"
msgstr ""
#: swift/common/utils.py:1249
#: swift/common/utils.py:1318
msgid "Connection timeout"
msgstr ""
#: swift/common/utils.py:1551
#: swift/common/utils.py:1620
msgid "UNCAUGHT EXCEPTION"
msgstr ""
#: swift/common/utils.py:1606
#: swift/common/utils.py:1675
msgid "Error: missing config path argument"
msgstr ""
#: swift/common/utils.py:1611
#: swift/common/utils.py:1680
#, python-format
msgid "Error: unable to locate %s"
msgstr ""
#: swift/common/utils.py:1919
#: swift/common/utils.py:1988
#, python-format
msgid "Unable to read config from %s"
msgstr ""
#: swift/common/utils.py:1925
#: swift/common/utils.py:1994
#, python-format
msgid "Unable to find %s config section in %s"
msgstr ""
#: swift/common/utils.py:2279
#: swift/common/utils.py:2348
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr ""
#: swift/common/utils.py:2284
#: swift/common/utils.py:2353
#, python-format
msgid "No realm key for %r"
msgstr ""
#: swift/common/utils.py:2288
#: swift/common/utils.py:2357
#, python-format
msgid "No cluster endpoint for %r %r"
msgstr ""
#: swift/common/utils.py:2297
#: swift/common/utils.py:2366
#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
msgstr ""
#: swift/common/utils.py:2301
#: swift/common/utils.py:2370
msgid "Path required in X-Container-Sync-To"
msgstr ""
#: swift/common/utils.py:2304
#: swift/common/utils.py:2373
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr ""
#: swift/common/utils.py:2309
#: swift/common/utils.py:2378
#, python-format
msgid "Invalid host %r in X-Container-Sync-To"
msgstr ""
#: swift/common/utils.py:2501
#: swift/common/utils.py:2570
msgid "Exception dumping recon cache"
msgstr ""
@ -697,8 +703,8 @@ msgstr ""
msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr ""
#: swift/container/updater.py:91 swift/obj/replicator.py:483
#: swift/obj/replicator.py:569
#: swift/container/updater.py:91 swift/obj/replicator.py:484
#: swift/obj/replicator.py:570
#, python-format
msgid "%s is not mounted"
msgstr ""
@ -815,31 +821,31 @@ msgstr ""
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr ""
#: swift/obj/diskfile.py:407
#: swift/obj/diskfile.py:409
msgid "Error hashing suffix"
msgstr ""
#: swift/obj/diskfile.py:482 swift/obj/updater.py:169
#: swift/obj/diskfile.py:484 swift/obj/updater.py:169
#, python-format
msgid "Directory %s does not map to a valid policy"
msgstr ""
#: swift/obj/diskfile.py:676
#: swift/obj/diskfile.py:678
#, python-format
msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
msgstr ""
#: swift/obj/diskfile.py:867
#: swift/obj/diskfile.py:869
#, python-format
msgid "Problem cleaning up %s"
msgstr ""
#: swift/obj/diskfile.py:1166
#: swift/obj/diskfile.py:1168
#, python-format
msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
msgstr ""
#: swift/obj/diskfile.py:1447
#: swift/obj/diskfile.py:1449
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata "
@ -902,93 +908,93 @@ msgstr ""
msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
msgstr ""
#: swift/obj/replicator.py:277
#: swift/obj/replicator.py:278
#, python-format
msgid "Removing %s objects"
msgstr ""
#: swift/obj/replicator.py:285
#: swift/obj/replicator.py:286
msgid "Error syncing handoff partition"
msgstr ""
#: swift/obj/replicator.py:291
#: swift/obj/replicator.py:292
#, python-format
msgid "Removing partition: %s"
msgstr ""
#: swift/obj/replicator.py:346
#: swift/obj/replicator.py:347
#, python-format
msgid "%(ip)s/%(device)s responded as unmounted"
msgstr ""
#: swift/obj/replicator.py:351
#: swift/obj/replicator.py:352
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr ""
#: swift/obj/replicator.py:386
#: swift/obj/replicator.py:387
#, python-format
msgid "Error syncing with node: %s"
msgstr ""
#: swift/obj/replicator.py:390
#: swift/obj/replicator.py:391
msgid "Error syncing partition"
msgstr ""
#: swift/obj/replicator.py:403
#: swift/obj/replicator.py:404
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
#: swift/obj/replicator.py:414
#: swift/obj/replicator.py:415
#, python-format
msgid ""
"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
"synced"
msgstr ""
#: swift/obj/replicator.py:421
#: swift/obj/replicator.py:422
#, python-format
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
#: swift/obj/replicator.py:429
#: swift/obj/replicator.py:430
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr ""
#: swift/obj/replicator.py:458
#: swift/obj/replicator.py:459
msgid "Lockup detected.. killing live coros."
msgstr ""
#: swift/obj/replicator.py:572
#: swift/obj/replicator.py:573
msgid "Ring change detected. Aborting current replication pass."
msgstr ""
#: swift/obj/replicator.py:593
#: swift/obj/replicator.py:594
msgid "Exception in top-level replication loop"
msgstr ""
#: swift/obj/replicator.py:602
#: swift/obj/replicator.py:603
msgid "Running object replicator in script mode."
msgstr ""
#: swift/obj/replicator.py:620
#: swift/obj/replicator.py:621
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr ""
#: swift/obj/replicator.py:627
#: swift/obj/replicator.py:628
msgid "Starting object replicator in daemon mode."
msgstr ""
#: swift/obj/replicator.py:631
#: swift/obj/replicator.py:632
msgid "Starting object replication pass."
msgstr ""
#: swift/obj/replicator.py:636
#: swift/obj/replicator.py:637
#, python-format
msgid "Object replication complete. (%.02f minutes)"
msgstr ""
@ -1085,9 +1091,9 @@ msgid "Account"
msgstr ""
#: swift/proxy/controllers/base.py:698 swift/proxy/controllers/base.py:731
#: swift/proxy/controllers/obj.py:191 swift/proxy/controllers/obj.py:318
#: swift/proxy/controllers/obj.py:358 swift/proxy/controllers/obj.py:376
#: swift/proxy/controllers/obj.py:502
#: swift/proxy/controllers/obj.py:192 swift/proxy/controllers/obj.py:319
#: swift/proxy/controllers/obj.py:366 swift/proxy/controllers/obj.py:382
#: swift/proxy/controllers/obj.py:593
msgid "Object"
msgstr ""
@ -1112,13 +1118,13 @@ msgstr ""
msgid "Trying to send to client"
msgstr ""
#: swift/proxy/controllers/base.py:780 swift/proxy/controllers/base.py:1050
#: swift/proxy/controllers/base.py:780 swift/proxy/controllers/base.py:1049
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr ""
#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1038
#: swift/proxy/controllers/obj.py:350 swift/proxy/controllers/obj.py:390
#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1037
#: swift/proxy/controllers/obj.py:357 swift/proxy/controllers/obj.py:402
msgid "ERROR Insufficient Storage"
msgstr ""
@ -1127,86 +1133,74 @@ msgstr ""
msgid "ERROR %(status)d %(body)s From %(type)s Server"
msgstr ""
#: swift/proxy/controllers/base.py:1041
#: swift/proxy/controllers/base.py:1040
#, python-format
msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server"
msgstr ""
#: swift/proxy/controllers/base.py:1153
#: swift/proxy/controllers/base.py:1152
#, python-format
msgid "%(type)s returning 503 for %(statuses)s"
msgstr ""
#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:117
#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:118
msgid "Container"
msgstr ""
#: swift/proxy/controllers/obj.py:319
#: swift/proxy/controllers/obj.py:320
#, python-format
msgid "Trying to write to %s"
msgstr ""
#: swift/proxy/controllers/obj.py:353
#: swift/proxy/controllers/obj.py:361
#, python-format
msgid "ERROR %(status)d Expect: 100-continue From Object Server"
msgstr ""
#: swift/proxy/controllers/obj.py:359
#: swift/proxy/controllers/obj.py:367
#, python-format
msgid "Expect: 100-continue on %s"
msgstr ""
#: swift/proxy/controllers/obj.py:377
#: swift/proxy/controllers/obj.py:383
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr ""
#: swift/proxy/controllers/obj.py:394
#: swift/proxy/controllers/obj.py:406
#, python-format
msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
msgstr ""
#: swift/proxy/controllers/obj.py:665
#: swift/proxy/controllers/obj.py:663
#, python-format
msgid "Object PUT returning 412, %(statuses)r"
msgstr ""
#: swift/proxy/controllers/obj.py:674
#: swift/proxy/controllers/obj.py:672
#, python-format
msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
msgstr ""
#: swift/proxy/controllers/obj.py:682
#, python-format
msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections"
msgstr ""
#: swift/proxy/controllers/obj.py:713
#, python-format
msgid ""
"Object PUT exceptions during send, %(conns)s/%(nodes)s required "
"connections"
msgstr ""
#: swift/proxy/controllers/obj.py:724
#: swift/proxy/controllers/obj.py:755
#, python-format
msgid "ERROR Client read timeout (%ss)"
msgstr ""
#: swift/proxy/controllers/obj.py:729
#: swift/proxy/controllers/obj.py:762
msgid "ERROR Exception causing client disconnect"
msgstr ""
#: swift/proxy/controllers/obj.py:734
#: swift/proxy/controllers/obj.py:767
msgid "Client disconnected without sending enough data"
msgstr ""
#: swift/proxy/controllers/obj.py:743
#: swift/proxy/controllers/obj.py:813
#, python-format
msgid "Object servers returned %s mismatched etags"
msgstr ""
#: swift/proxy/controllers/obj.py:747
#: swift/proxy/controllers/obj.py:817
msgid "Object PUT"
msgstr ""

View File

@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-02-27 06:14+0000\n"
"PO-Revision-Date: 2015-02-25 18:23+0000\n"
"POT-Creation-Date: 2015-03-24 06:06+0000\n"
"PO-Revision-Date: 2015-03-24 04:20+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: Chinese (China) "
"(http://www.transifex.com/projects/p/swift/language/zh_CN/)\n"
@ -65,8 +65,8 @@ msgstr "审计失败%s: %s"
msgid "ERROR Could not get account info %s"
msgstr "错误:无法获取账号信息%s"
#: swift/account/reaper.py:133 swift/common/utils.py:2058
#: swift/obj/diskfile.py:468 swift/obj/updater.py:87 swift/obj/updater.py:130
#: swift/account/reaper.py:133 swift/common/utils.py:2127
#: swift/obj/diskfile.py:470 swift/obj/updater.py:87 swift/obj/updater.py:130
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "挂载失败 跳过%s"
@ -390,86 +390,92 @@ msgstr "无法查询到%s 保留为no-op"
msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr "无法查询到fallocate posix_fallocate。保存为no-op"
#: swift/common/utils.py:1005
msgid "STDOUT: Connection reset by peer"
msgstr "STDOUT连接被peer重新设置"
#: swift/common/utils.py:1007 swift/common/utils.py:1010
#: swift/common/utils.py:662
#, python-format
msgid "STDOUT: %s"
msgstr "STDOUT: %s"
msgid "Unable to perform fsync() on directory %s: %s"
msgstr ""
#: swift/common/utils.py:1245
#: swift/common/utils.py:1074
#, python-format
msgid "%s: Connection reset by peer"
msgstr ""
#: swift/common/utils.py:1076 swift/common/utils.py:1079
#, python-format
msgid "%s: %s"
msgstr ""
#: swift/common/utils.py:1314
msgid "Connection refused"
msgstr "连接被拒绝"
#: swift/common/utils.py:1247
#: swift/common/utils.py:1316
msgid "Host unreachable"
msgstr "无法连接到主机"
#: swift/common/utils.py:1249
#: swift/common/utils.py:1318
msgid "Connection timeout"
msgstr "连接超时"
#: swift/common/utils.py:1551
#: swift/common/utils.py:1620
msgid "UNCAUGHT EXCEPTION"
msgstr "未捕获的异常"
#: swift/common/utils.py:1606
#: swift/common/utils.py:1675
msgid "Error: missing config path argument"
msgstr "错误:设置路径信息丢失"
#: swift/common/utils.py:1611
#: swift/common/utils.py:1680
#, python-format
msgid "Error: unable to locate %s"
msgstr "错误:无法查询到 %s"
#: swift/common/utils.py:1919
#: swift/common/utils.py:1988
#, python-format
msgid "Unable to read config from %s"
msgstr "无法从%s读取设置"
#: swift/common/utils.py:1925
#: swift/common/utils.py:1994
#, python-format
msgid "Unable to find %s config section in %s"
msgstr "无法在%s中查找到%s设置部分"
#: swift/common/utils.py:2279
#: swift/common/utils.py:2348
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "无效的X-Container-Sync-To格式%r"
#: swift/common/utils.py:2284
#: swift/common/utils.py:2353
#, python-format
msgid "No realm key for %r"
msgstr "%r权限key不存在"
#: swift/common/utils.py:2288
#: swift/common/utils.py:2357
#, python-format
msgid "No cluster endpoint for %r %r"
msgstr "%r %r的集群节点不存在"
#: swift/common/utils.py:2297
#: swift/common/utils.py:2366
#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
msgstr "在X-Container-Sync-To中%r是无效的方案须为\"//\", \"http\", or \"https\"。"
#: swift/common/utils.py:2301
#: swift/common/utils.py:2370
msgid "Path required in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中路径是必须的"
#: swift/common/utils.py:2304
#: swift/common/utils.py:2373
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中变量查询和碎片不被允许"
#: swift/common/utils.py:2309
#: swift/common/utils.py:2378
#, python-format
msgid "Invalid host %r in X-Container-Sync-To"
msgstr "X-Container-Sync-To中无效主机%r"
#: swift/common/utils.py:2501
#: swift/common/utils.py:2570
msgid "Exception dumping recon cache"
msgstr "执行dump recon的时候出现异常"
@ -705,8 +711,8 @@ msgstr "同步错误 %(db_file)s %(row)s"
msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr "%s未挂载"
#: swift/container/updater.py:91 swift/obj/replicator.py:483
#: swift/obj/replicator.py:569
#: swift/container/updater.py:91 swift/obj/replicator.py:484
#: swift/obj/replicator.py:570
#, python-format
msgid "%s is not mounted"
msgstr "%s未挂载"
@ -833,31 +839,31 @@ msgstr "审计错误:%s"
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录"
#: swift/obj/diskfile.py:407
#: swift/obj/diskfile.py:409
msgid "Error hashing suffix"
msgstr "执行Hashing后缀时发生错误"
#: swift/obj/diskfile.py:482 swift/obj/updater.py:169
#: swift/obj/diskfile.py:484 swift/obj/updater.py:169
#, python-format
msgid "Directory %s does not map to a valid policy"
msgstr "目录%s无法映射到一个有效的policy"
#: swift/obj/diskfile.py:676
#: swift/obj/diskfile.py:678
#, python-format
msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
msgstr "隔离%(object_path)s和%(quar_path)s因为非目录"
#: swift/obj/diskfile.py:867
#: swift/obj/diskfile.py:869
#, python-format
msgid "Problem cleaning up %s"
msgstr "问题清除%s"
#: swift/obj/diskfile.py:1166
#: swift/obj/diskfile.py:1168
#, python-format
msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s"
#: swift/obj/diskfile.py:1447
#: swift/obj/diskfile.py:1449
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata "
@ -920,40 +926,40 @@ msgstr "Bad rsync返还代码%(ret)d <- %(args)s"
msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)"
#: swift/obj/replicator.py:277
#: swift/obj/replicator.py:278
#, python-format
msgid "Removing %s objects"
msgstr ""
#: swift/obj/replicator.py:285
#: swift/obj/replicator.py:286
msgid "Error syncing handoff partition"
msgstr "执行同步切换分区时发生错误"
#: swift/obj/replicator.py:291
#: swift/obj/replicator.py:292
#, python-format
msgid "Removing partition: %s"
msgstr "移除分区:%s"
#: swift/obj/replicator.py:346
#: swift/obj/replicator.py:347
#, python-format
msgid "%(ip)s/%(device)s responded as unmounted"
msgstr "%(ip)s/%(device)s的回应为未挂载"
#: swift/obj/replicator.py:351
#: swift/obj/replicator.py:352
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "无效的回应%(resp)s来自%(ip)s"
#: swift/obj/replicator.py:386
#: swift/obj/replicator.py:387
#, python-format
msgid "Error syncing with node: %s"
msgstr "执行同步时节点%s发生错误"
#: swift/obj/replicator.py:390
#: swift/obj/replicator.py:391
msgid "Error syncing partition"
msgstr "执行同步分区时发生错误"
#: swift/obj/replicator.py:403
#: swift/obj/replicator.py:404
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -962,53 +968,53 @@ msgstr ""
"%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n"
"\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
#: swift/obj/replicator.py:414
#: swift/obj/replicator.py:415
#, python-format
msgid ""
"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
"synced"
msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced"
#: swift/obj/replicator.py:421
#: swift/obj/replicator.py:422
#, python-format
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs"
#: swift/obj/replicator.py:429
#: swift/obj/replicator.py:430
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "%s秒无复制"
#: swift/obj/replicator.py:458
#: swift/obj/replicator.py:459
msgid "Lockup detected.. killing live coros."
msgstr "检测到lockup。终止正在执行的coros"
#: swift/obj/replicator.py:572
#: swift/obj/replicator.py:573
msgid "Ring change detected. Aborting current replication pass."
msgstr "Ring改变被检测到。退出现有的复制通过"
#: swift/obj/replicator.py:593
#: swift/obj/replicator.py:594
msgid "Exception in top-level replication loop"
msgstr "top-level复制圈出现异常"
#: swift/obj/replicator.py:602
#: swift/obj/replicator.py:603
msgid "Running object replicator in script mode."
msgstr "在加密模式下执行对象复制"
#: swift/obj/replicator.py:620
#: swift/obj/replicator.py:621
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "对象复制完成(一次)。(%.02f minutes)"
#: swift/obj/replicator.py:627
#: swift/obj/replicator.py:628
msgid "Starting object replicator in daemon mode."
msgstr "在守护模式下开始对象复制"
#: swift/obj/replicator.py:631
#: swift/obj/replicator.py:632
msgid "Starting object replication pass."
msgstr "开始通过对象复制"
#: swift/obj/replicator.py:636
#: swift/obj/replicator.py:637
#, python-format
msgid "Object replication complete. (%.02f minutes)"
msgstr "对象复制完成。(%.02f minutes)"
@ -1105,9 +1111,9 @@ msgid "Account"
msgstr "账号"
#: swift/proxy/controllers/base.py:698 swift/proxy/controllers/base.py:731
#: swift/proxy/controllers/obj.py:191 swift/proxy/controllers/obj.py:318
#: swift/proxy/controllers/obj.py:358 swift/proxy/controllers/obj.py:376
#: swift/proxy/controllers/obj.py:502
#: swift/proxy/controllers/obj.py:192 swift/proxy/controllers/obj.py:319
#: swift/proxy/controllers/obj.py:366 swift/proxy/controllers/obj.py:382
#: swift/proxy/controllers/obj.py:593
msgid "Object"
msgstr "对象"
@ -1132,13 +1138,13 @@ msgstr "客户读取时中断"
msgid "Trying to send to client"
msgstr "尝试发送到客户端"
#: swift/proxy/controllers/base.py:780 swift/proxy/controllers/base.py:1050
#: swift/proxy/controllers/base.py:780 swift/proxy/controllers/base.py:1049
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "尝试执行%(method)s %(path)s"
#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1038
#: swift/proxy/controllers/obj.py:350 swift/proxy/controllers/obj.py:390
#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1037
#: swift/proxy/controllers/obj.py:357 swift/proxy/controllers/obj.py:402
msgid "ERROR Insufficient Storage"
msgstr "错误 存储空间不足"
@ -1147,86 +1153,74 @@ msgstr "错误 存储空间不足"
msgid "ERROR %(status)d %(body)s From %(type)s Server"
msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器"
#: swift/proxy/controllers/base.py:1041
#: swift/proxy/controllers/base.py:1040
#, python-format
msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server"
msgstr ""
#: swift/proxy/controllers/base.py:1153
#: swift/proxy/controllers/base.py:1152
#, python-format
msgid "%(type)s returning 503 for %(statuses)s"
msgstr "%(type)s 返回 503 在 %(statuses)s"
#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:117
#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:118
msgid "Container"
msgstr "容器"
#: swift/proxy/controllers/obj.py:319
#: swift/proxy/controllers/obj.py:320
#, python-format
msgid "Trying to write to %s"
msgstr "尝试执行书写%s"
#: swift/proxy/controllers/obj.py:353
#: swift/proxy/controllers/obj.py:361
#, python-format
msgid "ERROR %(status)d Expect: 100-continue From Object Server"
msgstr ""
#: swift/proxy/controllers/obj.py:359
#: swift/proxy/controllers/obj.py:367
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "已知100-continue on %s"
#: swift/proxy/controllers/obj.py:377
#: swift/proxy/controllers/obj.py:383
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "尝试执行获取最后的PUT状态%s"
#: swift/proxy/controllers/obj.py:394
#: swift/proxy/controllers/obj.py:406
#, python-format
msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s"
#: swift/proxy/controllers/obj.py:665
#: swift/proxy/controllers/obj.py:663
#, python-format
msgid "Object PUT returning 412, %(statuses)r"
msgstr "对象PUT返还 412%(statuses)r "
#: swift/proxy/controllers/obj.py:674
#: swift/proxy/controllers/obj.py:672
#, python-format
msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
msgstr ""
#: swift/proxy/controllers/obj.py:682
#, python-format
msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections"
msgstr "对象PUT返回503%(conns)s/%(nodes)s 请求连接"
#: swift/proxy/controllers/obj.py:713
#, python-format
msgid ""
"Object PUT exceptions during send, %(conns)s/%(nodes)s required "
"connections"
msgstr "对象PUT发送时出现异常%(conns)s/%(nodes)s请求连接"
#: swift/proxy/controllers/obj.py:724
#: swift/proxy/controllers/obj.py:755
#, python-format
msgid "ERROR Client read timeout (%ss)"
msgstr "错误 客户读取超时(%ss)"
#: swift/proxy/controllers/obj.py:729
#: swift/proxy/controllers/obj.py:762
msgid "ERROR Exception causing client disconnect"
msgstr "错误 异常导致客户端中断连接"
#: swift/proxy/controllers/obj.py:734
#: swift/proxy/controllers/obj.py:767
msgid "Client disconnected without sending enough data"
msgstr "客户中断 尚未发送足够"
#: swift/proxy/controllers/obj.py:743
#: swift/proxy/controllers/obj.py:813
#, python-format
msgid "Object servers returned %s mismatched etags"
msgstr "对象服务器返还%s不匹配etags"
#: swift/proxy/controllers/obj.py:747
#: swift/proxy/controllers/obj.py:817
msgid "Object PUT"
msgstr "对象上传"

View File

@ -53,17 +53,17 @@ from swift.common.exceptions import ChunkReadTimeout, \
InsufficientStorage, FooterNotSupported, MultiphasePUTNotSupported, \
PutterConnectError
from swift.common.http import (
is_success, is_client_error, is_server_error, HTTP_CONTINUE,
HTTP_CREATED, HTTP_MULTIPLE_CHOICES, HTTP_NOT_FOUND,
HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE,
HTTP_INSUFFICIENT_STORAGE, HTTP_PRECONDITION_FAILED, HTTP_CONFLICT)
is_success, is_client_error, is_server_error, HTTP_CONTINUE, HTTP_CREATED,
HTTP_MULTIPLE_CHOICES, HTTP_NOT_FOUND, HTTP_INTERNAL_SERVER_ERROR,
HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE,
HTTP_PRECONDITION_FAILED, HTTP_CONFLICT)
from swift.common.storage_policy import POLICIES
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, Request, HeaderKeyDict, \
HTTPClientDisconnect, HTTPUnprocessableEntity, Response
HTTPClientDisconnect, HTTPUnprocessableEntity, Response, HTTPException
from swift.common.request_helpers import is_sys_or_user_meta, is_sys_meta, \
remove_items, copy_header_subset, close_if_possible
@ -796,57 +796,135 @@ class BaseObjectController(Controller):
chunk_index[p] = hole
return chunk_index
@public
@cors_validation
@delay_denial
def PUT(self, req):
"""HTTP PUT request handler."""
if req.if_none_match is not None and '*' not in req.if_none_match:
# Sending an etag with if-none-match isn't currently supported
return HTTPBadRequest(request=req, content_type='text/plain',
body='If-None-Match only supports *')
def _handle_copy_request(self, req):
    """
    This method handles copying objects based on values set in the headers
    'X-Copy-From' and 'X-Copy-From-Account'

    This method was added as part of the refactoring of the PUT method and
    the functionality is expected to be moved to middleware

    :param req: the PUT request carrying an 'X-Copy-From' header
    :returns: a 4-tuple (error_response, req, data_source, update_response).
              On source GET failure only error_response is set; otherwise
              error_response is None, req is the rewritten sink request,
              data_source iterates the source body, and update_response is
              a callable that decorates the final response with
              X-Copied-From-* headers.
    :raises HTTPRequestEntityTooLarge: if the source has no content-length
              (chunked) or exceeds MAX_FILE_SIZE
    """
    if req.environ.get('swift.orig_req_method', req.method) != 'POST':
        req.environ.setdefault('swift.log_info', []).append(
            'x-copy-from:%s' % req.headers['X-Copy-From'])
    ver, acct, _rest = req.split_path(2, 3, True)
    src_account_name = req.headers.get('X-Copy-From-Account', None)
    if src_account_name:
        src_account_name = check_account_format(req, src_account_name)
    else:
        # default to copying within the request's own account
        src_account_name = acct
    src_container_name, src_obj_name = check_copy_from_header(req)
    source_header = '/%s/%s/%s/%s' % (
        ver, src_account_name, src_container_name, src_obj_name)
    source_req = req.copy_get()

    # make sure the source request uses its own container_info
    source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
    source_req.path_info = source_header
    source_req.headers['X-Newest'] = 'true'

    # remember the sink names; they are temporarily swapped for the
    # source names so self.GET fetches the source object
    orig_obj_name = self.object_name
    orig_container_name = self.container_name
    orig_account_name = self.account_name

    sink_req = Request.blank(req.path_info,
                             environ=req.environ, headers=req.headers)

    self.object_name = src_obj_name
    self.container_name = src_container_name
    self.account_name = src_account_name

    source_resp = self.GET(source_req)

    # This gives middlewares a way to change the source; for example,
    # this lets you COPY a SLO manifest and have the new object be the
    # concatenation of the segments (like what a GET request gives
    # the client), not a copy of the manifest file.
    hook = req.environ.get(
        'swift.copy_hook',
        (lambda source_req, source_resp, sink_req: source_resp))
    source_resp = hook(source_req, source_resp, sink_req)

    # reset names
    self.object_name = orig_obj_name
    self.container_name = orig_container_name
    self.account_name = orig_account_name

    if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
        # this is a bit of ugly code, but I'm willing to live with it
        # until copy request handling moves to middleware
        return source_resp, None, None, None

    if source_resp.content_length is None:
        # This indicates a transfer-encoding: chunked source object,
        # which currently only happens because there are more than
        # CONTAINER_LISTING_LIMIT segments in a segmented object. In
        # this case, we're going to refuse to do the server-side copy.
        raise HTTPRequestEntityTooLarge(request=req)
    if source_resp.content_length > constraints.MAX_FILE_SIZE:
        raise HTTPRequestEntityTooLarge(request=req)

    data_source = iter(source_resp.app_iter)
    sink_req.content_length = source_resp.content_length
    sink_req.etag = source_resp.etag

    # we no longer need the X-Copy-From header
    del sink_req.headers['X-Copy-From']
    if 'X-Copy-From-Account' in sink_req.headers:
        del sink_req.headers['X-Copy-From-Account']

    # content_type_manually_set is stamped on req by _update_content_type
    if not req.content_type_manually_set:
        sink_req.headers['Content-Type'] = \
            source_resp.headers['Content-Type']
    if config_true_value(
            sink_req.headers.get('x-fresh-metadata', 'false')):
        # post-as-copy: ignore new sysmeta, copy existing sysmeta
        condition = lambda k: is_sys_meta('object', k)
        remove_items(sink_req.headers, condition)
        copy_header_subset(source_resp, sink_req, condition)
    else:
        # copy/update existing sysmeta and user meta
        copy_headers_into(source_resp, sink_req)
        copy_headers_into(req, sink_req)

    # copy over x-static-large-object for POSTs and manifest copies
    if 'X-Static-Large-Object' in source_resp.headers and \
            req.params.get('multipart-manifest') == 'get':
        sink_req.headers['X-Static-Large-Object'] = \
            source_resp.headers['X-Static-Large-Object']

    req = sink_req

    def update_response(req, resp):
        # annotate the final PUT response with the copy source details
        acct, path = source_resp.environ['PATH_INFO'].split('/', 3)[2:4]
        resp.headers['X-Copied-From-Account'] = quote(acct)
        resp.headers['X-Copied-From'] = quote(path)
        if 'last-modified' in source_resp.headers:
            resp.headers['X-Copied-From-Last-Modified'] = \
                source_resp.headers['last-modified']
        copy_headers_into(req, resp)
        return resp

    # this is a bit of ugly code, but I'm willing to live with it
    # until copy request handling moves to middleware
    return None, req, data_source, update_response
def _handle_object_versions(self, req):
"""
This method handles versionining of objects in containers that
have the feature enabled.
When a new PUT request is sent, the proxy checks for previous versions
of that same object name. If found, it is copied to a different
container and the new version is stored in its place.
This method was added as part of the PUT method refactoring and the
functionality is expected to be moved to middleware
"""
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_index = int(req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy']))
policy = POLICIES.get_by_index(policy_index)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
obj_ring = self.app.get_object_ring(policy_index)
# pass the policy index to storage nodes via req header
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
object_versions = container_info['versions']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not containers:
return HTTPNotFound(request=req)
# Sometimes the 'content-type' header exists, but is set to None.
content_type_manually_set = True
detect_content_type = \
config_true_value(req.headers.get('x-detect-content-type'))
if detect_content_type or not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
if detect_content_type:
req.headers.pop('x-detect-content-type')
else:
content_type_manually_set = False
error_response = check_object_creation(req, self.object_name) or \
check_content_type(req)
if error_response:
return error_response
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
object_versions = container_info['versions']
# do a HEAD request for checking object versions
if object_versions and not req.environ.get('swift_versioned_copy'):
@ -860,20 +938,6 @@ class BaseObjectController(Controller):
hreq, _('Object'), hnode_iter, partition,
hreq.swift_entity_path)
# Used by container sync feature
if 'x-timestamp' in req.headers:
try:
req_timestamp = Timestamp(req.headers['X-Timestamp'])
except ValueError:
return HTTPBadRequest(
request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
req.headers['X-Timestamp'] = req_timestamp.internal
else:
req.headers['X-Timestamp'] = Timestamp(time.time()).internal
if object_versions and not req.environ.get('swift_versioned_copy'):
is_manifest = 'X-Object-Manifest' in req.headers or \
'X-Object-Manifest' in hresp.headers
if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
@ -901,114 +965,46 @@ class BaseObjectController(Controller):
copy_resp = self.COPY(copy_req)
if is_client_error(copy_resp.status_int):
# missing container or bad permissions
return HTTPPreconditionFailed(request=req)
raise HTTPPreconditionFailed(request=req)
elif not is_success(copy_resp.status_int):
# could not copy the data, bail
return HTTPServiceUnavailable(request=req)
raise HTTPServiceUnavailable(request=req)
reader = req.environ['wsgi.input'].read
data_source = iter(lambda: reader(self.app.client_chunk_size), '')
source_header = req.headers.get('X-Copy-From')
source_resp = None
if source_header:
if req.environ.get('swift.orig_req_method', req.method) != 'POST':
req.environ.setdefault('swift.log_info', []).append(
'x-copy-from:%s' % source_header)
ver, acct, _rest = req.split_path(2, 3, True)
src_account_name = req.headers.get('X-Copy-From-Account', None)
if src_account_name:
src_account_name = check_account_format(req, src_account_name)
def _update_content_type(self, req):
# Sometimes the 'content-type' header exists, but is set to None.
req.content_type_manually_set = True
detect_content_type = \
config_true_value(req.headers.get('x-detect-content-type'))
if detect_content_type or not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
if detect_content_type:
req.headers.pop('x-detect-content-type')
else:
src_account_name = acct
src_container_name, src_obj_name = check_copy_from_header(req)
source_header = '/%s/%s/%s/%s' % (
ver, src_account_name, src_container_name, src_obj_name)
source_req = req.copy_get()
req.content_type_manually_set = False
# make sure the source request uses it's container_info
source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
source_req.path_info = source_header
source_req.headers['X-Newest'] = 'true'
orig_obj_name = self.object_name
orig_container_name = self.container_name
orig_account_name = self.account_name
self.object_name = src_obj_name
self.container_name = src_container_name
self.account_name = src_account_name
sink_req = Request.blank(req.path_info,
environ=req.environ, headers=req.headers)
source_resp = self.GET(source_req)
sink_req.headers['etag'] = source_resp.etag
# This gives middlewares a way to change the source; for example,
# this lets you COPY a SLO manifest and have the new object be the
# concatenation of the segments (like what a GET request gives
# the client), not a copy of the manifest file.
hook = req.environ.get(
'swift.copy_hook',
(lambda source_req, source_resp, sink_req: source_resp))
source_resp = hook(source_req, source_resp, sink_req)
if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
return source_resp
self.object_name = orig_obj_name
self.container_name = orig_container_name
self.account_name = orig_account_name
data_source = iter(source_resp.app_iter)
sink_req.content_length = source_resp.content_length
if sink_req.content_length is None:
# This indicates a transfer-encoding: chunked source object,
# which currently only happens because there are more than
# CONTAINER_LISTING_LIMIT segments in a segmented object. In
# this case, we're going to refuse to do the server-side copy.
return HTTPRequestEntityTooLarge(request=req)
if sink_req.content_length > constraints.MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(request=req)
sink_req.etag = source_resp.etag
# we no longer need the X-Copy-From header
del sink_req.headers['X-Copy-From']
if 'X-Copy-From-Account' in sink_req.headers:
del sink_req.headers['X-Copy-From-Account']
if not content_type_manually_set:
sink_req.headers['Content-Type'] = \
source_resp.headers['Content-Type']
if config_true_value(
sink_req.headers.get('x-fresh-metadata', 'false')):
# post-as-copy: ignore new sysmeta, copy existing sysmeta
condition = lambda k: is_sys_meta('object', k)
remove_items(sink_req.headers, condition)
copy_header_subset(source_resp, sink_req, condition)
else:
# copy/update existing sysmeta and user meta
copy_headers_into(source_resp, sink_req)
copy_headers_into(req, sink_req)
# copy over x-static-large-object for POSTs and manifest copies
if 'X-Static-Large-Object' in source_resp.headers and \
req.params.get('multipart-manifest') == 'get':
sink_req.headers['X-Static-Large-Object'] = \
source_resp.headers['X-Static-Large-Object']
req = sink_req
req, delete_at_container, delete_at_part, \
delete_at_nodes = self._config_obj_expiration(req)
def _update_x_timestamp(self, req):
    """
    Normalize the request's X-Timestamp header to internal format.

    A client-supplied X-Timestamp (used by the container sync feature) is
    validated and rewritten in internal form; otherwise the current time
    is stamped on the request.

    :param req: the incoming request; its X-Timestamp header is updated
                in place
    :returns: None
    :raises HTTPBadRequest: if a supplied X-Timestamp is not parseable
    """
    # Used by container sync feature
    if 'x-timestamp' not in req.headers:
        req.headers['X-Timestamp'] = Timestamp(time.time()).internal
        return None
    try:
        client_timestamp = Timestamp(req.headers['X-Timestamp'])
    except ValueError:
        raise HTTPBadRequest(
            request=req, content_type='text/plain',
            body='X-Timestamp should be a UNIX timestamp float value; '
            'was %r' % req.headers['x-timestamp'])
    req.headers['X-Timestamp'] = client_timestamp.internal
    return None
def _get_put_connections(self, req, nodes, partition, outgoing_headers,
policy):
obj_ring = policy.object_ring
node_iter = GreenthreadSafeIterator(
self.iter_nodes_local_first(obj_ring, partition))
pile = GreenPile(len(nodes))
te = req.headers.get('transfer-encoding', '')
chunked = ('chunked' in te)
# If the request body sent from client -> proxy is the same as the
# request body sent proxy -> object, then we can rely on the object
# server to handle any Etag checking. If not, we have to do it here.
etag_hasher = None if policy.stores_objects_verbatim else md5()
outgoing_headers = self._backend_requests(
req, len(nodes), container_partition, containers,
delete_at_container, delete_at_part, delete_at_nodes)
for nheaders in outgoing_headers:
if not policy.stores_objects_verbatim:
@ -1018,16 +1014,15 @@ class BaseObjectController(Controller):
nheaders.pop('Content-Length', None)
nheaders.pop('Etag', None)
# RFC2616:8.2.3 disallows 100-continue without a body
if (int(nheaders.get('content-length', 0)) > 0) or chunked:
if (int(nheaders.get('content-length', 0)) > 0) or req.is_chunked:
nheaders['Expect'] = '100-continue'
pile.spawn(
self._connect_put_node, node_iter, partition,
req.swift_entity_path, nheaders,
self.app.logger.thread_locals, chunked,
self.app.logger.thread_locals, req.is_chunked,
need_metadata_footer=policy.needs_trailing_object_metadata,
need_multiphase_put=policy.needs_multiphase_put)
min_puts = policy.quorum_size(len(nodes))
putters = []
chunk_hashers = [None] * len(nodes)
for i, p in enumerate(pile):
@ -1036,6 +1031,18 @@ class BaseObjectController(Controller):
p.hshr_index = i
chunk_hashers[p.hshr_index] = (
None if policy.stores_objects_verbatim else md5())
return chunk_hashers, putters
def _check_min_putters(self, req, putters, min_puts, msg=None):
    """
    Verify that enough putter connections remain to continue the PUT.

    :param req: the client request, used to build the error response
    :param putters: list of currently usable putter connections
    :param min_puts: minimum number of connections required (quorum)
    :param msg: optional log-message template containing %(conns)s and
                %(nodes)s placeholders; a default 503 message is used
                when falsy
    :raises HTTPServiceUnavailable: if fewer than min_puts putters remain
    """
    if not msg:
        msg = ('Object PUT returning 503, %(conns)s/%(nodes)s '
               'required connections')
    remaining = len(putters)
    if remaining >= min_puts:
        return None
    self.app.logger.error((msg),
                          {'conns': remaining, 'nodes': min_puts})
    raise HTTPServiceUnavailable(request=req)
def _transfer_data(self, req, data_source, chunk_hashers, putters, nodes,
policy, etag_hasher, min_puts):
statuses = [p.current_status() for p in putters]
if (req.if_none_match is not None
@ -1045,7 +1052,7 @@ class BaseObjectController(Controller):
self.app.logger.debug(
_('Object PUT returning 412, %(statuses)r'),
{'statuses': statuses})
return HTTPPreconditionFailed(request=req)
raise HTTPPreconditionFailed(request=req)
if HTTP_CONFLICT in statuses:
timestamps = [HeaderKeyDict(p.resp.getheaders()).get(
@ -1055,14 +1062,9 @@ class BaseObjectController(Controller):
'%(req_timestamp)s <= %(timestamps)r'),
{'req_timestamp': req.timestamp.internal,
'timestamps': ', '.join(timestamps)})
return HTTPAccepted(request=req)
raise HTTPAccepted(request=req)
if len(putters) < min_puts:
self.app.logger.error(
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
{'conns': len(putters), 'nodes': min_puts})
return HTTPServiceUnavailable(request=req)
self._check_min_putters(req, putters, min_puts)
bytes_transferred = 0
chunk_transform = policy.chunk_transformer(len(nodes))
@ -1085,12 +1087,9 @@ class BaseObjectController(Controller):
putter.send_chunk(backend_chunk)
else:
putters.remove(putter)
if len(putters) < min_puts:
self.app.logger.error(_(
'Object PUT exceptions during'
' send, %(conns)s/%(nodes)s required connections'),
{'conns': len(putters), 'nodes': min_puts})
raise HTTPServiceUnavailable(request=req)
self._check_min_putters(
req, putters, min_puts, msg='Object PUT exceptions during'
' send, %(conns)s/%(nodes)s required connections')
final_phase = True
need_quorum = True
@ -1118,7 +1117,7 @@ class BaseObjectController(Controller):
'etag', '').strip('"')
if (computed_etag and received_etag and
computed_etag != received_etag):
return HTTPUnprocessableEntity(request=req)
raise HTTPUnprocessableEntity(request=req)
send_chunk('') # flush out any buffered data
@ -1134,7 +1133,7 @@ class BaseObjectController(Controller):
break
bytes_transferred += len(chunk)
if bytes_transferred > constraints.MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(request=req)
raise HTTPRequestEntityTooLarge(request=req)
send_chunk(chunk)
@ -1163,7 +1162,7 @@ class BaseObjectController(Controller):
self.app.logger.error(
_('Not enough object servers ack\'ed (got %d)'),
statuses.count(HTTP_CONTINUE))
return HTTPServerError(request=req)
raise HTTPServerError(request=req)
for putter in putters:
putter.wait()
final_phase = True
@ -1175,21 +1174,45 @@ class BaseObjectController(Controller):
self.app.logger.warn(
_('ERROR Client read timeout (%ss)'), err.seconds)
self.app.logger.increment('client_timeouts')
return HTTPRequestTimeout(request=req)
raise HTTPRequestTimeout(request=req)
except HTTPException:
raise
except (Exception, Timeout):
self.app.logger.exception(
_('ERROR Exception causing client disconnect'))
return HTTPClientDisconnect(request=req)
raise HTTPClientDisconnect(request=req)
if req.content_length and bytes_transferred < req.content_length:
req.client_disconnect = True
self.app.logger.warn(
_('Client disconnected without sending enough data'))
self.app.logger.increment('client_disconnects')
return HTTPClientDisconnect(request=req)
raise HTTPClientDisconnect(request=req)
statuses, reasons, bodies, etags, _junk = self._get_put_responses(
req, putters, len(nodes), final_phase, min_responses,
need_quorum=need_quorum)
return final_phase, min_responses, need_quorum, putters
def _store_object(self, req, data_source, nodes, partition,
outgoing_headers):
policy_index = int(req.headers.get('X-Backend-Storage-Policy-Index'))
policy = POLICIES.get_by_index(policy_index)
# If the request body sent from client -> proxy is the same as the
# request body sent proxy -> object, then we can rely on the object
# server to handle any Etag checking. If not, we have to do it here.
etag_hasher = None if policy.stores_objects_verbatim else md5()
min_puts = policy.quorum_size(len(nodes))
chunk_hashers, putters = self._get_put_connections(
req, nodes, partition, outgoing_headers, policy)
try:
final_phase, min_resp, need_quorum, putters = self._transfer_data(
req, data_source, chunk_hashers, putters, nodes,
policy, etag_hasher, min_puts)
statuses, reasons, bodies, etags, _junk = self._get_put_responses(
req, putters, len(nodes), final_phase, min_resp,
need_quorum=need_quorum)
except HTTPException as resp:
return resp
if len(etags) > 1 and policy.stores_objects_verbatim:
self.app.logger.error(
@ -1201,18 +1224,83 @@ class BaseObjectController(Controller):
resp = self.best_response(req, statuses, reasons, bodies,
_('Object PUT'), etag=etag,
quorum_size=min_puts)
if source_header:
acct, path = source_header.split('/', 3)[2:4]
resp.headers['X-Copied-From-Account'] = quote(acct)
resp.headers['X-Copied-From'] = quote(path)
if 'last-modified' in source_resp.headers:
resp.headers['X-Copied-From-Last-Modified'] = \
source_resp.headers['last-modified']
copy_headers_into(req, resp)
resp.last_modified = math.ceil(
float(Timestamp(req.headers['X-Timestamp'])))
return resp
@public
@cors_validation
@delay_denial
def PUT(self, req):
    """HTTP PUT request handler.

    Orchestrates the refactored PUT pipeline: authorization, content-type
    and constraint checks, timestamping, versioning, server-side copy
    handling, expiration scheduling, and finally streaming the object to
    the storage nodes via _store_object.
    """
    if req.if_none_match is not None and '*' not in req.if_none_match:
        # Sending an etag with if-none-match isn't currently supported
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body='If-None-Match only supports *')
    container_info = self.container_info(
        self.account_name, self.container_name, req)
    # a backend header overrides the container's policy (e.g. for
    # replication/reconciler requests)
    policy_index = int(req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy']))
    obj_ring = self.app.get_object_ring(policy_index)
    container_nodes = container_info['nodes']
    container_partition = container_info['partition']
    partition, nodes = obj_ring.get_nodes(
        self.account_name, self.container_name, self.object_name)

    # pass the policy index to storage nodes via req header
    req.headers['X-Backend-Storage-Policy-Index'] = policy_index
    req.acl = container_info['write_acl']
    req.environ['swift_sync_key'] = container_info['sync_key']

    # is request authorized
    if 'swift.authorize' in req.environ:
        aresp = req.environ['swift.authorize'](req)
        if aresp:
            return aresp

    # the object's container must already exist
    if not container_info['nodes']:
        return HTTPNotFound(request=req)

    # update content type in case it is missing
    self._update_content_type(req)

    # check constraints on object name and request headers
    error_response = check_object_creation(req, self.object_name) or \
        check_content_type(req)
    if error_response:
        return error_response

    self._update_x_timestamp(req)

    # check if versioning is enabled and handle copying previous version
    self._handle_object_versions(req)

    # check if request is a COPY of an existing object
    source_header = req.headers.get('X-Copy-From')
    if source_header:
        error_response, req, data_source, update_response = \
            self._handle_copy_request(req)
        if error_response:
            return error_response
    else:
        # plain PUT: stream the request body in client_chunk_size pieces
        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        update_response = lambda req, resp: resp

    # check if object is set to be automatically deleted (i.e. expired)
    req, delete_at_container, delete_at_part, \
        delete_at_nodes = self._config_obj_expiration(req)

    # add special headers to be handled by storage nodes
    outgoing_headers = self._backend_requests(
        req, len(nodes), container_partition, container_nodes,
        delete_at_container, delete_at_part, delete_at_nodes)

    # send object to storage nodes
    resp = self._store_object(
        req, data_source, nodes, partition, outgoing_headers)
    return update_response(req, resp)
@public
@cors_validation
@delay_denial

View File

@ -23,6 +23,7 @@ import eventlet
import eventlet.debug
import functools
import random
from ConfigParser import ConfigParser, NoSectionError
from time import time, sleep
from httplib import HTTPException
from urlparse import urlparse
@ -32,6 +33,7 @@ from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from swift.common.middleware.memcache import MemcacheMiddleware
from swift.common.storage_policy import parse_storage_policies, PolicyError
from test import get_config
from test.functional.swift_test_client import Account, Connection, \
@ -50,6 +52,9 @@ from swift.container import server as container_server
from swift.obj import server as object_server, mem_server as mem_object_server
import swift.proxy.controllers.obj
DEBUG = True
# In order to get the proper blocking behavior of sockets without using
# threads, where we can set an arbitrary timeout for some piece of code under
# test, we use eventlet with the standard socket library patched. We have to
@ -99,7 +104,7 @@ orig_hash_path_suff_pref = ('', '')
orig_swift_conf_name = None
in_process = False
_testdir = _test_servers = _test_sockets = _test_coros = None
_testdir = _test_servers = _test_coros = None
class FakeMemcacheMiddleware(MemcacheMiddleware):
@ -113,29 +118,187 @@ class FakeMemcacheMiddleware(MemcacheMiddleware):
self.memcache = FakeMemcache()
# swift.conf contents for in-process functional test runs
functests_swift_conf = '''
[swift-hash]
swift_hash_path_suffix = inprocfunctests
swift_hash_path_prefix = inprocfunctests
class InProcessException(BaseException):
    """Raised when in-process functional test setup cannot proceed."""
    pass
[swift-constraints]
max_file_size = %d
''' % ((8 * 1024 * 1024) + 2) # 8 MB + 2
def _info(msg):
    # Write a setup progress message to stderr (Python 2 print syntax).
    print >> sys.stderr, msg
def _debug(msg):
    # Emit msg via _info only when the module-level DEBUG flag is set.
    if DEBUG:
        _info('DEBUG: ' + msg)
def _in_process_setup_swift_conf(swift_conf_src, testdir):
    """
    Copy swift.conf from swift_conf_src into testdir, overriding the hash
    path prefix/suffix and max_file_size to suit in-process functional
    test runs.

    :param swift_conf_src: path of the swift.conf file to use as a base
    :param testdir: directory in which the test swift.conf is written
    :returns: path of the written test swift.conf file
    :raises InProcessException: if a required section is missing from the
                                source conf file
    """
    # override swift.conf contents for in-process functional test runs
    conf = ConfigParser()
    conf.read(swift_conf_src)
    try:
        section = 'swift-hash'
        conf.set(section, 'swift_hash_path_suffix', 'inprocfunctests')
        conf.set(section, 'swift_hash_path_prefix', 'inprocfunctests')
        section = 'swift-constraints'
        max_file_size = (8 * 1024 * 1024) + 2  # 8 MB + 2
        # NOTE(review): passing an int to conf.set is accepted by the
        # Python 2 ConfigParser used here; py3 configparser requires str.
        conf.set(section, 'max_file_size', max_file_size)
    except NoSectionError:
        # 'section' still names whichever section was being modified
        msg = 'Conf file %s is missing section %s' % (swift_conf_src, section)
        raise InProcessException(msg)

    test_conf_file = os.path.join(testdir, 'swift.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)

    return test_conf_file
def _in_process_find_conf_file(conf_src_dir, conf_file_name, use_sample=True):
    """
    Look for a file first in conf_src_dir, if it exists, otherwise optionally
    look in the source tree sample 'etc' dir.

    :param conf_src_dir: Directory in which to search first for conf file. May
                         be None
    :param conf_file_name: Name of conf file
    :param use_sample: If True and the conf_file_name is not found, then return
                       any sample conf file found in the source tree sample
                       'etc' dir by appending '-sample' to conf_file_name
    :returns: Path to conf file
    :raises InProcessException: If no conf file is found
    """
    # source-tree etc/ dir, three levels up from this module
    sample_dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
                                               os.pardir, os.pardir,
                                               os.pardir, 'etc'))
    search_dir = sample_dir if conf_src_dir is None else conf_src_dir
    candidate = os.path.join(search_dir, conf_file_name)
    if os.path.exists(candidate):
        return candidate
    if use_sample:
        # fall back to using the corresponding sample conf file
        conf_file_name += '-sample'
        candidate = os.path.join(sample_dir, conf_file_name)
        if os.path.exists(candidate):
            return candidate
    raise InProcessException('Failed to find config file %s' % conf_file_name)
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
    """
    Create the object ring for in-process functional tests.

    If SWIFT_TEST_POLICY is set:
    - look in swift.conf file for specified policy
    - move this to be policy-0 but preserving its options
    - copy its ring file to test dir, changing its devices to suit
      in process testing, and renaming it to suit policy-0
    Otherwise, create a default ring file.

    :param swift_conf: path of the test swift.conf file; rewritten in place
                       so the chosen policy becomes policy-0/default
    :param conf_src_dir: optional directory searched for a source ring file
    :param testdir: directory for test ring files and device directories
    :returns: list of listening sockets, one per object ring device
    :raises InProcessException: if the conf policies are invalid or a ring
                                file for a specified policy is missing
    """
    conf = ConfigParser()
    conf.read(swift_conf)
    sp_prefix = 'storage-policy:'

    try:
        # policy index 0 will be created if no policy exists in conf
        policies = parse_storage_policies(conf)
    except PolicyError as e:
        raise InProcessException(e)

    # clear all policies from test swift.conf before adding test policy back
    for policy in policies:
        conf.remove_section(sp_prefix + str(policy.idx))

    policy_specified = os.environ.get('SWIFT_TEST_POLICY')
    if policy_specified:
        policy_to_test = policies.get_by_name(policy_specified)
        if policy_to_test is None:
            raise InProcessException('Failed to find policy name "%s"'
                                     % policy_specified)
        _info('Using specified policy %s' % policy_to_test.name)
    else:
        policy_to_test = policies.default
        _info('Defaulting to policy %s' % policy_to_test.name)

    # make policy_to_test be policy index 0 and default for the test config
    sp_zero_section = sp_prefix + '0'
    conf.add_section(sp_zero_section)
    for (k, v) in policy_to_test.get_options().items():
        conf.set(sp_zero_section, k, v)
    conf.set(sp_zero_section, 'default', True)

    with open(swift_conf, 'w') as fp:
        conf.write(fp)

    # look for a source ring file
    ring_file_src = ring_file_test = 'object.ring.gz'
    if policy_to_test.idx:
        ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
    try:
        ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
                                                   use_sample=False)
    except InProcessException as e:
        if policy_specified:
            raise InProcessException('Failed to find ring file %s'
                                     % ring_file_src)
        ring_file_src = None

    ring_file_test = os.path.join(testdir, ring_file_test)
    if ring_file_src:
        # copy source ring file to a policy-0 test ring file, re-homing servers
        _info('Using source ring file %s' % ring_file_src)
        ring_data = ring.RingData.load(ring_file_src)
        obj_sockets = []
        for dev in ring_data.devs:
            device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
            # bugfix: create the data dirs for *this* device (and use the
            # testdir parameter rather than the module global); previously
            # only 'sda1' under _testdir was ever created, so any device
            # beyond the first two had no data directory
            utils.mkdirs(os.path.join(testdir, device))
            utils.mkdirs(os.path.join(testdir, device, 'tmp'))
            obj_socket = eventlet.listen(('localhost', 0))
            obj_sockets.append(obj_socket)
            dev['port'] = obj_socket.getsockname()[1]
            dev['ip'] = '127.0.0.1'
            dev['device'] = device
            dev['replication_port'] = dev['port']
            dev['replication_ip'] = dev['ip']
        ring_data.save(ring_file_test)
    else:
        # make default test ring, 2 replicas, 4 partitions, 2 devices
        _info('No source object ring file, creating 2rep/4part/2dev ring')
        obj_sockets = [eventlet.listen(('localhost', 0)) for _ in (0, 1)]
        ring_data = ring.RingData(
            [[0, 1, 0, 1], [1, 0, 1, 0]],
            [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
              'port': obj_sockets[0].getsockname()[1]},
             {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
              'port': obj_sockets[1].getsockname()[1]}],
            30)
        with closing(GzipFile(ring_file_test, 'wb')) as f:
            pickle.dump(ring_data, f)

    for dev in ring_data.devs:
        _debug('Ring file dev: %s' % dev)

    return obj_sockets
def in_process_setup(the_object_server=object_server):
print >>sys.stderr, 'IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS'
print >>sys.stderr, 'Using object_server: %s' % the_object_server.__name__
_dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
os.pardir, os.pardir, os.pardir))
proxy_conf = os.path.join(_dir, 'etc', 'proxy-server.conf-sample')
if os.path.exists(proxy_conf):
print >>sys.stderr, 'Using proxy-server config from %s' % proxy_conf
_info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
_info('Using object_server class: %s' % the_object_server.__name__)
conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
else:
print >>sys.stderr, 'Failed to find conf file %s' % proxy_conf
return
if conf_src_dir is not None:
if not os.path.isdir(conf_src_dir):
msg = 'Config source %s is not a dir' % conf_src_dir
raise InProcessException(msg)
_info('Using config source dir: %s' % conf_src_dir)
# If SWIFT_TEST_IN_PROCESS_CONF specifies a config source dir then
# prefer config files from there, otherwise read config from source tree
# sample files. A mixture of files from the two sources is allowed.
proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
_info('Using proxy config from %s' % proxy_conf)
swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
_info('Using swift config from %s' % swift_conf_src)
monkey_patch_mimetools()
@ -148,9 +311,8 @@ def in_process_setup(the_object_server=object_server):
utils.mkdirs(os.path.join(_testdir, 'sdb1'))
utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
swift_conf = os.path.join(_testdir, "swift.conf")
with open(swift_conf, "w") as scfp:
scfp.write(functests_swift_conf)
swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)
global orig_swift_conf_name
orig_swift_conf_name = utils.SWIFT_CONF_FILE
@ -221,11 +383,6 @@ def in_process_setup(the_object_server=object_server):
acc2lis = eventlet.listen(('localhost', 0))
con1lis = eventlet.listen(('localhost', 0))
con2lis = eventlet.listen(('localhost', 0))
obj1lis = eventlet.listen(('localhost', 0))
obj2lis = eventlet.listen(('localhost', 0))
global _test_sockets
_test_sockets = \
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
with closing(GzipFile(account_ring_path, 'wb')) as f:
@ -243,14 +400,6 @@ def in_process_setup(the_object_server=object_server):
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]}], 30),
f)
object_ring_path = os.path.join(_testdir, 'object.ring.gz')
with closing(GzipFile(object_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]}], 30),
f)
eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
# Turn off logging requests by the underlying WSGI software.
@ -270,10 +419,13 @@ def in_process_setup(the_object_server=object_server):
config, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
config, logger=debug_logger('cont2'))
obj1srv = the_object_server.ObjectController(
config, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
config, logger=debug_logger('obj2'))
objsrvs = [
(obj_sockets[index],
the_object_server.ObjectController(
config, logger=debug_logger('obj%d' % (index + 1))))
for index in range(len(obj_sockets))
]
logger = debug_logger('proxy')
@ -283,7 +435,10 @@ def in_process_setup(the_object_server=object_server):
with mock.patch('swift.common.utils.get_logger', get_logger):
with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
FakeMemcacheMiddleware):
app = loadapp(proxy_conf, global_conf=config)
try:
app = loadapp(proxy_conf, global_conf=config)
except Exception as e:
raise InProcessException(e)
nl = utils.NullLogger()
prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
@ -291,11 +446,13 @@ def in_process_setup(the_object_server=object_server):
acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)
obj1spa = eventlet.spawn(eventlet.wsgi.server, obj1lis, obj1srv, nl)
obj2spa = eventlet.spawn(eventlet.wsgi.server, obj2lis, obj2srv, nl)
objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl)
for objsrv in objsrvs]
global _test_coros
_test_coros = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)
(prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)
# Create accounts "test" and "test2"
def create_account(act):
@ -396,8 +553,13 @@ def setup_package():
if in_process:
in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ')
in_mem_obj = utils.config_true_value(in_mem_obj_env)
in_process_setup(the_object_server=(
mem_object_server if in_mem_obj else object_server))
try:
in_process_setup(the_object_server=(
mem_object_server if in_mem_obj else object_server))
except InProcessException as exc:
print >> sys.stderr, ('Exception during in-process setup: %s'
% str(exc))
raise
global web_front_end
web_front_end = config.get('web_front_end', 'integral')

View File

@ -756,6 +756,9 @@ def fake_http_connect(*code_iter, **kwargs):
def getheader(self, name, default=None):
    # Look up a single response header by wrapping getheaders() in
    # swob.HeaderKeyDict; presumably normalizes header-name case — confirm.
    return swob.HeaderKeyDict(self.getheaders()).get(name, default)
def close(self):
    # No-op: this fake connection holds no real socket to tear down.
    pass
timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
if isinstance(kwargs.get('headers'), (list, tuple)):

View File

@ -758,7 +758,7 @@ def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):
The AccountBroker initialize() function before we added the
policy stat table. Used by test_policy_table_creation() to
make sure that the AccountBroker will correctly add the table
for cases where the DB existed before the policy support was added.
for cases where the DB existed before the policy support was added.
:param conn: DB connection object
:param put_timestamp: put timestamp

View File

@ -392,6 +392,17 @@ class TestPrintObjFullMeta(TestCliInfoBase):
print_obj(self.datafile, swift_dir=self.testdir)
self.assertTrue('/objects-1/' in out.getvalue())
def test_print_obj_meta_and_ts_files(self):
    """print_obj should read .meta and .ts files, not only .data files."""
    stem = os.path.splitext(self.datafile)[0]
    for suffix in ('.meta', '.ts'):
        linked = stem + suffix
        # Hard-link the datafile under the alternate extension.
        os.link(self.datafile, linked)
        captured = StringIO()
        with mock.patch('sys.stdout', captured):
            print_obj(linked, swift_dir=self.testdir)
        self.assertTrue('/objects-1/' in captured.getvalue())
def test_print_obj_no_ring(self):
no_rings_dir = os.path.join(self.testdir, 'no_rings_here')
os.mkdir(no_rings_dir)

View File

@ -293,6 +293,43 @@ class TestRecon(unittest.TestCase):
% ex)
self.assertFalse(expected)
def test_drive_audit_check(self):
    """Verify driveaudit_check() aggregates drive_audit_errors.

    Mocks Scout.scout to return canned /recon/driveaudit responses
    from four hosts and checks the printed summary line.
    """
    hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020),
             ('127.0.0.1', 6030), ('127.0.0.1', 6040)]
    # sample json response from http://<host>:<port>/recon/driveaudit
    responses = {6010: {'drive_audit_errors': 15},
                 6020: {'drive_audit_errors': 0},
                 6030: {'drive_audit_errors': 257},
                 6040: {'drive_audit_errors': 56}}
    # <low> <high> <avg> <total> <Failed> <no_result> <reported>
    expected = (0, 257, 82.0, 328, 0.0, 0, 4)

    def mock_scout_driveaudit(app, host):
        url = 'http://%s:%s/recon/driveaudit' % host
        response = responses[host[1]]
        status = 200
        return url, response, status

    stdout = StringIO()
    patches = [
        mock.patch('swift.cli.recon.Scout.scout', mock_scout_driveaudit),
        mock.patch('sys.stdout', new=stdout),
    ]
    with nested(*patches):
        self.recon_instance.driveaudit_check(hosts)
    output = stdout.getvalue()
    # Raw string: '\[' is not a valid string escape, so the non-raw form
    # only worked by accident; r"..." makes the regex explicit.
    r = re.compile(r"\[drive_audit_errors(.*)\](.*)")
    lines = output.splitlines()
    self.assertTrue(lines)
    for line in lines:
        m = r.match(line)
        if m:
            self.assertEquals(m.group(2),
                              " low: %s, high: %s, avg: %s, total: %s,"
                              " Failed: %s%%, no_result: %s, reported: %s"
                              % expected)
class TestReconCommands(unittest.TestCase):
def setUp(self):

View File

@ -172,6 +172,9 @@ class FakeRecon(object):
def fake_sockstat(self):
    """Canned /recon/sockstat payload for the middleware tests."""
    payload = dict(sockstattest="1")
    return payload
def fake_driveaudit(self):
    """Canned /recon/driveaudit payload for the middleware tests."""
    return dict(driveaudittest="1")
def nocontent(self):
    """Fake recon handler that yields no payload."""
    return
@ -829,6 +832,15 @@ class TestReconSuccess(TestCase):
(('/proc/net/sockstat', 'r'), {}),
(('/proc/net/sockstat6', 'r'), {})])
def test_get_driveaudit_info(self):
    """get_driveaudit_error() reads drive_audit_errors from drive.recon."""
    cached = {'drive_audit_errors': 7}
    self.fakecache.fakeout = cached
    result = self.app.get_driveaudit_error()
    # The app must have queried exactly this key from the recon cache file.
    expected_call = ((['drive_audit_errors'],
                      '/var/cache/swift/drive.recon'), {})
    self.assertEquals(self.fakecache.fakeout_calls, [expected_call])
    self.assertEquals(result, {'drive_audit_errors': 7})
class TestReconMiddleware(unittest.TestCase):
@ -857,6 +869,7 @@ class TestReconMiddleware(unittest.TestCase):
self.app.get_swift_conf_md5 = self.frecon.fake_swiftconfmd5
self.app.get_quarantine_count = self.frecon.fake_quarantined
self.app.get_socket_info = self.frecon.fake_sockstat
self.app.get_driveaudit_error = self.frecon.fake_driveaudit
def test_recon_get_mem(self):
get_mem_resp = ['{"memtest": "1"}']
@ -1084,5 +1097,12 @@ class TestReconMiddleware(unittest.TestCase):
resp = self.app(req.environ, start_response)
self.assertEquals(resp, 'FAKE APP')
def test_recon_get_driveaudit(self):
    """GET /recon/driveaudit is routed to the fake driveaudit handler."""
    expected_body = ['{"driveaudittest": "1"}']
    request = Request.blank('/recon/driveaudit',
                            environ={'REQUEST_METHOD': 'GET'})
    body = self.app(request.environ, start_response)
    self.assertEquals(body, expected_body)
if __name__ == '__main__':
unittest.main()

View File

@ -14,9 +14,10 @@
# limitations under the License.
import unittest
from contextlib import contextmanager
from contextlib import contextmanager, nested
from base64 import b64encode
from time import time
import mock
from swift.common.middleware import tempauth as auth
from swift.common.middleware.acl import format_acl
@ -266,6 +267,25 @@ class TestAuth(unittest.TestCase):
self.assertEquals(req.environ['swift.authorize'],
local_auth.denied_response)
def test_auth_with_s3_authorization(self):
    """A valid S3 AUTHORIZATION header installs tempauth's authorize."""
    fake_app = FakeApp()
    test_auth = auth.filter_factory(
        {'user_s3_s3': 's3 .admin'})(fake_app)
    req = self._make_request('/v1/AUTH_s3',
                             headers={'X-Auth-Token': 't',
                                      'AUTHORIZATION': 'AWS s3:s3:pass'})
    # Stub base64 so the fabricated signature always matches 'pass'.
    with nested(mock.patch('base64.urlsafe_b64decode'),
                mock.patch('base64.encodestring')) as (decode, encode):
        decode.return_value = ''
        encode.return_value = 'pass'
        resp = req.get_response(test_auth)
    self.assertEquals(resp.status_int, 404)
    self.assertEquals(fake_app.calls, 1)
    self.assertEquals(req.environ['swift.authorize'],
                      test_auth.authorize)
def test_auth_no_reseller_prefix_no_token(self):
# Check that normally we set up a call back to our authorize.
local_auth = auth.filter_factory({'reseller_prefix': ''})(FakeApp())

View File

@ -995,6 +995,25 @@ class TestStoragePolicies(unittest.TestCase):
self.assertEquals(policies.get_by_index(0).needs_multiphase_put, False)
self.assertEquals(policies.get_by_index(10).needs_multiphase_put, True)
def test_storage_policy_get_options(self):
    """get_options() reports name/default/deprecated for each policy."""
    repl = StoragePolicy.from_conf(
        REPL_POLICY, {'idx': 0, 'name': 'zero', 'is_default': True})
    ec = StoragePolicy.from_conf(
        EC_POLICY, {'idx': 10, 'name': 'ten', 'is_default': False,
                    'is_deprecated': True,
                    'ec_type': 'jerasure_rs_vand',
                    'ec_ndata': 10, 'ec_nparity': 3})
    policies = StoragePolicyCollection([repl, ec])
    expected_zero = {'name': 'zero',
                     'default': True,
                     'deprecated': False}
    self.assertEqual(expected_zero,
                     policies.get_by_index(0).get_options())
    expected_ten = {'name': 'ten',
                    'default': False,
                    'deprecated': True}
    self.assertEqual(expected_ten,
                     policies.get_by_index(10).get_options())
if __name__ == '__main__':
unittest.main()

View File

@ -1043,54 +1043,58 @@ class TestUtils(unittest.TestCase):
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
lfo = utils.LoggerFileObject(logger)
lfo_stdout = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print 'test1'
self.assertEquals(sio.getvalue(), '')
sys.stdout = lfo
sys.stdout = lfo_stdout
print 'test2'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo
sys.stderr = lfo_stderr
print >> sys.stderr, 'test4'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print 'test5'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print >> sys.stderr, 'test6'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\n')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print 'test8'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\n')
lfo.writelines(['a', 'b', 'c'])
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\n')
lfo.close()
lfo.write('d')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo.flush()
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assert_(got_exc)
got_exc = False
try:
for line in lfo.xreadlines():
pass
except Exception:
got_exc = True
self.assert_(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assert_(got_exc)
got_exc = False
try:
for line in lfo.xreadlines():
pass
except Exception:
got_exc = True
self.assert_(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_parse_options(self):
# Get a file that is definitely on disk

View File

@ -555,6 +555,44 @@ class TestContainerBroker(unittest.TestCase):
self.assertEqual(stat['bytes_used'],
sum(stats[policy_index].values()))
def test_initialize_container_broker_in_default(self):
    """initialize() without a storage_policy_index acts as policy 0."""
    broker = ContainerBroker(':memory:', account='test1',
                             container='test2')
    # initialize with no storage_policy_index argument
    broker.initialize(Timestamp(1).internal)
    info = broker.get_info()
    for key, want in (('account', 'test1'),
                      ('container', 'test2'),
                      ('hash', '00000000000000000000000000000000')):
        self.assertEquals(info[key], want)
    self.assertEqual(info['put_timestamp'], Timestamp(1).internal)
    self.assertEqual(info['delete_timestamp'], '0')
    # Fresh broker: no objects, no bytes, stats tracked under policy 0.
    info = broker.get_info()
    self.assertEquals(info['object_count'], 0)
    self.assertEquals(info['bytes_used'], 0)
    policy_stats = broker.get_policy_stats()
    # Act as policy-0
    self.assertTrue(0 in policy_stats)
    self.assertEquals(policy_stats[0]['bytes_used'], 0)
    self.assertEquals(policy_stats[0]['object_count'], 0)
    # One 123-byte object must be reflected in info and policy stats.
    broker.put_object('o1', Timestamp(time()).internal, 123, 'text/plain',
                      '5af83e3196bf99f440f31f2e1a6c9afe')
    info = broker.get_info()
    self.assertEquals(info['object_count'], 1)
    self.assertEquals(info['bytes_used'], 123)
    policy_stats = broker.get_policy_stats()
    self.assertTrue(0 in policy_stats)
    self.assertEquals(policy_stats[0]['object_count'], 1)
    self.assertEquals(policy_stats[0]['bytes_used'], 123)
def test_get_info(self):
# Test ContainerBroker.get_info
broker = ContainerBroker(':memory:', account='test1',

View File

@ -1067,7 +1067,10 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEquals(res.status_int, expected)
# repeat test
@ -1077,7 +1080,10 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEquals(res.status_int, expected)
@unpatch_policies
@ -2337,7 +2343,10 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEquals(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, -1), 201) # connect exc
@ -2366,7 +2375,10 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'PUT'},
body='some data')
self.app.update_request(req)
res = controller.PUT(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEquals(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, -1, 201), 201)
@ -2408,7 +2420,10 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEquals(res.status[:len(str(expected))],
str(expected))
@ -3998,7 +4013,10 @@ class TestObjectController(unittest.TestCase):
self.app.update_request(req)
self.app.memcache.store = {}
resp = controller.PUT(req)
try:
resp = controller.PUT(req)
except HTTPException as resp:
pass
self.assertEquals(resp.status_int, 413)
def test_basic_COPY(self):
@ -4239,7 +4257,10 @@ class TestObjectController(unittest.TestCase):
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
resp = controller.COPY(req)
try:
resp = controller.COPY(req)
except HTTPException as resp:
pass
self.assertEquals(resp.status_int, 413)
@_limit_max_file_size
@ -4263,7 +4284,10 @@ class TestObjectController(unittest.TestCase):
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
resp = controller.COPY(req)
try:
resp = controller.COPY(req)
except HTTPException as resp:
pass
self.assertEquals(resp.status_int, 413)
def test_COPY_newest(self):
@ -4307,45 +4331,48 @@ class TestObjectController(unittest.TestCase):
def test_COPY_delete_at(self):
with save_globals():
given_headers = {}
backend_requests = []
def fake_connect_put_node(nodes, part, path, headers,
logger_thread_locals, chunked,
need_metadata_footer=False,
need_multiphase_put=False):
given_headers.update(headers)
def capture_requests(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
backend_requests.append((method, path, headers))
controller = proxy_server.ReplicatedObjectController(self.app, 'a',
'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
controller = proxy_server.ReplicatedObjectController(
self.app, 'a', 'c', 'o')
controller._connect_put_node = fake_connect_put_node
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
self.app.update_request(req)
controller.COPY(req)
self.assertEquals(given_headers.get('X-Delete-At'), '9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
resp = controller.COPY(req)
self.assertEqual(201, resp.status_int) # sanity
for method, path, given_headers in backend_requests:
if method != 'PUT':
continue
self.assertEquals(given_headers.get('X-Delete-At'),
'9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_COPY_account_delete_at(self):
with save_globals():
given_headers = {}
backend_requests = []
def fake_connect_put_node(nodes, part, path, headers,
logger_thread_locals, chunked,
need_metadata_footer=False,
need_multiphase_put=False):
given_headers.update(headers)
def capture_requests(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
backend_requests.append((method, path, headers))
controller = proxy_server.ReplicatedObjectController(self.app, 'a',
'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
controller = proxy_server.ReplicatedObjectController(
self.app, 'a', 'c', 'o')
controller._connect_put_node = fake_connect_put_node
set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@ -4353,12 +4380,17 @@ class TestObjectController(unittest.TestCase):
'Destination-Account': 'a1'})
self.app.update_request(req)
controller.COPY(req)
self.assertEquals(given_headers.get('X-Delete-At'), '9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
resp = controller.COPY(req)
self.assertEqual(201, resp.status_int) # sanity
for method, path, given_headers in backend_requests:
if method != 'PUT':
continue
self.assertEquals(given_headers.get('X-Delete-At'),
'9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_chunked_put(self):

View File

@ -75,6 +75,9 @@ class FakeServerConnection(WSGIContext):
def send(self, data):
    # Accumulate everything written to this fake connection in self.data.
    self.data += data
def close(self):
    # No-op: the fake server connection has nothing to release.
    pass
def __call__(self, ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
self.path = quote('/' + device + '/' + str(partition) + path)