merge from master to ec

Change-Id: Ic7016b29792518907ba7a5df491e9246b72bb643
John Dickinson 2014-05-15 18:35:57 -04:00
commit aa8d5518d2
53 changed files with 1095 additions and 297 deletions

View File

@ -53,3 +53,4 @@ Peter Portante <peter.portante@redhat.com> <peter.a.portante@gmail.com>
Christian Schwede <info@cschwede.de> <christian.schwede@enovance.com>
Constantine Peresypkin <constantine.peresypk@rackspace.com> <constantine@litestack.com>
Madhuri Kumari <madhuri.rai07@gmail.com> madhuri <madhuri@madhuri-VirtualBox.(none)>
Morgan Fainberg <morgan.fainberg@gmail.com> <m@metacloud.com>

View File

@ -11,7 +11,7 @@ else
cover_branches="--cover-branches --cover-html --cover-html-dir=$TOP_DIR/cover"
fi
cd $TOP_DIR/test/unit
nosetests --exe --with-coverage --cover-package swift. --cover-erase $cover_branches $@
nosetests --exe --with-coverage --cover-package swift --cover-erase $cover_branches $@
rvalue=$?
rm -f .coverage
cd -

View File

@ -39,7 +39,7 @@ Brian Curtin (brian.curtin@rackspace.com)
Julien Danjou (julien@danjou.info)
Ksenia Demina (kdemina@mirantis.com)
Dan Dillinger (dan.dillinger@sonian.net)
Morgan Fainberg (m@metacloud.com)
Morgan Fainberg (morgan.fainberg@gmail.com)
ZhiQiang Fan (aji.zqfan@gmail.com)
Flaper Fesp (flaper87@gmail.com)
Tom Fifield (tom@openstack.org)

View File

@ -65,7 +65,12 @@ def get_errors(error_re, log_file_pattern, minutes):
# to the config where one can set it with the desired
# globbing pattern.
log_files = [f for f in glob.glob(log_file_pattern)]
log_files.sort()
try:
log_files.sort(key=lambda f: os.stat(f).st_mtime, reverse=True)
except (IOError, OSError) as exc:
logger.error(exc)
print(exc)
sys.exit(1)
now_time = datetime.datetime.now()
end_time = now_time - datetime.timedelta(minutes=minutes)
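A note on the sort change above: the old log_files.sort() ordered rotated logs lexicographically, which does not track their age. A minimal sketch of the difference (file names here are illustrative; the real ones come from log_file_pattern):

names = ['error.log', 'error.log.1', 'error.log.10', 'error.log.2']
print(sorted(names))
# ['error.log', 'error.log.1', 'error.log.10', 'error.log.2'] -- .10 sorts before .2

Keying the sort on os.stat(f).st_mtime, newest first, orders the files by when they were actually written, and the new try/except handles files that disappear (e.g. to rotation) between the glob and the stat.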

View File

@ -116,7 +116,7 @@ class AccountAuditor(Daemon):
broker.get_info()
self.logger.increment('passes')
self.account_passes += 1
self.logger.debug(_('Audit passed for %s') % broker)
self.logger.debug('Audit passed for %s' % broker)
except (Exception, Timeout):
self.logger.increment('failures')
self.account_failures += 1
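A pattern that repeats throughout this merge: the _() translation marker is dropped from debug-level log calls, since debug output is read by operators and is not translated. Two interpolation styles also appear side by side; a stand-in sketch (illustrative, not Swift code):

import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('demo')
broker = 'accounts/sda1/0.db'
logger.debug('Audit passed for %s' % broker)   # eager: the string is built even when DEBUG is off
logger.debug('Audit passed for %s', broker)    # lazy: formatted only if the record is emitted

The account reaper hunk below uses the lazy form.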

View File

@ -105,7 +105,7 @@ class AccountReaper(Daemon):
This repeatedly calls :func:`reap_once` no quicker than the
configuration interval.
"""
self.logger.debug(_('Daemon started.'))
self.logger.debug('Daemon started.')
sleep(random.random() * self.interval)
while True:
begin = time()
@ -121,7 +121,7 @@ class AccountReaper(Daemon):
repeatedly by :func:`run_forever`. This will call :func:`reap_device`
once for each device on the server.
"""
self.logger.debug(_('Begin devices pass: %s'), self.devices)
self.logger.debug('Begin devices pass: %s', self.devices)
begin = time()
try:
for device in os.listdir(self.devices):

View File

@ -36,7 +36,7 @@ def print_ring_locations(ring, datadir, account, container=None):
:param ring: ring instance
:param datadir: high level directory to store account/container/objects
:param acount: account name
:param account: account name
:param container: container name
"""
if ring is None or datadir is None or account is None:

View File

@ -16,8 +16,10 @@
cmdline utility to perform cluster reconnaissance
"""
from __future__ import print_function
from eventlet.green import urllib2
from swift.common.utils import SWIFT_CONF_FILE
from swift.common.ring import Ring
from urlparse import urlparse
try:
@ -82,16 +84,16 @@ class Scout(object):
body = urllib2.urlopen(url, timeout=self.timeout).read()
content = json.loads(body)
if self.verbose:
print "-> %s: %s" % (url, content)
print("-> %s: %s" % (url, content))
status = 200
except urllib2.HTTPError as err:
if not self.suppress_errors or self.verbose:
print "-> %s: %s" % (url, err)
print("-> %s: %s" % (url, err))
content = err
status = err.code
except urllib2.URLError as err:
if not self.suppress_errors or self.verbose:
print "-> %s: %s" % (url, err)
print("-> %s: %s" % (url, err))
content = err
status = -1
return url, content, status
@ -143,10 +145,10 @@ class SwiftRecon(object):
:param stats: dict of stats generated by _gen_stats
"""
print '[%(name)s] low: %(low)d, high: %(high)d, avg: ' \
'%(average).1f, total: %(total)d, ' \
'Failed: %(perc_none).1f%%, no_result: %(number_none)d, ' \
'reported: %(reported)d' % stats
print('[%(name)s] low: %(low)d, high: %(high)d, avg: '
'%(average).1f, total: %(total)d, '
'Failed: %(perc_none).1f%%, no_result: %(number_none)d, '
'reported: %(reported)d' % stats)
def _ptime(self, timev=None):
"""
@ -158,6 +160,21 @@ class SwiftRecon(object):
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def _md5_file(self, path):
"""
Get the MD5 checksum of a file.
:param path: path to file
:returns: MD5 checksum, hex encoded
"""
md5sum = md5()
with open(path, 'rb') as f:
block = f.read(4096)
while block:
md5sum.update(block)
block = f.read(4096)
return md5sum.hexdigest()
def get_devices(self, zone_filter, swift_dir, ring_name):
"""
Get a list of hosts in the ring
@ -206,10 +223,10 @@ class SwiftRecon(object):
rings[ring_name] = ring_sum
recon = Scout("ringmd5", self.verbose, self.suppress_errors,
self.timeout)
print "[%s] Checking ring md5sums" % self._ptime()
print("[%s] Checking ring md5sums" % self._ptime())
if self.verbose:
for ring_file, ring_sum in rings.items():
print "-> On disk %s md5sum: %s" % (ring_file, ring_sum)
print("-> On disk %s md5sum: %s" % (ring_file, ring_sum))
for url, response, status in self.pool.imap(recon.scout, hosts):
if status != 200:
errors = errors + 1
@ -220,17 +237,48 @@ class SwiftRecon(object):
ring_sum = rings.get(remote_ring_name, None)
if remote_ring_sum != ring_sum:
success = False
print "!! %s (%s => %s) doesn't match on disk md5sum" % \
(url, remote_ring_name, remote_ring_sum)
print("!! %s (%s => %s) doesn't match on disk md5sum" %
(url, remote_ring_name, remote_ring_sum))
if not success:
errors += 1
continue
matches += 1
if self.verbose:
print "-> %s matches." % url
print "%s/%s hosts matched, %s error[s] while checking hosts." \
% (matches, len(hosts), errors)
print "=" * 79
print("-> %s matches." % url)
print("%s/%s hosts matched, %s error[s] while checking hosts." %
(matches, len(hosts), errors))
print("=" * 79)
def get_swiftconfmd5(self, hosts, printfn=print):
"""
Compare swift.conf md5sum with that on remote hosts
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
:param printfn: function to print text; defaults to print()
"""
matches = 0
errors = 0
conf_sum = self._md5_file(SWIFT_CONF_FILE)
recon = Scout("swiftconfmd5", self.verbose, self.suppress_errors,
self.timeout)
printfn("[%s] Checking swift.conf md5sum" % self._ptime())
if self.verbose:
printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,))
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
if response[SWIFT_CONF_FILE] != conf_sum:
printfn("!! %s (%s) doesn't match on disk md5sum" %
(url, response[SWIFT_CONF_FILE]))
else:
matches = matches + 1
if self.verbose:
printfn("-> %s matches." % url)
else:
errors = errors + 1
printfn("%s/%s hosts matched, %s error[s] while checking hosts."
% (matches, len(hosts), errors))
printfn("=" * 79)
def async_check(self, hosts):
"""
@ -242,7 +290,7 @@ class SwiftRecon(object):
scan = {}
recon = Scout("async", self.verbose, self.suppress_errors,
self.timeout)
print "[%s] Checking async pendings" % self._ptime()
print("[%s] Checking async pendings" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
scan[url] = response['async_pending']
@ -250,8 +298,8 @@ class SwiftRecon(object):
if stats['reported'] > 0:
self._print_stats(stats)
else:
print "[async_pending] - No hosts returned valid data."
print "=" * 79
print("[async_pending] - No hosts returned valid data.")
print("=" * 79)
def umount_check(self, hosts):
"""
@ -264,8 +312,8 @@ class SwiftRecon(object):
errors = {}
recon = Scout("unmounted", self.verbose, self.suppress_errors,
self.timeout)
print "[%s] Getting unmounted drives from %s hosts..." % \
(self._ptime(), len(hosts))
print("[%s] Getting unmounted drives from %s hosts..." %
(self._ptime(), len(hosts)))
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
unmounted[url] = []
@ -278,12 +326,12 @@ class SwiftRecon(object):
for host in unmounted:
node = urlparse(host).netloc
for entry in unmounted[host]:
print "Not mounted: %s on %s" % (entry, node)
print("Not mounted: %s on %s" % (entry, node))
for host in errors:
node = urlparse(host).netloc
for entry in errors[host]:
print "Device errors: %s on %s" % (entry, node)
print "=" * 79
print("Device errors: %s on %s" % (entry, node))
print("=" * 79)
def expirer_check(self, hosts):
"""
@ -295,7 +343,7 @@ class SwiftRecon(object):
stats = {'object_expiration_pass': [], 'expired_last_pass': []}
recon = Scout("expirer/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print "[%s] Checking on expirers" % self._ptime()
print("[%s] Checking on expirers" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
stats['object_expiration_pass'].append(
@ -308,10 +356,10 @@ class SwiftRecon(object):
if computed['reported'] > 0:
self._print_stats(computed)
else:
print "[%s] - No hosts returned valid data." % k
print("[%s] - No hosts returned valid data." % k)
else:
print "[%s] - No hosts returned valid data." % k
print "=" * 79
print("[%s] - No hosts returned valid data." % k)
print("=" * 79)
def replication_check(self, hosts):
"""
@ -324,7 +372,7 @@ class SwiftRecon(object):
'attempted': []}
recon = Scout("replication/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print "[%s] Checking on replication" % self._ptime()
print("[%s] Checking on replication" % self._ptime())
least_recent_time = 9999999999
least_recent_url = None
most_recent_time = 0
@ -354,29 +402,29 @@ class SwiftRecon(object):
if computed['reported'] > 0:
self._print_stats(computed)
else:
print "[%s] - No hosts returned valid data." % k
print("[%s] - No hosts returned valid data." % k)
else:
print "[%s] - No hosts returned valid data." % k
print("[%s] - No hosts returned valid data." % k)
if least_recent_url is not None:
host = urlparse(least_recent_url).netloc
if not least_recent_time:
print 'Oldest completion was NEVER by %s.' % host
print('Oldest completion was NEVER by %s.' % host)
else:
elapsed = time.time() - least_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print 'Oldest completion was %s (%d %s ago) by %s.' % (
print('Oldest completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(least_recent_time)),
elapsed, elapsed_unit, host)
elapsed, elapsed_unit, host))
if most_recent_url is not None:
host = urlparse(most_recent_url).netloc
elapsed = time.time() - most_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print 'Most recent completion was %s (%d %s ago) by %s.' % (
print('Most recent completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(most_recent_time)),
elapsed, elapsed_unit, host)
print "=" * 79
elapsed, elapsed_unit, host))
print("=" * 79)
def object_replication_check(self, hosts):
"""
@ -388,7 +436,7 @@ class SwiftRecon(object):
stats = {}
recon = Scout("replication", self.verbose, self.suppress_errors,
self.timeout)
print "[%s] Checking on replication" % self._ptime()
print("[%s] Checking on replication" % self._ptime())
least_recent_time = 9999999999
least_recent_url = None
most_recent_time = 0
@ -409,29 +457,29 @@ class SwiftRecon(object):
if computed['reported'] > 0:
self._print_stats(computed)
else:
print "[replication_time] - No hosts returned valid data."
print("[replication_time] - No hosts returned valid data.")
else:
print "[replication_time] - No hosts returned valid data."
print("[replication_time] - No hosts returned valid data.")
if least_recent_url is not None:
host = urlparse(least_recent_url).netloc
if not least_recent_time:
print 'Oldest completion was NEVER by %s.' % host
print('Oldest completion was NEVER by %s.' % host)
else:
elapsed = time.time() - least_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print 'Oldest completion was %s (%d %s ago) by %s.' % (
print('Oldest completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(least_recent_time)),
elapsed, elapsed_unit, host)
elapsed, elapsed_unit, host))
if most_recent_url is not None:
host = urlparse(most_recent_url).netloc
elapsed = time.time() - most_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print 'Most recent completion was %s (%d %s ago) by %s.' % (
print('Most recent completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(most_recent_time)),
elapsed, elapsed_unit, host)
print "=" * 79
elapsed, elapsed_unit, host))
print("=" * 79)
def updater_check(self, hosts):
"""
@ -443,7 +491,7 @@ class SwiftRecon(object):
stats = []
recon = Scout("updater/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print "[%s] Checking updater times" % self._ptime()
print("[%s] Checking updater times" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
if response['%s_updater_sweep' % self.server_type]:
@ -454,10 +502,10 @@ class SwiftRecon(object):
if computed['reported'] > 0:
self._print_stats(computed)
else:
print "[updater_last_sweep] - No hosts returned valid data."
print("[updater_last_sweep] - No hosts returned valid data.")
else:
print "[updater_last_sweep] - No hosts returned valid data."
print "=" * 79
print("[updater_last_sweep] - No hosts returned valid data.")
print("=" * 79)
def auditor_check(self, hosts):
"""
@ -473,12 +521,12 @@ class SwiftRecon(object):
asince = '%s_audits_since' % self.server_type
recon = Scout("auditor/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print "[%s] Checking auditor stats" % self._ptime()
print("[%s] Checking auditor stats" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
scan[url] = response
if len(scan) < 1:
print "Error: No hosts available"
print("Error: No hosts available")
return
stats = {}
stats[adone] = [scan[i][adone] for i in scan
@ -491,7 +539,7 @@ class SwiftRecon(object):
if scan[i][asince] is not None]
for k in stats:
if len(stats[k]) < 1:
print "[%s] - No hosts returned valid data." % k
print("[%s] - No hosts returned valid data." % k)
else:
if k != asince:
computed = self._gen_stats(stats[k], k)
@ -502,9 +550,9 @@ class SwiftRecon(object):
high = max(stats[asince])
total = sum(stats[asince])
average = total / len(stats[asince])
print '[last_pass] oldest: %s, newest: %s, avg: %s' % \
(self._ptime(low), self._ptime(high), self._ptime(average))
print "=" * 79
print('[last_pass] oldest: %s, newest: %s, avg: %s' %
(self._ptime(low), self._ptime(high), self._ptime(average)))
print("=" * 79)
def nested_get_value(self, key, recon_entry):
"""
@ -540,7 +588,7 @@ class SwiftRecon(object):
quarantined = 'quarantined'
recon = Scout("auditor/object", self.verbose, self.suppress_errors,
self.timeout)
print "[%s] Checking auditor stats " % self._ptime()
print("[%s] Checking auditor stats " % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
if response['object_auditor_stats_ALL']:
@ -563,16 +611,16 @@ class SwiftRecon(object):
if None in stats[k]:
stats[k] = [x for x in stats[k] if x is not None]
if len(stats[k]) < 1:
print "[Auditor %s] - No hosts returned valid data." % k
print("[Auditor %s] - No hosts returned valid data." % k)
else:
computed = self._gen_stats(stats[k],
name='ALL_%s_last_path' % k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print "[ALL_auditor] - No hosts returned valid data."
print("[ALL_auditor] - No hosts returned valid data.")
else:
print "[ALL_auditor] - No hosts returned valid data."
print("[ALL_auditor] - No hosts returned valid data.")
if len(zbf_scan) > 0:
stats = {}
stats[atime] = [(self.nested_get_value(atime, zbf_scan[i]))
@ -587,17 +635,17 @@ class SwiftRecon(object):
if None in stats[k]:
stats[k] = [x for x in stats[k] if x is not None]
if len(stats[k]) < 1:
print "[Auditor %s] - No hosts returned valid data." % k
print("[Auditor %s] - No hosts returned valid data." % k)
else:
computed = self._gen_stats(stats[k],
name='ZBF_%s_last_path' % k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print "[ZBF_auditor] - No hosts returned valid data."
print("[ZBF_auditor] - No hosts returned valid data.")
else:
print "[ZBF_auditor] - No hosts returned valid data."
print "=" * 79
print("[ZBF_auditor] - No hosts returned valid data.")
print("=" * 79)
def load_check(self, hosts):
"""
@ -611,7 +659,7 @@ class SwiftRecon(object):
load15 = {}
recon = Scout("load", self.verbose, self.suppress_errors,
self.timeout)
print "[%s] Checking load averages" % self._ptime()
print("[%s] Checking load averages" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
load1[url] = response['1m']
@ -624,8 +672,8 @@ class SwiftRecon(object):
name='%s_load_avg' % item)
self._print_stats(computed)
else:
print "[%s_load_avg] - No hosts returned valid data." % item
print "=" * 79
print("[%s_load_avg] - No hosts returned valid data." % item)
print("=" * 79)
def quarantine_check(self, hosts):
"""
@ -639,7 +687,7 @@ class SwiftRecon(object):
acctq = {}
recon = Scout("quarantined", self.verbose, self.suppress_errors,
self.timeout)
print "[%s] Checking quarantine" % self._ptime()
print("[%s] Checking quarantine" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
objq[url] = response['objects']
@ -652,8 +700,8 @@ class SwiftRecon(object):
name='quarantined_%s' % item)
self._print_stats(computed)
else:
print "No hosts returned valid data."
print "=" * 79
print("No hosts returned valid data.")
print("=" * 79)
def socket_usage(self, hosts):
"""
@ -669,7 +717,7 @@ class SwiftRecon(object):
orphan = {}
recon = Scout("sockstat", self.verbose, self.suppress_errors,
self.timeout)
print "[%s] Checking socket usage" % self._ptime()
print("[%s] Checking socket usage" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
inuse4[url] = response['tcp_in_use']
@ -685,8 +733,8 @@ class SwiftRecon(object):
computed = self._gen_stats(stats[item].values(), item)
self._print_stats(computed)
else:
print "No hosts returned valid data."
print "=" * 79
print("No hosts returned valid data.")
print("=" * 79)
def disk_usage(self, hosts, top=0, human_readable=False):
"""
@ -704,14 +752,14 @@ class SwiftRecon(object):
top_percents = [(None, 0)] * top
recon = Scout("diskusage", self.verbose, self.suppress_errors,
self.timeout)
print "[%s] Checking disk usage now" % self._ptime()
print("[%s] Checking disk usage now" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
hostusage = []
for entry in response:
if not isinstance(entry['mounted'], bool):
print "-> %s/%s: Error: %s" % (url, entry['device'],
entry['mounted'])
print("-> %s/%s: Error: %s" % (url, entry['device'],
entry['mounted']))
elif entry['mounted']:
used = float(entry['used']) / float(entry['size']) \
* 100.0
@ -737,17 +785,17 @@ class SwiftRecon(object):
for percent in stats[url]:
percents[int(percent)] = percents.get(int(percent), 0) + 1
else:
print "-> %s: Error. No drive info available." % url
print("-> %s: Error. No drive info available." % url)
if len(lows) > 0:
low = min(lows)
high = max(highs)
# dist graph shamelessly stolen from https://github.com/gholt/tcod
print "Distribution Graph:"
print("Distribution Graph:")
mul = 69.0 / max(percents.values())
for percent in sorted(percents):
print '% 3d%%%5d %s' % (percent, percents[percent],
'*' * int(percents[percent] * mul))
print('% 3d%%%5d %s' % (percent, percents[percent],
'*' * int(percents[percent] * mul)))
raw_used = sum(raw_total_used)
raw_avail = sum(raw_total_avail)
raw_total = raw_used + raw_avail
@ -756,26 +804,26 @@ class SwiftRecon(object):
raw_used = size_suffix(raw_used)
raw_avail = size_suffix(raw_avail)
raw_total = size_suffix(raw_total)
print "Disk usage: space used: %s of %s" % (raw_used, raw_total)
print "Disk usage: space free: %s of %s" % (raw_avail, raw_total)
print "Disk usage: lowest: %s%%, highest: %s%%, avg: %s%%" % \
(low, high, avg_used)
print("Disk usage: space used: %s of %s" % (raw_used, raw_total))
print("Disk usage: space free: %s of %s" % (raw_avail, raw_total))
print("Disk usage: lowest: %s%%, highest: %s%%, avg: %s%%" %
(low, high, avg_used))
else:
print "No hosts returned valid data."
print "=" * 79
print("No hosts returned valid data.")
print("=" * 79)
if top_percents:
print 'TOP %s' % top
print('TOP %s' % top)
for ident, used in top_percents:
if ident:
url, device = ident.split()
host = urlparse(url).netloc.split(':')[0]
print '%.02f%% %s' % (used, '%-15s %s' % (host, device))
print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
def main(self):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
print "=" * 79
print("=" * 79)
usage = '''
usage: %prog <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d]
[-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
@ -838,7 +886,7 @@ class SwiftRecon(object):
if arguments[0] in self.check_types:
self.server_type = arguments[0]
else:
print "Invalid Server Type"
print("Invalid Server Type")
args.print_help()
sys.exit(1)
else:
@ -854,8 +902,8 @@ class SwiftRecon(object):
else:
hosts = self.get_devices(None, swift_dir, self.server_type)
print "--> Starting reconnaissance on %s hosts" % len(hosts)
print "=" * 79
print("--> Starting reconnaissance on %s hosts" % len(hosts))
print("=" * 79)
if options.all:
if self.server_type == 'object':
@ -882,7 +930,7 @@ class SwiftRecon(object):
if self.server_type == 'object':
self.async_check(hosts)
else:
print "Error: Can't check asyncs on non object servers."
print("Error: Can't check asyncs on non object servers.")
if options.unmounted:
self.umount_check(hosts)
if options.replication:
@ -897,20 +945,21 @@ class SwiftRecon(object):
self.auditor_check(hosts)
if options.updater:
if self.server_type == 'account':
print "Error: Can't check updaters on account servers."
print("Error: Can't check updaters on account servers.")
else:
self.updater_check(hosts)
if options.expirer:
if self.server_type == 'object':
self.expirer_check(hosts)
else:
print "Error: Can't check expired on non object servers."
print("Error: Can't check expired on non object servers.")
if options.loadstats:
self.load_check(hosts)
if options.diskusage:
self.disk_usage(hosts, options.top, options.human_readable)
if options.md5:
self.get_ringmd5(hosts, swift_dir)
self.get_swiftconfmd5(hosts)
if options.quarantined:
self.quarantine_check(hosts)
if options.sockstat:
@ -922,7 +971,7 @@ def main():
reconnoiter = SwiftRecon()
reconnoiter.main()
except KeyboardInterrupt:
print '\n'
print('\n')
if __name__ == '__main__':
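Most of this file's diff is mechanical: with from __future__ import print_function in effect, every print statement becomes a function call, the form Python 3 accepts. It also makes print a first-class object, which the new get_swiftconfmd5(hosts, printfn=print) signature depends on. A minimal sketch:

from __future__ import print_function

def report(lines, printfn=print):   # impossible with the print statement form
    for line in lines:
        printfn(line)

report(['=' * 79])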

View File

@ -121,8 +121,8 @@ class BufferedHTTPConnection(HTTPConnection):
def getresponse(self):
response = HTTPConnection.getresponse(self)
logging.debug(_("HTTP PERF: %(time).5f seconds to %(method)s "
"%(host)s:%(port)s %(path)s)"),
logging.debug("HTTP PERF: %(time).5f seconds to %(method)s "
"%(host)s:%(port)s %(path)s)",
{'time': time.time() - self._connected_time,
'method': self._method, 'host': self.host,
'port': self.port, 'path': self._path})

View File

@ -209,7 +209,7 @@ class DatabaseBroker(object):
def __str__(self):
"""
Returns a string indentifying the entity under broker to a human.
Returns a string identifying the entity under broker to a human.
The baseline implementation returns a full pathname to a database.
This is vital for useful diagnostics.
"""

View File

@ -276,7 +276,7 @@ class Replicator(Daemon):
"""
self.stats['diff'] += 1
self.logger.increment('diffs')
self.logger.debug(_('Syncing chunks with %s'), http.host)
self.logger.debug('Syncing chunks with %s', http.host)
sync_table = broker.get_syncs()
objects = broker.get_items_since(point, self.per_diff)
diffs = 0
@ -294,9 +294,9 @@ class Replicator(Daemon):
point = objects[-1]['ROWID']
objects = broker.get_items_since(point, self.per_diff)
if objects:
self.logger.debug(_(
self.logger.debug(
'Synchronization for %s has fallen more than '
'%s rows behind; moving on and will try again next pass.'),
'%s rows behind; moving on and will try again next pass.',
broker, self.max_diffs * self.per_diff)
self.stats['diff_capped'] += 1
self.logger.increment('diff_caps')
@ -407,7 +407,7 @@ class Replicator(Daemon):
:param node_id: node id of the node to be replicated to
"""
start_time = now = time.time()
self.logger.debug(_('Replicating db %s'), object_file)
self.logger.debug('Replicating db %s', object_file)
self.stats['attempted'] += 1
self.logger.increment('attempts')
shouldbehere = True
@ -611,15 +611,15 @@ class ReplicatorRpc(object):
raise
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for info: %.02fs') %
self.logger.debug('replicator-rpc-sync time for info: %.02fs' %
timespan)
if metadata:
timemark = time.time()
broker.update_metadata(simplejson.loads(metadata))
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for '
'update_metadata: %.02fs') % timespan)
self.logger.debug('replicator-rpc-sync time for '
'update_metadata: %.02fs' % timespan)
if info['put_timestamp'] != put_timestamp or \
info['created_at'] != created_at or \
info['delete_timestamp'] != delete_timestamp:
@ -628,14 +628,14 @@ class ReplicatorRpc(object):
created_at, put_timestamp, delete_timestamp)
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for '
'merge_timestamps: %.02fs') % timespan)
self.logger.debug('replicator-rpc-sync time for '
'merge_timestamps: %.02fs' % timespan)
timemark = time.time()
info['point'] = broker.get_sync(id_)
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for get_sync: '
'%.02fs') % timespan)
self.logger.debug('replicator-rpc-sync time for get_sync: '
'%.02fs' % timespan)
if hash_ == info['hash'] and info['point'] < remote_sync:
timemark = time.time()
broker.merge_syncs([{'remote_id': id_,
@ -643,8 +643,8 @@ class ReplicatorRpc(object):
info['point'] = remote_sync
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for '
'merge_syncs: %.02fs') % timespan)
self.logger.debug('replicator-rpc-sync time for '
'merge_syncs: %.02fs' % timespan)
return Response(simplejson.dumps(info))
def merge_syncs(self, broker, args):

View File

@ -735,10 +735,14 @@ class SimpleClient(object):
self.retries = retries
def base_request(self, method, container=None, name=None, prefix=None,
headers={}, proxy=None, contents=None, full_listing=None):
headers=None, proxy=None, contents=None,
full_listing=None):
# Common request method
url = self.url
if headers is None:
headers = {}
if self.token:
headers['X-Auth-Token'] = self.token
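The signature change above (headers={} to headers=None) fixes Python's shared-mutable-default pitfall: a default dict is created once, at function definition time, and reused by every call. Reduced to a sketch (names are illustrative):

def base_request_buggy(method, headers={}):
    headers['X-Auth-Token'] = 'secret'   # mutates the one dict shared by all calls
    return headers

first = base_request_buggy('GET')
second = base_request_buggy('HEAD')
print(first is second)   # True: state from one call leaks into the next

With headers=None and a fresh headers = {} inside the body, each call starts clean.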

View File

@ -242,7 +242,7 @@ class Manager(object):
print _("%s (%s) appears to have stopped") % (server, killed_pid)
killed_pids.add(killed_pid)
if not killed_pids.symmetric_difference(signaled_pids):
# all proccesses have been stopped
# all processes have been stopped
return 0
# reached interval n watch_pids w/o killing all servers

View File

@ -207,7 +207,7 @@ class MemcacheRing(object):
pass
if got_connection:
# We need to return something to the pool
# A new connection will be created the next time it is retreived
# A new connection will be created the next time it is retrieved
self._return_conn(server, None, None)
now = time.time()
self._errors[server].append(time.time())

View File

@ -191,7 +191,7 @@ class ProxyLoggingMiddleware(object):
start_time_str,
end_time_str
)))
# Log timing and bytes-transfered data to StatsD
# Log timing and bytes-transferred data to StatsD
metric_name = self.statsd_metric_name(req, status_int, method)
# Only log data for valid controllers (or SOS) to keep the metric count
# down (egregious errors will get logged by the proxy server itself).

View File

@ -19,7 +19,8 @@ from swift import gettext_ as _
from swift import __version__ as swiftver
from swift.common.swob import Request, Response
from swift.common.utils import get_logger, config_true_value, json
from swift.common.utils import get_logger, config_true_value, json, \
SWIFT_CONF_FILE
from swift.common.constraints import check_mount
from resource import getpagesize
from hashlib import md5
@ -246,6 +247,23 @@ class ReconMiddleware(object):
self.logger.exception(_('Error reading ringfile'))
return sums
def get_swift_conf_md5(self, openr=open):
"""get md5 of swift.conf"""
md5sum = md5()
try:
with openr(SWIFT_CONF_FILE, 'r') as fh:
chunk = fh.read(4096)
while chunk:
md5sum.update(chunk)
chunk = fh.read(4096)
except IOError as err:
if err.errno != errno.ENOENT:
self.logger.exception(_('Error reading swift.conf'))
hexsum = None
else:
hexsum = md5sum.hexdigest()
return {SWIFT_CONF_FILE: hexsum}
def get_quarantine_count(self):
"""get obj/container/account quarantine counts"""
qcounts = {"objects": 0, "containers": 0, "accounts": 0}
@ -320,6 +338,8 @@ class ReconMiddleware(object):
content = self.get_diskusage()
elif rcheck == "ringmd5":
content = self.get_ring_md5()
elif rcheck == "swiftconfmd5":
content = self.get_swift_conf_md5()
elif rcheck == "quarantined":
content = self.get_quarantine_count()
elif rcheck == "sockstat":

View File

@ -1,4 +1,19 @@
# Copyright (c) 2010-2012 OpenStack Foundation
# Copyright (c) 2011-2014 Greg Holt
# Copyright (c) 2012-2013 John Dickinson
# Copyright (c) 2012 Felipe Reyes
# Copyright (c) 2012 Peter Portante
# Copyright (c) 2012 Victor Rodionov
# Copyright (c) 2013-2014 Samuel Merritt
# Copyright (c) 2013 Chuck Thier
# Copyright (c) 2013 David Goetz
# Copyright (c) 2013 Dirk Mueller
# Copyright (c) 2013 Donagh McCabe
# Copyright (c) 2013 Fabien Boucher
# Copyright (c) 2013 Greg Lange
# Copyright (c) 2013 Kun Huang
# Copyright (c) 2013 Richard Hawkins
# Copyright (c) 2013 Tong Li
# Copyright (c) 2013 ZhiQiang Fan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -106,7 +121,7 @@ from urlparse import parse_qs
from swift.proxy.controllers.base import get_account_info
from swift.common.swob import HeaderKeyDict, HTTPUnauthorized
from swift.common.utils import split_path, get_valid_utf8_str, \
register_swift_info, get_hmac, streq_const_time
register_swift_info, get_hmac, streq_const_time, quote
#: Default headers to remove from incoming requests. Simply a whitespace
@ -147,6 +162,10 @@ def get_tempurl_keys_from_metadata(meta):
if key.lower() in ('temp-url-key', 'temp-url-key-2')]
def disposition_format(filename):
return 'attachment; filename="%s"' % quote(filename, safe='/ ')
class TempURL(object):
"""
WSGI Middleware to grant temporary URLs specific access to Swift
@ -319,14 +338,12 @@ class TempURL(object):
if inline_disposition:
disposition_value = 'inline'
elif filename:
disposition_value = 'attachment; filename="%s"' % (
filename.replace('"', '\\"'))
disposition_value = disposition_format(filename)
elif existing_disposition:
disposition_value = existing_disposition
else:
name = basename(env['PATH_INFO'].rstrip('/'))
disposition_value = 'attachment; filename="%s"' % (
name.replace('"', '\\"'))
disposition_value = disposition_format(name)
out_headers.append(('Content-Disposition', disposition_value))
headers = out_headers
return start_response(status, headers, exc_info)
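The new disposition_format helper percent-encodes the filename rather than backslash-escaping embedded quotes, so a user-supplied name can never put a raw double quote inside the Content-Disposition header. A quick sketch with the stdlib quote (swift.common.utils.quote, imported above, wraps it with unicode handling):

from urllib import quote

filename = 'report "final".txt'
print('attachment; filename="%s"' % quote(filename, safe='/ '))
# attachment; filename="report %22final%22.txt"

The old code emitted filename="report \"final\".txt", which some clients parse incorrectly.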

View File

@ -174,7 +174,7 @@ class Ring(object):
self._part_shift = ring_data._part_shift
self._rebuild_tier_data()
# Do this now, when we know the data has changed, rather then
# Do this now, when we know the data has changed, rather than
# doing it on every call to get_more_nodes().
regions = set()
zones = set()
@ -241,7 +241,7 @@ class Ring(object):
dev_id = r2p2d[part]
if dev_id not in seen_ids:
part_nodes.append(self.devs[dev_id])
seen_ids.add(dev_id)
seen_ids.add(dev_id)
return part_nodes
def get_part(self, account, container=None, obj=None):

View File

@ -953,7 +953,7 @@ class Request(object):
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
:returns: list of segments with a length of maxsegs (non-existant
:returns: list of segments with a length of maxsegs (non-existent
segments will return as None)
:raises: ValueError if given an invalid path
"""

View File

@ -630,7 +630,7 @@ def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
:returns: list of segments with a length of maxsegs (non-existant
:returns: list of segments with a length of maxsegs (non-existent
segments will return as None)
:raises: ValueError if given an invalid path
"""
@ -1535,7 +1535,7 @@ def unlink_older_than(path, mtime):
pass
def item_from_env(env, item_name):
def item_from_env(env, item_name, allow_none=False):
"""
Get a value from the wsgi environment
@ -1545,12 +1545,12 @@ def item_from_env(env, item_name):
:returns: the value from the environment
"""
item = env.get(item_name, None)
if item is None:
if item is None and not allow_none:
logging.error("ERROR: %s could not be found in env!" % item_name)
return item
def cache_from_env(env):
def cache_from_env(env, allow_none=False):
"""
Get memcache connection pool from the environment (which had been
previously set by the memcache middleware
@ -1559,7 +1559,7 @@ def cache_from_env(env):
:returns: swift.common.memcached.MemcacheRing from environment
"""
return item_from_env(env, 'swift.cache')
return item_from_env(env, 'swift.cache', allow_none)
def read_conf_dir(parser, conf_dir):
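The allow_none flag added above lets callers opt out of the error log when a missing key is expected. Sketched effect, assuming no memcache middleware has set swift.cache:

from swift.common.utils import cache_from_env

env = {'REQUEST_METHOD': 'GET'}
cache_from_env(env)                    # returns None and logs the ERROR line
cache_from_env(env, allow_none=True)   # returns None silently

The proxy server hunk later in this commit passes True for exactly this reason: memcache is optional there, and its absence should not spam the logs on every request.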

View File

@ -116,7 +116,7 @@ class ContainerAuditor(Daemon):
broker.get_info()
self.logger.increment('passes')
self.container_passes += 1
self.logger.debug(_('Audit passed for %s'), broker)
self.logger.debug('Audit passed for %s', broker)
except (Exception, Timeout):
self.logger.increment('failures')
self.container_failures += 1

View File

@ -279,6 +279,8 @@ class ObjectAuditor(Daemon):
if self.conf_zero_byte_fps and pid == zbf_pid and \
len(pids) > 1:
kwargs['device_dirs'] = override_devices
# sleep between ZBF scanner forks
self._sleep()
zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
pids.append(zbf_pid)
pids.remove(pid)
@ -296,8 +298,8 @@ class ObjectAuditor(Daemon):
while True:
try:
self.audit_loop(parent, zbo_fps, **kwargs)
except (Exception, Timeout):
self.logger.exception(_('ERROR auditing'))
except (Exception, Timeout) as err:
self.logger.exception(_('ERROR auditing: %s' % err))
self._sleep()
def run_once(self, *args, **kwargs):
@ -317,5 +319,5 @@ class ObjectAuditor(Daemon):
try:
self.audit_loop(parent, zbo_fps, override_devices=override_devices,
**kwargs)
except (Exception, Timeout):
self.logger.exception(_('ERROR auditing'))
except (Exception, Timeout) as err:
self.logger.exception(_('ERROR auditing: %s' % err))

View File

@ -26,7 +26,7 @@ classes. An example alternative implementation can be found in the
The `DiskFileManager` is a reference implementation specific class and is not
part of the backend API.
The remaining methods in this module are considered implementation specifc and
The remaining methods in this module are considered implementation specific and
are also not considered part of the backend API.
"""

View File

@ -100,7 +100,7 @@ class ObjectExpirer(Daemon):
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug(_('Run begin'))
self.logger.debug('Run begin')
containers, objects = \
self.swift.get_account_info(self.expiring_objects_account)
self.logger.info(_('Pass beginning; %s possible containers; %s '
@ -138,7 +138,7 @@ class ObjectExpirer(Daemon):
self.logger.exception(
_('Exception while deleting container %s %s') %
(container, str(err)))
self.logger.debug(_('Run end'))
self.logger.debug('Run end')
self.report(final=True)
except (Exception, Timeout):
self.logger.exception(_('Unhandled exception'))

View File

@ -553,6 +553,6 @@ class ObjectReplicator(Daemon):
dump_recon_cache({'object_replication_time': total,
'object_replication_last': time.time()},
self.rcache, self.logger)
self.logger.debug(_('Replication sleeping for %s seconds.'),
self.logger.debug('Replication sleeping for %s seconds.',
self.run_pause)
sleep(self.run_pause)

View File

@ -233,14 +233,14 @@ class ObjectUpdater(Daemon):
if success:
self.successes += 1
self.logger.increment('successes')
self.logger.debug(_('Update sent for %(obj)s %(path)s'),
self.logger.debug('Update sent for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
self.logger.increment("unlinks")
os.unlink(update_path)
else:
self.failures += 1
self.logger.increment('failures')
self.logger.debug(_('Update failed for %(obj)s %(path)s'),
self.logger.debug('Update failed for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
if new_successes:
update['successes'] = successes

View File

@ -141,11 +141,11 @@ class Application(object):
conf.get('max_large_object_get_time', '86400'))
value = conf.get('request_node_count', '2 * replicas').lower().split()
if len(value) == 1:
value = int(value[0])
self.request_node_count = lambda replicas: value
rnc_value = int(value[0])
self.request_node_count = lambda replicas: rnc_value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
value = int(value[0])
self.request_node_count = lambda replicas: value * replicas
rnc_value = int(value[0])
self.request_node_count = lambda replicas: rnc_value * replicas
else:
raise ValueError(
'Invalid request_node_count value: %r' % ''.join(value))
@ -167,11 +167,12 @@ class Application(object):
value = conf.get('write_affinity_node_count',
'2 * replicas').lower().split()
if len(value) == 1:
value = int(value[0])
self.write_affinity_node_count = lambda replicas: value
wanc_value = int(value[0])
self.write_affinity_node_count = lambda replicas: wanc_value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
value = int(value[0])
self.write_affinity_node_count = lambda replicas: value * replicas
wanc_value = int(value[0])
self.write_affinity_node_count = \
lambda replicas: wanc_value * replicas
else:
raise ValueError(
'Invalid write_affinity_node_count value: %r' % ''.join(value))
@ -267,7 +268,7 @@ class Application(object):
"""
try:
if self.memcache is None:
self.memcache = cache_from_env(env)
self.memcache = cache_from_env(env, True)
req = self.update_request(Request(env))
return self.handle_request(req)(env, start_response)
except UnicodeError:
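The value-to-rnc_value/wanc_value renames above fix a late-binding bug: a lambda closes over the variable itself, not the value it held, and value is rebound a few lines later when the next option is parsed. Reduced to its essence (illustrative names):

def make():
    value = 2
    request_node_count = lambda replicas: value * replicas
    value = 3   # later reuse rebinds the name the first lambda closed over
    write_affinity_node_count = lambda replicas: value * replicas
    return request_node_count, write_affinity_node_count

rnc, wanc = make()
print(rnc(3))    # 9, not the intended 6
print(wanc(3))   # 9

Giving each closure its own single-assignment name makes them independent.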

View File

@ -6,5 +6,5 @@ nosexcover
openstack.nose_plugin
nosehtmloutput
sphinx>=1.1.2,<1.2
mock>=0.8.0
mock>=1.0
python-swiftclient

View File

@ -53,12 +53,12 @@ def get_config(section_name=None, defaults=None):
:param section_name: the section to read (all sections if not defined)
:param defaults: an optional dictionary namespace of defaults
"""
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
config = {}
if defaults is not None:
config.update(defaults)
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
try:
config = readconf(config_file, section_name)
except SystemExit:

View File

@ -15,21 +15,51 @@
import os
import sys
import pickle
import socket
import locale
import eventlet
import eventlet.debug
import functools
import random
from time import sleep
from time import time, sleep
from httplib import HTTPException
from urlparse import urlparse
from nose import SkipTest
from swift.common import constraints
from swiftclient import get_auth, http_connection
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from test import get_config
from test.functional.swift_test_client import Connection
from test.functional.swift_test_client import Connection, ResponseError
# This has the side effect of mocking out the xattr module so that unit tests
# (and in this case, when in-process functional tests are called for) can run
# on file systems that don't support extended attributes.
from test.unit import debug_logger, FakeMemcache
from swift.common import constraints, utils, ring
from swift.common.wsgi import monkey_patch_mimetools
from swift.common.middleware import catch_errors, gatekeeper, healthcheck, \
proxy_logging, container_sync, bulk, tempurl, slo, dlo, ratelimit, \
tempauth, container_quotas, account_quotas
from swift.proxy import server as proxy_server
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server
import swift.proxy.controllers.obj
# In order to get the proper blocking behavior of sockets without using
# threads, where we can set an arbitrary timeout for some piece of code under
# test, we use eventlet with the standard socket library patched. We have to
# perform this setup at module import time, since all the socket module
# bindings in the swiftclient code will have been made by the time nose
# invokes the package or class setup methods.
eventlet.hubs.use_hub(utils.get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet.debug.hub_exceptions(False)
from swiftclient import get_auth, http_connection
config = {}
@ -47,25 +77,314 @@ swift_test_perm = ['', '', '']
skip, skip2, skip3 = False, False, False
orig_collate = ''
orig_hash_path_suff_pref = ('', '')
orig_swift_conf_name = None
in_process = False
_testdir = _test_servers = _test_sockets = _test_coros = None
class FakeMemcacheMiddleware(object):
"""
Caching middleware that fakes out caching in swift.
"""
def __init__(self, app, conf):
self.app = app
self.memcache = FakeMemcache()
def __call__(self, env, start_response):
env['swift.cache'] = self.memcache
return self.app(env, start_response)
def fake_memcache_filter_factory(conf):
def filter_app(app):
return FakeMemcacheMiddleware(app, conf)
return filter_app
# swift.conf contents for in-process functional test runs
functests_swift_conf = '''
[swift-hash]
swift_hash_path_suffix = inprocfunctests
swift_hash_path_prefix = inprocfunctests
[swift-constraints]
max_file_size = %d
''' % ((8 * 1024 * 1024) + 2) # 8 MB + 2
def in_process_setup(the_object_server=object_server):
print >>sys.stderr, 'IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS'
monkey_patch_mimetools()
global _testdir
_testdir = os.path.join(mkdtemp(), 'tmp_functional')
utils.mkdirs(_testdir)
rmtree(_testdir)
utils.mkdirs(os.path.join(_testdir, 'sda1'))
utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
utils.mkdirs(os.path.join(_testdir, 'sdb1'))
utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
swift_conf = os.path.join(_testdir, "swift.conf")
with open(swift_conf, "w") as scfp:
scfp.write(functests_swift_conf)
global orig_swift_conf_name
orig_swift_conf_name = utils.SWIFT_CONF_FILE
utils.SWIFT_CONF_FILE = swift_conf
constraints.reload_constraints()
global config
if constraints.SWIFT_CONSTRAINTS_LOADED:
# Use the swift constraints that are loaded for the test framework
# configuration
config.update(constraints.EFFECTIVE_CONSTRAINTS)
else:
# In-process swift constraints were not loaded; something is wrong
raise SkipTest
global orig_hash_path_suff_pref
orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
utils.validate_hash_conf()
# We create the proxy server listening socket to get its port number so
# that we can add it as the "auth_port" value for the functional test
# clients.
prolis = eventlet.listen(('localhost', 0))
# The following set of configuration values is used both for the
# functional test frame work and for the various proxy, account, container
# and object servers.
config.update({
# Values needed by the various in-process swift servers
'devices': _testdir,
'swift_dir': _testdir,
'mount_check': 'false',
'client_timeout': 4,
'allow_account_management': 'true',
'account_autocreate': 'true',
'allowed_headers':
'content-disposition, content-encoding, x-delete-at,'
' x-object-manifest, x-static-large-object',
'allow_versions': 'True',
# Below are values used by the functional test framework, as well as
# by the various in-process swift servers
'auth_host': '127.0.0.1',
'auth_port': str(prolis.getsockname()[1]),
'auth_ssl': 'no',
'auth_prefix': '/auth/',
# Primary functional test account (needs admin access to the
# account)
'account': 'test',
'username': 'tester',
'password': 'testing',
# User on a second account (needs admin access to the account)
'account2': 'test2',
'username2': 'tester2',
'password2': 'testing2',
# User on same account as first, but without admin access
'username3': 'tester3',
'password3': 'testing3',
# For tempauth middleware
'user_admin_admin': 'admin .admin .reseller_admin',
'user_test_tester': 'testing .admin',
'user_test2_tester2': 'testing2 .admin',
'user_test_tester3': 'testing3'
})
acc1lis = eventlet.listen(('localhost', 0))
acc2lis = eventlet.listen(('localhost', 0))
con1lis = eventlet.listen(('localhost', 0))
con2lis = eventlet.listen(('localhost', 0))
obj1lis = eventlet.listen(('localhost', 0))
obj2lis = eventlet.listen(('localhost', 0))
global _test_sockets
_test_sockets = \
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
with closing(GzipFile(account_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': acc1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': acc2lis.getsockname()[1]}], 30),
f)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
with closing(GzipFile(container_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': con1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]}], 30),
f)
object_ring_path = os.path.join(_testdir, 'object.ring.gz')
with closing(GzipFile(object_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]}], 30),
f)
eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
# Turn off logging requests by the underlying WSGI software.
eventlet.wsgi.HttpProtocol.log_request = lambda *a: None
logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
# Redirect logging other messages by the underlying WSGI software.
eventlet.wsgi.HttpProtocol.log_message = \
lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
# Default to only 4 seconds for in-process functional test runs
eventlet.wsgi.WRITE_TIMEOUT = 4
prosrv = proxy_server.Application(config, logger=debug_logger('proxy'))
acc1srv = account_server.AccountController(
config, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
config, logger=debug_logger('acct2'))
con1srv = container_server.ContainerController(
config, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
config, logger=debug_logger('cont2'))
obj1srv = the_object_server.ObjectController(
config, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
config, logger=debug_logger('obj2'))
global _test_servers
_test_servers = \
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv)
pipeline = [
catch_errors.filter_factory,
gatekeeper.filter_factory,
healthcheck.filter_factory,
proxy_logging.filter_factory,
fake_memcache_filter_factory,
container_sync.filter_factory,
bulk.filter_factory,
tempurl.filter_factory,
slo.filter_factory,
dlo.filter_factory,
ratelimit.filter_factory,
tempauth.filter_factory,
container_quotas.filter_factory,
account_quotas.filter_factory,
proxy_logging.filter_factory,
]
app = prosrv
import mock
for filter_factory in reversed(pipeline):
app_filter = filter_factory(config)
with mock.patch('swift.common.utils') as mock_utils:
mock_utils.get_logger.return_value = None
app = app_filter(app)
app.logger = prosrv.logger
nl = utils.NullLogger()
prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)
obj1spa = eventlet.spawn(eventlet.wsgi.server, obj1lis, obj1srv, nl)
obj2spa = eventlet.spawn(eventlet.wsgi.server, obj2lis, obj2srv, nl)
global _test_coros
_test_coros = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)
# Create accounts "test" and "test2"
def create_account(act):
ts = utils.normalize_timestamp(time())
partition, nodes = prosrv.account_ring.get_nodes(act)
for node in nodes:
# Note: we are just using the http_connect method in the object
# controller here to talk to the account server nodes.
conn = swift.proxy.controllers.obj.http_connect(
node['ip'], node['port'], node['device'], partition, 'PUT',
'/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
resp = conn.getresponse()
assert(resp.status == 201)
create_account('AUTH_test')
create_account('AUTH_test2')
cluster_info = {}
def get_cluster_info():
# The fallback constraints used for testing will come from the current
# effective constraints.
eff_constraints = dict(constraints.EFFECTIVE_CONSTRAINTS)
# We'll update those constraints based on what the /info API provides, if
# anything.
global cluster_info
try:
conn = Connection(config)
conn.authenticate()
cluster_info.update(conn.cluster_info())
except (ResponseError, socket.error):
# Failed to get cluster_information via /info API, so fall back on
# test.conf data
pass
else:
eff_constraints.update(cluster_info['swift'])
# Finally, we'll allow any constraint present in the swift-constraints
# section of test.conf to override everything. Note that only those
# constraints defined in the constraints module are converted to integers.
test_constraints = get_config('swift-constraints')
for k in constraints.DEFAULT_CONSTRAINTS:
try:
test_constraints[k] = int(test_constraints[k])
except KeyError:
pass
except ValueError:
print >>sys.stderr, "Invalid constraint value: %s = %s" % (
k, test_constraints[k])
eff_constraints.update(test_constraints)
# Just make it look like these constraints were loaded from a /info call,
# even if the /info call failed, or when they are overridden by values
# from the swift-constraints section of test.conf
cluster_info['swift'] = eff_constraints
def setup_package():
global config
config.update(get_config('func_test'))
for k in constraints.DEFAULT_CONSTRAINTS:
if k in config:
# prefer what's in test.conf
config[k] = int(config[k])
elif constraints.SWIFT_CONSTRAINTS_LOADED:
# swift.conf exists, so use what's defined there (or swift
# defaults) This normally happens when the test is running locally
# to the cluster as in a SAIO.
config[k] = constraints.EFFECTIVE_CONSTRAINTS[k]
in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS')
if in_process_env is not None:
use_in_process = utils.config_true_value(in_process_env)
else:
use_in_process = None
global in_process
if use_in_process:
# Explicitly set to True, so barrel on ahead with in-process
# functional test setup.
in_process = True
# NOTE: No attempt is made to read a local test.conf file.
else:
if use_in_process is None:
# Not explicitly set, default to using in-process functional tests
# if the test.conf file is not found, or does not provide a usable
# configuration.
config.update(get_config('func_test'))
if config:
in_process = False
else:
in_process = True
else:
# .functests don't know what the constraints of the tested cluster
# are, so the tests can't reliably pass or fail. Therefore, skip
# those tests.
config[k] = '%s constraint is not defined' % k
# Explicitly set to False, do not attempt to use in-process
# functional tests, be sure we attempt to read from local
# test.conf file.
in_process = False
config.update(get_config('func_test'))
if in_process:
in_process_setup()
global web_front_end
web_front_end = config.get('web_front_end', 'integral')
@ -161,11 +480,29 @@ def setup_package():
print >>sys.stderr, \
'SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS DUE TO NO CONFIG FOR THEM'
get_cluster_info()
def teardown_package():
global orig_collate
locale.setlocale(locale.LC_COLLATE, orig_collate)
global in_process
if in_process:
try:
for server in _test_coros:
server.kill()
except Exception:
pass
try:
rmtree(os.path.dirname(_testdir))
except Exception:
pass
utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX = \
orig_hash_path_suff_pref
utils.SWIFT_CONF_FILE = orig_swift_conf_name
constraints.reload_constraints()
class AuthError(Exception):
pass
@ -250,21 +587,14 @@ def check_response(conn):
def load_constraint(name):
global config
c = config[name]
if not isinstance(c, int):
raise SkipTest(c)
return c
cluster_info = {}
def get_cluster_info():
conn = Connection(config)
conn.authenticate()
global cluster_info
cluster_info = conn.cluster_info()
try:
c = cluster_info['swift'][name]
except KeyError:
raise SkipTest("Missing constraint: %s" % name)
if not isinstance(c, int):
raise SkipTest("Bad value, %r, for constraint: %s" % (c, name))
return c
def reset_acl():
@ -281,10 +611,9 @@ def reset_acl():
def requires_acls(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if skip:
global skip, cluster_info
if skip or not cluster_info:
raise SkipTest
if not cluster_info:
get_cluster_info()
# Determine whether this cluster has account ACLs; if not, skip test
if not cluster_info.get('tempauth', {}).get('account_acls'):
raise SkipTest
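The three pickle.dump(ring.RingData(...)) calls above build identical two-device test rings. For reference, what that structure encodes, using only the values visible above: the first argument holds one row per replica mapping partition index to device id, and a part shift of 30 leaves 32 - 30 = 2 bits of partition space, i.e. four partitions. Ports below are illustrative:

from swift.common.ring import RingData

rd = RingData(
    [[0, 1, 0, 1],    # replica 0: partitions 0..3 -> device ids
     [1, 0, 1, 0]],   # replica 1: always the other device
    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', 'port': 6010},
     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1', 'port': 6020}],
    30)   # part_shift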

View File

@ -103,7 +103,8 @@ class Connection(object):
def __init__(self, config):
for key in 'auth_host auth_port auth_ssl username password'.split():
if key not in config:
raise SkipTest
raise SkipTest(
"Missing required configuration parameter: %s" % key)
self.auth_host = config['auth_host']
self.auth_port = int(config['auth_port'])
@ -117,6 +118,7 @@ class Connection(object):
self.storage_host = None
self.storage_port = None
self.storage_url = None
self.conn_class = None
@ -195,7 +197,12 @@ class Connection(object):
port=self.storage_port)
#self.connection.set_debuglevel(3)
def make_path(self, path=[], cfg={}):
def make_path(self, path=None, cfg=None):
if path is None:
path = []
if cfg is None:
cfg = {}
if cfg.get('version_only_path'):
return '/' + self.storage_url.split('/')[1]
@ -208,7 +215,9 @@ class Connection(object):
else:
return self.storage_url
def make_headers(self, hdrs, cfg={}):
def make_headers(self, hdrs, cfg=None):
if cfg is None:
cfg = {}
headers = {}
if not cfg.get('no_auth_token'):
@ -218,8 +227,16 @@ class Connection(object):
headers.update(hdrs)
return headers
def make_request(self, method, path=[], data='', hdrs={}, parms={},
cfg={}):
def make_request(self, method, path=None, data='', hdrs=None, parms=None,
cfg=None):
if path is None:
path = []
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if not cfg.get('absolute_path'):
# Set absolute_path=True to make a request to exactly the given
# path, not storage path + given path. Useful for
@ -277,7 +294,14 @@ class Connection(object):
'Attempts: %s, Failures: %s' %
(request, len(fail_messages), fail_messages))
def put_start(self, path, hdrs={}, parms={}, cfg={}, chunked=False):
def put_start(self, path, hdrs=None, parms=None, cfg=None, chunked=False):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
self.http_connect()
path = self.make_path(path, cfg)
@ -322,7 +346,10 @@ class Base(object):
def __str__(self):
return self.name
def header_fields(self, required_fields, optional_fields=()):
def header_fields(self, required_fields, optional_fields=None):
if optional_fields is None:
optional_fields = ()
headers = dict(self.conn.response.getheaders())
ret = {}
@ -352,7 +379,11 @@ class Account(Base):
self.conn = conn
self.name = str(name)
def update_metadata(self, metadata={}, cfg={}):
def update_metadata(self, metadata=None, cfg=None):
if metadata is None:
metadata = {}
if cfg is None:
cfg = {}
headers = dict(("X-Account-Meta-%s" % k, v)
for k, v in metadata.items())
@ -365,7 +396,14 @@ class Account(Base):
def container(self, container_name):
return Container(self.conn, self.name, container_name)
def containers(self, hdrs={}, parms={}, cfg={}):
def containers(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
format_type = parms.get('format', None)
if format_type not in [None, 'json', 'xml']:
raise RequestError('Invalid format: %s' % format_type)
@ -411,7 +449,13 @@ class Account(Base):
return listing_empty(self.containers)
def info(self, hdrs={}, parms={}, cfg={}):
def info(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) != 204:
@ -435,11 +479,21 @@ class Container(Base):
self.account = str(account)
self.name = str(name)
def create(self, hdrs={}, parms={}, cfg={}):
def create(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
return self.conn.make_request('PUT', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) in (201, 202)
def delete(self, hdrs={}, parms={}):
def delete(self, hdrs=None, parms=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
return self.conn.make_request('DELETE', self.path, hdrs=hdrs,
parms=parms) == 204
@ -457,7 +511,13 @@ class Container(Base):
def file(self, file_name):
return File(self.conn, self.account, self.name, file_name)
def files(self, hdrs={}, parms={}, cfg={}):
def files(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
format_type = parms.get('format', None)
if format_type not in [None, 'json', 'xml']:
raise RequestError('Invalid format: %s' % format_type)
@ -507,7 +567,13 @@ class Container(Base):
raise ResponseError(self.conn.response, 'GET',
self.conn.make_path(self.path))
def info(self, hdrs={}, parms={}, cfg={}):
def info(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms, cfg=cfg)
@ -538,7 +604,9 @@ class File(Base):
self.size = None
self.metadata = {}
def make_headers(self, cfg={}):
def make_headers(self, cfg=None):
if cfg is None:
cfg = {}
headers = {}
if not cfg.get('no_content_length'):
if cfg.get('set_content_length'):
@ -575,7 +643,13 @@ class File(Base):
data.seek(0)
return checksum.hexdigest()
def copy(self, dest_cont, dest_file, hdrs={}, parms={}, cfg={}):
def copy(self, dest_cont, dest_file, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if 'destination' in cfg:
headers = {'Destination': cfg['destination']}
elif cfg.get('no_destination'):
@ -590,7 +664,11 @@ class File(Base):
return self.conn.make_request('COPY', self.path, hdrs=headers,
parms=parms) == 201
def delete(self, hdrs={}, parms={}):
def delete(self, hdrs=None, parms=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if self.conn.make_request('DELETE', self.path, hdrs=hdrs,
parms=parms) != 204:
@ -599,7 +677,13 @@ class File(Base):
return True
def info(self, hdrs={}, parms={}, cfg={}):
def info(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) != 200:
@ -615,7 +699,11 @@ class File(Base):
header_fields['etag'] = header_fields['etag'].strip('"')
return header_fields
def initialize(self, hdrs={}, parms={}):
def initialize(self, hdrs=None, parms=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if not self.name:
return False
@ -660,7 +748,11 @@ class File(Base):
return data
def read(self, size=-1, offset=0, hdrs=None, buffer=None,
callback=None, cfg={}, parms={}):
callback=None, cfg=None, parms=None):
if cfg is None:
cfg = {}
if parms is None:
parms = {}
if size > 0:
range_string = 'bytes=%d-%d' % (offset, (offset + size) - 1)
@ -717,7 +809,12 @@ class File(Base):
finally:
fobj.close()
def sync_metadata(self, metadata={}, cfg={}):
def sync_metadata(self, metadata=None, cfg=None):
if metadata is None:
metadata = {}
if cfg is None:
cfg = {}
self.metadata.update(metadata)
if self.metadata:
@ -737,7 +834,14 @@ class File(Base):
return True
def chunked_write(self, data=None, hdrs={}, parms={}, cfg={}):
def chunked_write(self, data=None, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if data is not None and self.chunked_write_in_progress:
self.conn.put_data(data, True)
elif data is not None:
@ -756,8 +860,15 @@ class File(Base):
else:
raise RuntimeError
def write(self, data='', hdrs={}, parms={}, callback=None, cfg={},
def write(self, data='', hdrs=None, parms=None, callback=None, cfg=None,
return_resp=False):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
block_size = 2 ** 20
if isinstance(data, file):
@ -808,7 +919,14 @@ class File(Base):
return True
def write_random(self, size=None, hdrs={}, parms={}, cfg={}):
def write_random(self, size=None, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
data = self.random_data(size)
if not self.write(data, hdrs=hdrs, parms=parms, cfg=cfg):
raise ResponseError(self.conn.response, 'PUT',
@ -816,7 +934,15 @@ class File(Base):
self.md5 = self.compute_md5sum(StringIO.StringIO(data))
return data
def write_random_return_resp(self, size=None, hdrs={}, parms={}, cfg={}):
def write_random_return_resp(self, size=None, hdrs=None, parms=None,
cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
data = self.random_data(size)
resp = self.write(data, hdrs=hdrs, parms=parms, cfg=cfg,
return_resp=True)

View File

@ -916,14 +916,6 @@ class TestObject(unittest.TestCase):
if tf.skip:
raise SkipTest
def is_strict_mode(url, token, parsed, conn):
conn.request('GET', '/info')
resp = conn.getresponse()
if resp.status // 100 == 2:
info = json.loads(resp.read())
return info.get('swift', {}).get('strict_cors_mode', False)
return False
def put_cors_cont(url, token, parsed, conn, orig):
conn.request(
'PUT', '%s/%s' % (parsed.path, self.container),
@ -946,8 +938,6 @@ class TestObject(unittest.TestCase):
'', headers)
return conn.getresponse()
strict_cors = retry(is_strict_mode)
resp = retry(put_cors_cont, '*')
resp.read()
self.assertEquals(resp.status // 100, 2)
@ -999,6 +989,11 @@ class TestObject(unittest.TestCase):
resp.read()
self.assertEquals(resp.status, 401)
try:
strict_cors = tf.cluster_info['swift']['strict_cors_mode']
except KeyError:
strict_cors = False
if strict_cors:
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'})

View File

@ -26,29 +26,15 @@ import unittest
import urllib
import uuid
import eventlet
import eventlet.debug
from nose import SkipTest
from swift.common.utils import get_hub
from swift.common.storage_policy import POLICY
from test.functional import normalized_urls, load_constraint
from test.functional import normalized_urls, load_constraint, cluster_info
import test.functional as tf
from test.functional.swift_test_client import Account, Connection, File, \
ResponseError
# In order to get the proper blocking behavior of sockets without using
# threads, where we can set an arbitrary timeout for some piece of code under
# test, we use eventlet with the standard socket library patched. We have to
# perform this setup at module import time, since all the socket module
# bindings in the swiftclient code will have been made by the time nose
# invokes the package or class setup methods.
eventlet.hubs.use_hub(get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet.debug.hub_exceptions(True)
class Utils(object):
@classmethod
def create_ascii_name(cls, length=None):
@ -1791,7 +1777,6 @@ class TestSloEnv(object):
cls.conn.authenticate()
if cls.slo_enabled is None:
cluster_info = cls.conn.cluster_info()
cls.slo_enabled = 'slo' in cluster_info
if not cls.slo_enabled:
return
@ -2204,7 +2189,6 @@ class TestTempurlEnv(object):
cls.conn.authenticate()
if cls.tempurl_enabled is None:
cluster_info = cls.conn.cluster_info()
cls.tempurl_enabled = 'tempurl' in cluster_info
if not cls.tempurl_enabled:
return
@ -2379,7 +2363,6 @@ class TestSloTempurlEnv(object):
cls.conn.authenticate()
if cls.enabled is None:
cluster_info = cls.conn.cluster_info()
cls.enabled = 'tempurl' in cluster_info and 'slo' in cluster_info
cls.tempurl_key = Utils.create_name()

View File

@ -71,7 +71,7 @@ class TestReplicatorFunctions(TestCase):
Class for testing replicators and replication servers.
With the default configuration, replication servers are not used.
For testing separete replication servers servers need to change
For testing separate replication servers, one needs to change the
ring files using the set_info command, or supply new ring files
with different port values.
"""

View File

@ -1,5 +1,5 @@
[func_test]
# sample config
# sample config for Swift with tempauth
auth_host = 127.0.0.1
auth_port = 8080
auth_ssl = no
@ -25,11 +25,32 @@ password2 = testing2
username3 = tester3
password3 = testing3
# If not defined here, the test runner will try to use the default constraint
# values as constructed by the constraints module, which will attempt to get
# them from /etc/swift/swift.conf, if possible. Then, if the swift.conf file
# isn't found, the test runner will skip tests that depend on those values.
# Note that the cluster must have "sane" values for the test suite to pass.
collate = C
[unit_test]
fake_syslog = False
[probe_test]
# check_server_timeout = 30
# validate_rsync = false
[swift-constraints]
# The functional test runner will try to use the constraint values provided in
# the swift-constraints section of test.conf.
#
# If a constraint value does not exist in that section, or the
# swift-constraints section itself does not exist, the constraint
# values found in the /info API call (if successful) will be used.
#
# If a constraint value cannot be found in the /info results, either
# because the /info API call failed or because the value is not present,
# the constraint values used will fall back to those loaded by the
# constraints module at import time (which will attempt to load
# /etc/swift/swift.conf; see the swift.common.constraints module for
# more information).
#
# Note that the cluster must have "sane" values for the test suite to pass
# (for some definition of sane).
#
#max_file_size = 5368709122
#max_meta_name_length = 128
#max_meta_value_length = 256
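The lookup order described in the comments above reduces to checking three sources in turn; a rough sketch of the resolution logic, with all names invented for illustration:

    def resolve_constraint(name, conf_values, info_values, module_defaults):
        # conf_values:     parsed from [swift-constraints] in test.conf
        # info_values:     from a successful GET /info (empty on failure)
        # module_defaults: loaded by swift.common.constraints at import time
        for source in (conf_values, info_values, module_defaults):
            if name in source:
                return source[name]
        raise KeyError('no value available for constraint %s' % name)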
@ -42,11 +63,6 @@ password3 = testing3
#max_account_name_length = 256
#max_container_name_length = 256
collate = C
[unit_test]
fake_syslog = False
[probe_test]
# check_server_timeout = 30
# validate_rsync = false
# Newer swift versions default to strict cors mode, but older ones were the
# opposite.
#strict_cors_mode = true

View File

@ -196,7 +196,12 @@ class TestReaper(unittest.TestCase):
fd.write('')
return devices_path
def init_reaper(self, conf={}, myips=['10.10.10.1'], fakelogger=False):
def init_reaper(self, conf=None, myips=None, fakelogger=False):
if conf is None:
conf = {}
if myips is None:
myips = ['10.10.10.1']
r = reaper.AccountReaper(conf)
r.stats_return_codes = {}
r.stats_containers_deleted = 0

View File

@ -23,6 +23,7 @@ from StringIO import StringIO
import tempfile
import time
import unittest
import urlparse
from eventlet.green import urllib2
@ -189,3 +190,71 @@ class TestRecon(unittest.TestCase):
else:
self.fail('Did not find expected substring %r '
'in output:\n%s' % (expected, output))
class TestReconCommands(unittest.TestCase):
def setUp(self):
self.recon = recon.SwiftRecon()
self.hosts = set([('127.0.0.1', 10000)])
def mock_responses(self, resps):
def fake_urlopen(url, timeout):
scheme, netloc, path, _, _, _ = urlparse.urlparse(url)
self.assertEqual(scheme, 'http') # can't handle anything else
self.assertTrue(path.startswith('/recon/'))
if ':' in netloc:
host, port = netloc.split(':', 1)
port = int(port)
else:
host = netloc
port = 80
response_body = resps[(host, port, path[7:])]
resp = mock.MagicMock()
resp.read = mock.MagicMock(side_effect=[response_body])
return resp
return mock.patch('eventlet.green.urllib2.urlopen', fake_urlopen)
def test_get_swiftconfmd5(self):
hosts = set([('10.1.1.1', 10000),
('10.2.2.2', 10000)])
cksum = '729cf900f2876dead617d088ece7fe8c'
responses = {
('10.1.1.1', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': cksum}),
('10.2.2.2', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': cksum})}
printed = []
with self.mock_responses(responses):
with mock.patch.object(self.recon, '_md5_file', lambda _: cksum):
self.recon.get_swiftconfmd5(hosts, printfn=printed.append)
output = '\n'.join(printed) + '\n'
self.assertTrue("2/2 hosts matched" in output)
def test_get_swiftconfmd5_mismatch(self):
hosts = set([('10.1.1.1', 10000),
('10.2.2.2', 10000)])
cksum = '29d5912b1fcfcc1066a7f51412769c1d'
responses = {
('10.1.1.1', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': cksum}),
('10.2.2.2', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': 'bogus'})}
printed = []
with self.mock_responses(responses):
with mock.patch.object(self.recon, '_md5_file', lambda _: cksum):
self.recon.get_swiftconfmd5(hosts, printfn=printed.append)
output = '\n'.join(printed) + '\n'
self.assertTrue("1/2 hosts matched" in output)
self.assertTrue("http://10.2.2.2:10000/recon/swiftconfmd5 (bogus) "
"doesn't match on disk md5sum" in output)

View File

@ -34,7 +34,9 @@ class FakeCache(object):
class FakeBadApp(object):
def __init__(self, headers=[]):
def __init__(self, headers=None):
if headers is None:
headers = []
self.headers = headers
def __call__(self, env, start_response):
@ -43,7 +45,9 @@ class FakeBadApp(object):
class FakeApp(object):
def __init__(self, headers=[]):
def __init__(self, headers=None):
if headers is None:
headers = []
self.headers = headers
def __call__(self, env, start_response):

View File

@ -20,7 +20,10 @@ from swift.common.middleware import gatekeeper
class FakeApp(object):
def __init__(self, headers={}):
def __init__(self, headers=None):
if headers is None:
headers = {}
self.headers = headers
self.req = None

View File

@ -188,7 +188,9 @@ class TestAuthorize(unittest.TestCase):
identity['HTTP_X_TENANT_ID'])
def _get_identity(self, tenant_id='tenant_id', tenant_name='tenant_name',
user_id='user_id', user_name='user_name', roles=[]):
user_id='user_id', user_name='user_name', roles=None):
if roles is None:
roles = []
if isinstance(roles, list):
roles = ','.join(roles)
return {'HTTP_X_USER_ID': user_id,

View File

@ -27,7 +27,10 @@ from swift.common.swob import Request, Response
class FakeApp(object):
def __init__(self, body=['FAKE APP'], response_str='200 OK'):
def __init__(self, body=None, response_str='200 OK'):
if body is None:
body = ['FAKE APP']
self.body = body
self.response_str = response_str
@ -48,7 +51,10 @@ class FakeAppThatExcepts(object):
class FakeAppNoContentLengthNoTransferEncoding(object):
def __init__(self, body=['FAKE APP']):
def __init__(self, body=None):
if body is None:
body = ['FAKE APP']
self.body = body
def __call__(self, env, start_response):

View File

@ -163,6 +163,9 @@ class FakeRecon(object):
def fake_ringmd5(self):
return {'ringmd5test': "1"}
def fake_swiftconfmd5(self):
return {'/etc/swift/swift.conf': "abcdef"}
def fake_quarantined(self):
return {'quarantinedtest': "1"}
@ -829,6 +832,7 @@ class TestReconMiddleware(unittest.TestCase):
self.app.get_unmounted = self.frecon.fake_unmounted
self.app.get_diskusage = self.frecon.fake_diskusage
self.app.get_ring_md5 = self.frecon.fake_ringmd5
self.app.get_swift_conf_md5 = self.frecon.fake_swiftconfmd5
self.app.get_quarantine_count = self.frecon.fake_quarantined
self.app.get_socket_info = self.frecon.fake_sockstat
@ -1020,6 +1024,13 @@ class TestReconMiddleware(unittest.TestCase):
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_ringmd5_resp)
def test_recon_get_swiftconfmd5(self):
get_swiftconfmd5_resp = ['{"/etc/swift/swift.conf": "abcdef"}']
req = Request.blank('/recon/swiftconfmd5',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_swiftconfmd5_resp)
def test_recon_get_quarantined(self):
get_quarantined_resp = ['{"quarantinedtest": "1"}']
req = Request.blank('/recon/quarantined',

View File

@ -1,4 +1,19 @@
# Copyright (c) 2011 OpenStack Foundation
# Copyright (c) 2011-2014 Greg Holt
# Copyright (c) 2012-2013 Peter Portante
# Copyright (c) 2012 Iryoung Jeong
# Copyright (c) 2012 Michael Barton
# Copyright (c) 2013 Alex Gaynor
# Copyright (c) 2013 Chuck Thier
# Copyright (c) 2013 David Goetz
# Copyright (c) 2013 Donagh McCabe
# Copyright (c) 2013 Greg Lange
# Copyright (c) 2013 John Dickinson
# Copyright (c) 2013 Kun Huang
# Copyright (c) 2013 Richard Hawkins
# Copyright (c) 2013 Samuel Merritt
# Copyright (c) 2013 Shri Javadekar
# Copyright (c) 2013 Tong Li
# Copyright (c) 2013 ZhiQiang Fan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -142,7 +157,7 @@ class TestTempURL(unittest.TestCase):
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['content-disposition'],
'attachment; filename="bob \\\"killer\\\".txt"')
'attachment; filename="bob %22killer%22.txt"')
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
@ -195,6 +210,24 @@ class TestTempURL(unittest.TestCase):
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_obj_odd_chars(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/a\r\nb'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['content-disposition'],
'attachment; filename="a%0D%0Ab"')
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
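The expected Content-Disposition values in this test and the one above reflect the switch from backslash-escaping to percent-encoding of quotes and control characters in the filename. The encoding can be reproduced with urllib.quote, though the exact safe set used by the middleware is an assumption here:

    from urllib import quote

    print(quote('bob "killer".txt', safe=' /'))  # bob %22killer%22.txt
    print(quote('a\r\nb', safe=' /'))            # a%0D%0Ab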
def test_obj_trailing_slash(self):
method = 'GET'
expires = int(time() + 86400)

View File

@ -312,7 +312,7 @@ class TestConstraintsConfig(unittest.TestCase):
# file is now deleted...
with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
constraints.reload_constraints()
# no constraints have been loaded from non-existant swift.conf
# no constraints have been loaded from non-existent swift.conf
self.assertFalse(constraints.SWIFT_CONSTRAINTS_LOADED)
# no constraints are in OVERRIDE
self.assertEquals([], constraints.OVERRIDE_CONSTRAINTS.keys())

View File

@ -1171,7 +1171,8 @@ class TestSimpleClient(unittest.TestCase):
urlopen.return_value.read.return_value = ''
req = urllib2.Request('http://127.0.0.1', method='GET')
request.side_effect = [urllib2.URLError(''), req]
sc = internal_client.SimpleClient(url='http://127.0.0.1', retries=1)
sc = internal_client.SimpleClient(url='http://127.0.0.1', retries=1,
token='token')
retval = sc.retry_request('GET')
self.assertEqual(request.call_count, 3)

View File

@ -147,7 +147,9 @@ class TestManagerModule(unittest.TestCase):
class MockOs(object):
WNOHANG = os.WNOHANG
def __init__(self, pid_map={}):
def __init__(self, pid_map=None):
if pid_map is None:
pid_map = {}
self.pid_map = {}
for pid, v in pid_map.items():
self.pid_map[pid] = (x for x in v)
@ -1030,7 +1032,7 @@ class TestServer(unittest.TestCase):
manager.WARNING_WAIT = 0.01
manager.time = MockTime()
with open(os.path.join(t, 'output'), 'w+') as f:
# acctually capture the read stdout (for prints)
# actually capture the read stdout (for prints)
sys.stdout = f
# test closing pipe in subprocess unblocks read
with MockProcess() as proc:

View File

@ -386,7 +386,7 @@ class TestRequest(unittest.TestCase):
else:
self.assert_(False, "invalid req_environ_property "
"didn't raise error!")
# non-existant attribute
# non-existent attribute
try:
swift.common.swob.Request.blank('/', params_cache={'a': 'b'})
except TypeError as e:

View File

@ -62,7 +62,14 @@ from test.unit import FakeLogger
class MockOs(object):
def __init__(self, pass_funcs=[], called_funcs=[], raise_funcs=[]):
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
@ -1910,6 +1917,38 @@ cluster_dfw1 = http://dfw1.host/v1/
exp_line,
utils.get_log_line(req, res, trans_time, additional_info))
def test_cache_from_env(self):
# should never get logging when swift.cache is found
env = {'swift.cache': 42}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(None, utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(None, utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(None, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
class TestSwiftInfo(unittest.TestCase):

View File

@ -447,7 +447,11 @@ class TestWSGI(unittest.TestCase):
def test_pre_auth_req(self):
class FakeReq(object):
@classmethod
def fake_blank(cls, path, environ={}, body='', headers={}):
def fake_blank(cls, path, environ=None, body='', headers=None):
if environ is None:
environ = {}
if headers is None:
headers = {}
self.assertEquals(environ['swift.authorize']('test'), None)
self.assertFalse('HTTP_X_TRANS_ID' in environ)
was_blank = Request.blank

View File

@ -253,6 +253,34 @@ class TestAuditor(unittest.TestCase):
self.assertEquals(auditor_worker.stats_buckets[1024], 2)
self.assertEquals(auditor_worker.stats_buckets[10240], 0)
# pick up some additional code coverage, large file
data = '0' * 1024 * 1024
etag = md5()
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
auditor_worker.audit_all_objects(device_dirs=['sda', 'sdb'])
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
self.assertEquals(auditor_worker.stats_buckets[1024], 3)
self.assertEquals(auditor_worker.stats_buckets[10240], 0)
self.assertEquals(auditor_worker.stats_buckets['OVER'], 1)
# pick up even more additional code coverage, misc paths
auditor_worker.log_time = -1
auditor_worker.stats_sizes = []
auditor_worker.audit_all_objects(device_dirs=['sda', 'sdb'])
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
self.assertEquals(auditor_worker.stats_buckets[1024], 3)
self.assertEquals(auditor_worker.stats_buckets[10240], 0)
self.assertEquals(auditor_worker.stats_buckets['OVER'], 1)
def test_object_run_once_no_sda(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
@ -432,6 +460,9 @@ class TestAuditor(unittest.TestCase):
class StopForever(Exception):
pass
class Bogus(Exception):
pass
class ObjectAuditorMock(object):
check_args = ()
check_kwargs = {}
@ -446,9 +477,16 @@ class TestAuditor(unittest.TestCase):
if 'zero_byte_fps' in kwargs:
self.check_device_dir = kwargs.get('device_dirs')
def mock_sleep(self):
def mock_sleep_stop(self):
raise StopForever('stop')
def mock_sleep_continue(self):
return
def mock_audit_loop_error(self, parent, zbo_fps,
override_devices=None, **kwargs):
raise Bogus('exception')
def mock_fork(self):
self.fork_called += 1
if self.master:
@ -467,22 +505,34 @@ class TestAuditor(unittest.TestCase):
mount_check='false',
zero_byte_files_per_second=89))
mocker = ObjectAuditorMock()
my_auditor.logger.exception = mock.MagicMock()
real_audit_loop = my_auditor.audit_loop
my_auditor.audit_loop = mocker.mock_audit_loop_error
my_auditor.run_audit = mocker.mock_run
my_auditor._sleep = mocker.mock_sleep
was_fork = os.fork
was_wait = os.wait
os.fork = mocker.mock_fork
os.wait = mocker.mock_wait
try:
os.fork = mocker.mock_fork
os.wait = mocker.mock_wait
my_auditor._sleep = mocker.mock_sleep_stop
my_auditor.run_once(zero_byte_fps=50)
my_auditor.logger.exception.assert_called_once_with(
'ERROR auditing: exception')
my_auditor.logger.exception.reset_mock()
self.assertRaises(StopForever, my_auditor.run_forever)
my_auditor.logger.exception.assert_called_once_with(
'ERROR auditing: exception')
my_auditor.audit_loop = real_audit_loop
self.assertRaises(StopForever,
my_auditor.run_forever, zero_byte_fps=50)
self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 50)
self.assertEquals(mocker.fork_called, 0)
self.assertRaises(SystemExit, my_auditor.run_forever)
self.assertRaises(SystemExit, my_auditor.run_once)
self.assertEquals(mocker.fork_called, 1)
self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 89)
self.assertEquals(mocker.check_device_dir, None)
self.assertEquals(mocker.check_device_dir, [])
self.assertEquals(mocker.check_args, ())
device_list = ['sd%s' % i for i in string.ascii_letters[2:10]]
@ -500,6 +550,17 @@ class TestAuditor(unittest.TestCase):
mocker.fork_called = 0
self.assertRaises(StopForever, my_auditor.run_forever)
# Fork is called 2 times since the zbf process is forked just
# once before self._sleep() is called and StopForever is raised
# Also wait is called just once before StopForever is raised
self.assertEquals(mocker.fork_called, 2)
self.assertEquals(mocker.wait_called, 1)
my_auditor._sleep = mocker.mock_sleep_continue
mocker.fork_called = 0
mocker.wait_called = 0
my_auditor.run_once()
# Fork is called 3 times since the zbf process is forked twice
self.assertEquals(mocker.fork_called, 3)
self.assertEquals(mocker.wait_called, 3)

View File

@ -41,7 +41,7 @@ class TestReceiver(unittest.TestCase):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
# Not sure why the test.unit stuff isn't taking effect here, so I'm
# reenforcing it.
# reinforcing it.
diskfile.getxattr = unit._getxattr
diskfile.setxattr = unit._setxattr
self.testdir = os.path.join(

View File

@ -2051,6 +2051,13 @@ class TestObjectController(unittest.TestCase):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 503)
def test_node_request_setting(self):
baseapp = proxy_server.Application({'request_node_count': '3'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEquals(baseapp.request_node_count(3), 3)
def test_iter_nodes(self):
with save_globals():
try:
@ -4654,17 +4661,17 @@ class TestContainerController(unittest.TestCase):
# return 200 and cache 200 for and container
test_status_map((200, 200, 404, 404), 200, 200, 200)
test_status_map((200, 200, 500, 404), 200, 200, 200)
# return 304 dont cache container
# return 304 don't cache container
test_status_map((200, 304, 500, 404), 304, None, 200)
# return 404 and cache 404 for container
test_status_map((200, 404, 404, 404), 404, 404, 200)
test_status_map((200, 404, 404, 500), 404, 404, 200)
# return 503, dont cache container
# return 503, don't cache container
test_status_map((200, 500, 500, 500), 503, None, 200)
self.assertFalse(self.app.account_autocreate)
# In all the following tests cache 404 for account
# return 404 (as account is not found) and dont cache container
# return 404 (as account is not found) and don't cache container
test_status_map((404, 404, 404), 404, None, 404)
# This should make no difference
self.app.account_autocreate = True
@ -5528,7 +5535,7 @@ class TestContainerController(unittest.TestCase):
req = Request.blank('/v1/a/c', method='PUT', headers={'': ''})
with save_globals():
new_connect = set_http_connect(200, # account existance check
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
@ -5537,7 +5544,7 @@ class TestContainerController(unittest.TestCase):
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existance check
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
@ -5553,7 +5560,7 @@ class TestContainerController(unittest.TestCase):
req = Request.blank('/v1/a/c', method='DELETE', headers={'': ''})
self.app.update_request(req)
with save_globals():
new_connect = set_http_connect(200, # account existance check
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
@ -5562,7 +5569,7 @@ class TestContainerController(unittest.TestCase):
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existance check
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
@ -6340,7 +6347,7 @@ class TestProxyObjectPerformance(unittest.TestCase):
self.assertEqual(total, self.obj_len)
end = time.time()
print "Run %02d took %07.03f" % (i, end - start)
print("Run %02d took %07.03f" % (i, end - start))
@patch_policies([StoragePolicy(0, 'migrated'),

View File

@ -35,6 +35,9 @@ commands =
flake8 swift test doc setup.py
flake8 --filename=swift* bin
[testenv:func]
commands = nosetests {posargs:test/functional}
[testenv:venv]
commands = {posargs}
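With the new func environment, "tox -e func" runs the functional suite against the default path from posargs, and arguments after "--" pass straight through, e.g. "tox -e func -- test/functional/tests.py" to run a single module.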