Merge from trunk (i18n)

This commit is contained in:
gholt 2010-12-22 12:33:06 -08:00
commit a7edf44b16
54 changed files with 411 additions and 327 deletions

2
bin/st
View File

@@ -44,6 +44,8 @@ except:
try:
from swift.common.bufferedhttp \
import BufferedHTTPConnection as HTTPConnection
import gettext
gettext.install('swift', unicode=1)
except:
from httplib import HTTPConnection

View File

@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit
@@ -23,6 +24,7 @@ from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
gettext.install('swift', unicode=1)
parser = OptionParser(usage='Usage: %prog [options] <account>')
parser.add_option('-s', '--suffix', dest='suffix',
default='', help='The suffix to use with the reseller prefix as the '

View File

@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit
@@ -23,6 +24,7 @@ from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
gettext.install('swift', unicode=1)
parser = OptionParser(
usage='Usage: %prog [options] <account> <user> <password>')
parser.add_option('-a', '--admin', dest='admin', action='store_true',

View File

@@ -18,6 +18,7 @@ try:
import simplejson as json
except ImportError:
import json
import gettext
import re
from datetime import datetime, timedelta
from optparse import OptionParser
@@ -28,6 +29,7 @@ from swift.common.client import Connection
if __name__ == '__main__':
gettext.install('swift', unicode=1)
parser = OptionParser(usage='Usage: %prog [options]')
parser.add_option('-t', '--token-life', dest='token_life',
default='86400', help='The expected life of tokens; token objects '

View File

@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit
@@ -23,6 +24,7 @@ from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
gettext.install('swift', unicode=1)
parser = OptionParser(usage='Usage: %prog [options] <account>')
parser.add_option('-A', '--admin-url', dest='admin_url',
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '

View File

@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit
@@ -23,6 +24,7 @@ from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
gettext.install('swift', unicode=1)
parser = OptionParser(usage='Usage: %prog [options] <account> <user>')
parser.add_option('-A', '--admin-url', dest='admin_url',
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '

View File

@@ -18,6 +18,7 @@ try:
import simplejson as json
except ImportError:
import json
import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit
@@ -27,6 +28,7 @@ from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
gettext.install('swift', unicode=1)
parser = OptionParser(usage='''
Usage: %prog [options] [account] [user]

View File

@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit
@@ -23,6 +24,7 @@ from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
gettext.install('swift', unicode=1)
parser = OptionParser(usage='Usage: %prog [options]')
parser.add_option('-A', '--admin-url', dest='admin_url',
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '

View File

@@ -18,6 +18,7 @@ try:
import simplejson as json
except ImportError:
import json
import gettext
from optparse import OptionParser
from os.path import basename
from sys import argv, exit
@@ -27,6 +28,7 @@ from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
gettext.install('swift', unicode=1)
parser = OptionParser(usage='''
Usage: %prog [options] <account> <service> <name> <value>

View File

@@ -20,6 +20,7 @@ from urllib import quote
from hashlib import md5
import getopt
from itertools import chain
import gettext
import simplejson
from eventlet.greenpool import GreenPool
@@ -324,6 +325,7 @@ class Auditor(object):
if __name__ == '__main__':
gettext.install('swift', unicode=1)
try:
optlist, args = getopt.getopt(sys.argv[1:], 'c:r:e:d')
except getopt.GetoptError, err:

View File

@@ -18,11 +18,13 @@ from ConfigParser import ConfigParser
from optparse import OptionParser
from os.path import basename
from sys import argv, exit
import gettext
from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
gettext.install('swift', unicode=1)
default_conf = '/etc/swift/auth-server.conf'
parser = OptionParser(
usage='Usage: %prog [options] <account> <user> <password>')

View File

@@ -17,10 +17,12 @@
from ConfigParser import ConfigParser
from optparse import OptionParser
from sys import argv, exit
import gettext
from swift.common.bufferedhttp import http_connect_raw as http_connect
if __name__ == '__main__':
gettext.install('swift', unicode=1)
default_conf = '/etc/swift/auth-server.conf'
parser = OptionParser(usage='Usage: %prog [options]')
parser.add_option('-c', '--conf', dest='conf', default=default_conf,

View File

@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gettext
from subprocess import call
from sys import argv, exit
@@ -21,6 +22,7 @@ import sqlite3
if __name__ == '__main__':
gettext.install('swift', unicode=1)
if len(argv) != 4 or argv[1] != '-K':
exit('Syntax: %s -K <super_admin_key> <path to auth.db>' % argv[0])
_, _, super_admin_key, auth_db = argv

View File

@@ -16,11 +16,13 @@
from os.path import basename
from sys import argv, exit
import gettext
from swift.common.db import get_db_connection
if __name__ == '__main__':
gettext.install('swift', unicode=1)
app = basename(argv[0])
if len(argv) != 3:
exit('''

View File

@@ -20,6 +20,7 @@ import sys
import signal
import uuid
from optparse import OptionParser
import gettext
from swift.common.bench import BenchController
from swift.common.utils import readconf, NamedLogger
@@ -55,6 +56,7 @@ SAIO_DEFAULTS = {
}
if __name__ == '__main__':
gettext.install('swift', unicode=1)
usage = "usage: %prog [OPTIONS] [CONF_FILE]"
usage += """\n\nConf file with SAIO defaults:

View File

@@ -20,6 +20,7 @@ import re
import subprocess
import sys
from ConfigParser import ConfigParser
import gettext
from swift.common.utils import get_logger
@@ -86,6 +87,7 @@ def comment_fstab(mount_point):
os.rename('/etc/fstab.new', '/etc/fstab')
if __name__ == '__main__':
gettext.install('swift', unicode=1)
c = ConfigParser()
try:
conf_path = sys.argv[1]

View File

@@ -16,11 +16,14 @@
import sys
import urllib
import gettext
from swift.common.ring import Ring
from swift.common.utils import hash_path
gettext.install('swift', unicode=1)
if len(sys.argv) < 3 or len(sys.argv) > 5:
print 'Usage: %s <ring.gz> <account> [<container>] [<object>]' \
% sys.argv[0]

View File

@@ -15,12 +15,14 @@
# limitations under the License.
import sys
import gettext
from swift.stats.log_uploader import LogUploader
from swift.common.utils import parse_options
from swift.common import utils
if __name__ == '__main__':
gettext.install('swift', unicode=1)
conf_file, options = parse_options(usage="Usage: %prog CONFIG_FILE PLUGIN")
try:
plugin = options['extra_args'][0]

View File

@@ -18,12 +18,14 @@ import sys
import cPickle as pickle
from datetime import datetime
from hashlib import md5
import gettext
from swift.common.ring import Ring
from swift.obj.server import read_metadata
from swift.common.utils import hash_path
if __name__ == '__main__':
gettext.install('swift', unicode=1)
if len(sys.argv) <= 1:
print "Usage: %s OBJECT_FILE" % sys.argv[0]
sys.exit(1)

View File

@@ -21,6 +21,7 @@ from os import mkdir
from os.path import basename, dirname, exists, join as pathjoin
from sys import argv, exit
from time import time
import gettext
from swift.common.ring import RingBuilder
@@ -174,6 +175,7 @@ swift-ring-builder <builder_file> set_min_part_hours <hours>
if __name__ == '__main__':
gettext.install('swift', unicode=1)
if len(argv) < 2:
print '''
swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s

View File

@@ -21,6 +21,7 @@ from optparse import OptionParser
from sys import exit, argv
from time import time
from uuid import uuid4
import gettext
from eventlet import GreenPool, patcher, sleep
from eventlet.pools import Pool
@@ -75,6 +76,7 @@ def report(success):
if __name__ == '__main__':
global begun, created, item_type, next_report, need_to_create, retries_done
gettext.install('swift', unicode=1)
patcher.monkey_patch()
parser = OptionParser()

View File

@@ -23,6 +23,7 @@ from optparse import OptionParser
from sys import argv, exit, stderr
from time import time
from uuid import uuid4
import gettext
from eventlet import GreenPool, hubs, patcher, sleep, Timeout
from eventlet.pools import Pool
@@ -746,6 +747,7 @@ def object_delete_report(coropool, connpool, options):
if __name__ == '__main__':
gettext.install('swift', unicode=1)
patcher.monkey_patch()
hubs.get_hub().debug_exceptions = False

View File

@@ -49,11 +49,11 @@ class AccountAuditor(Daemon):
for path, device, partition in all_locs:
self.account_audit(path)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Account audits: %s passed audit, '
'%s failed audit' % (time.ctime(reported),
self.account_passes,
self.account_failures))
self.logger.info(_('Since %(time)s: Account audits: '
'%(passed)s passed audit, %(failed)s failed audit'),
{'time': time.ctime(reported),
'passed': self.account_passes,
'failed': self.account_failures})
reported = time.time()
self.account_passes = 0
self.account_failures = 0
@@ -72,17 +72,17 @@ class AccountAuditor(Daemon):
for path, device, partition in all_locs:
self.account_audit(path)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Account audits: %s passed audit, '
'%s failed audit' % (time.ctime(reported),
self.account_passes,
self.account_failures))
self.logger.info(_('Since %(time)s: Account audits: '
'%(passed)s passed audit, %(failed)s failed audit'),
{'time': time.ctime(reported),
'passed': self.account_passes,
'failed': self.account_failures})
reported = time.time()
self.account_passes = 0
self.account_failures = 0
elapsed = time.time() - begin
self.logger.info(
'Account audit "once" mode completed: %.02fs' % elapsed)
'Account audit "once" mode completed: %.02fs', elapsed)
def account_audit(self, path):
"""
@@ -97,8 +97,8 @@ class AccountAuditor(Daemon):
if not broker.is_deleted():
info = broker.get_info()
self.account_passes += 1
self.logger.debug('Audit passed for %s' % broker.db_file)
self.logger.debug(_('Audit passed for %s') % broker.db_file)
except Exception:
self.account_failures += 1
self.logger.exception('ERROR Could not get account info %s' %
self.logger.exception(_('ERROR Could not get account info %s'),
(broker.db_file))

View File

@@ -77,7 +77,7 @@ class AccountReaper(Daemon):
""" The account :class:`swift.common.ring.Ring` for the cluster. """
if not self.account_ring:
self.logger.debug(
'Loading account ring from %s' % self.account_ring_path)
_('Loading account ring from %s'), self.account_ring_path)
self.account_ring = Ring(self.account_ring_path)
return self.account_ring
@@ -85,7 +85,7 @@ class AccountReaper(Daemon):
""" The container :class:`swift.common.ring.Ring` for the cluster. """
if not self.container_ring:
self.logger.debug(
'Loading container ring from %s' % self.container_ring_path)
_('Loading container ring from %s'), self.container_ring_path)
self.container_ring = Ring(self.container_ring_path)
return self.container_ring
@@ -93,7 +93,7 @@ class AccountReaper(Daemon):
""" The object :class:`swift.common.ring.Ring` for the cluster. """
if not self.object_ring:
self.logger.debug(
'Loading object ring from %s' % self.object_ring_path)
_('Loading object ring from %s'), self.object_ring_path)
self.object_ring = Ring(self.object_ring_path)
return self.object_ring
@@ -103,7 +103,7 @@
This repeatedly calls :func:`reap_once` no quicker than the
configuration interval.
"""
self.logger.debug('Daemon started.')
self.logger.debug(_('Daemon started.'))
sleep(random.random() * self.interval)
while True:
begin = time()
@@ -119,17 +119,17 @@
repeatedly by :func:`run_forever`. This will call :func:`reap_device`
once for each device on the server.
"""
self.logger.debug('Begin devices pass: %s' % self.devices)
self.logger.debug(_('Begin devices pass: %s'), self.devices)
begin = time()
for device in os.listdir(self.devices):
if self.mount_check and \
not os.path.ismount(os.path.join(self.devices, device)):
self.logger.debug(
'Skipping %s as it is not mounted' % device)
_('Skipping %s as it is not mounted'), device)
continue
self.reap_device(device)
elapsed = time() - begin
self.logger.info('Devices pass completed: %.02fs' % elapsed)
self.logger.info(_('Devices pass completed: %.02fs'), elapsed)
def reap_device(self, device):
"""
@@ -212,7 +212,7 @@ class AccountReaper(Daemon):
"""
begin = time()
account = broker.get_info()['account']
self.logger.info('Beginning pass on account %s' % account)
self.logger.info(_('Beginning pass on account %s'), account)
self.stats_return_codes = {}
self.stats_containers_deleted = 0
self.stats_objects_deleted = 0
@@ -235,12 +235,12 @@
self.container_pool.waitall()
except Exception:
self.logger.exception(
'Exception with containers for account %s' % account)
_('Exception with containers for account %s'), account)
marker = containers[-1][0]
log = 'Completed pass on account %s' % account
except Exception:
self.logger.exception(
'Exception with account %s' % account)
_('Exception with account %s'), account)
log = 'Incomplete pass on account %s' % account
if self.stats_containers_deleted:
log += ', %s containers deleted' % self.stats_containers_deleted
@@ -317,7 +317,7 @@ class AccountReaper(Daemon):
except ClientException, err:
if self.logger.getEffectiveLevel() <= DEBUG:
self.logger.exception(
'Exception with %(ip)s:%(port)s/%(device)s' % node)
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
self.stats_return_codes[err.http_status / 100] = \
self.stats_return_codes.get(err.http_status / 100, 0) + 1
if not objects:
@@ -330,8 +330,9 @@
nodes, obj['name'])
pool.waitall()
except Exception:
self.logger.exception('Exception with objects for container '
'%s for account %s' % (container, account))
self.logger.exception(_('Exception with objects for container '
'%(container)s for account %(account)s'),
{'container': container, 'account': account})
marker = objects[-1]['name']
successes = 0
failures = 0
@@ -351,7 +352,7 @@
except ClientException, err:
if self.logger.getEffectiveLevel() <= DEBUG:
self.logger.exception(
'Exception with %(ip)s:%(port)s/%(device)s' % node)
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
failures += 1
self.stats_return_codes[err.http_status / 100] = \
self.stats_return_codes.get(err.http_status / 100, 0) + 1
@@ -402,7 +403,7 @@
except ClientException, err:
if self.logger.getEffectiveLevel() <= DEBUG:
self.logger.exception(
'Exception with %(ip)s:%(port)s/%(device)s' % node)
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
failures += 1
self.stats_return_codes[err.http_status / 100] = \
self.stats_return_codes.get(err.http_status / 100, 0) + 1

View File

@@ -18,15 +18,14 @@ from __future__ import with_statement
import os
import time
import traceback
from urllib import unquote
from xml.sax import saxutils
from webob import Request, Response
from webob.exc import HTTPAccepted, HTTPBadRequest, \
HTTPCreated, HTTPForbidden, HTTPInternalServerError, \
HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, HTTPPreconditionFailed
import simplejson
from xml.sax import saxutils
from swift.common.db import AccountBroker
from swift.common.utils import get_logger, get_param, hash_path, \
@@ -297,6 +296,7 @@ class AccountController(object):
def __call__(self, env, start_response):
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
if not check_utf8(req.path_info):
res = HTTPPreconditionFailed(body='Invalid UTF8')
else:
@@ -306,10 +306,8 @@
else:
res = HTTPMethodNotAllowed()
except:
self.logger.exception('ERROR __call__ error with %s %s '
'transaction %s' % (env.get('REQUEST_METHOD', '-'),
env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
'-')))
self.logger.exception(_('ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = '%.4f' % (time.time() - start_time)
additional_info = ''

View File

@@ -82,10 +82,10 @@ class Bench(object):
def _log_status(self, title):
total = time.time() - self.beginbeat
self.logger.info('%s %s [%s failures], %.01f/s' % (
self.complete, title, self.failures,
(float(self.complete) / total),
))
self.logger.info(_('%(complete)s %(title)s [%(fail)s failures], '
'%(rate).01f/s'),
{'title': title, 'complete': self.complete, 'fail': self.failures,
'rate': (float(self.complete) / total)})
@contextmanager
def connection(self):
@@ -94,7 +94,7 @@ class Bench(object):
try:
yield hc
except CannotSendRequest:
self.logger.info("CannotSendRequest. Skipping...")
self.logger.info(_("CannotSendRequest. Skipping..."))
try:
hc.close()
except:

View File

@@ -82,15 +82,9 @@ class BufferedHTTPConnection(HTTPConnection):
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
self._method = method
self._path = url
self._txn_id = '-'
return HTTPConnection.putrequest(self, method, url, skip_host,
skip_accept_encoding)
def putheader(self, header, value):
if header.lower() == 'x-cf-trans-id':
self._txn_id = value
return HTTPConnection.putheader(self, header, value)
def getexpect(self):
response = BufferedHTTPResponse(self.sock, strict=self.strict,
method=self._method)
@@ -99,9 +93,10 @@ class BufferedHTTPConnection(HTTPConnection):
def getresponse(self):
response = HTTPConnection.getresponse(self)
logging.debug("HTTP PERF: %.5f seconds to %s %s:%s %s (%s)" %
(time.time() - self._connected_time, self._method, self.host,
self.port, self._path, self._txn_id))
logging.debug(_("HTTP PERF: %(time).5f seconds to %(method)s "
"%(host)s:%(port)s %(path)s)"),
{'time': time.time() - self._connected_time, 'method': self._method,
'host': self.host, 'port': self.port, 'path': self._path})
return response

View File

@@ -16,7 +16,9 @@
import os
import sys
import signal
import gettext
from re import sub
from swift.common import utils
@@ -40,6 +42,7 @@ class Daemon(object):
utils.validate_configuration()
utils.capture_stdio(self.logger, **kwargs)
utils.drop_privileges(self.conf.get('user', 'swift'))
gettext.install('swift', unicode=1)
def kill_children(*args):
signal.signal(signal.SIGTERM, signal.SIG_IGN)

View File

@@ -295,7 +295,7 @@ class DatabaseBroker(object):
self.conn = conn
except: # pragma: no cover
logging.exception(
'Broker error trying to rollback locked connection')
_('Broker error trying to rollback locked connection'))
conn.close()
def newid(self, remote_id):
@@ -750,8 +750,8 @@ class ContainerBroker(DatabaseBroker):
'deleted': deleted})
except:
self.logger.exception(
'Invalid pending entry %s: %s'
% (self.pending_file, entry))
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:
@@ -1217,8 +1217,8 @@ class AccountBroker(DatabaseBroker):
'deleted': deleted})
except:
self.logger.exception(
'Invalid pending entry %s: %s'
% (self.pending_file, entry))
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:

View File

@@ -81,7 +81,7 @@ class ReplConnection(BufferedHTTPConnection):
return response
except:
self.logger.exception(
'ERROR reading HTTP response from %s' % self.node)
_('ERROR reading HTTP response from %s'), self.node)
return None
@@ -120,12 +120,14 @@ class Replicator(Daemon):
def _report_stats(self):
"""Report the current stats to the logs."""
self.logger.info(
'Attempted to replicate %d dbs in %.5f seconds (%.5f/s)'
% (self.stats['attempted'], time.time() - self.stats['start'],
self.stats['attempted'] /
(time.time() - self.stats['start'] + 0.0000001)))
self.logger.info('Removed %(remove)d dbs' % self.stats)
self.logger.info('%(success)s successes, %(failure)s failures'
_('Attempted to replicate %(count)d dbs in %(time).5f seconds '
'(%(rate).5f/s)'),
{'count': self.stats['attempted'],
'time': time.time() - self.stats['start'],
'rate': self.stats['attempted'] /
(time.time() - self.stats['start'] + 0.0000001)})
self.logger.info(_('Removed %(remove)d dbs') % self.stats)
self.logger.info(_('%(success)s successes, %(failure)s failures')
% self.stats)
self.logger.info(' '.join(['%s:%s' % item for item in
self.stats.items() if item[0] in
@@ -150,8 +152,8 @@ class Replicator(Daemon):
proc = subprocess.Popen(popen_args)
proc.communicate()
if proc.returncode != 0:
self.logger.error('ERROR rsync failed with %s: %s' %
(proc.returncode, popen_args))
self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'),
{'code': proc.returncode, 'args': popen_args})
return proc.returncode == 0
def _rsync_db(self, broker, device, http, local_id,
@@ -200,7 +202,7 @@
:returns: boolean indicating completion and success
"""
self.stats['diff'] += 1
self.logger.debug('Syncing chunks with %s', http.host)
self.logger.debug(_('Syncing chunks with %s'), http.host)
sync_table = broker.get_syncs()
objects = broker.get_items_since(point, self.per_diff)
while len(objects):
@@ -208,8 +210,9 @@
response = http.replicate('merge_items', objects, local_id)
if not response or response.status >= 300 or response.status < 200:
if response:
self.logger.error('ERROR Bad response %s from %s' %
(response.status, http.host))
self.logger.error(_('ERROR Bad response %(status)s from '
'%(host)s'),
{'status': response.status, 'host': http.host})
return False
point = objects[-1]['ROWID']
objects = broker.get_items_since(point, self.per_diff)
@@ -272,7 +275,7 @@
http = self._http_connect(node, partition, broker.db_file)
if not http:
self.logger.error(
'ERROR Unable to connect to remote server: %s' % node)
_('ERROR Unable to connect to remote server: %s'), node)
return False
with Timeout(self.node_timeout):
response = http.replicate('sync', info['max_row'], info['hash'],
@@ -310,7 +313,7 @@
:param object_file: DB file name to be replicated
:param node_id: node id of the node to be replicated to
"""
self.logger.debug('Replicating db %s' % object_file)
self.logger.debug(_('Replicating db %s'), object_file)
self.stats['attempted'] += 1
try:
broker = self.brokerclass(object_file, pending_timeout=30)
@@ -319,10 +322,10 @@
info = broker.get_replication_info()
except Exception, e:
if 'no such table' in str(e):
self.logger.error('Quarantining DB %s' % object_file)
self.logger.error(_('Quarantining DB %s'), object_file)
quarantine_db(broker.db_file, broker.db_type)
else:
self.logger.exception('ERROR reading db %s' % object_file)
self.logger.exception(_('ERROR reading db %s'), object_file)
self.stats['failure'] += 1
return
# The db is considered deleted if the delete_timestamp value is greater
@@ -355,10 +358,10 @@
success = self._repl_to_node(node, broker, partition, info)
except DriveNotMounted:
repl_nodes.append(more_nodes.next())
self.logger.error('ERROR Remote drive not mounted %s' % node)
self.logger.error(_('ERROR Remote drive not mounted %s'), node)
except:
self.logger.exception('ERROR syncing %s with node %s' %
(object_file, node))
self.logger.exception(_('ERROR syncing %(file)s with node'
' %(node)s'), {'file': object_file, 'node': node})
self.stats['success' if success else 'failure'] += 1
responses.append(success)
if not shouldbehere and all(responses):
@@ -399,14 +402,14 @@
dirs = []
ips = whataremyips()
if not ips:
self.logger.error('ERROR Failed to get my own IPs?')
self.logger.error(_('ERROR Failed to get my own IPs?'))
return
for node in self.ring.devs:
if node and node['ip'] in ips and node['port'] == self.port:
if self.mount_check and not os.path.ismount(
os.path.join(self.root, node['device'])):
self.logger.warn(
'Skipping %(device)s as it is not mounted' % node)
_('Skipping %(device)s as it is not mounted') % node)
continue
unlink_older_than(
os.path.join(self.root, node['device'], 'tmp'),
@@ -414,12 +417,12 @@
datadir = os.path.join(self.root, node['device'], self.datadir)
if os.path.isdir(datadir):
dirs.append((datadir, node['id']))
self.logger.info('Beginning replication run')
self.logger.info(_('Beginning replication run'))
for part, object_file, node_id in self.roundrobin_datadirs(dirs):
self.cpool.spawn_n(
self._replicate_object, part, object_file, node_id)
self.cpool.waitall()
self.logger.info('Replication run OVER')
self.logger.info(_('Replication run OVER'))
self._report_stats()
def run_forever(self):
@@ -430,7 +433,7 @@
try:
self.run_once()
except:
self.logger.exception('ERROR trying to replicate')
self.logger.exception(_('ERROR trying to replicate'))
sleep(self.run_pause)
@@ -473,7 +476,7 @@ class ReplicatorRpc(object):
except Exception, e:
if 'no such table' in str(e):
# TODO(unknown): find a real logger
print "Quarantining DB %s" % broker.db_file
print _("Quarantining DB %s") % broker.db_file
quarantine_db(broker.db_file, broker.db_type)
return HTTPNotFound()
raise

View File

@@ -27,7 +27,6 @@ import time
from bisect import bisect
from hashlib import md5
CONN_TIMEOUT = 0.3
IO_TIMEOUT = 2.0
PICKLE_FLAG = 1
@@ -67,9 +66,11 @@ class MemcacheRing(object):
def _exception_occurred(self, server, e, action='talking'):
if isinstance(e, socket.timeout):
logging.error("Timeout %s to memcached: %s" % (action, server))
logging.error(_("Timeout %(action)s to memcached: %(server)s"),
{'action': action, 'server': server})
else:
logging.exception("Error %s to memcached: %s" % (action, server))
logging.exception(_("Error %(action)s to memcached: %(server)s"),
{'action': action, 'server': server})
now = time.time()
self._errors[server].append(time.time())
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
@@ -77,7 +78,7 @@
if err > now - ERROR_LIMIT_TIME]
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
self._error_limited[server] = now + ERROR_LIMIT_DURATION
logging.error('Error limiting server %s' % server)
logging.error(_('Error limiting server %s'), server)
def _get_conns(self, key):
"""

View File

@@ -32,7 +32,7 @@ class CatchErrorMiddleware(object):
try:
return self.app(env, start_response)
except Exception, err:
self.logger.exception('Error: %s' % err)
self.logger.exception(_('Error: %s'), err)
resp = HTTPServerError(request=Request(env),
body='An error occurred',
content_type='text/plain')

View File

@@ -167,7 +167,7 @@ class RateLimitMiddleware(object):
:param obj_name: object name from path
'''
if account_name in self.ratelimit_blacklist:
self.logger.error('Returning 497 because of blacklisting')
self.logger.error(_('Returning 497 because of blacklisting'))
return Response(status='497 Blacklisted',
body='Your account has been blacklisted', request=req)
if account_name in self.ratelimit_whitelist:
@@ -181,14 +181,15 @@
need_to_sleep = self._get_sleep_time(key, max_rate)
if self.log_sleep_time_seconds and \
need_to_sleep > self.log_sleep_time_seconds:
self.logger.info("Ratelimit sleep log: %s for %s/%s/%s" % (
need_to_sleep, account_name,
container_name, obj_name))
self.logger.info(_("Ratelimit sleep log: %(sleep)s for "
"%(account)s/%(container)s/%(object)s"),
{'sleep': need_to_sleep, 'account': account_name,
'container': container_name, 'object': obj_name})
if need_to_sleep > 0:
eventlet.sleep(need_to_sleep)
except MaxSleepTimeHit, e:
self.logger.error('Returning 498 because of ops ' + \
'rate limiting (Max Sleep) %s' % e)
self.logger.error(_('Returning 498 because of ops rate '
'limiting (Max Sleep) %s') % str(e))
error_resp = Response(status='498 Rate Limited',
body='Slow down', request=req)
return error_resp
@@ -207,7 +208,7 @@
self.memcache_client = cache_from_env(env)
if not self.memcache_client:
self.logger.warning(
'Warning: Cannot ratelimit without a memcached client')
_('Warning: Cannot ratelimit without a memcached client'))
return self.app(env, start_response)
try:
version, account, container, obj = split_path(req.path, 1, 4, True)

View File

@@ -35,7 +35,6 @@ from optparse import OptionParser
from tempfile import mkstemp
import cPickle as pickle
import eventlet
from eventlet import greenio, GreenPool, sleep, Timeout, listen
from eventlet.green import socket, subprocess, ssl, thread, threading
@@ -85,8 +84,8 @@ def load_libc_function(func_name):
libc = ctypes.CDLL(ctypes.util.find_library('c'))
return getattr(libc, func_name)
except AttributeError:
logging.warn("Unable to locate %s in libc. Leaving as a no-op."
% func_name)
logging.warn(_("Unable to locate %s in libc. Leaving as a no-op."),
func_name)
def noop_libc_function(*args):
return 0
@@ -252,12 +251,12 @@ class LoggerFileObject(object):
value = value.strip()
if value:
if 'Connection reset by peer' in value:
self.logger.error('STDOUT: Connection reset by peer')
self.logger.error(_('STDOUT: Connection reset by peer'))
else:
self.logger.error('STDOUT: %s' % value)
self.logger.error(_('STDOUT: %s'), value)
def writelines(self, values):
self.logger.error('STDOUT: %s' % '#012'.join(values))
self.logger.error(_('STDOUT: %s'), '#012'.join(values))
def close(self):
pass
@@ -284,23 +283,24 @@ class LoggerFileObject(object):
return self
class NamedLogger(object):
class LogAdapter(object):
"""Cheesy version of the LoggerAdapter available in Python 3"""
def __init__(self, logger, server):
def __init__(self, logger):
self.logger = logger
self.server = server
for proxied_method in ('debug', 'info', 'log', 'warn', 'warning',
'error', 'critical'):
setattr(self, proxied_method,
self._proxy(getattr(logger, proxied_method)))
self._txn_id = threading.local()
for proxied_method in ('debug', 'log', 'warn', 'warning', 'error',
'critical', 'info'):
setattr(self, proxied_method, getattr(logger, proxied_method))
def _proxy(self, logger_meth):
@property
def txn_id(self):
if hasattr(self._txn_id, 'value'):
return self._txn_id.value
def _inner_proxy(msg, *args, **kwargs):
msg = '%s %s' % (self.server, msg)
logger_meth(msg, *args, **kwargs)
return _inner_proxy
@txn_id.setter
def txn_id(self, value):
self._txn_id.value = value
def getEffectiveLevel(self):
return self.logger.getEffectiveLevel()
@@ -330,7 +330,22 @@ class NamedLogger(object):
emsg += ' %s' % exc.msg
else:
call = self.logger.exception
call('%s %s: %s' % (self.server, msg, emsg), *args)
call('%s: %s' % (msg, emsg), *args)
class NamedFormatter(logging.Formatter):
def __init__(self, server, logger):
logging.Formatter.__init__(self)
self.server = server
self.logger = logger
def format(self, record):
msg = logging.Formatter.format(self, record)
if self.logger.txn_id and (record.levelno != logging.INFO or
self.logger.txn_id not in msg):
return '%s %s (txn: %s)' % (self.server, msg, self.logger.txn_id)
else:
return '%s %s' % (self.server, msg)
def get_logger(conf, name=None, log_to_console=False):
@ -359,7 +374,8 @@ def get_logger(conf, name=None, log_to_console=False):
root_logger.addHandler(get_logger.console)
if conf is None:
root_logger.setLevel(logging.INFO)
return NamedLogger(root_logger, name)
adapted_logger = LogAdapter(root_logger)
return adapted_logger
if name is None:
name = conf.get('log_name', 'swift')
get_logger.handler = SysLogHandler(address='/dev/log',
@ -369,7 +385,9 @@ def get_logger(conf, name=None, log_to_console=False):
root_logger.addHandler(get_logger.handler)
root_logger.setLevel(
getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
return NamedLogger(root_logger, name)
adapted_logger = LogAdapter(root_logger)
get_logger.handler.setFormatter(NamedFormatter(name, adapted_logger))
return adapted_logger
def drop_privileges(user):
@ -444,12 +462,12 @@ def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None):
if not args:
parser.print_usage()
print "Error: missing config file argument"
print _("Error: missing config file argument")
sys.exit(1)
config = os.path.abspath(args.pop(0))
if not os.path.exists(config):
parser.print_usage()
print "Error: unable to locate %s" % config
print _("Error: unable to locate %s") % config
sys.exit(1)
extra_args = []
@ -672,14 +690,14 @@ def readconf(conf, section_name=None, log_name=None, defaults=None):
defaults = {}
c = ConfigParser(defaults)
if not c.read(conf):
print "Unable to read config file %s" % conf
print _("Unable to read config file %s") % conf
sys.exit(1)
if section_name:
if c.has_section(section_name):
conf = dict(c.items(section_name))
else:
print "Unable to find %s config section in %s" % (section_name,
conf)
print _("Unable to find %s config section in %s") % \
(section_name, conf)
sys.exit(1)
if "log_name" not in conf:
if log_name is not None:
@ -731,7 +749,7 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None):
os.path.ismount(os.path.join(devices, device)):
if logger:
logger.debug(
'Skipping %s as it is not mounted' % device)
_('Skipping %s as it is not mounted'), device)
continue
datadir = os.path.join(devices, device, datadir)
if not os.path.exists(datadir):

View File

@ -21,6 +21,7 @@ import signal
import sys
import time
import mimetools
import gettext
import eventlet
from eventlet import greenio, GreenPool, sleep, wsgi, listen
@ -120,6 +121,7 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
sock = get_socket(conf, default_port=kwargs.get('default_port', 8080))
# remaining tasks should not require elevated privileges
drop_privileges(conf.get('user', 'swift'))
gettext.install('swift', unicode=1)
# finally after binding to ports and privilege drop, run app __init__ code
app = loadapp('config:%s' % conf_file, global_conf={'log_name': log_name})

View File

@ -51,10 +51,11 @@ class ContainerAuditor(Daemon):
self.container_audit(path)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Container audits: %s passed audit, '
'%s failed audit' % (time.ctime(reported),
self.container_passes,
self.container_failures))
_('Since %(time)s: Container audits: %(pass)s passed '
'audit, %(fail)s failed audit'),
{'time': time.ctime(reported),
'pass': self.container_passes,
'fail': self.container_failures})
reported = time.time()
self.container_passes = 0
self.container_failures = 0
@ -64,7 +65,7 @@ class ContainerAuditor(Daemon):
def run_once(self):
"""Run the container audit once."""
self.logger.info('Begin container audit "once" mode')
self.logger.info(_('Begin container audit "once" mode'))
begin = reported = time.time()
all_locs = audit_location_generator(self.devices,
container_server.DATADIR,
@ -74,16 +75,17 @@ class ContainerAuditor(Daemon):
self.container_audit(path)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Container audits: %s passed audit, '
'%s failed audit' % (time.ctime(reported),
self.container_passes,
self.container_failures))
_('Since %(time)s: Container audits: %(pass)s passed '
'audit, %(fail)s failed audit'),
{'time': time.ctime(reported),
'pass': self.container_passes,
'fail': self.container_failures})
reported = time.time()
self.container_passes = 0
self.container_failures = 0
elapsed = time.time() - begin
self.logger.info(
'Container audit "once" mode completed: %.02fs' % elapsed)
_('Container audit "once" mode completed: %.02fs'), elapsed)
def container_audit(self, path):
"""
@ -98,8 +100,8 @@ class ContainerAuditor(Daemon):
if not broker.is_deleted():
info = broker.get_info()
self.container_passes += 1
self.logger.debug('Audit passed for %s' % broker.db_file)
self.logger.debug(_('Audit passed for %s'), broker.db_file)
except Exception:
self.container_failures += 1
self.logger.exception('ERROR Could not get container info %s' %
self.logger.exception(_('ERROR Could not get container info %s'),
(broker.db_file))

View File

@ -111,18 +111,18 @@ class ContainerController(object):
return HTTPNotFound(request=req)
elif account_response.status < 200 or \
account_response.status > 299:
self.logger.error('ERROR Account update failed '
'with %s:%s/%s transaction %s (will retry '
'later): Response %s %s' % (account_ip,
account_port, account_device,
req.headers.get('x-cf-trans-id'),
account_response.status,
account_response.reason))
self.logger.error(_('ERROR Account update failed '
'with %(ip)s:%(port)s/%(device)s (will retry '
'later): Response %(status)s %(reason)s'),
{'ip': account_ip, 'port': account_port,
'device': account_device,
'status': account_response.status,
'reason': account_response.reason})
except:
self.logger.exception('ERROR account update failed with '
'%s:%s/%s transaction %s (will retry later)' %
(account_ip, account_port, account_device,
req.headers.get('x-cf-trans-id', '-')))
self.logger.exception(_('ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later)'),
{'ip': account_ip, 'port': account_port,
'device': account_device})
return None
def DELETE(self, req):
@ -384,6 +384,7 @@ class ContainerController(object):
def __call__(self, env, start_response):
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
if not check_utf8(req.path_info):
res = HTTPPreconditionFailed(body='Invalid UTF8')
else:
@ -393,10 +394,8 @@ class ContainerController(object):
else:
res = HTTPMethodNotAllowed()
except:
self.logger.exception('ERROR __call__ error with %s %s '
'transaction %s' % (env.get('REQUEST_METHOD', '-'),
env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
'-')))
self.logger.exception(_('ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = '%.4f' % (time.time() - start_time)
log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s' % (

View File

@ -56,7 +56,7 @@ class ContainerUpdater(Daemon):
"""Get the account ring. Load it if it hasn't been yet."""
if not self.account_ring:
self.logger.debug(
'Loading account ring from %s' % self.account_ring_path)
_('Loading account ring from %s'), self.account_ring_path)
self.account_ring = Ring(self.account_ring_path)
return self.account_ring
@ -70,7 +70,7 @@ class ContainerUpdater(Daemon):
for device in os.listdir(self.devices):
dev_path = os.path.join(self.devices, device)
if self.mount_check and not os.path.ismount(dev_path):
self.logger.warn('%s is not mounted' % device)
self.logger.warn(_('%s is not mounted'), device)
continue
con_path = os.path.join(dev_path, DATADIR)
if not os.path.exists(con_path):
@ -86,7 +86,7 @@ class ContainerUpdater(Daemon):
"""
time.sleep(random() * self.interval)
while True:
self.logger.info('Begin container update sweep')
self.logger.info(_('Begin container update sweep'))
begin = time.time()
pids = []
# read from account ring to ensure it's fresh
@ -107,15 +107,17 @@ class ContainerUpdater(Daemon):
self.container_sweep(path)
elapsed = time.time() - forkbegin
self.logger.debug(
'Container update sweep of %s completed: '
'%.02fs, %s successes, %s failures, %s with no changes'
% (path, elapsed, self.successes, self.failures,
self.no_changes))
_('Container update sweep of %(path)s completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s '
'failures, %(no_change)s with no changes'),
{'path': path, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures,
'no_change': self.no_changes})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info('Container update sweep completed: %.02fs' %
self.logger.info(_('Container update sweep completed: %.02fs'),
elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
@ -133,9 +135,11 @@ class ContainerUpdater(Daemon):
for path in self.get_paths():
self.container_sweep(path)
elapsed = time.time() - begin
self.logger.info('Container update single threaded sweep completed: '
'%.02fs, %s successes, %s failures, %s with no changes' %
(elapsed, self.successes, self.failures, self.no_changes))
self.logger.info(_('Container update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
'%(no_change)s with no changes'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures, 'no_change': self.no_changes})
def container_sweep(self, path):
"""
@ -181,14 +185,16 @@ class ContainerUpdater(Daemon):
if successes > failures:
self.successes += 1
self.logger.debug(
'Update report sent for %s %s' % (container, dbfile))
_('Update report sent for %(container)s %(dbfile)s'),
{'container': container, 'dbfile': dbfile})
broker.reported(info['put_timestamp'],
info['delete_timestamp'], info['object_count'],
info['bytes_used'])
else:
self.failures += 1
self.logger.debug(
'Update report failed for %s %s' % (container, dbfile))
_('Update report failed for %(container)s %(dbfile)s'),
{'container': container, 'dbfile': dbfile})
else:
self.no_changes += 1
@ -216,8 +222,8 @@ class ContainerUpdater(Daemon):
'X-Bytes-Used': bytes,
'X-Account-Override-Deleted': 'yes'})
except:
self.logger.exception('ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later): ' % node)
self.logger.exception(_('ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later): '), node)
return 500
with Timeout(self.node_timeout):
try:
@ -227,5 +233,5 @@ class ContainerUpdater(Daemon):
except:
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.exception(
'Exception with %(ip)s:%(port)s/%(device)s' % node)
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
return 500

View File

@ -52,10 +52,10 @@ class ObjectAuditor(Daemon):
for path, device, partition in all_locs:
self.object_audit(path, device, partition)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Locally: %d passed audit, %d quarantined, '
'%d errors' % (time.ctime(reported), self.passes,
self.quarantines, self.errors))
self.logger.info(_('Since %(time)s: Locally: %(pass)d '
'passed audit, %(quar)d quarantined, %(error)d errors'),
{'time': time.ctime(reported), 'pass': self.passes,
'quar': self.quarantines, 'error': self.errors})
reported = time.time()
self.passes = 0
self.quarantines = 0
@ -66,7 +66,7 @@ class ObjectAuditor(Daemon):
def run_once(self):
"""Run the object audit once."""
self.logger.info('Begin object audit "once" mode')
self.logger.info(_('Begin object audit "once" mode'))
begin = reported = time.time()
all_locs = audit_location_generator(self.devices,
object_server.DATADIR,
@ -75,17 +75,17 @@ class ObjectAuditor(Daemon):
for path, device, partition in all_locs:
self.object_audit(path, device, partition)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Locally: %d passed audit, %d quarantined, '
'%d errors' % (time.ctime(reported), self.passes,
self.quarantines, self.errors))
self.logger.info(_('Since %(time)s: Locally: %(pass)d '
'passed audit, %(quar)d quarantined, %(error)d errors'),
{'time': time.ctime(reported), 'pass': self.passes,
'quar': self.quarantines, 'error': self.errors})
reported = time.time()
self.passes = 0
self.quarantines = 0
self.errors = 0
elapsed = time.time() - begin
self.logger.info(
'Object audit "once" mode completed: %.02fs' % elapsed)
_('Object audit "once" mode completed: %.02fs'), elapsed)
def object_audit(self, path, device, partition):
"""
@ -124,8 +124,8 @@ class ObjectAuditor(Daemon):
"%s" % (df.metadata['ETag'], etag))
except AuditException, err:
self.quarantines += 1
self.logger.error('ERROR Object %s failed audit and will be '
'quarantined: %s' % (path, err))
self.logger.error(_('ERROR Object %(obj)s failed audit and will be '
'quarantined: %(err)s'), {'obj': path, 'err': err})
invalidate_hash(os.path.dirname(path))
renamer_path = os.path.dirname(path)
renamer(renamer_path, os.path.join(self.devices, device,
@ -133,6 +133,6 @@ class ObjectAuditor(Daemon):
return
except Exception:
self.errors += 1
self.logger.exception('ERROR Trying to audit %s' % path)
self.logger.exception(_('ERROR Trying to audit %s'), path)
return
self.passes += 1

View File

@ -243,26 +243,27 @@ class ObjectReplicator(Daemon):
results = proc.stdout.read()
ret_val = proc.wait()
except Timeout:
self.logger.error("Killing long-running rsync: %s" % str(args))
self.logger.error(_("Killing long-running rsync: %s"), str(args))
proc.kill()
return 1 # failure response code
total_time = time.time() - start_time
if results:
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
self.logger.info(result)
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
self.logger.info(result)
if ret_val:
self.logger.error(_('Bad rsync return code: %s -> %d'),
(str(args), ret_val))
elif results:
self.logger.info(
"Sync of %s at %s complete (%.03f) [%d]" % (
args[-2], args[-1], total_time, ret_val))
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
else:
self.logger.debug(
"Sync of %s at %s complete (%.03f) [%d]" % (
args[-2], args[-1], total_time, ret_val))
if ret_val:
self.logger.error('Bad rsync return code: %d' % ret_val)
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
@ -346,10 +347,10 @@ class ObjectReplicator(Daemon):
responses.append(success)
if not suffixes or (len(responses) == \
self.object_ring.replica_count and all(responses)):
self.logger.info("Removing partition: %s" % job['path'])
self.logger.info(_("Removing partition: %s"), job['path'])
tpool.execute(shutil.rmtree, job['path'], ignore_errors=True)
except (Exception, Timeout):
self.logger.exception("Error syncing handoff partition")
self.logger.exception(_("Error syncing handoff partition"))
finally:
self.partition_times.append(time.time() - begin)
@ -379,13 +380,14 @@ class ObjectReplicator(Daemon):
node['device'], job['partition'], 'REPLICATE',
'', headers={'Content-Length': '0'}).getresponse()
if resp.status == 507:
self.logger.error('%s/%s responded as unmounted' %
(node['ip'], node['device']))
self.logger.error(_('%(ip)s/%(device)s responded'
' as unmounted'), node)
attempts_left += 1
continue
if resp.status != 200:
self.logger.error("Invalid response %s from %s" %
(resp.status, node['ip']))
self.logger.error(_("Invalid response %(resp)s "
"from %(ip)s"),
{'resp': resp.status, 'ip': node['ip']})
continue
remote_hash = pickle.loads(resp.read())
del resp
@ -408,7 +410,7 @@ class ObjectReplicator(Daemon):
logging.exception("Error syncing with node: %s" % node)
self.suffix_count += len(local_hash)
except (Exception, Timeout):
self.logger.exception("Error syncing partition")
self.logger.exception(_("Error syncing partition"))
finally:
self.partition_times.append(time.time() - begin)
@ -418,27 +420,30 @@ class ObjectReplicator(Daemon):
"""
if self.replication_count:
rate = self.replication_count / (time.time() - self.start)
self.logger.info("%d/%d (%.2f%%) partitions replicated in %.2f "
"seconds (%.2f/sec, %s remaining)"
% (self.replication_count, self.job_count,
self.replication_count * 100.0 / self.job_count,
time.time() - self.start, rate,
'%d%s' % compute_eta(self.start,
self.replication_count, self.job_count)))
self.logger.info(_("%(replicated)d/%(total)d (%(percentage).2f%%)"
" partitions replicated in %(time).2fs (%(rate).2f/sec, "
"%(remaining)s remaining)"),
{'replicated': self.replication_count, 'total': self.job_count,
'percentage': self.replication_count * 100.0 / self.job_count,
'time': time.time() - self.start, 'rate': rate,
'remaining': '%d%s' % compute_eta(self.start,
self.replication_count, self.job_count)})
if self.suffix_count:
self.logger.info("%d suffixes checked - %.2f%% hashed, "
"%.2f%% synced" %
(self.suffix_count,
(self.suffix_hash * 100.0) / self.suffix_count,
(self.suffix_sync * 100.0) / self.suffix_count))
self.logger.info(_("%(checked)d suffixes checked - "
"%(hashed).2f%% hashed, %(synced).2f%% synced"),
{'checked': self.suffix_count,
'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
'synced': (self.suffix_sync * 100.0) / self.suffix_count})
self.partition_times.sort()
self.logger.info("Partition times: max %.4fs, min %.4fs, "
"med %.4fs"
% (self.partition_times[-1], self.partition_times[0],
self.partition_times[len(self.partition_times) // 2]))
self.logger.info(_("Partition times: max %(max).4fs, "
"min %(min).4fs, med %(med).4fs"),
{'max': self.partition_times[-1],
'min': self.partition_times[0],
'med': self.partition_times[
len(self.partition_times) // 2]})
else:
self.logger.info("Nothing replicated for %s seconds."
% (time.time() - self.start))
self.logger.info(_("Nothing replicated for %s seconds."),
(time.time() - self.start))
def kill_coros(self):
"""Utility function that kills all coroutines currently running."""
@ -466,7 +471,7 @@ class ObjectReplicator(Daemon):
while True:
eventlet.sleep(self.lockup_timeout)
if self.replication_count == self.last_replication_count:
self.logger.error("Lockup detected.. killing live coros.")
self.logger.error(_("Lockup detected.. killing live coros."))
self.kill_coros()
self.last_replication_count = self.replication_count
@ -483,7 +488,7 @@ class ObjectReplicator(Daemon):
obj_path = join(dev_path, 'objects')
tmp_path = join(dev_path, 'tmp')
if self.mount_check and not os.path.ismount(dev_path):
self.logger.warn('%s is not mounted' % local_dev['device'])
self.logger.warn(_('%s is not mounted'), local_dev['device'])
continue
unlink_older_than(tmp_path, time.time() - self.reclaim_age)
if not os.path.exists(obj_path):
@ -521,8 +526,8 @@ class ObjectReplicator(Daemon):
jobs = self.collect_jobs()
for job in jobs:
if not self.check_ring():
self.logger.info(
"Ring change detected. Aborting current replication pass.")
self.logger.info(_("Ring change detected. Aborting "
"current replication pass."))
return
if job['delete']:
self.run_pool.spawn(self.update_deleted, job)
@ -531,7 +536,7 @@ class ObjectReplicator(Daemon):
with Timeout(self.lockup_timeout):
self.run_pool.waitall()
except (Exception, Timeout):
self.logger.exception("Exception in top-level replication loop")
self.logger.exception(_("Exception in top-level replication loop"))
self.kill_coros()
finally:
stats.kill()
@ -540,23 +545,23 @@ class ObjectReplicator(Daemon):
def run_once(self):
start = time.time()
self.logger.info("Running object replicator in script mode.")
self.logger.info(_("Running object replicator in script mode."))
self.replicate()
total = (time.time() - start) / 60
self.logger.info(
"Object replication complete. (%.02f minutes)" % total)
_("Object replication complete. (%.02f minutes)"), total)
def run_forever(self):
self.logger.info("Starting object replicator in daemon mode.")
# Run the replicator continually
while True:
start = time.time()
self.logger.info("Starting object replication pass.")
self.logger.info(_("Starting object replication pass."))
# Run the replicator
self.replicate()
total = (time.time() - start) / 60
self.logger.info(
"Object replication complete. (%.02f minutes)" % total)
self.logger.debug('Replication sleeping for %s seconds.' %
_("Object replication complete. (%.02f minutes)"), total)
self.logger.debug(_('Replication sleeping for %s seconds.'),
self.run_pause)
sleep(self.run_pause)

View File

@ -292,13 +292,15 @@ class ObjectController(object):
if 200 <= response.status < 300:
return
else:
self.logger.error('ERROR Container update failed (saving '
'for async update later): %d response from %s:%s/%s' %
(response.status, ip, port, contdevice))
self.logger.error(_('ERROR Container update failed '
'(saving for async update later): %(status)d '
'response from %(ip)s:%(port)s/%(dev)s'),
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except:
self.logger.exception('ERROR container update failed with '
'%s:%s/%s transaction %s (saving for async update later)' %
(ip, port, contdevice, headers_in.get('x-cf-trans-id', '-')))
self.logger.exception(_('ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
{'ip': ip, 'port': port, 'dev': contdevice})
async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
ohash = hash_path(account, container, obj)
write_pickle(
@ -560,6 +562,7 @@ class ObjectController(object):
"""WSGI Application entry point for the Swift Object Server."""
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
if not check_utf8(req.path_info):
res = HTTPPreconditionFailed(body='Invalid UTF8')
else:
@ -569,10 +572,8 @@ class ObjectController(object):
else:
res = HTTPMethodNotAllowed()
except:
self.logger.exception('ERROR __call__ error with %s %s '
'transaction %s' % (env.get('REQUEST_METHOD', '-'),
env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
'-')))
self.logger.exception(_('ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = time.time() - start_time
if self.log_requests:

View File

@ -54,7 +54,7 @@ class ObjectUpdater(Daemon):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.logger.debug(
'Loading container ring from %s' % self.container_ring_path)
_('Loading container ring from %s'), self.container_ring_path)
self.container_ring = Ring(self.container_ring_path)
return self.container_ring
@ -62,7 +62,7 @@ class ObjectUpdater(Daemon):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info('Begin object update sweep')
self.logger.info(_('Begin object update sweep'))
begin = time.time()
pids = []
# read from container ring to ensure it's fresh
@ -71,7 +71,7 @@ class ObjectUpdater(Daemon):
if self.mount_check and not \
os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
'Skipping %s as it is not mounted' % device)
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
@ -86,20 +86,23 @@ class ObjectUpdater(Daemon):
forkbegin = time.time()
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - forkbegin
self.logger.info('Object update sweep of %s completed: '
'%.02fs, %s successes, %s failures' %
(device, elapsed, self.successes, self.failures))
self.logger.info(_('Object update sweep of %(device)s'
' completed: %(elapsed).02fs, %(success)s successes'
', %(fail)s failures'),
{'device': device, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info('Object update sweep completed: %.02fs' % elapsed)
self.logger.info(_('Object update sweep completed: %.02fs'),
elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self):
"""Run the updater once"""
self.logger.info('Begin object update single threaded sweep')
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()
self.successes = 0
self.failures = 0
@ -107,13 +110,14 @@ class ObjectUpdater(Daemon):
if self.mount_check and \
not os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
'Skipping %s as it is not mounted' % device)
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - begin
self.logger.info('Object update single threaded sweep completed: '
'%.02fs, %s successes, %s failures' %
(elapsed, self.successes, self.failures))
self.logger.info(_('Object update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures})
def object_sweep(self, device):
"""
@ -150,7 +154,7 @@ class ObjectUpdater(Daemon):
update = pickle.load(open(update_path, 'rb'))
except Exception:
self.logger.exception(
'ERROR Pickle problem, quarantining %s' % update_path)
_('ERROR Pickle problem, quarantining %s'), update_path)
renamer(update_path, os.path.join(device,
'quarantined', 'objects', os.path.basename(update_path)))
return
@ -170,11 +174,13 @@ class ObjectUpdater(Daemon):
successes.append(node['id'])
if success:
self.successes += 1
self.logger.debug('Update sent for %s %s' % (obj, update_path))
self.logger.debug(_('Update sent for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
os.unlink(update_path)
else:
self.failures += 1
self.logger.debug('Update failed for %s %s' % (obj, update_path))
self.logger.debug(_('Update failed for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
update['successes'] = successes
write_pickle(update, update_path, os.path.join(device, 'tmp'))
@ -197,6 +203,6 @@ class ObjectUpdater(Daemon):
resp.read()
return resp.status
except:
self.logger.exception('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s' % node)
self.logger.exception(_('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s'), node)
return 500

View File

@ -171,10 +171,11 @@ class SegmentedIterable(object):
raise
except Exception, err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception('ERROR: While processing '
'manifest /%s/%s/%s %s' % (self.controller.account_name,
self.controller.container_name,
self.controller.object_name, self.controller.trans_id))
self.controller.app.logger.exception(_('ERROR: While '
'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
err.swift_logged = True
self.response.status_int = 503
raise
@ -203,10 +204,11 @@ class SegmentedIterable(object):
raise
except Exception, err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception('ERROR: While processing '
'manifest /%s/%s/%s %s' % (self.controller.account_name,
self.controller.container_name,
self.controller.object_name, self.controller.trans_id))
self.controller.app.logger.exception(_('ERROR: While '
'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
err.swift_logged = True
self.response.status_int = 503
raise
@ -249,10 +251,11 @@ class SegmentedIterable(object):
raise
except Exception, err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception('ERROR: While processing '
'manifest /%s/%s/%s %s' % (self.controller.account_name,
self.controller.container_name,
self.controller.object_name, self.controller.trans_id))
self.controller.app.logger.exception(_('ERROR: While '
'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
err.swift_logged = True
self.response.status_int = 503
raise
@ -283,8 +286,8 @@ class Controller(object):
:param msg: error message
"""
self.error_increment(node)
self.app.logger.error(
'%s %s:%s' % (msg, node['ip'], node['port']))
self.app.logger.error(_('%(msg)s %(ip)s:%(port)s'),
{'msg': msg, 'ip': node['ip'], 'port': node['port']})
def exception_occurred(self, node, typ, additional_info):
"""
@ -295,9 +298,9 @@ class Controller(object):
:param additional_info: additional information to log
"""
self.app.logger.exception(
'ERROR with %s server %s:%s/%s transaction %s re: %s' % (typ,
node['ip'], node['port'], node['device'], self.trans_id,
additional_info))
_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s'),
{'type': typ, 'ip': node['ip'], 'port': node['port'],
'device': node['device'], 'info': additional_info})
def error_limited(self, node):
"""
@ -318,8 +321,7 @@ class Controller(object):
limited = node['errors'] > self.app.error_suppression_limit
if limited:
self.app.logger.debug(
'Node error limited %s:%s (%s)' % (
node['ip'], node['port'], node['device']))
_('Node error limited %(ip)s:%(port)s (%(device)s)'), node)
return limited
def error_limit(self, node):
@ -543,8 +545,8 @@ class Controller(object):
if etag:
resp.headers['etag'] = etag.strip('"')
return resp
self.app.logger.error('%s returning 503 for %s, transaction %s' %
(server_type, statuses, self.trans_id))
self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'),
{'type': server_type, 'statuses': statuses})
resp.status = '503 Internal Server Error'
return resp
@ -617,9 +619,7 @@ class Controller(object):
res.bytes_transferred += len(chunk)
except GeneratorExit:
res.client_disconnect = True
self.app.logger.info(
'Client disconnected on read transaction %s' %
self.trans_id)
self.app.logger.info(_('Client disconnected on read'))
except:
self.exception_occurred(node, 'Object',
'Trying to read during GET of %s' % req.path)
@ -852,7 +852,7 @@ class ObjectController(Controller):
error_response = check_metadata(req, 'object')
if error_response:
return error_response
container_partition, containers, _, req.acl = \
container_partition, containers, _junk, req.acl = \
self.container_info(self.account_name, self.container_name)
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
@ -894,7 +894,7 @@ class ObjectController(Controller):
@delay_denial
def PUT(self, req):
"""HTTP PUT request handler."""
container_partition, containers, _, req.acl = \
container_partition, containers, _junk, req.acl = \
self.container_info(self.account_name, self.container_name)
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
@ -909,7 +909,7 @@ class ObjectController(Controller):
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
# Sometimes the 'content-type' header exists, but is set to None.
if not req.headers.get('content-type'):
guessed_type, _ = mimetypes.guess_type(req.path_info)
guessed_type, _junk = mimetypes.guess_type(req.path_info)
if not guessed_type:
req.headers['Content-Type'] = 'application/octet-stream'
else:
@ -995,9 +995,9 @@ class ObjectController(Controller):
containers.insert(0, container)
if len(conns) <= len(nodes) / 2:
self.app.logger.error(
'Object PUT returning 503, %s/%s required connections, '
'transaction %s' %
(len(conns), len(nodes) / 2 + 1, self.trans_id))
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
{'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
return HTTPServiceUnavailable(request=req)
try:
req.bytes_transferred = 0
@ -1027,27 +1027,26 @@ class ObjectController(Controller):
conns.remove(conn)
if len(conns) <= len(nodes) / 2:
self.app.logger.error(
'Object PUT exceptions during send, %s/%s '
'required connections, transaction %s' %
(len(conns), len(nodes) // 2 + 1,
self.trans_id))
_('Object PUT exceptions during send, '
'%(conns)s/%(nodes)s required connections'),
{'conns': len(conns),
'nodes': len(nodes) // 2 + 1})
return HTTPServiceUnavailable(request=req)
if req.headers.get('transfer-encoding') and chunk == '':
break
except ChunkReadTimeout, err:
self.app.logger.info(
'ERROR Client read timeout (%ss)' % err.seconds)
_('ERROR Client read timeout (%ss)'), err.seconds)
return HTTPRequestTimeout(request=req)
except:
req.client_disconnect = True
self.app.logger.exception(
'ERROR Exception causing client disconnect')
_('ERROR Exception causing client disconnect'))
return Response(status='499 Client Disconnect')
if req.content_length and req.bytes_transferred < req.content_length:
req.client_disconnect = True
self.app.logger.info(
'Client disconnected without sending enough data %s' %
self.trans_id)
_('Client disconnected without sending enough data'))
return Response(status='499 Client Disconnect')
statuses = []
reasons = []
@ -1071,7 +1070,7 @@ class ObjectController(Controller):
'Trying to get final status of PUT to %s' % req.path)
if len(etags) > 1:
self.app.logger.error(
'Object servers returned %s mismatched etags' % len(etags))
_('Object servers returned %s mismatched etags'), len(etags))
return HTTPServerError(request=req)
etag = len(etags) and etags.pop() or None
while len(statuses) < len(nodes):
@ -1095,7 +1094,7 @@ class ObjectController(Controller):
@delay_denial
def DELETE(self, req):
"""HTTP DELETE request handler."""
container_partition, containers, _, req.acl = \
container_partition, containers, _junk, req.acl = \
self.container_info(self.account_name, self.container_name)
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
@ -1145,7 +1144,7 @@ class ObjectController(Controller):
if not dest.startswith('/'):
dest = '/' + dest
try:
_, dest_container, dest_object = dest.split('/', 2)
_junk, dest_container, dest_object = dest.split('/', 2)
except ValueError:
return HTTPPreconditionFailed(request=req,
body='Destination header must be of the form '
@ -1413,9 +1412,8 @@ class ContainerController(Controller):
# If even one node doesn't do the delete, we can't be sure
# what the outcome will be once everything is in sync; so
# we 503.
self.app.logger.error('Returning 503 because not all '
'container nodes confirmed DELETE, transaction %s' %
self.trans_id)
self.app.logger.error(_('Returning 503 because not all '
'container nodes confirmed DELETE'))
return HTTPServiceUnavailable(request=req)
if resp.status_int == 202: # Indicates no server had the container
return HTTPNotFound(request=req)
@ -1710,6 +1708,7 @@ class BaseApplication(object):
controller = controller(self, **path_parts)
controller.trans_id = req.headers.get('x-cf-trans-id', '-')
self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
try:
handler = getattr(controller, req.method)
if not getattr(handler, 'publicly_accessible'):
@ -1737,7 +1736,7 @@ class BaseApplication(object):
return resp
return handler(req)
except Exception:
self.logger.exception('ERROR Unhandled exception in request')
self.logger.exception(_('ERROR Unhandled exception in request'))
return HTTPServerError(request=req)

View File

@ -0,0 +1,7 @@
# See http://code.google.com/p/python-nose/issues/detail?id=373
# nosetests does not run gettext.install(), so modules that wrap their
# log/error strings in _() would fail to import under the test runner.
# Install a pass-through _() builtin so those modules load unchanged.
import __builtin__
__builtin__._ = lambda x: x

0
test/probe/__init__.py Normal file
View File

View File

@ -21,7 +21,7 @@ from subprocess import Popen
from time import sleep
from swift.common import client
from common import get_to_final_state, kill_pids, reset_environment
from test.probe.common import get_to_final_state, kill_pids, reset_environment
class TestAccountFailures(unittest.TestCase):

View File

@ -23,7 +23,7 @@ from uuid import uuid4
from swift.common import client
from common import get_to_final_state, kill_pids, reset_environment
from test.probe.common import get_to_final_state, kill_pids, reset_environment
class TestContainerFailures(unittest.TestCase):

View File

@ -23,7 +23,7 @@ from uuid import uuid4
from swift.common import client, direct_client
from common import kill_pids, reset_environment
from test.probe.common import kill_pids, reset_environment
class TestObjectAsyncUpdate(unittest.TestCase):

View File

@ -23,7 +23,7 @@ from uuid import uuid4
from swift.common import client, direct_client
from common import kill_pids, reset_environment
from test.probe.common import kill_pids, reset_environment
class TestObjectHandoff(unittest.TestCase):

View File

@ -22,7 +22,7 @@ from time import sleep
from swift.common import client
from common import get_to_final_state, kill_pids, reset_environment
from test.probe.common import get_to_final_state, kill_pids, reset_environment
class TestRunningWithEachTypeDown(unittest.TestCase):

View File

@ -462,7 +462,7 @@ class TestAuthServer(unittest.TestCase):
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test', 'tester', 'testing')
self.assertEquals(log.getvalue().rsplit(' ', 1)[0],
"auth SUCCESS create_user('test', 'tester', _, False, False) "
"SUCCESS create_user('test', 'tester', _, False, False) "
"= %s" % repr(url))
log.truncate(0)
def start_response(*args):
@ -491,7 +491,7 @@ class TestAuthServer(unittest.TestCase):
logsegs[1] = '[01/Jan/2001:01:02:03 +0000]'
logsegs[2:] = logsegs[2].split(' ')
logsegs[-1] = '0.1234'
self.assertEquals(' '.join(logsegs), 'auth testhost - - '
self.assertEquals(' '.join(logsegs), 'testhost - - '
'[01/Jan/2001:01:02:03 +0000] "GET /v1/test/auth?test=True '
'HTTP/1.0" 204 - "-" "-" - - - - - - - - - "-" "None" "-" '
'0.1234')
@ -519,7 +519,7 @@ class TestAuthServer(unittest.TestCase):
logsegs[1] = '[01/Jan/2001:01:02:03 +0000]'
logsegs[2:] = logsegs[2].split(' ')
logsegs[-1] = '0.1234'
self.assertEquals(' '.join(logsegs), 'auth None - - [01/Jan/2001:'
self.assertEquals(' '.join(logsegs), 'None - - [01/Jan/2001:'
'01:02:03 +0000] "GET /v1/test/auth HTTP/1.0" 204 - "-" "-" - '
'- - - - - - - - "-" "None" "Content-Length: 0\n'
'X-Storage-User: tester\nX-Storage-Pass: testing" 0.1234')
@ -556,7 +556,7 @@ class TestAuthServer(unittest.TestCase):
'HTTP_X_STORAGE_PASS': 'testing'},
start_response)
self.assert_(log.getvalue().startswith(
'auth ERROR Unhandled exception in ReST request'),
'ERROR Unhandled exception in ReST request'),
log.getvalue())
log.truncate(0)
finally:

View File

@ -50,7 +50,7 @@ class TestDaemon(unittest.TestCase):
def test_create(self):
d = daemon.Daemon({})
self.assertEquals(d.conf, {})
self.assert_(isinstance(d.logger, utils.NamedLogger))
self.assert_(isinstance(d.logger, utils.LogAdapter))
def test_stubs(self):
d = daemon.Daemon({})

View File

@ -283,35 +283,27 @@ Error: unable to locate %s
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_NamedLogger(self):
sio = StringIO()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sio))
nl = utils.NamedLogger(logger, 'server')
nl.warn('test')
self.assertEquals(sio.getvalue(), 'server test\n')
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server')
logger.warn('test1')
self.assertEquals(sio.getvalue(), 'server test1\n')
self.assertEquals(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEquals(sio.getvalue(), 'server test1\n')
self.assertEquals(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server')
logger.debug('test3')
self.assertEquals(sio.getvalue(), 'server test1\nserver test3\n')
self.assertEquals(sio.getvalue(), 'test1\ntest3\n')
# Doesn't really test that the log facility is truly being used all the
# way to syslog; but exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server')
logger.warn('test4')
self.assertEquals(sio.getvalue(),
'server test1\nserver test3\nserver test4\n')
'test1\ntest3\ntest4\n')
logger.debug('test5')
self.assertEquals(sio.getvalue(),
'server test1\nserver test3\nserver test4\n')
'test1\ntest3\ntest4\n')
def test_storage_directory(self):
self.assertEquals(utils.storage_directory('objects', '1', 'ABCDEF'),

View File

@ -3019,8 +3019,8 @@ class TestSegmentedIterable(unittest.TestCase):
self.assertRaises(Exception,
proxy_server.SegmentedIterable(self.controller, None,
[None])._load_next_segment)
self.assertEquals(self.controller.exception_args[0],
'ERROR: While processing manifest /a/c/o tx1')
self.assert_(self.controller.exception_args[0].startswith(
'ERROR: While processing manifest'))
def test_load_next_segment_with_no_segments(self):
self.assertRaises(StopIteration,
@ -3079,8 +3079,8 @@ class TestSegmentedIterable(unittest.TestCase):
self.assertRaises(Exception,
proxy_server.SegmentedIterable(self.controller, 'lc', [{'name':
'o1'}])._load_next_segment)
self.assertEquals(self.controller.exception_args[0],
'ERROR: While processing manifest /a/c/o tx1')
self.assert_(self.controller.exception_args[0].startswith(
'ERROR: While processing manifest'))
self.assertEquals(str(self.controller.exception_info[1]),
'Could not load object segment /a/lc/o1: 404')
@ -3088,8 +3088,8 @@ class TestSegmentedIterable(unittest.TestCase):
# Iterator value isn't a dict
self.assertRaises(Exception, ''.join,
proxy_server.SegmentedIterable(self.controller, None, [None]))
self.assertEquals(self.controller.exception_args[0],
'ERROR: While processing manifest /a/c/o tx1')
self.assert_(self.controller.exception_args[0].startswith(
'ERROR: While processing manifest'))
def test_iter_with_no_segments(self):
segit = proxy_server.SegmentedIterable(self.controller, 'lc', [])
@ -3118,8 +3118,8 @@ class TestSegmentedIterable(unittest.TestCase):
self.assertRaises(Exception, ''.join,
proxy_server.SegmentedIterable(self.controller, 'lc', [{'name':
'o1'}]))
self.assertEquals(self.controller.exception_args[0],
'ERROR: While processing manifest /a/c/o tx1')
self.assert_(self.controller.exception_args[0].startswith(
'ERROR: While processing manifest'))
self.assertEquals(str(self.controller.exception_info[1]),
'Could not load object segment /a/lc/o1: 404')
@ -3128,8 +3128,8 @@ class TestSegmentedIterable(unittest.TestCase):
self.assertRaises(Exception,
proxy_server.SegmentedIterable(self.controller, None,
[None]).app_iter_range(None, None).next)
self.assertEquals(self.controller.exception_args[0],
'ERROR: While processing manifest /a/c/o tx1')
self.assert_(self.controller.exception_args[0].startswith(
'ERROR: While processing manifest'))
def test_app_iter_range_with_no_segments(self):
self.assertEquals(''.join(proxy_server.SegmentedIterable(