Logging audit for guestagent/strategies module

Adjust logging to conform to logging standards. Clean up
some messages that were unclear, and change some messages
from info to debug where they are benign and expected.

Change-Id: Ibc79699bd3fbe9528c67566acdc683b33ac23f3f
Partial-Bug: #1324206
Amrith Kumar 2014-07-17 12:53:36 -04:00
parent 24b1f25204
commit fe2f755a3b
8 changed files with 32 additions and 33 deletions
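
The diffs below apply the guest-agent logging conventions: benign, expected events drop to debug level and stay untranslated, operator-facing info/error messages are wrapped in the _() translation marker and end with a period, and multi-value substitutions use named parameters. A minimal sketch of that pattern, assuming a gettext-style _() helper; the function and variable names here are illustrative only and are not taken from the Trove modules changed below:

import logging
from gettext import gettext as _

LOG = logging.getLogger(__name__)

def run_prepare(prepare_cmd, etag, checksum):
    # Benign, expected event: debug level, no translation marker.
    LOG.debug("Running prepare: %s.", prepare_cmd)
    if etag != checksum:
        # Operator-facing error: translated, ends with a period, and uses
        # named substitution so translators can reorder the values.
        LOG.error(_("Checksum mismatch. ETAG: %(tag)s MD5: %(checksum)s."),
                  {'tag': etag, 'checksum': checksum})
        return False
    LOG.info(_("Prepare finished successfully."))
    return True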

View File

@@ -21,5 +21,5 @@ LOG = logging.getLogger(__name__)
 def get_backup_strategy(backup_driver, ns=__name__):
-    LOG.debug("Getting backup strategy: %s" % backup_driver)
+    LOG.debug("Getting backup strategy: %s." % backup_driver)
     return Strategy.get_strategy(backup_driver, ns)

View File

@@ -15,7 +15,6 @@
 #
 from trove.guestagent.strategy import Strategy
-from trove.openstack.common import log as logging
 from trove.common import cfg, utils
 from eventlet.green import subprocess
 import os
@@ -23,8 +22,6 @@ import signal
 CONF = cfg.CONF
-LOG = logging.getLogger(__name__)

 class BackupError(Exception):
     """Error running the Backup Command."""

View File

@@ -58,22 +58,22 @@ class InnoBackupEx(base.BackupRunner):
     def check_process(self):
         """Check the output from innobackupex for 'completed OK!'."""
-        LOG.debug('Checking innobackupex process output')
+        LOG.debug('Checking innobackupex process output.')
         with open('/tmp/innobackupex.log', 'r') as backup_log:
             output = backup_log.read()
             LOG.info(output)
             if not output:
-                LOG.error("Innobackupex log file empty")
+                LOG.error(_("Innobackupex log file empty."))
                 return False
             last_line = output.splitlines()[-1].strip()
             if not re.search('completed OK!', last_line):
-                LOG.error("Innobackupex did not complete successfully")
+                LOG.error(_("Innobackupex did not complete successfully."))
                 return False
         return True

     def metadata(self):
-        LOG.debug('Getting metadata from backup')
+        LOG.debug('Getting metadata from backup.')
         meta = {}
         lsn = re.compile("The latest check point \(for incremental\): '(\d+)'")
         with open('/tmp/innobackupex.log', 'r') as backup_log:
@@ -81,7 +81,7 @@ class InnoBackupEx(base.BackupRunner):
             match = lsn.search(output)
             if match:
                 meta = {'lsn': match.group(1)}
-        LOG.info("Metadata for backup: %s", str(meta))
+        LOG.info(_("Metadata for backup: %s.") % str(meta))
         return meta

     @property

View File

@@ -19,5 +19,5 @@ LOG = logging.getLogger(__name__)
 def get_restore_strategy(restore_driver, ns=__name__):
-    LOG.debug("Getting restore strategy: %s" % restore_driver)
+    LOG.debug("Getting restore strategy: %s." % restore_driver)
     return Strategy.get_strategy(restore_driver, ns)

View File

@@ -99,7 +99,7 @@ class RestoreRunner(Strategy):
                 content_length += len(chunk)
         process.stdin.close()
         utils.raise_if_process_errored(process, RestoreError)
-        LOG.info(_("Restored %s bytes from stream.") % content_length)
+        LOG.debug("Restored %s bytes from stream." % content_length)
         return content_length

View File

@@ -38,18 +38,18 @@ class MySQLRestoreMixin(object):
     def mysql_is_running(self):
         if base.exec_with_root_helper("/usr/bin/mysqladmin", "ping"):
-            LOG.info(_("The mysqld daemon is up and running."))
+            LOG.debug("MySQL is up and running.")
             return True
         else:
-            LOG.info(_("The mysqld daemon is not running."))
+            LOG.debug("MySQL is not running.")
             return False

     def mysql_is_not_running(self):
         if base.exec_with_root_helper("/usr/bin/pgrep", "mysqld"):
-            LOG.info(_("The mysqld daemon is still running."))
+            LOG.info("MySQL is still running.")
             return False
         else:
-            LOG.info(_("The mysqld daemon is not running."))
+            LOG.debug("MySQL is not running.")
             return True

     def poll_until_then_raise(self, event, exc):
@@ -66,9 +66,9 @@ class MySQLRestoreMixin(object):
         try:
             i = child.expect(['Starting mysqld daemon'])
             if i == 0:
-                LOG.info(_("Starting mysqld daemon"))
+                LOG.info(_("Starting MySQL"))
         except pexpect.TIMEOUT:
-            LOG.exception(_("wait_and_close_proc failed"))
+            LOG.exception(_("Got a timeout launching mysqld_safe"))
         finally:
             # There is a race condition here where we kill mysqld before
             # the init file been executed. We need to ensure mysqld is up.
@@ -76,8 +76,8 @@ class MySQLRestoreMixin(object):
                 self.mysql_is_running,
                 base.RestoreError("Reset root password failed: "
                                   "mysqld did not start!"))
-            LOG.info(_("Root password reset successfully!"))
-            LOG.info(_("Cleaning up the temp mysqld process..."))
+            LOG.info(_("Root password reset successfully."))
+            LOG.debug("Cleaning up the temp mysqld process.")
             child.delayafterclose = 1
             child.delayafterterminate = 1
             child.close(force=True)
@@ -118,7 +118,8 @@ class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
     def pre_restore(self):
         app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
         app.stop_db()
-        LOG.info(_("Cleaning out restore location: %s"), self.restore_location)
+        LOG.info(_("Cleaning out restore location: %s."),
+                 self.restore_location)
         utils.execute_with_timeout("chmod", "-R", "0777",
                                    self.restore_location,
                                    root_helper="sudo",
@@ -126,9 +127,9 @@ class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
         utils.clean_out(self.restore_location)

     def _run_prepare(self):
-        LOG.info(_("Running innobackupex prepare: %s"), self.prepare_cmd)
+        LOG.debug("Running innobackupex prepare: %s.", self.prepare_cmd)
         self.prep_retcode = utils.execute(self.prepare_cmd, shell=True)
-        LOG.info(_("Innobackupex prepare finished successfully"))
+        LOG.info(_("Innobackupex prepare finished successfully."))

     def post_restore(self):
         self._run_prepare()
@@ -185,9 +186,9 @@ class InnoBackupExIncremental(InnoBackupEx):
     def _incremental_prepare(self, incremental_dir):
         prepare_cmd = self._incremental_prepare_cmd(incremental_dir)
-        LOG.info(_("Running innobackupex prepare: %s"), prepare_cmd)
+        LOG.debug("Running innobackupex prepare: %s.", prepare_cmd)
         utils.execute(prepare_cmd, shell=True)
-        LOG.info(_("Innobackupex prepare finished successfully"))
+        LOG.info(_("Innobackupex prepare finished successfully."))

     def _incremental_restore(self, location, checksum):
         """Recursively apply backups from all parents.
@@ -203,7 +204,7 @@ class InnoBackupExIncremental(InnoBackupEx):
         incremental_dir = None
         if 'parent_location' in metadata:
             LOG.info(_("Restoring parent: %(parent_location)s"
-                       " checksum: %(parent_checksum)s") % metadata)
+                       " checksum: %(parent_checksum)s.") % metadata)
             parent_location = metadata['parent_location']
             parent_checksum = metadata['parent_checksum']
             # Restore parents recursively so backup are applied sequentially

View File

@@ -21,5 +21,5 @@ LOG = logging.getLogger(__name__)
 def get_storage_strategy(storage_driver, ns=__name__):
-    LOG.debug("Getting storage strategy: %s" % storage_driver)
+    LOG.debug("Getting storage strategy: %s." % storage_driver)
     return Strategy.get_strategy(storage_driver, ns)

View File

@@ -128,9 +128,9 @@ class SwiftStorage(base.Storage):
                 # Check each segment MD5 hash against swift etag
                 # Raise an error and mark backup as failed
                 if etag != segment_checksum:
-                    LOG.error("Error saving data segment to swift. "
-                              "ETAG: %s Segment MD5: %s",
-                              etag, segment_checksum)
+                    LOG.error(_("Error saving data segment to swift. "
+                                "ETAG: %(tag)s Segment MD5: %(checksum)s."),
+                              {'tag': etag, 'checksum': segment_checksum})
                     return False, "Error saving data to Swift!", None, location
                 swift_checksum.update(segment_checksum)
@@ -159,8 +159,9 @@ class SwiftStorage(base.Storage):
         final_swift_checksum = swift_checksum.hexdigest()
         if etag != final_swift_checksum:
             LOG.error(
-                "Error saving data to swift. Manifest ETAG: %s Swift MD5: %s",
-                etag, final_swift_checksum)
+                _("Error saving data to swift. Manifest "
+                  "ETAG: %(tag)s Swift MD5: %(checksum)s"),
+                {'tag': etag, 'checksum': final_swift_checksum})
             return False, "Error saving data to Swift!", None, location

         return (True, "Successfully saved data to Swift!",
@@ -175,8 +176,8 @@ class SwiftStorage(base.Storage):
     def _verify_checksum(self, etag, checksum):
         etag_checksum = etag.strip('"')
         if etag_checksum != checksum:
-            msg = ("Original checksum: %(original)s does not match"
-                   " the current checksum: %(current)s" %
+            msg = (_("Original checksum: %(original)s does not match"
+                     " the current checksum: %(current)s") %
                   {'original': etag_checksum, 'current': checksum})
             LOG.error(msg)
             raise SwiftDownloadIntegrityError(msg)