switch LOG.audit to LOG.info
oslo.log does not have an AUDIT level. This was a conscious decision, documented in the blueprint https://blueprints.launchpad.net/oslo.log/+spec/remove-context-adapter, which was implemented in change I10240f8af6c42508124659b3ed62c5ab93552953. So, to prepare for switching to oslo.log, this changeset removes usage of LOG.audit and replaces it with LOG.info. Change-Id: I9cb1293529b2079b8a4778e27d60c6c760dfb622
This commit is contained in:
parent
50e0da9a44
commit
69d0c64361
|
@ -40,6 +40,7 @@ from nova import context
|
|||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.i18n import _LI
|
||||
from nova.i18n import _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.openstack.common import memorycache
|
||||
|
@ -461,7 +462,7 @@ class Authorizer(wsgi.Middleware):
|
|||
if self._matches_any_role(context, allowed_roles):
|
||||
return self.application
|
||||
else:
|
||||
LOG.audit(_('Unauthorized request for controller=%(controller)s '
|
||||
LOG.info(_LI('Unauthorized request for controller=%(controller)s '
|
||||
'and action=%(action)s'),
|
||||
{'controller': controller, 'action': action},
|
||||
context=context)
|
||||
|
|
|
@ -39,6 +39,7 @@ from nova.compute import api as compute_api
|
|||
from nova.compute import vm_states
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova.i18n import _LW
|
||||
from nova.image import s3
|
||||
from nova import network
|
||||
|
@ -403,7 +404,7 @@ class CloudController(object):
|
|||
|
||||
def create_snapshot(self, context, volume_id, **kwargs):
|
||||
validate_volume_id(volume_id)
|
||||
LOG.audit(_("Create snapshot of volume %s"), volume_id,
|
||||
LOG.info(_LI("Create snapshot of volume %s"), volume_id,
|
||||
context=context)
|
||||
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
|
||||
args = (context, volume_id, kwargs.get('name'),
|
||||
|
@ -446,7 +447,7 @@ class CloudController(object):
|
|||
return {'keySet': result}
|
||||
|
||||
def create_key_pair(self, context, key_name, **kwargs):
|
||||
LOG.audit(_("Create key pair %s"), key_name, context=context)
|
||||
LOG.info(_LI("Create key pair %s"), key_name, context=context)
|
||||
|
||||
keypair, private_key = self.keypair_api.create_key_pair(
|
||||
context, context.user_id, key_name)
|
||||
|
@ -458,7 +459,7 @@ class CloudController(object):
|
|||
|
||||
def import_key_pair(self, context, key_name, public_key_material,
|
||||
**kwargs):
|
||||
LOG.audit(_("Import key %s"), key_name, context=context)
|
||||
LOG.info(_LI("Import key %s"), key_name, context=context)
|
||||
|
||||
public_key = base64.b64decode(public_key_material)
|
||||
|
||||
|
@ -471,7 +472,7 @@ class CloudController(object):
|
|||
'keyFingerprint': keypair['fingerprint']}
|
||||
|
||||
def delete_key_pair(self, context, key_name, **kwargs):
|
||||
LOG.audit(_("Delete key pair %s"), key_name, context=context)
|
||||
LOG.info(_LI("Delete key pair %s"), key_name, context=context)
|
||||
try:
|
||||
self.keypair_api.delete_key_pair(context, context.user_id,
|
||||
key_name)
|
||||
|
@ -774,7 +775,7 @@ class CloudController(object):
|
|||
"passwordData": output}
|
||||
|
||||
def get_console_output(self, context, instance_id, **kwargs):
|
||||
LOG.audit(_("Get console output for instance %s"), instance_id,
|
||||
LOG.info(_LI("Get console output for instance %s"), instance_id,
|
||||
context=context)
|
||||
# instance_id may be passed in as a list of instances
|
||||
if isinstance(instance_id, list):
|
||||
|
@ -847,11 +848,11 @@ class CloudController(object):
|
|||
if snapshot_ec2id is not None:
|
||||
snapshot_id = ec2utils.ec2_snap_id_to_uuid(kwargs['snapshot_id'])
|
||||
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
|
||||
LOG.audit(_("Create volume from snapshot %s"), snapshot_ec2id,
|
||||
LOG.info(_LI("Create volume from snapshot %s"), snapshot_ec2id,
|
||||
context=context)
|
||||
else:
|
||||
snapshot = None
|
||||
LOG.audit(_("Create volume of %s GB"),
|
||||
LOG.info(_LI("Create volume of %s GB"),
|
||||
kwargs.get('size'),
|
||||
context=context)
|
||||
|
||||
|
@ -891,7 +892,7 @@ class CloudController(object):
|
|||
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
|
||||
instance = self.compute_api.get(context, instance_uuid,
|
||||
want_objects=True)
|
||||
LOG.audit(_('Attach volume %(volume_id)s to instance %(instance_id)s '
|
||||
LOG.info(_LI('Attach volume %(volume_id)s to instance %(instance_id)s '
|
||||
'at %(device)s'),
|
||||
{'volume_id': volume_id,
|
||||
'instance_id': instance_id,
|
||||
|
@ -921,7 +922,7 @@ class CloudController(object):
|
|||
def detach_volume(self, context, volume_id, **kwargs):
|
||||
validate_volume_id(volume_id)
|
||||
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
|
||||
LOG.audit(_("Detach volume %s"), volume_id, context=context)
|
||||
LOG.info(_LI("Detach volume %s"), volume_id, context=context)
|
||||
volume = self.volume_api.get(context, volume_id)
|
||||
instance = self._get_instance_from_volume(context, volume)
|
||||
|
||||
|
@ -1288,17 +1289,17 @@ class CloudController(object):
|
|||
return address
|
||||
|
||||
def allocate_address(self, context, **kwargs):
|
||||
LOG.audit(_("Allocate address"), context=context)
|
||||
LOG.info(_LI("Allocate address"), context=context)
|
||||
public_ip = self.network_api.allocate_floating_ip(context)
|
||||
return {'publicIp': public_ip}
|
||||
|
||||
def release_address(self, context, public_ip, **kwargs):
|
||||
LOG.audit(_('Release address %s'), public_ip, context=context)
|
||||
LOG.info(_LI('Release address %s'), public_ip, context=context)
|
||||
self.network_api.release_floating_ip(context, address=public_ip)
|
||||
return {'return': "true"}
|
||||
|
||||
def associate_address(self, context, instance_id, public_ip, **kwargs):
|
||||
LOG.audit(_("Associate address %(public_ip)s to instance "
|
||||
LOG.info(_LI("Associate address %(public_ip)s to instance "
|
||||
"%(instance_id)s"),
|
||||
{'public_ip': public_ip, 'instance_id': instance_id},
|
||||
context=context)
|
||||
|
@ -1331,7 +1332,8 @@ class CloudController(object):
|
|||
if instance_id:
|
||||
instance = self.compute_api.get(context, instance_id,
|
||||
want_objects=True)
|
||||
LOG.audit(_("Disassociate address %s"), public_ip, context=context)
|
||||
LOG.info(_LI("Disassociate address %s"),
|
||||
public_ip, context=context)
|
||||
self.network_api.disassociate_floating_ip(context, instance,
|
||||
address=public_ip)
|
||||
else:
|
||||
|
@ -1483,7 +1485,7 @@ class CloudController(object):
|
|||
def reboot_instances(self, context, instance_id, **kwargs):
|
||||
"""instance_id is a list of instance ids."""
|
||||
instances = self._ec2_ids_to_instances(context, instance_id)
|
||||
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
|
||||
LOG.info(_LI("Reboot instance %r"), instance_id, context=context)
|
||||
for instance in instances:
|
||||
self.compute_api.reboot(context, instance, 'HARD')
|
||||
return True
|
||||
|
@ -1598,7 +1600,7 @@ class CloudController(object):
|
|||
return {'imagesSet': images}
|
||||
|
||||
def deregister_image(self, context, image_id, **kwargs):
|
||||
LOG.audit(_("De-registering image %s"), image_id, context=context)
|
||||
LOG.info(_LI("De-registering image %s"), image_id, context=context)
|
||||
image = self._get_image(context, image_id)
|
||||
internal_id = image['id']
|
||||
self.image_service.delete(context, internal_id)
|
||||
|
@ -1634,7 +1636,7 @@ class CloudController(object):
|
|||
metadata['properties']['block_device_mapping'] = mappings
|
||||
|
||||
image_id = self._register_image(context, metadata)
|
||||
LOG.audit(_('Registered image %(image_location)s with id '
|
||||
LOG.info(_LI('Registered image %(image_location)s with id '
|
||||
'%(image_id)s'),
|
||||
{'image_location': image_location, 'image_id': image_id},
|
||||
context=context)
|
||||
|
@ -1703,7 +1705,7 @@ class CloudController(object):
|
|||
if operation_type not in ['add', 'remove']:
|
||||
msg = _('operation_type must be add or remove')
|
||||
raise exception.InvalidParameterValue(message=msg)
|
||||
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
|
||||
LOG.info(_LI("Updating image %s publicity"), image_id, context=context)
|
||||
|
||||
try:
|
||||
image = self._get_image(context, image_id)
|
||||
|
|
|
@ -22,6 +22,7 @@ from oslo_utils import timeutils
|
|||
from nova import context
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova.network import model as network_model
|
||||
from nova import objects
|
||||
from nova.objects import base as obj_base
|
||||
|
@ -288,7 +289,7 @@ def is_ec2_timestamp_expired(request, expires=None):
|
|||
timeutils.is_newer_than(query_time, expires)
|
||||
return False
|
||||
except ValueError:
|
||||
LOG.audit(_("Timestamp is invalid."))
|
||||
LOG.info(_LI("Timestamp is invalid."))
|
||||
return True
|
||||
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ from nova.api.openstack import extensions
|
|||
from nova.api.openstack import wsgi
|
||||
from nova import compute
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
||||
|
@ -50,7 +50,7 @@ class AssistedVolumeSnapshotsController(wsgi.Controller):
|
|||
except KeyError:
|
||||
raise webob.exc.HTTPBadRequest()
|
||||
|
||||
LOG.audit(_("Create assisted snapshot from volume %s"), volume_id,
|
||||
LOG.info(_LI("Create assisted snapshot from volume %s"), volume_id,
|
||||
context=context)
|
||||
|
||||
return self.compute_api.volume_snapshot_create(context, volume_id,
|
||||
|
@ -61,7 +61,7 @@ class AssistedVolumeSnapshotsController(wsgi.Controller):
|
|||
context = req.environ['nova.context']
|
||||
authorize(context, action='delete')
|
||||
|
||||
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
|
||||
LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
|
||||
|
||||
delete_metadata = {}
|
||||
delete_metadata.update(req.GET)
|
||||
|
|
|
@ -25,6 +25,7 @@ from nova.api.openstack import extensions
|
|||
from nova import compute
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova import network
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
@ -117,7 +118,7 @@ class InterfaceAttachmentController(object):
|
|||
instance = common.get_instance(self.compute_api,
|
||||
context, server_id,
|
||||
want_objects=True)
|
||||
LOG.audit(_("Attach interface"), instance=instance)
|
||||
LOG.info(_LI("Attach interface"), instance=instance)
|
||||
vif = self.compute_api.attach_interface(context,
|
||||
instance, network_id, port_id, req_ip)
|
||||
except (exception.PortNotFound,
|
||||
|
@ -149,7 +150,7 @@ class InterfaceAttachmentController(object):
|
|||
instance = common.get_instance(self.compute_api,
|
||||
context, server_id,
|
||||
want_objects=True)
|
||||
LOG.audit(_("Detach interface %s"), port_id, instance=instance)
|
||||
LOG.info(_LI("Detach interface %s"), port_id, instance=instance)
|
||||
try:
|
||||
self.compute_api.detach_interface(context,
|
||||
instance, port_id=port_id)
|
||||
|
|
|
@ -21,6 +21,7 @@ from nova.api.openstack import extensions
|
|||
from nova import compute
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
@ -142,7 +143,7 @@ class HostController(object):
|
|||
"""Start/Stop host maintenance window. On start, it triggers
|
||||
guest VMs evacuation.
|
||||
"""
|
||||
LOG.audit(_("Putting host %(host_name)s in maintenance mode "
|
||||
LOG.info(_LI("Putting host %(host_name)s in maintenance mode "
|
||||
"%(mode)s."),
|
||||
{'host_name': host_name, 'mode': mode})
|
||||
try:
|
||||
|
@ -165,9 +166,9 @@ class HostController(object):
|
|||
on the host
|
||||
"""
|
||||
if enabled:
|
||||
LOG.audit(_("Enabling host %s.") % host_name)
|
||||
LOG.info(_LI("Enabling host %s.") % host_name)
|
||||
else:
|
||||
LOG.audit(_("Disabling host %s.") % host_name)
|
||||
LOG.info(_LI("Disabling host %s.") % host_name)
|
||||
try:
|
||||
result = self.api.set_host_enabled(context, host_name=host_name,
|
||||
enabled=enabled)
|
||||
|
|
|
@ -19,6 +19,7 @@ from nova.api.openstack import wsgi
|
|||
from nova import compute
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova import objects
|
||||
from nova.objects import external_event as external_event_obj
|
||||
from nova.openstack.common import log as logging
|
||||
|
@ -93,8 +94,8 @@ class ServerExternalEventsController(wsgi.Controller):
|
|||
if instance.host:
|
||||
accepted_events.append(event)
|
||||
accepted_instances.add(instance)
|
||||
LOG.audit(_('Creating event %(name)s:%(tag)s for instance '
|
||||
'%(instance_uuid)s'),
|
||||
LOG.info(_LI('Creating event %(name)s:%(tag)s for '
|
||||
'instance %(instance_uuid)s'),
|
||||
dict(event.iteritems()))
|
||||
# NOTE: as the event is processed asynchronously verify
|
||||
# whether 202 is a more suitable response code than 200
|
||||
|
|
|
@ -25,6 +25,7 @@ from nova.api.openstack import wsgi
|
|||
from nova import compute
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.openstack.common import uuidutils
|
||||
|
@ -73,7 +74,7 @@ def _translate_volume_summary_view(context, vol):
|
|||
d['volumeType'] = vol['volume_type_id']
|
||||
|
||||
d['snapshotId'] = vol['snapshot_id']
|
||||
LOG.audit(_("vol=%s"), vol, context=context)
|
||||
LOG.info(_LI("vol=%s"), vol, context=context)
|
||||
|
||||
if vol.get('volume_metadata'):
|
||||
d['metadata'] = vol.get('volume_metadata')
|
||||
|
@ -107,7 +108,7 @@ class VolumeController(wsgi.Controller):
|
|||
context = req.environ['nova.context']
|
||||
authorize(context)
|
||||
|
||||
LOG.audit(_("Delete volume with id: %s"), id, context=context)
|
||||
LOG.info(_LI("Delete volume with id: %s"), id, context=context)
|
||||
|
||||
try:
|
||||
self.volume_api.delete(context, id)
|
||||
|
@ -159,7 +160,7 @@ class VolumeController(wsgi.Controller):
|
|||
if size is None and snapshot is not None:
|
||||
size = snapshot['volume_size']
|
||||
|
||||
LOG.audit(_("Create volume of %s GB"), size, context=context)
|
||||
LOG.info(_LI("Create volume of %s GB"), size, context=context)
|
||||
|
||||
availability_zone = vol.get('availability_zone', None)
|
||||
|
||||
|
@ -292,7 +293,7 @@ class VolumeAttachmentController(wsgi.Controller):
|
|||
|
||||
self._validate_volume_id(volume_id)
|
||||
|
||||
LOG.audit(_("Attach volume %(volume_id)s to instance %(server_id)s "
|
||||
LOG.info(_LI("Attach volume %(volume_id)s to instance %(server_id)s "
|
||||
"at %(device)s"),
|
||||
{'volume_id': volume_id,
|
||||
'device': device,
|
||||
|
@ -391,7 +392,7 @@ class VolumeAttachmentController(wsgi.Controller):
|
|||
authorize_attach(context, action='delete')
|
||||
|
||||
volume_id = id
|
||||
LOG.audit(_("Detach volume %s"), volume_id, context=context)
|
||||
LOG.info(_LI("Detach volume %s"), volume_id, context=context)
|
||||
|
||||
instance = common.get_instance(self.compute_api, context, server_id,
|
||||
want_objects=True)
|
||||
|
@ -502,7 +503,7 @@ class SnapshotController(wsgi.Controller):
|
|||
context = req.environ['nova.context']
|
||||
authorize(context)
|
||||
|
||||
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
|
||||
LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
|
||||
|
||||
try:
|
||||
self.volume_api.delete_snapshot(context, id)
|
||||
|
@ -540,7 +541,7 @@ class SnapshotController(wsgi.Controller):
|
|||
snapshot = body['snapshot']
|
||||
volume_id = snapshot['volume_id']
|
||||
|
||||
LOG.audit(_("Create snapshot from volume %s"), volume_id,
|
||||
LOG.info(_LI("Create snapshot from volume %s"), volume_id,
|
||||
context=context)
|
||||
|
||||
force = snapshot.get('force', False)
|
||||
|
|
|
@ -26,7 +26,7 @@ from nova.api.openstack import wsgi
|
|||
from nova.api import validation
|
||||
from nova import compute
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
||||
|
@ -54,7 +54,7 @@ class AssistedVolumeSnapshotsController(wsgi.Controller):
|
|||
create_info = snapshot['create_info']
|
||||
volume_id = snapshot['volume_id']
|
||||
|
||||
LOG.audit(_("Create assisted snapshot from volume %s"), volume_id,
|
||||
LOG.info(_LI("Create assisted snapshot from volume %s"), volume_id,
|
||||
context=context)
|
||||
try:
|
||||
return self.compute_api.volume_snapshot_create(context, volume_id,
|
||||
|
@ -70,7 +70,7 @@ class AssistedVolumeSnapshotsController(wsgi.Controller):
|
|||
context = req.environ['nova.context']
|
||||
authorize(context, action='delete')
|
||||
|
||||
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
|
||||
LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
|
||||
|
||||
delete_metadata = {}
|
||||
delete_metadata.update(req.GET)
|
||||
|
|
|
@ -24,6 +24,7 @@ from nova.api import validation
|
|||
from nova import compute
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
@ -133,7 +134,7 @@ class HostController(wsgi.Controller):
|
|||
"""Start/Stop host maintenance window. On start, it triggers
|
||||
guest VMs evacuation.
|
||||
"""
|
||||
LOG.audit(_("Putting host %(host_name)s in maintenance mode "
|
||||
LOG.info(_LI("Putting host %(host_name)s in maintenance mode "
|
||||
"%(mode)s."),
|
||||
{'host_name': host_name, 'mode': mode})
|
||||
try:
|
||||
|
@ -155,9 +156,9 @@ class HostController(wsgi.Controller):
|
|||
on the host.
|
||||
"""
|
||||
if enabled:
|
||||
LOG.audit(_("Enabling host %s."), host_name)
|
||||
LOG.info(_LI("Enabling host %s."), host_name)
|
||||
else:
|
||||
LOG.audit(_("Disabling host %s."), host_name)
|
||||
LOG.info(_LI("Disabling host %s."), host_name)
|
||||
try:
|
||||
result = self.api.set_host_enabled(context, host_name=host_name,
|
||||
enabled=enabled)
|
||||
|
|
|
@ -21,6 +21,7 @@ from nova.api import validation
|
|||
from nova import compute
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
@ -83,8 +84,8 @@ class ServerExternalEventsController(wsgi.Controller):
|
|||
if instance.host:
|
||||
accepted_events.append(event)
|
||||
accepted_instances.add(instance)
|
||||
LOG.audit(_('Creating event %(name)s:%(tag)s for instance '
|
||||
'%(instance_uuid)s'),
|
||||
LOG.info(_LI('Creating event %(name)s:%(tag)s for '
|
||||
'instance %(instance_uuid)s'),
|
||||
dict(event.iteritems()))
|
||||
# NOTE: as the event is processed asynchronously verify
|
||||
# whether 202 is a more suitable response code than 200
|
||||
|
|
|
@ -3867,7 +3867,7 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
|
|||
msg = _("Quota exceeded, too many security groups.")
|
||||
self.raise_over_quota(msg)
|
||||
|
||||
LOG.audit(_("Create Security Group %s"), name, context=context)
|
||||
LOG.info(_LI("Create Security Group %s"), name, context=context)
|
||||
|
||||
try:
|
||||
self.ensure_default(context)
|
||||
|
@ -3972,7 +3972,7 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
|
|||
LOG.exception(_LE("Failed to update usages deallocating "
|
||||
"security group"))
|
||||
|
||||
LOG.audit(_("Delete security group %s"), security_group['name'],
|
||||
LOG.info(_LI("Delete security group %s"), security_group['name'],
|
||||
context=context)
|
||||
self.db.security_group_destroy(context, security_group['id'])
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@ from oslo_serialization import jsonutils
|
|||
from nova import context
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova import objects
|
||||
from nova.objects import base as obj_base
|
||||
from nova.openstack.common import log as logging
|
||||
|
@ -166,7 +167,7 @@ class Claim(NopClaim):
|
|||
raise exception.ComputeResourcesUnavailable(reason=
|
||||
"; ".join(reasons))
|
||||
|
||||
LOG.audit(_('Claim successful'), instance=self.instance)
|
||||
LOG.info(_LI('Claim successful'), instance=self.instance)
|
||||
|
||||
def _test_memory(self, resources, limit):
|
||||
type_ = _("memory")
|
||||
|
@ -235,22 +236,22 @@ class Claim(NopClaim):
|
|||
"""Test if the given type of resource needed for a claim can be safely
|
||||
allocated.
|
||||
"""
|
||||
LOG.audit(_('Total %(type)s: %(total)d %(unit)s, used: %(used).02f '
|
||||
LOG.info(_LI('Total %(type)s: %(total)d %(unit)s, used: %(used).02f '
|
||||
'%(unit)s'),
|
||||
{'type': type_, 'total': total, 'unit': unit, 'used': used},
|
||||
instance=self.instance)
|
||||
|
||||
if limit is None:
|
||||
# treat resource as unlimited:
|
||||
LOG.audit(_('%(type)s limit not specified, defaulting to '
|
||||
LOG.info(_LI('%(type)s limit not specified, defaulting to '
|
||||
'unlimited'), {'type': type_}, instance=self.instance)
|
||||
return
|
||||
|
||||
free = limit - used
|
||||
|
||||
# Oversubscribed resource policy info:
|
||||
LOG.audit(_('%(type)s limit: %(limit).02f %(unit)s, free: %(free).02f '
|
||||
'%(unit)s'),
|
||||
LOG.info(_LI('%(type)s limit: %(limit).02f %(unit)s, '
|
||||
'free: %(free).02f %(unit)s'),
|
||||
{'type': type_, 'limit': limit, 'free': free, 'unit': unit},
|
||||
instance=self.instance)
|
||||
|
||||
|
|
|
@ -1650,7 +1650,7 @@ class ComputeManager(manager.Manager):
|
|||
|
||||
def _start_building(self, context, instance):
|
||||
"""Save the host and launched_on fields and log appropriately."""
|
||||
LOG.audit(_('Starting instance...'), context=context,
|
||||
LOG.info(_LI('Starting instance...'), context=context,
|
||||
instance=instance)
|
||||
self._instance_update(context, instance.uuid,
|
||||
vm_state=vm_states.BUILDING,
|
||||
|
@ -2055,7 +2055,7 @@ class ComputeManager(manager.Manager):
|
|||
node=None, limits=None):
|
||||
|
||||
try:
|
||||
LOG.audit(_('Starting instance...'), context=context,
|
||||
LOG.info(_LI('Starting instance...'), context=context,
|
||||
instance=instance)
|
||||
instance.vm_state = vm_states.BUILDING
|
||||
instance.task_state = None
|
||||
|
@ -2427,7 +2427,8 @@ class ComputeManager(manager.Manager):
|
|||
trying to teardown networking
|
||||
"""
|
||||
context = context.elevated()
|
||||
LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
|
||||
LOG.info(_LI('%(action_str)s instance') %
|
||||
{'action_str': 'Terminating'},
|
||||
context=context, instance=instance)
|
||||
|
||||
if notify:
|
||||
|
@ -2794,7 +2795,7 @@ class ComputeManager(manager.Manager):
|
|||
|
||||
orig_vm_state = instance.vm_state
|
||||
with self._error_out_instance_on_exception(context, instance):
|
||||
LOG.audit(_("Rebuilding instance"), context=context,
|
||||
LOG.info(_LI("Rebuilding instance"), context=context,
|
||||
instance=instance)
|
||||
|
||||
if recreate:
|
||||
|
@ -2961,7 +2962,7 @@ class ComputeManager(manager.Manager):
|
|||
task_states.REBOOT_PENDING_HARD,
|
||||
task_states.REBOOT_STARTED_HARD)
|
||||
context = context.elevated()
|
||||
LOG.audit(_("Rebooting instance"), context=context, instance=instance)
|
||||
LOG.info(_LI("Rebooting instance"), context=context, instance=instance)
|
||||
|
||||
block_device_info = self._get_instance_block_device_info(context,
|
||||
instance)
|
||||
|
@ -3099,7 +3100,7 @@ class ComputeManager(manager.Manager):
|
|||
try:
|
||||
instance.save()
|
||||
|
||||
LOG.audit(_('instance snapshotting'), context=context,
|
||||
LOG.info(_LI('instance snapshotting'), context=context,
|
||||
instance=instance)
|
||||
|
||||
if instance.power_state != power_state.RUNNING:
|
||||
|
@ -3234,7 +3235,7 @@ class ComputeManager(manager.Manager):
|
|||
|
||||
try:
|
||||
self.driver.set_admin_password(instance, new_pass)
|
||||
LOG.audit(_("Root password set"), instance=instance)
|
||||
LOG.info(_LI("Root password set"), instance=instance)
|
||||
instance.task_state = None
|
||||
instance.save(
|
||||
expected_task_state=task_states.UPDATING_PASSWORD)
|
||||
|
@ -3282,7 +3283,7 @@ class ComputeManager(manager.Manager):
|
|||
{'current_state': current_power_state,
|
||||
'expected_state': expected_state},
|
||||
instance=instance)
|
||||
LOG.audit(_('injecting file to %s'), path,
|
||||
LOG.info(_LI('injecting file to %s'), path,
|
||||
instance=instance)
|
||||
self.driver.inject_file(instance, path, file_contents)
|
||||
|
||||
|
@ -3320,7 +3321,7 @@ class ComputeManager(manager.Manager):
|
|||
def rescue_instance(self, context, instance, rescue_password,
|
||||
rescue_image_ref=None, clean_shutdown=True):
|
||||
context = context.elevated()
|
||||
LOG.audit(_('Rescuing'), context=context, instance=instance)
|
||||
LOG.info(_LI('Rescuing'), context=context, instance=instance)
|
||||
|
||||
admin_password = (rescue_password if rescue_password else
|
||||
utils.generate_password())
|
||||
|
@ -3369,7 +3370,7 @@ class ComputeManager(manager.Manager):
|
|||
@wrap_instance_fault
|
||||
def unrescue_instance(self, context, instance):
|
||||
context = context.elevated()
|
||||
LOG.audit(_('Unrescuing'), context=context, instance=instance)
|
||||
LOG.info(_LI('Unrescuing'), context=context, instance=instance)
|
||||
|
||||
network_info = self._get_instance_nw_info(context, instance)
|
||||
self._notify_about_instance_usage(context, instance,
|
||||
|
@ -3703,7 +3704,7 @@ class ComputeManager(manager.Manager):
|
|||
rt = self._get_resource_tracker(node)
|
||||
with rt.resize_claim(context, instance, instance_type,
|
||||
image_meta=image, limits=limits) as claim:
|
||||
LOG.audit(_('Migrating'), context=context, instance=instance)
|
||||
LOG.info(_LI('Migrating'), context=context, instance=instance)
|
||||
self.compute_rpcapi.resize_instance(
|
||||
context, instance, claim.migration, image,
|
||||
instance_type, quotas.reservations,
|
||||
|
@ -4042,7 +4043,7 @@ class ComputeManager(manager.Manager):
|
|||
def pause_instance(self, context, instance):
|
||||
"""Pause an instance on this host."""
|
||||
context = context.elevated()
|
||||
LOG.audit(_('Pausing'), context=context, instance=instance)
|
||||
LOG.info(_LI('Pausing'), context=context, instance=instance)
|
||||
self._notify_about_instance_usage(context, instance, 'pause.start')
|
||||
self.driver.pause(instance)
|
||||
instance.power_state = self._get_power_state(context, instance)
|
||||
|
@ -4058,7 +4059,7 @@ class ComputeManager(manager.Manager):
|
|||
def unpause_instance(self, context, instance):
|
||||
"""Unpause a paused instance on this host."""
|
||||
context = context.elevated()
|
||||
LOG.audit(_('Unpausing'), context=context, instance=instance)
|
||||
LOG.info(_LI('Unpausing'), context=context, instance=instance)
|
||||
self._notify_about_instance_usage(context, instance, 'unpause.start')
|
||||
self.driver.unpause(instance)
|
||||
instance.power_state = self._get_power_state(context, instance)
|
||||
|
@ -4096,7 +4097,7 @@ class ComputeManager(manager.Manager):
|
|||
"""Retrieve diagnostics for an instance on this host."""
|
||||
current_power_state = self._get_power_state(context, instance)
|
||||
if current_power_state == power_state.RUNNING:
|
||||
LOG.audit(_("Retrieving diagnostics"), context=context,
|
||||
LOG.info(_LI("Retrieving diagnostics"), context=context,
|
||||
instance=instance)
|
||||
return self.driver.get_diagnostics(instance)
|
||||
else:
|
||||
|
@ -4113,7 +4114,7 @@ class ComputeManager(manager.Manager):
|
|||
"""Retrieve diagnostics for an instance on this host."""
|
||||
current_power_state = self._get_power_state(context, instance)
|
||||
if current_power_state == power_state.RUNNING:
|
||||
LOG.audit(_("Retrieving diagnostics"), context=context,
|
||||
LOG.info(_LI("Retrieving diagnostics"), context=context,
|
||||
instance=instance)
|
||||
diags = self.driver.get_instance_diagnostics(instance)
|
||||
return diags.serialize()
|
||||
|
@ -4152,7 +4153,7 @@ class ComputeManager(manager.Manager):
|
|||
def resume_instance(self, context, instance):
|
||||
"""Resume the given suspended instance."""
|
||||
context = context.elevated()
|
||||
LOG.audit(_('Resuming'), context=context, instance=instance)
|
||||
LOG.info(_LI('Resuming'), context=context, instance=instance)
|
||||
|
||||
self._notify_about_instance_usage(context, instance, 'resume.start')
|
||||
network_info = self._get_instance_nw_info(context, instance)
|
||||
|
@ -4388,7 +4389,7 @@ class ComputeManager(manager.Manager):
|
|||
def get_console_output(self, context, instance, tail_length):
|
||||
"""Send the console output for the given instance."""
|
||||
context = context.elevated()
|
||||
LOG.audit(_("Get console output"), context=context,
|
||||
LOG.info(_LI("Get console output"), context=context,
|
||||
instance=instance)
|
||||
output = self.driver.get_console_output(context, instance)
|
||||
|
||||
|
@ -4632,7 +4633,7 @@ class ComputeManager(manager.Manager):
|
|||
|
||||
def _attach_volume(self, context, instance, bdm):
|
||||
context = context.elevated()
|
||||
LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
|
||||
LOG.info(_LI('Attaching volume %(volume_id)s to %(mountpoint)s'),
|
||||
{'volume_id': bdm.volume_id,
|
||||
'mountpoint': bdm['mount_device']},
|
||||
context=context, instance=instance)
|
||||
|
@ -4657,7 +4658,7 @@ class ComputeManager(manager.Manager):
|
|||
mp = bdm.device_name
|
||||
volume_id = bdm.volume_id
|
||||
|
||||
LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
|
||||
LOG.info(_LI('Detach volume %(volume_id)s from mountpoint %(mp)s'),
|
||||
{'volume_id': volume_id, 'mp': mp},
|
||||
context=context, instance=instance)
|
||||
|
||||
|
@ -6068,7 +6069,7 @@ class ComputeManager(manager.Manager):
|
|||
|
||||
for cn in compute_nodes_in_db:
|
||||
if cn.hypervisor_hostname not in nodenames:
|
||||
LOG.audit(_("Deleting orphan compute node %s") % cn.id)
|
||||
LOG.info(_LI("Deleting orphan compute node %s") % cn.id)
|
||||
cn.destroy()
|
||||
|
||||
self._resource_tracker_dict = new_resource_tracker_dict
|
||||
|
|
|
@ -311,12 +311,12 @@ class ResourceTracker(object):
|
|||
declared a need for resources, but not necessarily retrieved them from
|
||||
the hypervisor layer yet.
|
||||
"""
|
||||
LOG.audit(_("Auditing locally available compute resources"))
|
||||
LOG.info(_LI("Auditing locally available compute resources"))
|
||||
resources = self.driver.get_available_resource(self.nodename)
|
||||
|
||||
if not resources:
|
||||
# The virt driver does not support this function
|
||||
LOG.audit(_("Virt driver does not support "
|
||||
LOG.info(_LI("Virt driver does not support "
|
||||
"'get_available_resource' Compute tracking is disabled."))
|
||||
self.compute_node = None
|
||||
return
|
||||
|
@ -510,25 +510,25 @@ class ResourceTracker(object):
|
|||
including instance calculations and in-progress resource claims. These
|
||||
values will be exposed via the compute node table to the scheduler.
|
||||
"""
|
||||
LOG.audit(_("Total physical ram (MB): %(pram)s, "
|
||||
LOG.info(_LI("Total physical ram (MB): %(pram)s, "
|
||||
"total allocated virtual ram (MB): %(vram)s"),
|
||||
{'pram': resources['memory_mb'],
|
||||
'vram': resources['memory_mb_used']})
|
||||
LOG.audit(_("Total physical disk (GB): %(pdisk)s, "
|
||||
LOG.info(_LI("Total physical disk (GB): %(pdisk)s, "
|
||||
"total allocated virtual disk (GB): %(vdisk)s"),
|
||||
{'pdisk': resources['local_gb'],
|
||||
'vdisk': resources['local_gb_used']})
|
||||
|
||||
vcpus = resources['vcpus']
|
||||
if vcpus:
|
||||
LOG.audit(_("Total usable vcpus: %(tcpu)s, "
|
||||
LOG.info(_LI("Total usable vcpus: %(tcpu)s, "
|
||||
"total allocated vcpus: %(ucpu)s"),
|
||||
{'tcpu': vcpus, 'ucpu': resources['vcpus_used']})
|
||||
else:
|
||||
LOG.audit(_("Free VCPU information unavailable"))
|
||||
LOG.info(_LI("Free VCPU information unavailable"))
|
||||
|
||||
if 'pci_stats' in resources:
|
||||
LOG.audit(_("PCI stats: %s"), resources['pci_stats'])
|
||||
LOG.info(_LI("PCI stats: %s"), resources['pci_stats'])
|
||||
|
||||
def _resource_change(self, resources):
|
||||
"""Check to see if any resouces have changed."""
|
||||
|
@ -594,7 +594,7 @@ class ResourceTracker(object):
|
|||
represent an incoming or outbound migration.
|
||||
"""
|
||||
uuid = migration['instance_uuid']
|
||||
LOG.audit(_("Updating from migration %s") % uuid)
|
||||
LOG.info(_LI("Updating from migration %s") % uuid)
|
||||
|
||||
incoming = (migration['dest_compute'] == self.host and
|
||||
migration['dest_node'] == self.nodename)
|
||||
|
|
|
@ -24,7 +24,7 @@ from oslo_serialization import jsonutils
|
|||
|
||||
from nova.cells import rpcapi as cells_rpcapi
|
||||
from nova.compute import rpcapi as compute_rpcapi
|
||||
from nova.i18n import _, _LW
|
||||
from nova.i18n import _LI, _LW
|
||||
from nova import manager
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
|
@ -96,7 +96,7 @@ class ConsoleAuthManager(manager.Manager):
|
|||
"into memcached"),
|
||||
{'instance_uuid': instance_uuid})
|
||||
|
||||
LOG.audit(_("Received Token: %(token)s, %(token_dict)s"),
|
||||
LOG.info(_LI("Received Token: %(token)s, %(token_dict)s"),
|
||||
{'token': token, 'token_dict': token_dict})
|
||||
|
||||
def _validate_token(self, context, token):
|
||||
|
@ -121,7 +121,7 @@ class ConsoleAuthManager(manager.Manager):
|
|||
def check_token(self, context, token):
|
||||
token_str = self.mc.get(token.encode('UTF-8'))
|
||||
token_valid = (token_str is not None)
|
||||
LOG.audit(_("Checking Token: %(token)s, %(token_valid)s"),
|
||||
LOG.info(_LI("Checking Token: %(token)s, %(token_valid)s"),
|
||||
{'token': token, 'token_valid': token_valid})
|
||||
if token_valid:
|
||||
token = jsonutils.loads(token_str)
|
||||
|
|
|
@ -31,7 +31,7 @@ from nova import conductor
|
|||
from nova import context
|
||||
from nova import debugger
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LW
|
||||
from nova.i18n import _, _LE, _LI, _LW
|
||||
from nova.objects import base as objects_base
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.openstack.common import service
|
||||
|
@ -158,7 +158,7 @@ class Service(service.Service):
|
|||
|
||||
def start(self):
|
||||
verstr = version.version_string_with_package()
|
||||
LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
|
||||
LOG.info(_LI('Starting %(topic)s node (version %(version)s)'),
|
||||
{'topic': self.topic, 'version': verstr})
|
||||
self.basic_config_check()
|
||||
self.manager.init_host()
|
||||
|
|
|
@ -252,7 +252,7 @@ class HackingTestCase(test.NoDBTestCase):
|
|||
'exception']
|
||||
levels = ['_LI', '_LW', '_LE', '_LC']
|
||||
debug = "LOG.debug('OK')"
|
||||
audit = "LOG.audit(_('OK'))"
|
||||
audit = "LOG.info(_LI('OK'))"
|
||||
self.assertEqual(
|
||||
0, len(list(checks.validate_log_translations(debug, debug, 'f'))))
|
||||
self.assertEqual(
|
||||
|
|
|
@ -19,7 +19,6 @@ from oslo_serialization import jsonutils
|
|||
from oslo_utils import excutils
|
||||
|
||||
from nova import block_device
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.i18n import _LI
|
||||
from nova import objects
|
||||
|
@ -392,7 +391,7 @@ def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
|
|||
def _log_and_attach(bdm):
|
||||
context = attach_args[0]
|
||||
instance = attach_args[1]
|
||||
LOG.audit(_('Booting with volume %(volume_id)s at %(mountpoint)s'),
|
||||
LOG.info(_LI('Booting with volume %(volume_id)s at %(mountpoint)s'),
|
||||
{'volume_id': bdm.volume_id,
|
||||
'mountpoint': bdm['mount_device']},
|
||||
context=context, instance=instance)
|
||||
|
|
|
@ -27,7 +27,7 @@ import webob
|
|||
|
||||
from nova.consoleauth import rpcapi as consoleauth_rpcapi
|
||||
from nova import context
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import version
|
||||
from nova import wsgi
|
||||
|
@ -91,12 +91,13 @@ class XCPVNCProxy(object):
|
|||
data += b
|
||||
if data.find("\r\n\r\n") != -1:
|
||||
if not data.split("\r\n")[0].find("200"):
|
||||
LOG.audit(_("Error in handshake format: %s"), data)
|
||||
LOG.info(_LI("Error in handshake format: %s"),
|
||||
data)
|
||||
return
|
||||
break
|
||||
|
||||
if not b or len(data) > 4096:
|
||||
LOG.audit(_("Error in handshake: %s"), data)
|
||||
LOG.info(_LI("Error in handshake: %s"), data)
|
||||
return
|
||||
|
||||
client = req.environ['eventlet.input'].get_socket()
|
||||
|
@ -111,7 +112,7 @@ class XCPVNCProxy(object):
|
|||
t0.wait()
|
||||
|
||||
if not sockets.get('client') or not sockets.get('server'):
|
||||
LOG.audit(_("Invalid request: %s"), req)
|
||||
LOG.info(_LI("Invalid request: %s"), req)
|
||||
start_response('400 Invalid Request',
|
||||
[('content-type', 'text/html')])
|
||||
return "Invalid Request"
|
||||
|
@ -131,10 +132,10 @@ class XCPVNCProxy(object):
|
|||
def __call__(self, environ, start_response):
|
||||
try:
|
||||
req = webob.Request(environ)
|
||||
LOG.audit(_("Request: %s"), req)
|
||||
LOG.info(_LI("Request: %s"), req)
|
||||
token = req.params.get('token')
|
||||
if not token:
|
||||
LOG.audit(_("Request made with missing token: %s"), req)
|
||||
LOG.info(_LI("Request made with missing token: %s"), req)
|
||||
start_response('400 Invalid Request',
|
||||
[('content-type', 'text/html')])
|
||||
return "Invalid Request"
|
||||
|
@ -144,14 +145,14 @@ class XCPVNCProxy(object):
|
|||
connect_info = api.check_token(ctxt, token)
|
||||
|
||||
if not connect_info:
|
||||
LOG.audit(_("Request made with invalid token: %s"), req)
|
||||
LOG.info(_LI("Request made with invalid token: %s"), req)
|
||||
start_response('401 Not Authorized',
|
||||
[('content-type', 'text/html')])
|
||||
return "Not Authorized"
|
||||
|
||||
return self.proxy_connection(req, connect_info, start_response)
|
||||
except Exception as e:
|
||||
LOG.audit(_("Unexpected error: %s"), e)
|
||||
LOG.info(_LI("Unexpected error: %s"), e)
|
||||
|
||||
|
||||
class SafeHttpProtocol(eventlet.wsgi.HttpProtocol):
|
||||
|
@ -171,7 +172,7 @@ class SafeHttpProtocol(eventlet.wsgi.HttpProtocol):
|
|||
|
||||
|
||||
def get_wsgi_server():
|
||||
LOG.audit(_("Starting nova-xvpvncproxy node (version %s)"),
|
||||
LOG.info(_LI("Starting nova-xvpvncproxy node (version %s)"),
|
||||
version.version_string_with_package())
|
||||
|
||||
return wsgi.Server("XCP VNC Proxy",
|
||||
|
|
Loading…
Reference in New Issue