From 525191ab0f6e6a472d18505820400cf56e2e3dd1 Mon Sep 17 00:00:00 2001
From: "Erlon R. Cruz"
Date: Mon, 12 Feb 2018 15:41:17 -0200
Subject: [PATCH] Improve logs on scheduler

Scheduler decisions are still not easy to track due to the lack of
information about backends and pools. This adds more verbose
information on that, so we have better means to help debug problems.

TrivialFix

Change-Id: I4a59ed298ba2542dcb4d6787b47085749a34244e
(cherry picked from commit 9c4cd4a2d75d36145f68e08d3aa28f9a45a68f48)
---
 cinder/scheduler/filters/capacity_filter.py | 16 +++++++++++-----
 cinder/scheduler/host_manager.py            | 24 ++++++++++++++++++++++--
 2 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py
index 20e4b6941eb..1661d16f448 100644
--- a/cinder/scheduler/filters/capacity_filter.py
+++ b/cinder/scheduler/filters/capacity_filter.py
@@ -118,6 +118,10 @@ class CapacityFilter(filters.BaseBackendFilter):
         if provision_type == 'thick':
             thin = False
 
+        msg_args = {"grouping_name": backend_state.backend_id,
+                    "grouping": grouping,
+                    "requested": requested_size,
+                    "available": free}
         # Only evaluate using max_over_subscription_ratio if
         # thin_provisioning_support is True. Check if the ratio of
         # provisioned capacity over total capacity has exceeded over
@@ -126,6 +130,8 @@ class CapacityFilter(filters.BaseBackendFilter):
                 backend_state.max_over_subscription_ratio >= 1):
             provisioned_ratio = ((backend_state.provisioned_capacity_gb +
                                   requested_size) / total)
+            LOG.debug("Checking provisioning for request of %s GB. "
+                      "Backend: %s", requested_size, backend_state)
             if provisioned_ratio > backend_state.max_over_subscription_ratio:
                 msg_args = {
                     "provisioned_ratio": provisioned_ratio,
@@ -159,6 +165,11 @@ class CapacityFilter(filters.BaseBackendFilter):
                             "(%(available)sGB) to accommodate thin "
                             "provisioned %(size)sGB volume on %(grouping)s"
                             " %(grouping_name)s.", msg_args)
+            else:
+                LOG.debug("Space information for volume creation "
+                          "on %(grouping)s %(grouping_name)s "
+                          "(requested / avail): "
+                          "%(requested)s/%(available)s", msg_args)
             return res
         elif thin and backend_state.thin_provisioning_support:
             LOG.warning("Filtering out %(grouping)s %(grouping_name)s "
@@ -171,11 +182,6 @@ class CapacityFilter(filters.BaseBackendFilter):
                          "grouping_name": backend_state.backend_id})
             return False
 
-        msg_args = {"grouping_name": backend_state.backend_id,
-                    "grouping": grouping,
-                    "requested": requested_size,
-                    "available": free}
-
         if free < requested_size:
             LOG.warning("Insufficient free space for volume creation "
                         "on %(grouping)s %(grouping_name)s (requested / "
diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py
index fc219f4167b..8c6688e2692 100644
--- a/cinder/scheduler/host_manager.py
+++ b/cinder/scheduler/host_manager.py
@@ -313,6 +313,7 @@ class BackendState(object):
             self.free_capacity_gb -= volume_gb
         if update_time:
             self.updated = timeutils.utcnow()
+        LOG.debug("Consumed %s GB from backend: %s", volume['size'], self)
 
     def __repr__(self):
         # FIXME(zhiteng) backend level free_capacity_gb isn't as
@@ -319,9 +320,28 @@ class BackendState(object):
         # meaningful as it used to be before pool is introduced, we'd
         # come up with better representation of HostState.
         grouping = 'cluster' if self.cluster_name else 'host'
         grouping_name = self.backend_id
-        return ("%s '%s': free_capacity_gb: %s, pools: %s" %
-                (grouping, grouping_name, self.free_capacity_gb, self.pools))
+        return ("%(grouping)s '%(grouping_name)s':"
+                "free_capacity_gb: %(free_capacity_gb)s, "
+                "total_capacity_gb: %(total_capacity_gb)s,"
+                "allocated_capacity_gb: %(allocated_capacity_gb)s, "
+                "max_over_subscription_ratio: %(mosr)s,"
+                "reserved_percentage: %(reserved_percentage)s, "
+                "provisioned_capacity_gb: %(provisioned_capacity_gb)s,"
+                "thin_provisioning_support: %(thin_provisioning_support)s, "
+                "thick_provisioning_support: %(thick)s,"
+                "pools: %(pools)s,"
+                "updated at: %(updated)s" %
+                {'grouping': grouping, 'grouping_name': grouping_name,
+                 'free_capacity_gb': self.free_capacity_gb,
+                 'total_capacity_gb': self.total_capacity_gb,
+                 'allocated_capacity_gb': self.allocated_capacity_gb,
+                 'mosr': self.max_over_subscription_ratio,
+                 'reserved_percentage': self.reserved_percentage,
+                 'provisioned_capacity_gb': self.provisioned_capacity_gb,
+                 'thin_provisioning_support': self.thin_provisioning_support,
+                 'thick': self.thick_provisioning_support,
+                 'pools': self.pools, 'updated': self.updated})
 
 
 class PoolState(BackendState):
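
Note (not part of the patch): the snippet below is a small standalone sketch
that renders the reworked BackendState.__repr__ format from the second
host_manager.py hunk above, to give an idea of what the extra scheduler debug
output looks like. The backend name, pool list, capacity figures and timestamp
used here are hypothetical.

    # Standalone illustration only; the values in 'stats' are made up.
    stats = {
        'grouping': 'host', 'grouping_name': 'cinder-vol-1@lvmdriver-1',
        'free_capacity_gb': 80.0, 'total_capacity_gb': 100.0,
        'allocated_capacity_gb': 20.0, 'mosr': 20.0,
        'reserved_percentage': 0, 'provisioned_capacity_gb': 20.0,
        'thin_provisioning_support': True, 'thick': False,
        'pools': ['pool0'], 'updated': '2018-02-12T15:41:17',
    }
    # Same %-style mapping format string as the new __repr__ in the hunk above.
    print("%(grouping)s '%(grouping_name)s':"
          "free_capacity_gb: %(free_capacity_gb)s, "
          "total_capacity_gb: %(total_capacity_gb)s,"
          "allocated_capacity_gb: %(allocated_capacity_gb)s, "
          "max_over_subscription_ratio: %(mosr)s,"
          "reserved_percentage: %(reserved_percentage)s, "
          "provisioned_capacity_gb: %(provisioned_capacity_gb)s,"
          "thin_provisioning_support: %(thin_provisioning_support)s, "
          "thick_provisioning_support: %(thick)s,"
          "pools: %(pools)s,"
          "updated at: %(updated)s" % stats)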