Adapt workload_balance strategy to multiple datasource backend

This patch set:
1. Removes the nova, ceilometer and gnocchi properties.
2. Adds use of the datasource_backend property along with its
   statistic_aggregation method.
3. Changes the type of the datasource config option.

Change-Id: I09d2dce00378f0ee5381d7c85006752aea6975d2
Partially-Implements: blueprint watcher-multi-datasource
Alexander Chadin 2018-01-11 13:27:56 +03:00
parent 5dd6817d47
commit 891f6bc241
4 changed files with 20 additions and 95 deletions
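In effect, the strategy stops branching on a datasource name and always goes through the common backend interface, whichever datasource was picked from the ordered list. A minimal sketch of that call pattern (illustrative only; the FakeGnocchiHelper class and its return value are invented, not Watcher code):

class FakeGnocchiHelper(object):
    # Stand-in for a datasource helper; a real one would query telemetry.
    def statistic_aggregation(self, resource_id, meter, period,
                              aggregation, granularity=300):
        return 42.0

datasource_backend = FakeGnocchiHelper()
util = datasource_backend.statistic_aggregation(
    'some-instance-uuid', 'cpu_util', 300, 'mean', granularity=300)
print(util)  # 42.0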


@@ -47,15 +47,12 @@ hosts nodes.
 """

 from __future__ import division
-import datetime

 from oslo_config import cfg
 from oslo_log import log

 from watcher._i18n import _
 from watcher.common import exception as wexc
-from watcher.datasource import ceilometer as ceil
-from watcher.datasource import gnocchi as gnoc
 from watcher.decision_engine.model import element
 from watcher.decision_engine.strategy.strategies import base

@@ -111,28 +108,6 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
         # the migration plan will be triggered when the CPU or RAM
         # utilization % reaches threshold
         self._meter = None
-        self._ceilometer = None
-        self._gnocchi = None
-
-    @property
-    def ceilometer(self):
-        if self._ceilometer is None:
-            self.ceilometer = ceil.CeilometerHelper(osc=self.osc)
-        return self._ceilometer
-
-    @ceilometer.setter
-    def ceilometer(self, c):
-        self._ceilometer = c
-
-    @property
-    def gnocchi(self):
-        if self._gnocchi is None:
-            self.gnocchi = gnoc.GnocchiHelper(osc=self.osc)
-        return self._gnocchi
-
-    @gnocchi.setter
-    def gnocchi(self, gnocchi):
-        self._gnocchi = gnocchi

     @classmethod
     def get_name(cls):
@@ -184,11 +159,14 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
     @classmethod
     def get_config_opts(cls):
         return [
-            cfg.StrOpt(
-                "datasource",
-                help="Data source to use in order to query the needed metrics",
-                default="gnocchi",
-                choices=["ceilometer", "gnocchi"])
+            cfg.ListOpt(
+                "datasources",
+                help="Datasources to use in order to query the needed metrics."
+                     " If one of strategy metric isn't available in the first"
+                     " datasource, the next datasource will be chosen.",
+                item_type=cfg.types.String(choices=['gnocchi', 'ceilometer',
+                                                    'monasca']),
+                default=['gnocchi', 'ceilometer', 'monasca'])
         ]

     def get_available_compute_nodes(self):
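For reference, the new option is an oslo.config ListOpt whose items are restricted to the known datasource names, so operators supply an ordered preference list. A standalone sketch of how such an option behaves (the 'demo' group name is made up for the example):

from oslo_config import cfg

conf = cfg.ConfigOpts()
opt = cfg.ListOpt(
    'datasources',
    item_type=cfg.types.String(choices=['gnocchi', 'ceilometer', 'monasca']),
    default=['gnocchi', 'ceilometer', 'monasca'],
    help='Ordered list of datasources to try for each metric.')
conf.register_opt(opt, group='demo')  # group name invented for the sketch
conf([])  # parse an empty command line; defaults apply
print(conf.demo.datasources)  # ['gnocchi', 'ceilometer', 'monasca']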
@@ -307,43 +285,28 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
             instances = self.compute_model.get_node_instances(node)
             node_workload = 0.0
             for instance in instances:
-                instance_util = None
+                util = None
                 try:
-                    if self.config.datasource == "ceilometer":
-                        instance_util = self.ceilometer.statistic_aggregation(
-                            resource_id=instance.uuid,
-                            meter_name=self._meter,
-                            period=self._period,
-                            aggregate='avg')
-                    elif self.config.datasource == "gnocchi":
-                        stop_time = datetime.datetime.utcnow()
-                        start_time = stop_time - datetime.timedelta(
-                            seconds=int(self._period))
-                        instance_util = self.gnocchi.statistic_aggregation(
-                            resource_id=instance.uuid,
-                            metric=self._meter,
-                            granularity=self.granularity,
-                            start_time=start_time,
-                            stop_time=stop_time,
-                            aggregation='mean'
-                        )
+                    util = self.datasource_backend.statistic_aggregation(
+                        instance.uuid, self._meter, self._period, 'mean',
+                        granularity=self.granularity)
                 except Exception as exc:
                     LOG.exception(exc)
                     LOG.error("Can not get %s from %s", self._meter,
                               self.config.datasource)
                     continue
-                if instance_util is None:
+                if util is None:
                     LOG.debug("Instance (%s): %s is None",
                               instance.uuid, self._meter)
                     continue
                 if self._meter == self.CPU_METER_NAME:
-                    workload_cache[instance.uuid] = (instance_util *
+                    workload_cache[instance.uuid] = (util *
                                                      instance.vcpus / 100)
                 else:
-                    workload_cache[instance.uuid] = instance_util
+                    workload_cache[instance.uuid] = util
                 node_workload += workload_cache[instance.uuid]
                 LOG.debug("VM (%s): %s %f", instance.uuid, self._meter,
-                          instance_util)
+                          util)

             cluster_workload += node_workload
             if self._meter == self.CPU_METER_NAME:
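The workload arithmetic itself is unchanged: cpu_util comes back as a percentage, so it is scaled by the instance's vCPU count, while other meters are cached as-is and instances with no data are skipped. A small standalone paraphrase of that logic (not the strategy code itself; the sample values are invented):

def instance_workload(util, vcpus, meter):
    # Mirror of the caching rule above: skip missing data, scale cpu_util.
    if util is None:
        return None
    if meter == 'cpu_util':
        return util * vcpus / 100
    return util

print(instance_workload(50.0, 4, 'cpu_util'))  # 2.0
print(instance_workload(None, 4, 'cpu_util'))  # None (instance is skipped)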


@@ -56,7 +56,7 @@ class FakeCeilometerMetrics(object):
         return result

     def mock_get_statistics_wb(self, resource_id, meter_name, period,
-                               aggregate='avg'):
+                               aggregate, granularity=300):
         result = 0.0
         if meter_name == "cpu_util":
             result = self.get_average_usage_instance_cpu_wb(resource_id)


@@ -87,8 +87,8 @@ class FakeGnocchiMetrics(object):
         mock[uuid] = 25 * oslo_utils.units.Ki
         return mock[str(uuid)]

-    def mock_get_statistics_wb(self, resource_id, metric, granularity,
-                               start_time, stop_time, aggregation='mean'):
+    def mock_get_statistics_wb(self, resource_id, metric, period, aggregate,
+                               granularity=300):
         result = 0.0
         if metric == "cpu_util":
             result = self.get_average_usage_instance_cpu_wb(resource_id)
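Both fakes now expose the same signature as the unified backend call (positional resource id, meter, period and aggregate, plus a granularity keyword), so a single side_effect can stand in for any datasource. A self-contained illustration (the FakeMetrics class and its return values are invented):

import mock  # the standalone mock package used by these tests; unittest.mock behaves the same

class FakeMetrics(object):
    def mock_get_statistics_wb(self, resource_id, meter_name, period,
                               aggregate, granularity=300):
        return 25.0 if meter_name == 'cpu_util' else 0.0

backend = mock.Mock()
backend.statistic_aggregation = mock.Mock(
    side_effect=FakeMetrics().mock_get_statistics_wb)
print(backend.statistic_aggregation('uuid-0', 'cpu_util', 300, 'mean',
                                    granularity=300))  # 25.0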


@@ -17,7 +17,6 @@
 #    limitations under the License.
 #

 import collections
-import datetime

 import mock

 from watcher.applier.loading import default
@@ -56,7 +55,7 @@ class TestWorkloadBalance(base.TestCase):
         self.addCleanup(p_model.stop)

         p_datasource = mock.patch.object(
-            strategies.WorkloadBalance, self.datasource,
+            strategies.WorkloadBalance, "datasource_backend",
             new_callable=mock.PropertyMock)
         self.m_datasource = p_datasource.start()
         self.addCleanup(p_datasource.stop)
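Patching the strategy's datasource_backend attribute with new_callable=mock.PropertyMock, as setUp now does, lets each test decide what the backend returns without touching a real datasource. Roughly (the Strategy class below is invented for the sketch):

import mock

class Strategy(object):
    @property
    def datasource_backend(self):
        raise RuntimeError('would reach a real telemetry service')

with mock.patch.object(Strategy, 'datasource_backend',
                       new_callable=mock.PropertyMock) as m_backend:
    # The property now returns a Mock whose methods can be stubbed.
    m_backend.return_value.statistic_aggregation.return_value = 12.5
    print(Strategy().datasource_backend.statistic_aggregation(
        'uuid-0', 'cpu_util', 300, 'mean'))  # 12.5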
@@ -190,40 +189,3 @@ class TestWorkloadBalance(base.TestCase):
             loaded_action = loader.load(action['action_type'])
             loaded_action.input_parameters = action['input_parameters']
             loaded_action.validate_parameters()
-
-    def test_periods(self):
-        model = self.fake_cluster.generate_scenario_1()
-        self.m_model.return_value = model
-        p_ceilometer = mock.patch.object(
-            strategies.WorkloadBalance, "ceilometer")
-        m_ceilometer = p_ceilometer.start()
-        self.addCleanup(p_ceilometer.stop)
-        p_gnocchi = mock.patch.object(strategies.WorkloadBalance, "gnocchi")
-        m_gnocchi = p_gnocchi.start()
-        self.addCleanup(p_gnocchi.stop)
-        datetime_patcher = mock.patch.object(
-            datetime, 'datetime',
-            mock.Mock(wraps=datetime.datetime)
-        )
-        mocked_datetime = datetime_patcher.start()
-        mocked_datetime.utcnow.return_value = datetime.datetime(
-            2017, 3, 19, 18, 53, 11, 657417)
-        self.addCleanup(datetime_patcher.stop)
-        m_ceilometer.statistic_aggregation = mock.Mock(
-            side_effect=self.fake_metrics.mock_get_statistics_wb)
-        m_gnocchi.statistic_aggregation = mock.Mock(
-            side_effect=self.fake_metrics.mock_get_statistics_wb)
-        instance0 = model.get_instance_by_uuid("INSTANCE_0")
-        self.strategy.group_hosts_by_cpu_or_ram_util()
-        if self.strategy.config.datasource == "ceilometer":
-            m_ceilometer.statistic_aggregation.assert_any_call(
-                aggregate='avg', meter_name='cpu_util',
-                period=300, resource_id=instance0.uuid)
-        elif self.strategy.config.datasource == "gnocchi":
-            stop_time = datetime.datetime.utcnow()
-            start_time = stop_time - datetime.timedelta(
-                seconds=int('300'))
-            m_gnocchi.statistic_aggregation.assert_called_with(
-                resource_id=mock.ANY, metric='cpu_util',
-                granularity=300, start_time=start_time, stop_time=stop_time,
-                aggregation='mean')