node resource consolidation

This strategy centralizes VMs onto as few nodes as possible
via VM migration. Users can set an input parameter to decide
how to select the destination node.

Implements: blueprint node-resource-consolidation
Closes-Bug: #1843016
Change-Id: I104c864d532c2092f5dc6f0c8f756ebeae12f09e
licanwei 2019-08-30 02:16:38 -07:00
parent 845a9187e3
commit f1fe4b6c62
7 changed files with 667 additions and 1 deletion


@@ -0,0 +1,7 @@
---
features:
  - |
    Added strategy "node resource consolidation". This
    strategy centralizes VMs onto as few nodes as
    possible via VM migration. Users can set an input
    parameter to decide how to select the destination node.

setup.cfg

@@ -87,6 +87,7 @@ watcher_strategies =
    storage_capacity_balance = watcher.decision_engine.strategy.strategies.storage_capacity_balance:StorageCapacityBalance
    zone_migration = watcher.decision_engine.strategy.strategies.zone_migration:ZoneMigration
    host_maintenance = watcher.decision_engine.strategy.strategies.host_maintenance:HostMaintenance
    node_resource_consolidation = watcher.decision_engine.strategy.strategies.node_resource_consolidation:NodeResourceConsolidation

watcher_actions =
    migrate = watcher.applier.actions.migration:Migrate

watcher/decision_engine/strategy/strategies/__init__.py

@@ -20,6 +20,8 @@ from watcher.decision_engine.strategy.strategies import basic_consolidation
from watcher.decision_engine.strategy.strategies import dummy_strategy
from watcher.decision_engine.strategy.strategies import dummy_with_scorer
from watcher.decision_engine.strategy.strategies import host_maintenance
from watcher.decision_engine.strategy.strategies import \
    node_resource_consolidation
from watcher.decision_engine.strategy.strategies import noisy_neighbor
from watcher.decision_engine.strategy.strategies import outlet_temp_control
from watcher.decision_engine.strategy.strategies import saving_energy
@@ -45,6 +47,8 @@ VMWorkloadConsolidation = vm_workload_consolidation.VMWorkloadConsolidation
WorkloadBalance = workload_balance.WorkloadBalance
WorkloadStabilization = workload_stabilization.WorkloadStabilization
UniformAirflow = uniform_airflow.UniformAirflow
NodeResourceConsolidation = (
    node_resource_consolidation.NodeResourceConsolidation)
NoisyNeighbor = noisy_neighbor.NoisyNeighbor
ZoneMigration = zone_migration.ZoneMigration
HostMaintenance = host_maintenance.HostMaintenance
@@ -54,4 +58,4 @@ __all__ = ("Actuator", "BaseStrategy", "BasicConsolidation",
           "VMWorkloadConsolidation", "WorkloadBalance",
           "WorkloadStabilization", "UniformAirflow", "NoisyNeighbor",
           "SavingEnergy", "StorageCapacityBalance", "ZoneMigration",
           "HostMaintenance")
           "HostMaintenance", "NodeResourceConsolidation")

watcher/decision_engine/strategy/strategies/node_resource_consolidation.py

@@ -0,0 +1,290 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 ZTE Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from oslo_log import log

from watcher._i18n import _
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
from watcher import objects

LOG = log.getLogger(__name__)


class NodeResourceConsolidation(base.ServerConsolidationBaseStrategy):
    """Consolidating resources on nodes using server migration

    *Description*

    This strategy checks the resource usage of compute nodes; if the used
    resources are less than the total capacity, it will try to migrate
    servers to consolidate resource usage.

    *Requirements*

    * You must have at least 2 compute nodes to run
      this strategy.
    * Hardware: compute nodes should use the same physical CPUs/RAMs

    *Limitations*

    * This is a proof of concept that is not meant to be used in production
    * It assumes that live migrations are possible

    *Spec URL*

    http://specs.openstack.org/openstack/watcher-specs/specs/train/implemented/node-resource-consolidation.html
    """

    CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state"
    REASON_FOR_DISABLE = 'Watcher node resource consolidation strategy'

    def __init__(self, config, osc=None):
        """Node resource consolidation

        :param config: A mapping containing the configuration of this strategy
        :type config: :py:class:`~.Struct` instance
        :param osc: :py:class:`~.OpenStackClients` instance
        """
        super(NodeResourceConsolidation, self).__init__(config, osc)
        self.host_choice = 'auto'
        self.audit = None
        self.compute_nodes_count = 0
        self.number_of_released_nodes = 0
        self.number_of_migrations = 0

    @classmethod
    def get_name(cls):
        return "node_resource_consolidation"

    @classmethod
    def get_display_name(cls):
        return _("Node Resource Consolidation strategy")

    @classmethod
    def get_translatable_display_name(cls):
        return "Node Resource Consolidation strategy"

    @classmethod
    def get_schema(cls):
        # Mandatory default setting for each element
        return {
            "properties": {
                "host_choice": {
                    "description": "The way to select the destination node "
                                   "for server migration. The value 'auto' "
                                   "means that the Nova scheduler selects "
                                   "the destination node, and 'specify' "
                                   "means that the strategy specifies the "
                                   "destination.",
                    "type": "string",
                    "default": 'auto'
                },
            },
        }
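
    # A minimal sketch (illustrative, not part of this change) of how the
    # schema above can be exercised with the jsonschema library; the
    # parameter name comes from the schema, everything else is assumed:
    #
    #   import jsonschema
    #   jsonschema.validate({"host_choice": "specify"},
    #                       NodeResourceConsolidation.get_schema())
    #
    # When an audit omits the parameter, Watcher falls back to the
    # declared default ('auto').
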
    def check_resources(self, servers, destination):
        # check whether the destination node can accommodate the servers;
        # every server that fits is removed from the input list
        dest_flag = False
        if not destination:
            return dest_flag
        free_res = self.compute_model.get_node_free_resources(destination)
        for server in servers:
            # only vcpu and memory are considered, disk is ignored
            if free_res['vcpu'] >= server.vcpus and (
                    free_res['memory'] >= server.memory):
                free_res['vcpu'] -= server.vcpus
                free_res['memory'] -= server.memory
                dest_flag = True
                servers.remove(server)
        return dest_flag
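
    # Worked example with assumed numbers: given free resources
    # {'vcpu': 16, 'memory': 32} on the destination, a server needing
    # 10 vCPUs and 20 memory units fits and is removed from the list,
    # leaving {'vcpu': 6, 'memory': 12}; a second server needing 10 vCPUs
    # no longer fits and stays in the list, so the method returns True
    # with the leftover server still queued for another destination.
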
    def select_destination(self, server, source, destinations):
        dest_node = None
        if not destinations:
            return dest_node
        sorted_nodes = sorted(
            destinations,
            key=lambda x: self.compute_model.get_node_free_resources(
                x)['vcpu'])
        for dest in sorted_nodes:
            if self.check_resources([server], dest):
                if self.compute_model.migrate_instance(server, source, dest):
                    dest_node = dest
                    break
        return dest_node
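
    # Destination choice illustrated with assumed numbers: sorting
    # ascending by free vCPUs packs the busiest feasible node first.
    # With free vCPUs {nodeA: 2, nodeB: 8, nodeC: 30}, a server needing
    # 4 vCPUs lands on nodeB, keeping the emptier nodeC available for
    # larger servers and improving the chance that it can be freed
    # entirely.
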
    def add_migrate_actions(self, sources, destinations):
        if not sources or not destinations:
            return
        for node in sources:
            servers = self.compute_model.get_node_instances(node)
            sorted_servers = sorted(
                servers,
                key=lambda x: x.vcpus,
                reverse=True)
            for server in sorted_servers:
                parameters = {'migration_type': 'live',
                              'source_node': node.hostname,
                              'resource_name': server.name}
                action_flag = False
                if self.host_choice != 'auto':
                    # specify the destination host
                    dest = self.select_destination(server, node, destinations)
                    if dest:
                        parameters['destination_node'] = dest.hostname
                        action_flag = True
                else:
                    action_flag = True
                if action_flag:
                    self.number_of_migrations += 1
                    self.solution.add_action(
                        action_type=self.MIGRATION,
                        resource_id=server.uuid,
                        input_parameters=parameters)
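
    # The two host_choice modes yield different action parameters: with
    # 'auto' only migration_type, source_node and resource_name are set
    # and the Nova scheduler picks the target, while any other value adds
    # a destination_node chosen by select_destination() above; the unit
    # tests below show concrete examples of both shapes.
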
    def add_change_node_state_actions(self, nodes, status):
        if status not in (element.ServiceState.DISABLED.value,
                          element.ServiceState.ENABLED.value):
            raise exception.IllegalArgumentException(
                message=_("The node status is not defined"))
        changed_nodes = []
        for node in nodes:
            if node.status != status:
                parameters = {'state': status,
                              'resource_name': node.hostname}
                if status == element.ServiceState.DISABLED.value:
                    parameters['disabled_reason'] = self.REASON_FOR_DISABLE
                self.solution.add_action(
                    action_type=self.CHANGE_NOVA_SERVICE_STATE,
                    resource_id=node.uuid,
                    input_parameters=parameters)
                node.status = status
                changed_nodes.append(node)
        return changed_nodes

    def get_nodes_migrate_failed(self):
        # find the nodes hosting servers whose migration action failed
        # in an earlier run; this only applies to continuous audits
        nodes_failed = []
        if self.audit is None or (
                self.audit.audit_type ==
                objects.audit.AuditType.ONESHOT.value):
            return nodes_failed
        filters = {'audit_uuid': self.audit.uuid}
        actions = objects.action.Action.list(
            self.ctx,
            filters=filters)
        for action in actions:
            if action.state == objects.action.State.FAILED and (
                    action.action_type == self.MIGRATION):
                server_uuid = action.input_parameters.get('resource_id')
                node = self.compute_model.get_node_by_instance_uuid(
                    server_uuid)
                if node not in nodes_failed:
                    nodes_failed.append(node)
        return nodes_failed

    def group_nodes(self, nodes):
        free_nodes = []
        source_nodes = []
        dest_nodes = []
        nodes_failed = self.get_nodes_migrate_failed()
        LOG.info("Migration failed on nodes: %s", nodes_failed)
        sorted_nodes = sorted(
            nodes,
            key=lambda x: self.compute_model.get_node_used_resources(
                x)['vcpu'])
        for node in sorted_nodes:
            if node in dest_nodes:
                break
            # if a migration from this node ever failed, do not try again
            if node in nodes_failed:
                # but it may still serve as a destination node
                if node.status == element.ServiceState.ENABLED.value:
                    dest_nodes.append(node)
                continue
            used_resource = self.compute_model.get_node_used_resources(node)
            if used_resource['vcpu'] > 0:
                servers = self.compute_model.get_node_instances(node)
                for dest in reversed(sorted_nodes):
                    # skip the compute node if it is disabled
                    if dest.status == element.ServiceState.DISABLED.value:
                        LOG.info("node %s is disabled", dest.hostname)
                        continue
                    if dest in dest_nodes:
                        continue
                    if node == dest:
                        # the last node serves as a destination node
                        dest_nodes.append(dest)
                        break
                    if self.check_resources(servers, dest):
                        dest_nodes.append(dest)
                        if node not in source_nodes:
                            source_nodes.append(node)
                        if not servers:
                            break
            else:
                free_nodes.append(node)
        return free_nodes, source_nodes, dest_nodes
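
    # Grouping illustrated with assumed numbers: with used vCPUs
    # {n0: 0, n1: 5, n2: 30}, n0 is free, n1 becomes the source to be
    # drained, and n2 (scanned from the most-used end) becomes the
    # destination once check_resources() confirms n1's servers fit.
    # Nodes whose migrations failed in a previous continuous-audit run
    # are never drained again, but may still receive servers if enabled.
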
    def pre_execute(self):
        self._pre_execute()
        self.host_choice = self.input_parameters.host_choice

    def do_execute(self, audit=None):
        """Strategy execution phase

        Executing the strategy and creating the solution.
        """
        self.audit = audit
        nodes = list(self.compute_model.get_all_compute_nodes().values())
        free_nodes, source_nodes, dest_nodes = self.group_nodes(nodes)
        self.compute_nodes_count = len(nodes)
        self.number_of_released_nodes = len(source_nodes)
        LOG.info("Free nodes: %s", free_nodes)
        LOG.info("Source nodes: %s", source_nodes)
        LOG.info("Destination nodes: %s", dest_nodes)
        if not source_nodes:
            LOG.info("No compute node needs to be consolidated")
            return
        nodes_disabled = []
        if self.host_choice == 'auto':
            # disable the free and source nodes so that the Nova
            # scheduler does not select them as destinations
            nodes_disabled = self.add_change_node_state_actions(
                free_nodes + source_nodes,
                element.ServiceState.DISABLED.value)
        self.add_migrate_actions(source_nodes, dest_nodes)
        if nodes_disabled:
            # restore the disabled compute nodes after migration
            self.add_change_node_state_actions(
                nodes_disabled, element.ServiceState.ENABLED.value)

    def post_execute(self):
        """Post-execution phase"""
        self.solution.set_efficacy_indicators(
            compute_nodes_count=self.compute_nodes_count,
            released_compute_nodes_count=self.number_of_released_nodes,
            instance_migrations_count=self.number_of_migrations,
        )

watcher/tests/decision_engine/model/data/scenario_10.xml

@@ -0,0 +1,27 @@
<ModelRoot>
<ComputeNode uuid="89dce55c-8e74-4402-b23f-32aaf216c97f" status="enabled" state="up" id="0" hostname="hostname_0" vcpus="40" vcpu_reserved="0" vcpu_ratio="1" disk="250" disk_gb_reserved="0" disk_ratio="1" disk_capacity="250" memory="132" memory_mb_reserved="0" memory_ratio="1">
<Instance watcher_exclude="False" state="active" name="INSTANCE_0" uuid="6ae05517-a512-462d-9d83-90c313b5a8ff" vcpus="10" disk="20" disk_capacity="20" memory="20" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
<Instance watcher_exclude="False" state="active" name="INSTANCE_1" uuid="6ae05517-a512-462d-9d83-90c313b5a8f1" vcpus="15" disk="20" disk_capacity="20" memory="20" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="26F03131-32CB-4697-9D61-9123F87A8147"/>
</ComputeNode>
<ComputeNode uuid="89dce55c-8e74-4402-b23f-32aaf216c971" status="enabled" state="up" id="1" hostname="hostname_1" vcpus="40" vcpu_reserved="0" vcpu_ratio="1" disk="250" disk_gb_reserved="0" disk_ratio="1" disk_capacity="250" memory="132" memory_mb_reserved="0" memory_ratio="1">
<Instance watcher_exclude="False" state="active" name="INSTANCE_2" uuid="6ae05517-a512-462d-9d83-90c313b5a8f2" vcpus="14" disk="20" disk_capacity="20" memory="20" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="109F7909-0607-4712-B32C-5CC6D49D2F15"/>
</ComputeNode>
<ComputeNode uuid="89dce55c-8e74-4402-b23f-32aaf216c972" status="enabled" state="up" id="2" hostname="hostname_2" vcpus="40" vcpu_reserved="0" vcpu_ratio="1" disk="250" disk_gb_reserved="0" disk_ratio="1" disk_capacity="250" memory="132" memory_mb_reserved="0" memory_ratio="1">
<Instance watcher_exclude="False" state="active" name="INSTANCE_3" uuid="6ae05517-a512-462d-9d83-90c313b5a8f3" vcpus="10" disk="20" disk_capacity="20" memory="20" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
<Instance watcher_exclude="False" state="active" name="INSTANCE_4" uuid="6ae05517-a512-462d-9d83-90c313b5a8f4" vcpus="10" disk="20" disk_capacity="20" memory="20" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
<Instance watcher_exclude="False" state="active" name="INSTANCE_5" uuid="6ae05517-a512-462d-9d83-90c313b5a8f5" vcpus="10" disk="20" disk_capacity="20" memory="20" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
</ComputeNode>
<ComputeNode uuid="89dce55c-8e74-4402-b23f-32aaf216c973" status="enabled" state="up" id="3" hostname="hostname_3" vcpus="40" vcpu_reserved="0" vcpu_ratio="1" disk="250" disk_gb_reserved="0" disk_ratio="1" disk_capacity="250" memory="132" memory_mb_reserved="0" memory_ratio="1">
<Instance watcher_exclude="False" state="active" name="INSTANCE_6" uuid="6ae05517-a512-462d-9d83-90c313b5a8f6" vcpus="8" disk="20" disk_capacity="20" memory="20" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
</ComputeNode>
<ComputeNode uuid="89dce55c-8e74-4402-b23f-32aaf216c974" status="enabled" state="up" id="4" hostname="hostname_4" vcpus="40" vcpu_reserved="0" vcpu_ratio="1" disk="250" disk_gb_reserved="0" disk_ratio="1" disk_capacity="250" memory="132" memory_mb_reserved="0" memory_ratio="1">
<Instance watcher_exclude="False" state="active" name="INSTANCE_7" uuid="6ae05517-a512-462d-9d83-90c313b5a8f7" vcpus="10" disk="20" disk_capacity="20" memory="20" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
</ComputeNode>
<ComputeNode uuid="89dce55c-8e74-4402-b23f-32aaf216c975" status="enabled" state="up" id="5" hostname="hostname_5" vcpus="40" vcpu_reserved="0" vcpu_ratio="1" disk="250" disk_gb_reserved="0" disk_ratio="1" disk_capacity="250" memory="132" memory_mb_reserved="0" memory_ratio="1">
</ComputeNode>
<ComputeNode uuid="89dce55c-8e74-4402-b23f-32aaf216c976" status="disabled" state="up" id="6" hostname="hostname_6" vcpus="40" vcpu_reserved="0" vcpu_ratio="1" disk="250" disk_gb_reserved="0" disk_ratio="1" disk_capacity="250" memory="132" memory_mb_reserved="0" memory_ratio="1">
</ComputeNode>
<ComputeNode uuid="89dce55c-8e74-4402-b23f-32aaf216c977" status="disabled" state="up" id="4" hostname="hostname_7" vcpus="40" vcpu_reserved="0" vcpu_ratio="1" disk="250" disk_gb_reserved="0" disk_ratio="1" disk_capacity="250" memory="132" memory_mb_reserved="0" memory_ratio="1">
<Instance watcher_exclude="False" state="active" name="INSTANCE_8" uuid="6ae05517-a512-462d-9d83-90c313b5a8f8" vcpus="12" disk="20" disk_capacity="20" memory="20" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
</ComputeNode>
</ModelRoot>

watcher/tests/decision_engine/model/faker_cluster_state.py

@@ -171,6 +171,9 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
        return self.load_model(
            'scenario_9_with_3_active_plus_1_disabled_nodes.xml')

    def generate_scenario_10(self):
        return self.load_model('scenario_10.xml')


class FakerStorageModelCollector(base.BaseClusterDataModelCollector):

watcher/tests/decision_engine/strategy/strategies/test_node_resource_consolidation.py

@@ -0,0 +1,334 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 ZTE Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock

from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy import strategies
from watcher import objects
from watcher.tests.decision_engine.strategy.strategies.test_base \
    import TestBaseStrategy
from watcher.tests.objects import utils as obj_utils


class TestNodeResourceConsolidation(TestBaseStrategy):

    def setUp(self):
        super(TestNodeResourceConsolidation, self).setUp()
        self.strategy = strategies.NodeResourceConsolidation(
            config=mock.Mock())
        self.model = self.fake_c_cluster.generate_scenario_10()
        self.m_c_model.return_value = self.model

    def test_check_resources(self):
        instance = [self.model.get_instance_by_uuid(
            "6ae05517-a512-462d-9d83-90c313b5a8ff")]
        dest = self.model.get_node_by_uuid(
            "89dce55c-8e74-4402-b23f-32aaf216c972")
        # test with an empty destination
        result = self.strategy.check_resources(instance, [])
        self.assertFalse(result)
        result = self.strategy.check_resources(instance, dest)
        self.assertTrue(result)
        self.assertEqual([], instance)

    def test_select_destination(self):
        instance0 = self.model.get_instance_by_uuid(
            "6ae05517-a512-462d-9d83-90c313b5a8ff")
        source = self.model.get_node_by_instance_uuid(
            "6ae05517-a512-462d-9d83-90c313b5a8ff")
        expected = self.model.get_node_by_uuid(
            "89dce55c-8e74-4402-b23f-32aaf216c972")
        # test with an empty destination list
        result = self.strategy.select_destination(instance0, source, [])
        self.assertIsNone(result)
        nodes = list(self.model.get_all_compute_nodes().values())
        nodes.remove(source)
        result = self.strategy.select_destination(instance0, source, nodes)
        self.assertEqual(expected, result)

    def test_add_migrate_actions_with_null(self):
        self.strategy.add_migrate_actions([], [])
        self.assertEqual([], self.strategy.solution.actions)
        self.strategy.add_migrate_actions(None, None)
        self.assertEqual([], self.strategy.solution.actions)

    def test_add_migrate_actions_with_auto(self):
        self.strategy.host_choice = 'auto'
        source = self.model.get_node_by_instance_uuid(
            "6ae05517-a512-462d-9d83-90c313b5a8ff")
        nodes = list(self.model.get_all_compute_nodes().values())
        nodes.remove(source)
        self.strategy.add_migrate_actions([source], nodes)
        expected = [
            {'action_type': 'migrate',
             'input_parameters': {
                 'migration_type': 'live',
                 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f1',
                 'resource_name': 'INSTANCE_1',
                 'source_node': 'hostname_0'}},
            {'action_type': 'migrate',
             'input_parameters': {
                 'migration_type': 'live',
                 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8ff',
                 'resource_name': 'INSTANCE_0',
                 'source_node': 'hostname_0'}}]
        self.assertEqual(expected, self.strategy.solution.actions)

    def test_add_migrate_actions_with_specify(self):
        self.strategy.host_choice = 'specify'
        source = self.model.get_node_by_instance_uuid(
            "6ae05517-a512-462d-9d83-90c313b5a8ff")
        nodes = list(self.model.get_all_compute_nodes().values())
        nodes.remove(source)
        self.strategy.add_migrate_actions([source], nodes)
        expected = [
            {'action_type': 'migrate',
             'input_parameters': {
                 'destination_node': 'hostname_1',
                 'migration_type': 'live',
                 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f1',
                 'resource_name': 'INSTANCE_1',
                 'source_node': 'hostname_0'}},
            {'action_type': 'migrate',
             'input_parameters': {
                 'destination_node': 'hostname_2',
                 'migration_type': 'live',
                 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8ff',
                 'resource_name': 'INSTANCE_0',
                 'source_node': 'hostname_0'}}]
        self.assertEqual(expected, self.strategy.solution.actions)

    def test_add_migrate_actions_with_no_action(self):
        self.strategy.host_choice = 'specify'
        source = self.model.get_node_by_uuid(
            "89dce55c-8e74-4402-b23f-32aaf216c971")
        dest = self.model.get_node_by_uuid(
            "89dce55c-8e74-4402-b23f-32aaf216c972")
        self.strategy.add_migrate_actions([source], [dest])
        self.assertEqual([], self.strategy.solution.actions)

    def test_add_change_node_state_actions_with_exception(self):
        self.assertRaises(exception.IllegalArgumentException,
                          self.strategy.add_change_node_state_actions,
                          [], 'down')

    def test_add_change_node_state_actions(self):
        node1 = self.model.get_node_by_uuid(
            "89dce55c-8e74-4402-b23f-32aaf216c972")
        node2 = self.model.get_node_by_uuid(
            "89dce55c-8e74-4402-b23f-32aaf216c97f")
        # disable two nodes
        status = element.ServiceState.DISABLED.value
        result = self.strategy.add_change_node_state_actions(
            [node1, node2], status)
        self.assertEqual([node1, node2], result)
        expected = [
            {'action_type': 'change_nova_service_state',
             'input_parameters': {
                 'disabled_reason': 'Watcher node resource '
                                    'consolidation strategy',
                 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c972',
                 'resource_name': 'hostname_2',
                 'state': 'disabled'}},
            {'action_type': 'change_nova_service_state',
             'input_parameters': {
                 'disabled_reason': 'Watcher node resource consolidation '
                                    'strategy',
                 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c97f',
                 'resource_name': 'hostname_0',
                 'state': 'disabled'}}]
        self.assertEqual(expected, self.strategy.solution.actions)

    def test_add_change_node_state_actions_one_disabled(self):
        node1 = self.model.get_node_by_uuid(
            "89dce55c-8e74-4402-b23f-32aaf216c972")
        node2 = self.model.get_node_by_uuid(
            "89dce55c-8e74-4402-b23f-32aaf216c97f")
        status = element.ServiceState.DISABLED.value
        # one node already disabled, one still enabled
        node1.status = element.ServiceState.DISABLED.value
        result = self.strategy.add_change_node_state_actions(
            [node1, node2], status)
        self.assertEqual([node2], result)
        expected = [
            {'action_type': 'change_nova_service_state',
             'input_parameters': {
                 'disabled_reason': 'Watcher node resource consolidation '
                                    'strategy',
                 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c97f',
                 'resource_name': 'hostname_0',
                 'state': 'disabled'}}]
        self.assertEqual(expected, self.strategy.solution.actions)

    def test_get_nodes_migrate_failed_return_null(self):
        self.strategy.audit = None
        result = self.strategy.get_nodes_migrate_failed()
        self.assertEqual([], result)
        self.strategy.audit = mock.Mock(
            audit_type=objects.audit.AuditType.ONESHOT.value)
        result = self.strategy.get_nodes_migrate_failed()
        self.assertEqual([], result)

    @mock.patch.object(objects.action.Action, 'list')
    def test_get_nodes_migrate_failed(self, mock_list):
        self.strategy.audit = mock.Mock(
            audit_type=objects.audit.AuditType.CONTINUOUS.value)
        fake_action = obj_utils.get_test_action(
            self.context,
            state=objects.action.State.FAILED,
            action_type='migrate',
            input_parameters={
                'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f1'})
        mock_list.return_value = [fake_action]
        result = self.strategy.get_nodes_migrate_failed()
        expected = self.model.get_node_by_uuid(
            '89dce55c-8e74-4402-b23f-32aaf216c97f')
        self.assertEqual([expected], result)

    def test_group_nodes_with_ONESHOT(self):
        self.strategy.audit = mock.Mock(
            audit_type=objects.audit.AuditType.ONESHOT.value)
        nodes = list(self.model.get_all_compute_nodes().values())
        result = self.strategy.group_nodes(nodes)
        node0 = self.model.get_node_by_name('hostname_0')
        node1 = self.model.get_node_by_name('hostname_1')
        node2 = self.model.get_node_by_name('hostname_2')
        node3 = self.model.get_node_by_name('hostname_3')
        node4 = self.model.get_node_by_name('hostname_4')
        node5 = self.model.get_node_by_name('hostname_5')
        node6 = self.model.get_node_by_name('hostname_6')
        node7 = self.model.get_node_by_name('hostname_7')
        source_nodes = [node3, node4, node7]
        dest_nodes = [node2, node0, node1]
        self.assertIn(node5, result[0])
        self.assertIn(node6, result[0])
        self.assertEqual(source_nodes, result[1])
        self.assertEqual(dest_nodes, result[2])

    @mock.patch.object(objects.action.Action, 'list')
    def test_group_nodes_with_CONTINUOUS(self, mock_list):
        self.strategy.audit = mock.Mock(
            audit_type=objects.audit.AuditType.CONTINUOUS.value)
        fake_action = obj_utils.get_test_action(
            self.context,
            state=objects.action.State.FAILED,
            action_type='migrate',
            input_parameters={
                'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f6'})
        mock_list.return_value = [fake_action]
        nodes = list(self.model.get_all_compute_nodes().values())
        result = self.strategy.group_nodes(nodes)
        node0 = self.model.get_node_by_name('hostname_0')
        node1 = self.model.get_node_by_name('hostname_1')
        node2 = self.model.get_node_by_name('hostname_2')
        node3 = self.model.get_node_by_name('hostname_3')
        node4 = self.model.get_node_by_name('hostname_4')
        node5 = self.model.get_node_by_name('hostname_5')
        node6 = self.model.get_node_by_name('hostname_6')
        node7 = self.model.get_node_by_name('hostname_7')
        source_nodes = [node4, node7]
        dest_nodes = [node3, node2, node0, node1]
        self.assertIn(node5, result[0])
        self.assertIn(node6, result[0])
        self.assertEqual(source_nodes, result[1])
        self.assertEqual(dest_nodes, result[2])

    @mock.patch.object(objects.action.Action, 'list')
    def test_execute_with_auto(self, mock_list):
        fake_action = obj_utils.get_test_action(
            self.context,
            state=objects.action.State.FAILED,
            action_type='migrate',
            input_parameters={
                'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f6'})
        mock_list.return_value = [fake_action]
        mock_audit = mock.Mock(
            audit_type=objects.audit.AuditType.CONTINUOUS.value)
        self.strategy.host_choice = 'auto'
        self.strategy.do_execute(mock_audit)
        expected = [
            {'action_type': 'change_nova_service_state',
             'input_parameters': {
                 'disabled_reason': 'Watcher node resource consolidation '
                                    'strategy',
                 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c975',
                 'resource_name': 'hostname_5',
                 'state': 'disabled'}},
            {'action_type': 'change_nova_service_state',
             'input_parameters': {
                 'disabled_reason': 'Watcher node resource consolidation '
                                    'strategy',
                 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c974',
                 'resource_name': 'hostname_4',
                 'state': 'disabled'}},
            {'action_type': 'migrate',
             'input_parameters': {
                 'migration_type': 'live',
                 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f7',
                 'resource_name': 'INSTANCE_7',
                 'source_node': 'hostname_4'}},
            {'action_type': 'migrate',
             'input_parameters': {
                 'migration_type': 'live',
                 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f8',
                 'resource_name': 'INSTANCE_8',
                 'source_node': 'hostname_7'}},
            {'action_type': 'change_nova_service_state',
             'input_parameters': {
                 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c975',
                 'resource_name': 'hostname_5',
                 'state': 'enabled'}},
            {'action_type': 'change_nova_service_state',
             'input_parameters': {
                 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c974',
                 'resource_name': 'hostname_4',
                 'state': 'enabled'}}]
        self.assertEqual(expected, self.strategy.solution.actions)

    def test_execute_with_specify(self):
        mock_audit = mock.Mock(
            audit_type=objects.audit.AuditType.ONESHOT.value)
        self.strategy.host_choice = 'specify'
        self.strategy.do_execute(mock_audit)
        expected = [
            {'action_type': 'migrate',
             'input_parameters': {
                 'destination_node': 'hostname_2',
                 'migration_type': 'live',
                 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f6',
                 'resource_name': 'INSTANCE_6',
                 'source_node': 'hostname_3'}},
            {'action_type': 'migrate',
             'input_parameters': {
                 'destination_node': 'hostname_0',
                 'migration_type': 'live',
                 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f7',
                 'resource_name': 'INSTANCE_7',
                 'source_node': 'hostname_4'}},
            {'action_type': 'migrate',
             'input_parameters': {
                 'destination_node': 'hostname_1',
                 'migration_type': 'live',
                 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f8',
                 'resource_name': 'INSTANCE_8',
                 'source_node': 'hostname_7'}}]
        self.assertEqual(expected, self.strategy.solution.actions)