Refactored the compute model and its elements

In this changeset, I refactored the whole Watcher codebase to
adopt a naming convention for the various elements of the
Compute model that mirrors the naming convention used by Nova.

Change-Id: I28adba5e1f27175f025330417b072686134d5f51
Partially-Implements: blueprint cluster-model-objects-wrapper
Vincent Françoise 2016-07-06 17:44:29 +02:00
parent dbde1afea0
commit 31c37342cd
53 changed files with 1865 additions and 1803 deletions

View File

@ -172,7 +172,7 @@ Input parameter could cause audit creation failure, when:
Watcher service will compute an :ref:`Action Plan <action_plan_definition>`
composed of a list of potential optimization :ref:`actions <action_definition>`
(instance migration, disabling of an hypervisor, ...) according to the
(instance migration, disabling of a compute node, ...) according to the
:ref:`goal <goal_definition>` to achieve. You can see all of the goals
available in section ``[watcher_strategies]`` of the Watcher service
configuration file.

View File

@ -312,7 +312,7 @@ Using that you can now query the values for that specific metric:
.. code-block:: py
query_history.statistic_aggregation(resource_id=hypervisor.uuid,
query_history.statistic_aggregation(resource_id=compute_node.uuid,
meter_name='compute.node.cpu.percent',
period="7200",
aggregate='avg'
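For reference, a complete call under the new naming might look like the
following sketch (the ``query_history`` object and ``compute_node`` variable
are assumed to come from the surrounding documentation):

.. code-block:: py

    avg_cpu = query_history.statistic_aggregation(
        resource_id=compute_node.uuid,
        meter_name='compute.node.cpu.percent',
        period="7200",
        aggregate='avg')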

View File

@ -27,7 +27,7 @@ of the OpenStack :ref:`Cluster <cluster_definition>` such as:
- Live migration of an instance from one compute node to another compute
node with Nova
- Changing the power level of a compute node (ACPI level, ...)
- Changing the current state of an hypervisor (enable or disable) with Nova
- Changing the current state of a compute node (enable or disable) with Nova
In most cases, an :ref:`Action <action_definition>` triggers some concrete
commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.).

View File

@ -23,7 +23,7 @@ from watcher._i18n import _
from watcher.applier.actions import base
from watcher.common import exception
from watcher.common import nova_helper
from watcher.decision_engine.model import hypervisor_state as hstate
from watcher.decision_engine.model import element
class ChangeNovaServiceState(base.BaseAction):
@ -57,7 +57,7 @@ class ChangeNovaServiceState(base.BaseAction):
voluptuous.Length(min=1)),
voluptuous.Required(self.STATE):
voluptuous.Any(*[state.value
for state in list(hstate.HypervisorState)]),
for state in list(element.ServiceState)]),
})
@property
@ -70,17 +70,17 @@ class ChangeNovaServiceState(base.BaseAction):
def execute(self):
target_state = None
if self.state == hstate.HypervisorState.DISABLED.value:
if self.state == element.ServiceState.DISABLED.value:
target_state = False
elif self.state == hstate.HypervisorState.ENABLED.value:
elif self.state == element.ServiceState.ENABLED.value:
target_state = True
return self._nova_manage_service(target_state)
def revert(self):
target_state = None
if self.state == hstate.HypervisorState.DISABLED.value:
if self.state == element.ServiceState.DISABLED.value:
target_state = True
elif self.state == hstate.HypervisorState.ENABLED.value:
elif self.state == element.ServiceState.ENABLED.value:
target_state = False
return self._nova_manage_service(target_state)
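For illustration, a sketch of the input parameters this action now consumes
(the host name is a placeholder; resource_id is assumed to be the compute
host whose nova-compute service gets enabled or disabled):
# Hypothetical payload; 'compute-1' is an illustrative host name.
parameters = {
    'resource_id': 'compute-1',
    'state': element.ServiceState.DISABLED.value,
}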

View File

@ -44,12 +44,12 @@ class Migrate(base.BaseAction):
schema = Schema({
'resource_id': str, # should be a UUID
'migration_type': str, # choices -> "live", "cold"
'dst_hypervisor': str,
'src_hypervisor': str,
'destination_node': str,
'source_node': str,
})
The `resource_id` is the UUID of the server to migrate.
The `src_hypervisor` and `dst_hypervisor` parameters are respectively the
The `source_node` and `destination_node` parameters are respectively the
source and the destination compute hostname (list of available compute
hosts is returned by this command: ``nova service-list --binary
nova-compute``).
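Under the new parameter names, a valid input-parameters payload would look
like this sketch (the UUID and host names are placeholders):
# Illustrative payload only.
parameters = {
    'resource_id': '6ae05517-0000-0000-0000-000000000000',  # instance UUID
    'migration_type': 'live',  # or 'cold'
    'source_node': 'compute-1',
    'destination_node': 'compute-2',
}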
@ -59,8 +59,8 @@ class Migrate(base.BaseAction):
MIGRATION_TYPE = 'migration_type'
LIVE_MIGRATION = 'live'
COLD_MIGRATION = 'cold'
DST_HYPERVISOR = 'dst_hypervisor'
SRC_HYPERVISOR = 'src_hypervisor'
DESTINATION_NODE = 'destination_node'
SOURCE_NODE = 'source_node'
def check_resource_id(self, value):
if (value is not None and
@ -73,14 +73,14 @@ class Migrate(base.BaseAction):
def schema(self):
return voluptuous.Schema({
voluptuous.Required(self.RESOURCE_ID): self.check_resource_id,
voluptuous.Required(self.MIGRATION_TYPE,
default=self.LIVE_MIGRATION):
voluptuous.Any(*[self.LIVE_MIGRATION,
self.COLD_MIGRATION]),
voluptuous.Required(self.DST_HYPERVISOR):
voluptuous.Required(
self.MIGRATION_TYPE, default=self.LIVE_MIGRATION):
voluptuous.Any(
*[self.LIVE_MIGRATION, self.COLD_MIGRATION]),
voluptuous.Required(self.DESTINATION_NODE):
voluptuous.All(voluptuous.Any(*six.string_types),
voluptuous.Length(min=1)),
voluptuous.Required(self.SRC_HYPERVISOR):
voluptuous.Required(self.SOURCE_NODE):
voluptuous.All(voluptuous.Any(*six.string_types),
voluptuous.Length(min=1)),
})
@ -94,12 +94,12 @@ class Migrate(base.BaseAction):
return self.input_parameters.get(self.MIGRATION_TYPE)
@property
def dst_hypervisor(self):
return self.input_parameters.get(self.DST_HYPERVISOR)
def destination_node(self):
return self.input_parameters.get(self.DESTINATION_NODE)
@property
def src_hypervisor(self):
return self.input_parameters.get(self.SRC_HYPERVISOR)
def source_node(self):
return self.input_parameters.get(self.SOURCE_NODE)
def _live_migrate_instance(self, nova, destination):
result = None
@ -159,14 +159,14 @@ class Migrate(base.BaseAction):
raise exception.InstanceNotFound(name=self.instance_uuid)
def execute(self):
return self.migrate(destination=self.dst_hypervisor)
return self.migrate(destination=self.destination_node)
def revert(self):
return self.migrate(destination=self.src_hypervisor)
return self.migrate(destination=self.source_node)
def precondition(self):
# TODO(jed): check if the instance exists / check if the instance is on
# the src_hypervisor
# the source_node
pass
def postcondition(self):

View File

@ -317,7 +317,7 @@ class KeystoneFailure(WatcherException):
class ClusterEmpty(WatcherException):
msg_fmt = _("The list of hypervisor(s) in the cluster is empty")
msg_fmt = _("The list of compute node(s) in the cluster is empty")
class MetricCollectorNotDefined(WatcherException):
@ -346,7 +346,7 @@ class GlobalEfficacyComputationError(WatcherException):
"goal using the '%(strategy)s' strategy.")
class NoMetricValuesForVM(WatcherException):
class NoMetricValuesForInstance(WatcherException):
msg_fmt = _("No values returned by %(resource_id)s for %(metric_name)s.")
@ -357,11 +357,11 @@ class NoSuchMetricForHost(WatcherException):
# Model
class InstanceNotFound(WatcherException):
msg_fmt = _("The instance '%(name)s' is not found")
msg_fmt = _("The instance '%(name)s' could not be found")
class HypervisorNotFound(WatcherException):
msg_fmt = _("The hypervisor is not found")
class ComputeNodeNotFound(WatcherException):
msg_fmt = _("The compute node %s could not be found")
class LoadingError(WatcherException):

View File

@ -40,7 +40,7 @@ class NovaHelper(object):
self.nova = self.osc.nova()
self.glance = self.osc.glance()
def get_hypervisors_list(self):
def get_compute_node_list(self):
return self.nova.hypervisors.list()
def find_instance(self, instance_id):
@ -54,7 +54,26 @@ class NovaHelper(object):
break
return instance
def watcher_non_live_migrate_instance(self, instance_id, hypervisor_id,
def wait_for_volume_status(self, volume, status, timeout=60,
poll_interval=1):
"""Wait until volume reaches given status.
:param volume: volume resource
:param status: expected status of volume
:param timeout: timeout in seconds
:param poll_interval: poll interval in seconds
"""
start_time = time.time()
while time.time() - start_time < timeout:
volume = self.cinder.volumes.get(volume.id)
if volume.status == status:
break
time.sleep(poll_interval)
else:
raise Exception("Volume %s did not reach status %s after %d s"
% (volume.id, status, timeout))
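A usage sketch for this new helper (assuming volume was fetched through the
Cinder client and self is a NovaHelper):
# Wait up to two minutes for the volume to become 'available' again
# ('available' is a standard Cinder volume status).
self.wait_for_volume_status(volume, "available", timeout=120)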
def watcher_non_live_migrate_instance(self, instance_id, node_id,
keep_original_image_name=True):
"""This method migrates a given instance
@ -218,7 +237,7 @@ class NovaHelper(object):
# We create the new instance from
# the intermediate image of the original instance
new_instance = self. \
create_instance(hypervisor_id,
create_instance(node_id,
instance_name,
image_uuid,
flavor_name,
@ -358,7 +377,7 @@ class NovaHelper(object):
# Sets the compute host's ability to accept new instances.
# host_maintenance_mode(self, host, mode):
# Start/Stop host maintenance window.
# On start, it triggers guest VMs evacuation.
# On start, it triggers evacuation of the guest instances.
host = self.nova.hosts.get(hostname)
if not host:
@ -463,20 +482,20 @@ class NovaHelper(object):
else:
self.nova.servers.stop(instance_id)
if self.wait_for_vm_state(instance, "stopped", 8, 10):
if self.wait_for_instance_state(instance, "stopped", 8, 10):
LOG.debug("Instance %s stopped." % instance_id)
return True
else:
return False
def wait_for_vm_state(self, server, vm_state, retry, sleep):
"""Waits for server to be in a specific vm_state
def wait_for_instance_state(self, server, state, retry, sleep):
"""Waits for server to be in a specific state
The vm_state can be one of the following :
The state can be one of the following:
active, stopped
:param server: server object.
:param vm_state: for which state we are waiting for
:param state: the state we are waiting for
:param retry: how many times to retry
:param sleep: seconds to sleep between the retries
"""
@ -484,11 +503,11 @@ class NovaHelper(object):
if not server:
return False
while getattr(server, 'OS-EXT-STS:vm_state') != vm_state and retry:
while getattr(server, 'OS-EXT-STS:vm_state') != state and retry:
time.sleep(sleep)
server = self.nova.servers.get(server)
retry -= 1
return getattr(server, 'OS-EXT-STS:vm_state') == vm_state
return getattr(server, 'OS-EXT-STS:vm_state') == state
def wait_for_instance_status(self, instance, status_list, retry, sleep):
"""Waits for instance to be in a specific status
@ -514,7 +533,7 @@ class NovaHelper(object):
LOG.debug("Current instance status: %s" % instance.status)
return instance.status in status_list
def create_instance(self, hypervisor_id, inst_name="test", image_id=None,
def create_instance(self, node_id, inst_name="test", image_id=None,
flavor_name="m1.tiny",
sec_group_list=["default"],
network_names_list=["demo-net"], keypair_name="mykeys",
@ -570,15 +589,14 @@ class NovaHelper(object):
net_obj = {"net-id": nic_id}
net_list.append(net_obj)
instance = self.nova.servers. \
create(inst_name,
image, flavor=flavor,
key_name=keypair_name,
security_groups=sec_group_list,
nics=net_list,
block_device_mapping_v2=block_device_mapping_v2,
availability_zone="nova:" +
hypervisor_id)
instance = self.nova.servers.create(
inst_name, image,
flavor=flavor,
key_name=keypair_name,
security_groups=sec_group_list,
nics=net_list,
block_device_mapping_v2=block_device_mapping_v2,
availability_zone="nova:%s" % node_id)
# Poll at 5 second intervals, until the status is no longer 'BUILD'
if instance:
@ -609,13 +627,13 @@ class NovaHelper(object):
return network_id
def get_vms_by_hypervisor(self, host):
return [vm for vm in
def get_instances_by_node(self, host):
return [instance for instance in
self.nova.servers.list(search_opts={"all_tenants": True})
if self.get_hostname(vm) == host]
if self.get_hostname(instance) == host]
def get_hostname(self, vm):
return str(getattr(vm, 'OS-EXT-SRV-ATTR:host'))
def get_hostname(self, instance):
return str(getattr(instance, 'OS-EXT-SRV-ATTR:host'))
def get_flavor_instance(self, instance, cache):
fid = instance.flavor['id']

View File

@ -118,11 +118,11 @@ class ReleasedComputeNodesCount(IndicatorSpecification):
voluptuous.Range(min=0), required=True)
class VmMigrationsCount(IndicatorSpecification):
class InstanceMigrationsCount(IndicatorSpecification):
def __init__(self):
super(VmMigrationsCount, self).__init__(
name="vm_migrations_count",
description=_("The number of migrations to be performed."),
super(InstanceMigrationsCount, self).__init__(
name="instance_migrations_count",
description=_("The number of VM migrations to be performed."),
unit=None,
)

View File

@ -34,14 +34,14 @@ class ServerConsolidation(base.EfficacySpecification):
def get_indicators_specifications(self):
return [
indicators.ReleasedComputeNodesCount(),
indicators.VmMigrationsCount(),
indicators.InstanceMigrationsCount(),
]
def get_global_efficacy_indicator(self, indicators_map):
value = 0
if indicators_map.vm_migrations_count > 0:
if indicators_map.instance_migrations_count > 0:
value = (float(indicators_map.released_compute_nodes_count) /
float(indicators_map.vm_migrations_count)) * 100
float(indicators_map.instance_migrations_count)) * 100
return efficacy.Indicator(
name="released_nodes_ratio",

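A quick worked example of the ratio above (hypothetical numbers): releasing
2 compute nodes after 8 instance migrations yields a global efficacy of 25%.
# Hypothetical numbers; mirrors the released_nodes_ratio computation above.
released_compute_nodes_count = 2
instance_migrations_count = 8
value = (float(released_compute_nodes_count) /
         float(instance_migrations_count)) * 100  # -> 25.0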
View File

@ -16,7 +16,6 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from oslo_config import cfg
@ -40,7 +39,7 @@ class CollectorManager(object):
for collector_name in available_collectors:
collector = self.collector_loader.load(collector_name)
collectors[collector_name] = collector
self._collectors = collectors
self._collectors = collectors
return self._collectors

View File

@ -20,10 +20,8 @@ from oslo_log import log
from watcher.common import nova_helper
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import hypervisor as obj_hypervisor
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm as obj_vm
LOG = log.getLogger(__name__)
@ -50,45 +48,46 @@ class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector):
LOG.debug("Building latest Nova cluster data model")
model = model_root.ModelRoot()
mem = resource.Resource(resource.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
disk_capacity = resource.Resource(resource.ResourceType.disk_capacity)
mem = element.Resource(element.ResourceType.memory)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
disk_capacity = element.Resource(element.ResourceType.disk_capacity)
model.create_resource(mem)
model.create_resource(num_cores)
model.create_resource(disk)
model.create_resource(disk_capacity)
flavor_cache = {}
hypervisors = self.wrapper.get_hypervisors_list()
for h in hypervisors:
service = self.wrapper.nova.services.find(id=h.service['id'])
# create hypervisor in cluster_model_collector
hypervisor = obj_hypervisor.Hypervisor()
hypervisor.uuid = service.host
hypervisor.hostname = h.hypervisor_hostname
nodes = self.wrapper.get_compute_node_list()
for n in nodes:
service = self.wrapper.nova.services.find(id=n.service['id'])
# create node in cluster_model_collector
node = element.ComputeNode()
node.uuid = service.host
node.hostname = n.hypervisor_hostname
# set capacity
mem.set_capacity(hypervisor, h.memory_mb)
disk.set_capacity(hypervisor, h.free_disk_gb)
disk_capacity.set_capacity(hypervisor, h.local_gb)
num_cores.set_capacity(hypervisor, h.vcpus)
hypervisor.state = h.state
hypervisor.status = h.status
model.add_hypervisor(hypervisor)
vms = self.wrapper.get_vms_by_hypervisor(str(service.host))
for v in vms:
mem.set_capacity(node, n.memory_mb)
disk.set_capacity(node, n.free_disk_gb)
disk_capacity.set_capacity(node, n.local_gb)
num_cores.set_capacity(node, n.vcpus)
node.state = n.state
node.status = n.status
model.add_node(node)
instances = self.wrapper.get_instances_by_node(str(service.host))
for v in instances:
# create VM in cluster_model_collector
vm = obj_vm.VM()
vm.uuid = v.id
# nova/nova/compute/vm_states.py
vm.state = getattr(v, 'OS-EXT-STS:vm_state')
instance = element.Instance()
instance.uuid = v.id
# nova/nova/compute/vm_states.py
instance.state = getattr(v, 'OS-EXT-STS:vm_state')
# set capacity
self.wrapper.get_flavor_instance(v, flavor_cache)
mem.set_capacity(vm, v.flavor['ram'])
disk.set_capacity(vm, v.flavor['disk'])
num_cores.set_capacity(vm, v.flavor['vcpus'])
mem.set_capacity(instance, v.flavor['ram'])
disk.set_capacity(instance, v.flavor['disk'])
num_cores.set_capacity(instance, v.flavor['vcpus'])
model.get_mapping().map(node, instance)
model.add_instance(instance)
model.get_mapping().map(hypervisor, vm)
model.add_vm(vm)
return model

View File

@ -0,0 +1,39 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Authors: Vincent FRANCOISE <vincent.francoise@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from watcher.decision_engine.model.element import disk_info
from watcher.decision_engine.model.element import instance
from watcher.decision_engine.model.element import node
from watcher.decision_engine.model.element import resource
ServiceState = node.ServiceState
PowerState = node.PowerState
ComputeNode = node.ComputeNode
InstanceState = instance.InstanceState
Instance = instance.Instance
DiskInfo = disk_info.DiskInfo
ResourceType = resource.ResourceType
Resource = resource.Resource
__all__ = [
'ServiceState', 'PowerState', 'ComputeNode', 'InstanceState', 'Instance',
'DiskInfo', 'ResourceType', 'Resource']
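With this aggregating module in place, every model element is importable from
a single namespace; a minimal usage sketch (identifiers such as 'compute-1'
are illustrative, mirroring the assignments done in the Nova collector):
from watcher.decision_engine.model import element

node = element.ComputeNode()
node.uuid = 'compute-1'  # illustrative host name
instance = element.Instance()
instance.state = element.InstanceState.ACTIVE.value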

View File

@ -1,11 +1,13 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
# Copyright (c) 2016 b<>com
#
# Authors: Vincent FRANCOISE <vincent.francoise@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@ -14,11 +16,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import abc
import six
class HypervisorState(enum.Enum):
ONLINE = 'up'
OFFLINE = 'down'
ENABLED = 'enabled'
DISABLED = 'disabled'
@six.add_metaclass(abc.ABCMeta)
class Element(object):
@abc.abstractmethod
def accept(self, visitor):
raise NotImplementedError()

View File

@ -14,8 +14,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
class ComputeResource(object):
import six
from watcher.decision_engine.model.element import base
@six.add_metaclass(abc.ABCMeta)
class ComputeResource(base.Element):
def __init__(self):
self._uuid = ""

View File

@ -14,8 +14,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from watcher.decision_engine.model.element import base
class DiskInfo(base.Element):
class DiskInfo(object):
def __init__(self):
self.name = ""
self.major = 0
@ -23,6 +26,9 @@ class DiskInfo(object):
self.size = 0
self.scheduler = ""
def accept(self, visitor):
raise NotImplementedError()
def set_size(self, size):
"""DiskInfo

View File

@ -0,0 +1,54 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from watcher.decision_engine.model.element import compute_resource
class InstanceState(enum.Enum):
ACTIVE = 'active' # Instance is running
BUILDING = 'building' # Instance only exists in DB
PAUSED = 'paused'
SUSPENDED = 'suspended' # Instance is suspended to disk.
STOPPED = 'stopped' # Instance is shut off, the disk image is still there.
RESCUED = 'rescued' # A rescue image is running with the original image
# attached.
RESIZED = 'resized' # an instance with the new size is active.
SOFT_DELETED = 'soft-delete' # Instance is marked as deleted but is
# still available to restore.
DELETED = 'deleted' # Instance is permanently deleted.
ERROR = 'error'
class Instance(compute_resource.ComputeResource):
def __init__(self):
super(Instance, self).__init__()
self._state = InstanceState.ACTIVE.value
def accept(self, visitor):
raise NotImplementedError()
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = state

View File

@ -14,17 +14,46 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from watcher.decision_engine.model import compute_resource
from watcher.decision_engine.model import hypervisor_state
from watcher.decision_engine.model import power_state
import enum
from watcher.decision_engine.model.element import compute_resource
class Hypervisor(compute_resource.ComputeResource):
class ServiceState(enum.Enum):
ONLINE = 'up'
OFFLINE = 'down'
ENABLED = 'enabled'
DISABLED = 'disabled'
class PowerState(enum.Enum):
# away mode
g0 = "g0"
# power on suspend (processor caches are flushed)
# The power to the CPU(s) and RAM is maintained
g1_S1 = "g1_S1"
# CPU powered off. Dirty cache is flushed to RAM
g1_S2 = "g1_S2"
# Suspend to RAM
g1_S3 = "g1_S3"
# Suspend to Disk
g1_S4 = "g1_S4"
# switch outlet X OFF on the PDU (Power Distribution Unit)
switch_off = "switch_off"
# switch outlet X ON on the PDU (Power Distribution Unit)
switch_on = "switch_on"
class ComputeNode(compute_resource.ComputeResource):
def __init__(self):
super(Hypervisor, self).__init__()
self._state = hypervisor_state.HypervisorState.ONLINE
self._status = hypervisor_state.HypervisorState.ENABLED
self._power_state = power_state.PowerState.g0
super(ComputeNode, self).__init__()
self._state = ServiceState.ONLINE
self._status = ServiceState.ENABLED
self._power_state = PowerState.g0
def accept(self, visitor):
raise NotImplementedError()
@property
def state(self):

View File

@ -14,9 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
import threading
from oslo_log import log
from watcher._i18n import _LW
LOG = log.getLogger(__name__)
@ -25,110 +26,108 @@ LOG = log.getLogger(__name__)
class Mapping(object):
def __init__(self, model):
self.model = model
self._mapping_hypervisors = {}
self.mapping_vm = {}
self.compute_node_mapping = {}
self.instance_mapping = {}
self.lock = threading.Lock()
def map(self, hypervisor, vm):
"""Select the hypervisor where the instance is launched
def map(self, node, instance):
"""Select the node where the instance is launched
:param hypervisor: the hypervisor
:param vm: the virtual machine or instance
:param node: the node
:param instance: the virtual machine or instance
"""
try:
self.lock.acquire()
# init first
if hypervisor.uuid not in self._mapping_hypervisors.keys():
self._mapping_hypervisors[hypervisor.uuid] = []
if node.uuid not in self.compute_node_mapping.keys():
self.compute_node_mapping[node.uuid] = []
# map node => vms
self._mapping_hypervisors[hypervisor.uuid].append(
vm.uuid)
# map node => instances
self.compute_node_mapping[node.uuid].append(
instance.uuid)
# map vm => node
self.mapping_vm[vm.uuid] = hypervisor.uuid
# map instance => node
self.instance_mapping[instance.uuid] = node.uuid
finally:
self.lock.release()
def unmap(self, hypervisor, vm):
"""Remove the instance from the hypervisor
def unmap(self, node, instance):
"""Remove the instance from the node
:param hypervisor: the hypervisor
:param vm: the virtual machine or instance
:param node: the node
:param instance: the virtual machine or instance
"""
self.unmap_from_id(hypervisor.uuid, vm.uuid)
self.unmap_from_id(node.uuid, instance.uuid)
def unmap_from_id(self, node_uuid, vm_uuid):
"""Remove the instance (by id) from the hypervisor (by id)
def unmap_from_id(self, node_uuid, instance_uuid):
"""Remove the instance (by id) from the node (by id)
:rtype: object
"""
try:
self.lock.acquire()
if str(node_uuid) in self._mapping_hypervisors:
self._mapping_hypervisors[str(node_uuid)].remove(str(vm_uuid))
# remove vm
self.mapping_vm.pop(vm_uuid)
if str(node_uuid) in self.compute_node_mapping:
self.compute_node_mapping[str(node_uuid)].remove(
str(instance_uuid))
# remove instance
self.instance_mapping.pop(instance_uuid)
else:
LOG.warning(_LW(
"trying to delete the virtual machine %(vm)s but it was "
"not found on hypervisor %(hyp)s"),
{'vm': vm_uuid, 'hyp': node_uuid})
"Trying to delete the instance %(instance)s but it was "
"not found on node %(node)s"),
{'instance': instance_uuid, 'node': node_uuid})
finally:
self.lock.release()
def get_mapping(self):
return self._mapping_hypervisors
return self.compute_node_mapping
def get_mapping_vm(self):
return self.mapping_vm
def get_node_from_instance(self, instance):
return self.get_node_from_instance_id(instance.uuid)
def get_node_from_vm(self, vm):
return self.get_node_from_vm_id(vm.uuid)
def get_node_from_instance_id(self, instance_uuid):
"""Getting host information from the guest instance
def get_node_from_vm_id(self, vm_uuid):
"""Getting host information from the guest VM
:param vm: the uuid of the instance
:return: hypervisor
:param instance_uuid: the uuid of the instance
:return: node
"""
return self.model.get_hypervisor_from_id(
self.get_mapping_vm()[str(vm_uuid)])
return self.model.get_node_from_id(
self.instance_mapping[str(instance_uuid)])
def get_node_vms(self, hypervisor):
"""Get the list of instances running on the hypervisor
def get_node_instances(self, node):
"""Get the list of instances running on the node
:param hypervisor:
:param node:
:return:
"""
return self.get_node_vms_from_id(hypervisor.uuid)
return self.get_node_instances_from_id(node.uuid)
def get_node_vms_from_id(self, node_uuid):
if str(node_uuid) in self._mapping_hypervisors.keys():
return self._mapping_hypervisors[str(node_uuid)]
def get_node_instances_from_id(self, node_uuid):
if str(node_uuid) in self.compute_node_mapping.keys():
return self.compute_node_mapping[str(node_uuid)]
else:
# empty
return []
def migrate_vm(self, vm, src_hypervisor, dest_hypervisor):
"""Migrate single instance from src_hypervisor to dest_hypervisor
def migrate_instance(self, instance, source_node, destination_node):
"""Migrate single instance from source_node to destination_node
:param vm:
:param src_hypervisor:
:param dest_hypervisor:
:param instance:
:param source_node:
:param destination_node:
:return:
"""
if src_hypervisor == dest_hypervisor:
if source_node == destination_node:
return False
# unmap
self.unmap(src_hypervisor, vm)
self.unmap(source_node, instance)
# map
self.map(dest_hypervisor, vm)
self.map(destination_node, instance)
return True
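A minimal interaction sketch with the renamed mapping API (assuming model is
a ModelRoot to which node, other_node and instance were already added; names
are illustrative):
mapping = model.get_mapping()
mapping.map(node, instance)
assert mapping.get_node_instances_from_id(node.uuid) == [instance.uuid]
mapping.migrate_instance(instance, node, other_node)
assert mapping.instance_mapping[instance.uuid] == other_node.uuid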

View File

@ -17,16 +17,14 @@
from watcher._i18n import _
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import hypervisor
from watcher.decision_engine.model import element
from watcher.decision_engine.model import mapping
from watcher.decision_engine.model import vm
class ModelRoot(object):
def __init__(self, stale=False):
self._hypervisors = utils.Struct()
self._vms = utils.Struct()
self._nodes = utils.Struct()
self._instances = utils.Struct()
self.mapping = mapping.Mapping(self)
self.resource = utils.Struct()
self.stale = stale
@ -36,46 +34,46 @@ class ModelRoot(object):
__bool__ = __nonzero__
def assert_hypervisor(self, obj):
if not isinstance(obj, hypervisor.Hypervisor):
def assert_node(self, obj):
if not isinstance(obj, element.ComputeNode):
raise exception.IllegalArgumentException(
message=_("'obj' argument type is not valid"))
def assert_vm(self, obj):
if not isinstance(obj, vm.VM):
def assert_instance(self, obj):
if not isinstance(obj, element.Instance):
raise exception.IllegalArgumentException(
message=_("'obj' argument type is not valid"))
def add_hypervisor(self, hypervisor):
self.assert_hypervisor(hypervisor)
self._hypervisors[hypervisor.uuid] = hypervisor
def add_node(self, node):
self.assert_node(node)
self._nodes[node.uuid] = node
def remove_hypervisor(self, hypervisor):
self.assert_hypervisor(hypervisor)
if str(hypervisor.uuid) not in self._hypervisors.keys():
raise exception.HypervisorNotFound(hypervisor.uuid)
def remove_node(self, node):
self.assert_node(node)
if str(node.uuid) not in self._nodes:
raise exception.ComputeNodeNotFound(node.uuid)
else:
del self._hypervisors[hypervisor.uuid]
del self._nodes[node.uuid]
def add_vm(self, vm):
self.assert_vm(vm)
self._vms[vm.uuid] = vm
def add_instance(self, instance):
self.assert_instance(instance)
self._instances[instance.uuid] = instance
def get_all_hypervisors(self):
return self._hypervisors
def get_all_compute_nodes(self):
return self._nodes
def get_hypervisor_from_id(self, hypervisor_uuid):
if str(hypervisor_uuid) not in self._hypervisors.keys():
raise exception.HypervisorNotFound(hypervisor_uuid)
return self._hypervisors[str(hypervisor_uuid)]
def get_node_from_id(self, node_uuid):
if str(node_uuid) not in self._nodes:
raise exception.ComputeNodeNotFound(node_uuid)
return self._nodes[str(node_uuid)]
def get_vm_from_id(self, uuid):
if str(uuid) not in self._vms.keys():
def get_instance_from_id(self, uuid):
if str(uuid) not in self._instances:
raise exception.InstanceNotFound(name=uuid)
return self._vms[str(uuid)]
return self._instances[str(uuid)]
def get_all_vms(self):
return self._vms
def get_all_instances(self):
return self._instances
def get_mapping(self):
return self.mapping
@ -83,5 +81,5 @@ class ModelRoot(object):
def create_resource(self, r):
self.resource[str(r.name)] = r
def get_resource_from_id(self, id):
return self.resource[str(id)]
def get_resource_from_id(self, resource_id):
return self.resource[str(resource_id)]

View File

@ -1,31 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class PowerState(enum.Enum):
# away mode
g0 = "g0"
# power on suspend (processor caches are flushed)
# The power to the CPU(s) and RAM is maintained
g1_S1 = "g1_S1"
# CPU powered off. Dirty cache is flushed to RAM
g1_S2 = "g1_S2"
# Suspend to RAM
g1_S3 = "g1_S3"
# Suspend to Disk
g1_S4 = "g1_S4"
# switch outlet X OFF on the PDU (Power Distribution Unit)
switch_off = "switch_off"
# switch outlet X ON on the PDU (Power Distribution Unit)
switch_on = "switch_on"

View File

@ -1,31 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from watcher.decision_engine.model import compute_resource
from watcher.decision_engine.model import vm_state
class VM(compute_resource.ComputeResource):
def __init__(self):
super(VM, self).__init__()
self._state = vm_state.VMState.ACTIVE.value
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = state

View File

@ -1,34 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class VMState(enum.Enum):
ACTIVE = 'active' # VM is running
BUILDING = 'building' # VM only exists in DB
PAUSED = 'paused'
SUSPENDED = 'suspended' # VM is suspended to disk.
STOPPED = 'stopped' # VM is powered off, the disk image is still there.
RESCUED = 'rescued' # A rescue image is running with the original VM image
# attached.
RESIZED = 'resized' # a VM with the new size is active.
SOFT_DELETED = 'soft-delete'
# still available to restore.
DELETED = 'deleted' # VM is permanently deleted.
ERROR = 'error'

View File

@ -32,9 +32,7 @@ from oslo_log import log
from watcher._i18n import _, _LE, _LI, _LW
from watcher.common import exception
from watcher.decision_engine.cluster.history import ceilometer as cch
from watcher.decision_engine.model import hypervisor_state as hyper_state
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@ -136,46 +134,47 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
"""
self.migration_attempts = size_cluster * self.bound_migration
def check_migration(self, src_hypervisor, dest_hypervisor, vm_to_mig):
def check_migration(self, source_node, destination_node,
instance_to_migrate):
"""Check if the migration is possible
:param src_hypervisor: the current node of the virtual machine
:param dest_hypervisor: the destination of the virtual machine
:param vm_to_mig: the virtual machine
:param source_node: the current node of the virtual machine
:param destination_node: the destination of the virtual machine
:param instance_to_migrate: the instance / virtual machine
:return: True if there is enough room on the destination, otherwise False
"""
if src_hypervisor == dest_hypervisor:
if source_node == destination_node:
return False
LOG.debug('Migrate VM %s from %s to %s',
vm_to_mig, src_hypervisor, dest_hypervisor)
LOG.debug('Migrate instance %s from %s to %s',
instance_to_migrate, source_node, destination_node)
total_cores = 0
total_disk = 0
total_mem = 0
cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores)
element.ResourceType.cpu_cores)
disk_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.disk)
element.ResourceType.disk)
memory_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.memory)
element.ResourceType.memory)
for vm_id in self.compute_model. \
get_mapping().get_node_vms(dest_hypervisor):
vm = self.compute_model.get_vm_from_id(vm_id)
total_cores += cpu_capacity.get_capacity(vm)
total_disk += disk_capacity.get_capacity(vm)
total_mem += memory_capacity.get_capacity(vm)
for instance_id in self.compute_model. \
get_mapping().get_node_instances(destination_node):
instance = self.compute_model.get_instance_from_id(instance_id)
total_cores += cpu_capacity.get_capacity(instance)
total_disk += disk_capacity.get_capacity(instance)
total_mem += memory_capacity.get_capacity(instance)
# capacity requested by hypervisor
total_cores += cpu_capacity.get_capacity(vm_to_mig)
total_disk += disk_capacity.get_capacity(vm_to_mig)
total_mem += memory_capacity.get_capacity(vm_to_mig)
# capacity requested by the instance to migrate
total_cores += cpu_capacity.get_capacity(instance_to_migrate)
total_disk += disk_capacity.get_capacity(instance_to_migrate)
total_mem += memory_capacity.get_capacity(instance_to_migrate)
return self.check_threshold(dest_hypervisor, total_cores, total_disk,
return self.check_threshold(destination_node, total_cores, total_disk,
total_mem)
def check_threshold(self, dest_hypervisor, total_cores,
def check_threshold(self, destination_node, total_cores,
total_disk, total_mem):
"""Check threshold
@ -183,18 +182,18 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
aggregated CPU capacity of VMs on one node to CPU capacity
of this node must not exceed the threshold value.
:param dest_hypervisor: the destination of the virtual machine
:param destination_node: the destination of the virtual machine
:param total_cores: total cores of the virtual machine
:param total_disk: total disk size used by the virtual machine
:param total_mem: total memory used by the virtual machine
:return: True if the threshold is not exceeded
"""
cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(dest_hypervisor)
element.ResourceType.cpu_cores).get_capacity(destination_node)
disk_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.disk).get_capacity(dest_hypervisor)
element.ResourceType.disk).get_capacity(destination_node)
memory_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.memory).get_capacity(dest_hypervisor)
element.ResourceType.memory).get_capacity(destination_node)
return (cpu_capacity >= total_cores * self.threshold_cores and
disk_capacity >= total_disk * self.threshold_disk and
@ -210,7 +209,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
"""
return self.migration_attempts
def calculate_weight(self, element, total_cores_used, total_disk_used,
def calculate_weight(self, node, total_cores_used, total_disk_used,
total_memory_used):
"""Calculate weight of every resource
@ -221,13 +220,13 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
:return:
"""
cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(element)
element.ResourceType.cpu_cores).get_capacity(node)
disk_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.disk).get_capacity(element)
element.ResourceType.disk).get_capacity(node)
memory_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.memory).get_capacity(element)
element.ResourceType.memory).get_capacity(node)
score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /
float(cpu_capacity))
@ -245,13 +244,14 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
# TODO(jed): take in account weight
return (score_cores + score_disk + score_memory) / 3
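As a quick sanity check of the weight formula (hypothetical numbers): with 16
cores of capacity and 4 cores used, the CPU sub-score is 0.25, i.e. the
utilization ratio.
# Hypothetical numbers; mirrors the score_cores expression above.
cpu_capacity, total_cores_used = 16.0, 4.0
score_cores = 1 - (cpu_capacity - total_cores_used) / cpu_capacity  # -> 0.25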
def calculate_score_node(self, hypervisor):
def calculate_score_node(self, node):
"""Calculate the score that represent the utilization level
:param hypervisor:
:return:
:param node: :py:class:`~.ComputeNode` instance
:return: Score for the given compute node
:rtype: float
"""
resource_id = "%s_%s" % (hypervisor.uuid, hypervisor.hostname)
resource_id = "%s_%s" % (node.uuid, node.hostname)
host_avg_cpu_util = self.ceilometer. \
statistic_aggregation(resource_id=resource_id,
meter_name=self.HOST_CPU_USAGE_METRIC_NAME,
@ -268,11 +268,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
host_avg_cpu_util = 100
cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(hypervisor)
element.ResourceType.cpu_cores).get_capacity(node)
total_cores_used = cpu_capacity * (host_avg_cpu_util / 100)
return self.calculate_weight(hypervisor, total_cores_used, 0, 0)
return self.calculate_weight(node, total_cores_used, 0, 0)
def calculate_migration_efficacy(self):
"""Calculate migration efficacy
@ -286,34 +286,34 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
else:
return 0
def calculate_score_vm(self, vm):
def calculate_score_instance(self, instance):
"""Calculate Score of virtual machine
:param vm: the virtual machine
:param instance: the virtual machine
:return: score
"""
vm_cpu_utilization = self.ceilometer. \
instance_cpu_utilization = self.ceilometer. \
statistic_aggregation(
resource_id=vm.uuid,
resource_id=instance.uuid,
meter_name=self.INSTANCE_CPU_USAGE_METRIC_NAME,
period="7200",
aggregate='avg'
)
if vm_cpu_utilization is None:
if instance_cpu_utilization is None:
LOG.error(
_LE("No values returned by %(resource_id)s "
"for %(metric_name)s"),
resource_id=vm.uuid,
resource_id=instance.uuid,
metric_name=self.INSTANCE_CPU_USAGE_METRIC_NAME,
)
vm_cpu_utilization = 100
instance_cpu_utilization = 100
cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(vm)
element.ResourceType.cpu_cores).get_capacity(instance)
total_cores_used = cpu_capacity * (vm_cpu_utilization / 100.0)
total_cores_used = cpu_capacity * (instance_cpu_utilization / 100.0)
return self.calculate_weight(vm, total_cores_used, 0, 0)
return self.calculate_weight(instance, total_cores_used, 0, 0)
def add_change_service_state(self, resource_id, state):
parameters = {'state': state}
@ -324,79 +324,80 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
def add_migration(self,
resource_id,
migration_type,
src_hypervisor,
dst_hypervisor):
source_node,
destination_node):
parameters = {'migration_type': migration_type,
'src_hypervisor': src_hypervisor,
'dst_hypervisor': dst_hypervisor}
'source_node': source_node,
'destination_node': destination_node}
self.solution.add_action(action_type=self.MIGRATION,
resource_id=resource_id,
input_parameters=parameters)
def score_of_nodes(self, score):
"""Calculate score of nodes based on load by VMs"""
for hypervisor_id in self.compute_model.get_all_hypervisors():
hypervisor = self.compute_model. \
get_hypervisor_from_id(hypervisor_id)
for node_id in self.compute_model.get_all_compute_nodes():
node = self.compute_model. \
get_node_from_id(node_id)
count = self.compute_model.get_mapping(). \
get_node_vms_from_id(hypervisor_id)
get_node_instances_from_id(node_id)
if len(count) > 0:
result = self.calculate_score_node(hypervisor)
result = self.calculate_score_node(node)
else:
# The hypervisor has not VMs
# The node has no instances
result = 0
if len(count) > 0:
score.append((hypervisor_id, result))
score.append((node_id, result))
return score
def node_and_vm_score(self, sorted_score, score):
def node_and_instance_score(self, sorted_score, score):
"""Get List of VMs from node"""
node_to_release = sorted_score[len(score) - 1][0]
vms_to_mig = self.compute_model.get_mapping().get_node_vms_from_id(
node_to_release)
instances_to_migrate = (
self.compute_model.mapping.get_node_instances_from_id(
node_to_release))
vm_score = []
for vm_id in vms_to_mig:
vm = self.compute_model.get_vm_from_id(vm_id)
if vm.state == vm_state.VMState.ACTIVE.value:
vm_score.append(
(vm_id, self.calculate_score_vm(vm)))
instance_score = []
for instance_id in instances_to_migrate:
instance = self.compute_model.get_instance_from_id(instance_id)
if instance.state == element.InstanceState.ACTIVE.value:
instance_score.append(
(instance_id, self.calculate_score_instance(instance)))
return node_to_release, vm_score
return node_to_release, instance_score
def create_migration_vm(self, mig_vm, mig_src_hypervisor,
mig_dst_hypervisor):
def create_migration_instance(self, mig_instance, mig_source_node,
mig_destination_node):
"""Create migration VM"""
if self.compute_model.get_mapping().migrate_vm(
mig_vm, mig_src_hypervisor, mig_dst_hypervisor):
self.add_migration(mig_vm.uuid, 'live',
mig_src_hypervisor.uuid,
mig_dst_hypervisor.uuid)
if self.compute_model.get_mapping().migrate_instance(
mig_instance, mig_source_node, mig_destination_node):
self.add_migration(mig_instance.uuid, 'live',
mig_source_node.uuid,
mig_destination_node.uuid)
if len(self.compute_model.get_mapping().get_node_vms(
mig_src_hypervisor)) == 0:
self.add_change_service_state(mig_src_hypervisor.
if len(self.compute_model.get_mapping().get_node_instances(
mig_source_node)) == 0:
self.add_change_service_state(mig_source_node.
uuid,
hyper_state.HypervisorState.
DISABLED.value)
element.ServiceState.DISABLED.value)
self.number_of_released_nodes += 1
def calculate_num_migrations(self, sorted_vms, node_to_release,
def calculate_num_migrations(self, sorted_instances, node_to_release,
sorted_score):
number_migrations = 0
for vm in sorted_vms:
for instance in sorted_instances:
for j in range(0, len(sorted_score)):
mig_vm = self.compute_model.get_vm_from_id(vm[0])
mig_src_hypervisor = self.compute_model.get_hypervisor_from_id(
mig_instance = self.compute_model.get_instance_from_id(
instance[0])
mig_source_node = self.compute_model.get_node_from_id(
node_to_release)
mig_dst_hypervisor = self.compute_model.get_hypervisor_from_id(
mig_destination_node = self.compute_model.get_node_from_id(
sorted_score[j][0])
result = self.check_migration(
mig_src_hypervisor, mig_dst_hypervisor, mig_vm)
mig_source_node, mig_destination_node, mig_instance)
if result:
self.create_migration_vm(
mig_vm, mig_src_hypervisor, mig_dst_hypervisor)
self.create_migration_instance(
mig_instance, mig_source_node, mig_destination_node)
number_migrations += 1
break
return number_migrations
@ -420,22 +421,20 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
unsuccessful_migration = 0
first_migration = True
size_cluster = len(self.compute_model.get_all_hypervisors())
size_cluster = len(self.compute_model.get_all_compute_nodes())
if size_cluster == 0:
raise exception.ClusterEmpty()
self.compute_attempts(size_cluster)
for hypervisor_id in self.compute_model.get_all_hypervisors():
hypervisor = self.compute_model.get_hypervisor_from_id(
hypervisor_id)
for node_id in self.compute_model.get_all_compute_nodes():
node = self.compute_model.get_node_from_id(node_id)
count = self.compute_model.get_mapping(). \
get_node_vms_from_id(hypervisor_id)
get_node_instances_from_id(node_id)
if len(count) == 0:
if hypervisor.state == hyper_state.HypervisorState.ENABLED:
self.add_change_service_state(hypervisor_id,
hyper_state.HypervisorState.
DISABLED.value)
if node.state == element.ServiceState.ENABLED:
self.add_change_service_state(
node_id, element.ServiceState.DISABLED.value)
while self.get_allowed_migration_attempts() >= unsuccessful_migration:
if not first_migration:
@ -449,7 +448,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
# Sort compute nodes by Score decreasing
sorted_score = sorted(score, reverse=True, key=lambda x: (x[1]))
LOG.debug("Hypervisor(s) BFD %s", sorted_score)
LOG.debug("Compute node(s) BFD %s", sorted_score)
# Get Node to be released
if len(score) == 0:
@ -458,16 +457,17 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
" of the cluster is zero"))
break
node_to_release, vm_score = self.node_and_vm_score(
node_to_release, instance_score = self.node_and_instance_score(
sorted_score, score)
# Sort VMs by Score
sorted_vms = sorted(vm_score, reverse=True, key=lambda x: (x[1]))
# Sort instances by Score
sorted_instances = sorted(
instance_score, reverse=True, key=lambda x: (x[1]))
# BFD: Best Fit Decrease
LOG.debug("VM(s) BFD %s", sorted_vms)
LOG.debug("VM(s) BFD %s", sorted_instances)
migrations = self.calculate_num_migrations(
sorted_vms, node_to_release, sorted_score)
sorted_instances, node_to_release, sorted_score)
unsuccessful_migration = self.unsuccessful_migration_actualization(
migrations, unsuccessful_migration)
@ -481,5 +481,5 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
def post_execute(self):
self.solution.set_efficacy_indicators(
released_compute_nodes_count=self.number_of_released_nodes,
vm_migrations_count=self.number_of_migrations,
instance_migrations_count=self.number_of_migrations,
)

View File

@ -30,11 +30,10 @@ telemetries to measure thermal/workload status of server.
from oslo_log import log
from watcher._i18n import _, _LI, _LW
from watcher._i18n import _, _LW, _LI
from watcher.common import exception as wexc
from watcher.decision_engine.cluster.history import ceilometer as ceil
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@ -122,35 +121,35 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
def ceilometer(self, c):
self._ceilometer = c
def calc_used_res(self, hypervisor, cpu_capacity,
def calc_used_res(self, node, cpu_capacity,
memory_capacity, disk_capacity):
"""Calculate the used vcpus, memory and disk based on VM flavors"""
vms = self.compute_model.get_mapping().get_node_vms(hypervisor)
instances = self.compute_model.mapping.get_node_instances(node)
vcpus_used = 0
memory_mb_used = 0
disk_gb_used = 0
if len(vms) > 0:
for vm_id in vms:
vm = self.compute_model.get_vm_from_id(vm_id)
vcpus_used += cpu_capacity.get_capacity(vm)
memory_mb_used += memory_capacity.get_capacity(vm)
disk_gb_used += disk_capacity.get_capacity(vm)
if len(instances) > 0:
for instance_id in instances:
instance = self.compute_model.get_instance_from_id(instance_id)
vcpus_used += cpu_capacity.get_capacity(instance)
memory_mb_used += memory_capacity.get_capacity(instance)
disk_gb_used += disk_capacity.get_capacity(instance)
return vcpus_used, memory_mb_used, disk_gb_used
def group_hosts_by_outlet_temp(self):
"""Group hosts based on outlet temp meters"""
hypervisors = self.compute_model.get_all_hypervisors()
size_cluster = len(hypervisors)
nodes = self.compute_model.get_all_compute_nodes()
size_cluster = len(nodes)
if size_cluster == 0:
raise wexc.ClusterEmpty()
hosts_need_release = []
hosts_target = []
for hypervisor_id in hypervisors:
hypervisor = self.compute_model.get_hypervisor_from_id(
hypervisor_id)
resource_id = hypervisor.uuid
for node_id in nodes:
node = self.compute_model.get_node_from_id(
node_id)
resource_id = node.uuid
outlet_temp = self.ceilometer.statistic_aggregation(
resource_id=resource_id,
@ -163,53 +162,55 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
continue
LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
hvmap = {'hv': hypervisor, 'outlet_temp': outlet_temp}
instance_data = {'node': node, 'outlet_temp': outlet_temp}
if outlet_temp >= self.threshold:
# mark the hypervisor to release resources
hosts_need_release.append(hvmap)
# mark the node to release resources
hosts_need_release.append(instance_data)
else:
hosts_target.append(hvmap)
hosts_target.append(instance_data)
return hosts_need_release, hosts_target
def choose_vm_to_migrate(self, hosts):
"""Pick up an active vm instance to migrate from provided hosts"""
for hvmap in hosts:
mig_src_hypervisor = hvmap['hv']
vms_of_src = self.compute_model.get_mapping().get_node_vms(
mig_src_hypervisor)
if len(vms_of_src) > 0:
for vm_id in vms_of_src:
def choose_instance_to_migrate(self, hosts):
"""Pick up an active instance to migrate from provided hosts"""
for instance_data in hosts:
mig_source_node = instance_data['node']
instances_of_src = self.compute_model.mapping.get_node_instances(
mig_source_node)
if len(instances_of_src) > 0:
for instance_id in instances_of_src:
try:
# select the first active VM to migrate
vm = self.compute_model.get_vm_from_id(vm_id)
if vm.state != vm_state.VMState.ACTIVE.value:
LOG.info(_LI("VM not active, skipped: %s"),
vm.uuid)
# select the first active instance to migrate
instance = self.compute_model.get_instance_from_id(
instance_id)
if (instance.state !=
element.InstanceState.ACTIVE.value):
LOG.info(_LI("Instance not active, skipped: %s"),
instance.uuid)
continue
return mig_src_hypervisor, vm
return mig_source_node, instance
except wexc.InstanceNotFound as e:
LOG.exception(e)
LOG.info(_LI("VM not found"))
LOG.info(_LI("Instance not found"))
return None
def filter_dest_servers(self, hosts, vm_to_migrate):
def filter_dest_servers(self, hosts, instance_to_migrate):
"""Only return hosts with sufficient available resources"""
cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores)
element.ResourceType.cpu_cores)
disk_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.disk)
element.ResourceType.disk)
memory_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.memory)
element.ResourceType.memory)
required_cores = cpu_capacity.get_capacity(vm_to_migrate)
required_disk = disk_capacity.get_capacity(vm_to_migrate)
required_memory = memory_capacity.get_capacity(vm_to_migrate)
required_cores = cpu_capacity.get_capacity(instance_to_migrate)
required_disk = disk_capacity.get_capacity(instance_to_migrate)
required_memory = memory_capacity.get_capacity(instance_to_migrate)
# filter hypervisors without enough resource
# filter nodes without enough resource
dest_servers = []
for hvmap in hosts:
host = hvmap['hv']
for instance_data in hosts:
host = instance_data['node']
# available
cores_used, mem_used, disk_used = self.calc_used_res(
host, cpu_capacity, memory_capacity, disk_capacity)
@ -219,7 +220,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
if cores_available >= required_cores \
and disk_available >= required_disk \
and mem_available >= required_memory:
dest_servers.append(hvmap)
dest_servers.append(instance_data)
return dest_servers
@ -251,13 +252,14 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
reverse=True,
key=lambda x: (x["outlet_temp"]))
vm_to_migrate = self.choose_vm_to_migrate(hosts_need_release)
# calculate the vm's cpu cores,memory,disk needs
if vm_to_migrate is None:
instance_to_migrate = self.choose_instance_to_migrate(
hosts_need_release)
# calculate the instance's cpu cores, memory and disk needs
if instance_to_migrate is None:
return self.solution
mig_src_hypervisor, vm_src = vm_to_migrate
dest_servers = self.filter_dest_servers(hosts_target, vm_src)
mig_source_node, instance_src = instance_to_migrate
dest_servers = self.filter_dest_servers(hosts_target, instance_src)
# sort the filtered result by outlet temp
# pick up the lowest one as dest server
if len(dest_servers) == 0:
@ -268,15 +270,15 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"]))
# always use the host with the lowest outlet temperature
mig_dst_hypervisor = dest_servers[0]['hv']
# generate solution to migrate the vm to the dest server,
if self.compute_model.get_mapping().migrate_vm(
vm_src, mig_src_hypervisor, mig_dst_hypervisor):
mig_destination_node = dest_servers[0]['node']
# generate solution to migrate the instance to the dest server,
if self.compute_model.mapping.migrate_instance(
instance_src, mig_source_node, mig_destination_node):
parameters = {'migration_type': 'live',
'src_hypervisor': mig_src_hypervisor.uuid,
'dst_hypervisor': mig_dst_hypervisor.uuid}
'source_node': mig_source_node.uuid,
'destination_node': mig_destination_node.uuid}
self.solution.add_action(action_type=self.MIGRATION,
resource_id=vm_src.uuid,
resource_id=instance_src.uuid,
input_parameters=parameters)
def post_execute(self):

View File

@ -21,8 +21,7 @@ from oslo_log import log
from watcher._i18n import _, _LE, _LI, _LW
from watcher.common import exception as wexc
from watcher.decision_engine.cluster.history import ceilometer as ceil
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@ -121,20 +120,20 @@ class UniformAirflow(base.BaseStrategy):
return {
"properties": {
"threshold_airflow": {
"description": "airflow threshold for migration, Unit is\
0.1CFM",
"description": ("airflow threshold for migration, Unit is "
"0.1CFM"),
"type": "number",
"default": 400.0
},
"threshold_inlet_t": {
"description": "inlet temperature threshold for migration\
decision",
"description": ("inlet temperature threshold for "
"migration decision"),
"type": "number",
"default": 28.0
},
"threshold_power": {
"description": "system power threshold for migration\
decision",
"description": ("system power threshold for migration "
"decision"),
"type": "number",
"default": 350.0
},
@ -146,112 +145,120 @@ class UniformAirflow(base.BaseStrategy):
},
}
def calculate_used_resource(self, hypervisor, cap_cores, cap_mem,
cap_disk):
"""Calculate the used vcpus, memory and disk based on VM flavors"""
vms = self.compute_model.get_mapping().get_node_vms(hypervisor)
def calculate_used_resource(self, node, cap_cores, cap_mem, cap_disk):
"""Compute the used vcpus, memory and disk based on instance flavors"""
instances = self.compute_model.mapping.get_node_instances(node)
vcpus_used = 0
memory_mb_used = 0
disk_gb_used = 0
for vm_id in vms:
vm = self.compute_model.get_vm_from_id(vm_id)
vcpus_used += cap_cores.get_capacity(vm)
memory_mb_used += cap_mem.get_capacity(vm)
disk_gb_used += cap_disk.get_capacity(vm)
for instance_id in instances:
instance = self.compute_model.get_instance_from_id(
instance_id)
vcpus_used += cap_cores.get_capacity(instance)
memory_mb_used += cap_mem.get_capacity(instance)
disk_gb_used += cap_disk.get_capacity(instance)
return vcpus_used, memory_mb_used, disk_gb_used
def choose_vm_to_migrate(self, hosts):
"""Pick up an active vm instance to migrate from provided hosts
def choose_instance_to_migrate(self, hosts):
"""Pick up an active instance instance to migrate from provided hosts
:param hosts: the array of dict which contains hypervisor object
:param hosts: a list of dicts, each containing a node object
"""
vms_tobe_migrate = []
for hvmap in hosts:
source_hypervisor = hvmap['hv']
source_vms = self.compute_model.get_mapping().get_node_vms(
source_hypervisor)
if source_vms:
instances_tobe_migrate = []
for nodemap in hosts:
source_node = nodemap['node']
source_instances = self.compute_model.mapping.get_node_instances(
source_node)
if source_instances:
inlet_t = self.ceilometer.statistic_aggregation(
resource_id=source_hypervisor.uuid,
resource_id=source_node.uuid,
meter_name=self.meter_name_inlet_t,
period=self._period,
aggregate='avg')
power = self.ceilometer.statistic_aggregation(
resource_id=source_hypervisor.uuid,
resource_id=source_node.uuid,
meter_name=self.meter_name_power,
period=self._period,
aggregate='avg')
if (power < self.threshold_power and
inlet_t < self.threshold_inlet_t):
# hardware issue, migrate all vms from this hypervisor
for vm_id in source_vms:
# hardware issue, migrate all instances from this node
for instance_id in source_instances:
try:
vm = self.compute_model.get_vm_from_id(vm_id)
vms_tobe_migrate.append(vm)
instance = (self.compute_model.
get_instance_from_id(instance_id))
instances_tobe_migrate.append(instance)
except wexc.InstanceNotFound:
LOG.error(_LE("VM not found; error: %s"), vm_id)
return source_hypervisor, vms_tobe_migrate
LOG.error(_LE("Instance not found; error: %s"),
instance_id)
return source_node, instances_tobe_migrate
else:
# migrate the first active vm
for vm_id in source_vms:
# migrate the first active instance
for instance_id in source_instances:
try:
vm = self.compute_model.get_vm_from_id(vm_id)
if vm.state != vm_state.VMState.ACTIVE.value:
LOG.info(_LI("VM not active; skipped: %s"),
vm.uuid)
instance = (self.compute_model.
get_instance_from_id(instance_id))
if (instance.state !=
element.InstanceState.ACTIVE.value):
LOG.info(
_LI("Instance not active, skipped: %s"),
instance.uuid)
continue
vms_tobe_migrate.append(vm)
return source_hypervisor, vms_tobe_migrate
instances_tobe_migrate.append(instance)
return source_node, instances_tobe_migrate
except wexc.InstanceNotFound:
LOG.error(_LE("VM not found; error: %s"), vm_id)
LOG.error(_LE("Instance not found; error: %s"),
instance_id)
else:
LOG.info(_LI("VM not found on hypervisor: %s"),
source_hypervisor.uuid)
LOG.info(_LI("Instance not found on node: %s"),
source_node.uuid)
def filter_destination_hosts(self, hosts, vms_to_migrate):
"""Return vm and host with sufficient available resources"""
def filter_destination_hosts(self, hosts, instances_to_migrate):
"""Find instance and host with sufficient available resources"""
cap_cores = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores)
element.ResourceType.cpu_cores)
cap_disk = self.compute_model.get_resource_from_id(
resource.ResourceType.disk)
element.ResourceType.disk)
cap_mem = self.compute_model.get_resource_from_id(
resource.ResourceType.memory)
# large vm go first
vms_to_migrate = sorted(vms_to_migrate, reverse=True,
key=lambda x: (cap_cores.get_capacity(x)))
# find hosts for VMs
element.ResourceType.memory)
# large instances go first
instances_to_migrate = sorted(
instances_to_migrate, reverse=True,
key=lambda x: (cap_cores.get_capacity(x)))
# find hosts for instances
destination_hosts = []
for vm_to_migrate in vms_to_migrate:
required_cores = cap_cores.get_capacity(vm_to_migrate)
required_disk = cap_disk.get_capacity(vm_to_migrate)
required_mem = cap_mem.get_capacity(vm_to_migrate)
for instance_to_migrate in instances_to_migrate:
required_cores = cap_cores.get_capacity(instance_to_migrate)
required_disk = cap_disk.get_capacity(instance_to_migrate)
required_mem = cap_mem.get_capacity(instance_to_migrate)
dest_migrate_info = {}
for hvmap in hosts:
host = hvmap['hv']
if 'cores_used' not in hvmap:
for nodemap in hosts:
host = nodemap['node']
if 'cores_used' not in nodemap:
# calculate the available resources
hvmap['cores_used'], hvmap['mem_used'],\
hvmap['disk_used'] = self.calculate_used_resource(
nodemap['cores_used'], nodemap['mem_used'],\
nodemap['disk_used'] = self.calculate_used_resource(
host, cap_cores, cap_mem, cap_disk)
cores_available = (cap_cores.get_capacity(host) -
hvmap['cores_used'])
nodemap['cores_used'])
disk_available = (cap_disk.get_capacity(host) -
hvmap['disk_used'])
mem_available = cap_mem.get_capacity(host) - hvmap['mem_used']
nodemap['disk_used'])
mem_available = (
cap_mem.get_capacity(host) - nodemap['mem_used'])
if (cores_available >= required_cores and
disk_available >= required_disk and
mem_available >= required_mem):
dest_migrate_info['vm'] = vm_to_migrate
dest_migrate_info['hv'] = host
hvmap['cores_used'] += required_cores
hvmap['mem_used'] += required_mem
hvmap['disk_used'] += required_disk
dest_migrate_info['instance'] = instance_to_migrate
dest_migrate_info['node'] = host
nodemap['cores_used'] += required_cores
nodemap['mem_used'] += required_mem
nodemap['disk_used'] += required_disk
destination_hosts.append(dest_migrate_info)
break
# check if all vms have target hosts
if len(destination_hosts) != len(vms_to_migrate):
# check if all instances have target hosts
if len(destination_hosts) != len(instances_to_migrate):
LOG.warning(_LW("Not all target hosts could be found; it might "
"be because there is not enough resource"))
return None
@ -260,15 +267,15 @@ class UniformAirflow(base.BaseStrategy):
def group_hosts_by_airflow(self):
"""Group hosts based on airflow meters"""
hypervisors = self.compute_model.get_all_hypervisors()
if not hypervisors:
nodes = self.compute_model.get_all_compute_nodes()
if not nodes:
raise wexc.ClusterEmpty()
overload_hosts = []
nonoverload_hosts = []
for hypervisor_id in hypervisors:
hypervisor = self.compute_model.get_hypervisor_from_id(
hypervisor_id)
resource_id = hypervisor.uuid
for node_id in nodes:
node = self.compute_model.get_node_from_id(
node_id)
resource_id = node.uuid
airflow = self.ceilometer.statistic_aggregation(
resource_id=resource_id,
meter_name=self.meter_name_airflow,
@ -280,12 +287,12 @@ class UniformAirflow(base.BaseStrategy):
continue
LOG.debug("%s: airflow %f" % (resource_id, airflow))
hvmap = {'hv': hypervisor, 'airflow': airflow}
nodemap = {'node': node, 'airflow': airflow}
if airflow >= self.threshold_airflow:
# mark the hypervisor to release resources
overload_hosts.append(hvmap)
# mark the node to release resources
overload_hosts.append(nodemap)
else:
nonoverload_hosts.append(hvmap)
nonoverload_hosts.append(nodemap)
return overload_hosts, nonoverload_hosts
def pre_execute(self):
@ -299,49 +306,48 @@ class UniformAirflow(base.BaseStrategy):
self.threshold_inlet_t = self.input_parameters.threshold_inlet_t
self.threshold_power = self.input_parameters.threshold_power
self._period = self.input_parameters.period
src_hypervisors, target_hypervisors = (
self.group_hosts_by_airflow())
source_nodes, target_nodes = self.group_hosts_by_airflow()
if not src_hypervisors:
if not source_nodes:
LOG.debug("No hosts require optimization")
return self.solution
if not target_hypervisors:
if not target_nodes:
LOG.warning(_LW("No hosts currently have airflow under %s, "
"therefore there are no possible target "
"hosts for any migration"),
self.threshold_airflow)
return self.solution
# migrate the vm from server with largest airflow first
src_hypervisors = sorted(src_hypervisors,
reverse=True,
key=lambda x: (x["airflow"]))
vms_to_migrate = self.choose_vm_to_migrate(src_hypervisors)
if not vms_to_migrate:
# migrate the instance from server with largest airflow first
source_nodes = sorted(source_nodes,
reverse=True,
key=lambda x: (x["airflow"]))
instances_to_migrate = self.choose_instance_to_migrate(source_nodes)
if not instances_to_migrate:
return self.solution
source_hypervisor, vms_src = vms_to_migrate
source_node, instances_src = instances_to_migrate
# sort host with airflow
target_hypervisors = sorted(target_hypervisors,
key=lambda x: (x["airflow"]))
# find the hosts that have enough resource for the VM to be migrated
destination_hosts = self.filter_destination_hosts(target_hypervisors,
vms_src)
target_nodes = sorted(target_nodes, key=lambda x: (x["airflow"]))
# find the hosts that have enough resource
# for the instance to be migrated
destination_hosts = self.filter_destination_hosts(
target_nodes, instances_src)
if not destination_hosts:
LOG.warning(_LW("No target host could be found; it might "
"be because there is not enough resources"))
return self.solution
# generate solution to migrate the vm to the dest server,
# generate solution to migrate the instance to the dest server
for info in destination_hosts:
vm_src = info['vm']
mig_dst_hypervisor = info['hv']
if self.compute_model.get_mapping().migrate_vm(
vm_src, source_hypervisor, mig_dst_hypervisor):
instance = info['instance']
destination_node = info['node']
if self.compute_model.mapping.migrate_instance(
instance, source_node, destination_node):
parameters = {'migration_type': 'live',
'src_hypervisor': source_hypervisor.uuid,
'dst_hypervisor': mig_dst_hypervisor.uuid}
'source_node': source_node.uuid,
'destination_node': destination_node.uuid}
self.solution.add_action(action_type=self.MIGRATION,
resource_id=vm_src.uuid,
resource_id=instance.uuid,
input_parameters=parameters)
def post_execute(self):

View File

@ -24,9 +24,7 @@ from watcher._i18n import _, _LE, _LI
from watcher.common import exception
from watcher.decision_engine.cluster.history import ceilometer \
as ceilometer_cluster_history
from watcher.decision_engine.model import hypervisor_state as hyper_state
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@ -48,26 +46,26 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
* Offload phase - handling over-utilized resources
* Consolidation phase - handling under-utilized resources
* Solution optimization - reducing number of migrations
* Disability of unused hypervisors
* Disabling of unused compute nodes
Capacity coefficients (cc) might be used to adjust optimization
thresholds. Different resources may require different coefficient
values as well as setting up different coefficient values in both
phases may lead to more efficient consolidation in the end.
If the cc equals 1 the full resource capacity may be used, cc
values lower than 1 will lead to resource under utilization and
values higher than 1 will lead to resource overbooking.
e.g. If targeted utilization is 80 percent of hypervisor capacity,
e.g. If targeted utilization is 80 percent of a compute node capacity,
the coefficient in the consolidation phase will be 0.8, but
may be any lower value in the offloading phase. The lower it gets,
the more released (distributed) the cluster will appear for the
following consolidation phase.
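As a rough numeric sketch of how a capacity coefficient rescales usable capacity (the figures below are made up; the strategy reads real capacities from the cluster model):

node_cpu_capacity = 40     # cores reported for a compute node (illustrative)
cc_consolidation = 0.8     # target 80 percent utilization when consolidating
cc_offload = 0.6           # stricter ceiling while offloading

print(node_cpu_capacity * cc_consolidation)  # 32.0 cores usable
print(node_cpu_capacity * cc_offload)        # 24.0 cores usable
print(node_cpu_capacity * 1.2)               # 48.0, i.e. cc > 1 overbooks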
As this strategy leverages VM live migration to move the load
from one hypervisor to another, this feature needs to be set up
correctly on all hypervisors within the cluster.
As this strategy leverages VM live migration to move the load
from one compute node to another, this feature needs to be set up
correctly on all compute nodes within the cluster.
This strategy assumes it is possible to live migrate any VM from
an active hypervisor to any other active hypervisor.
an active compute node to any other active compute node.
*Requirements*
@ -86,8 +84,8 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
super(VMWorkloadConsolidation, self).__init__(config, osc)
self._ceilometer = None
self.number_of_migrations = 0
self.number_of_released_hypervisors = 0
self.ceilometer_vm_data_cache = dict()
self.number_of_released_nodes = 0
self.ceilometer_instance_data_cache = dict()
@classmethod
def get_name(cls):
@ -119,200 +117,203 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
"""
if isinstance(state, six.string_types):
return state
elif isinstance(state, (vm_state.VMState,
hyper_state.HypervisorState)):
elif isinstance(state, (element.InstanceState, element.ServiceState)):
return state.value
else:
LOG.error(_LE('Unexpected resource state type, '
'state=%(state)s, state_type=%(st)s.'),
{'state': state, 'st': type(state)})
raise exception.WatcherException
def add_action_enable_hypervisor(self, hypervisor):
"""Add an action for hypervisor enabler into the solution.
def add_action_enable_compute_node(self, node):
"""Add an action for node enabler into the solution.
:param hypervisor: hypervisor object
:param node: node object
:return: None
"""
params = {'state': hyper_state.HypervisorState.ENABLED.value}
params = {'state': element.ServiceState.ENABLED.value}
self.solution.add_action(
action_type='change_nova_service_state',
resource_id=hypervisor.uuid,
resource_id=node.uuid,
input_parameters=params)
self.number_of_released_hypervisors -= 1
self.number_of_released_nodes -= 1
def add_action_disable_hypervisor(self, hypervisor):
"""Add an action for hypervisor disablity into the solution.
def add_action_disable_node(self, node):
"""Add an action for node disablity into the solution.
:param hypervisor: hypervisor object
:param node: node object
:return: None
"""
params = {'state': hyper_state.HypervisorState.DISABLED.value}
params = {'state': element.ServiceState.DISABLED.value}
self.solution.add_action(
action_type='change_nova_service_state',
resource_id=hypervisor.uuid,
resource_id=node.uuid,
input_parameters=params)
self.number_of_released_hypervisors += 1
self.number_of_released_nodes += 1
def add_migration(self, vm_uuid, src_hypervisor,
dst_hypervisor, model):
def add_migration(self, instance_uuid, source_node,
destination_node, model):
"""Add an action for VM migration into the solution.
:param vm_uuid: vm uuid
:param src_hypervisor: hypervisor object
:param dst_hypervisor: hypervisor object
:param instance_uuid: instance uuid
:param source_node: node object
:param destination_node: node object
:param model: model_root object
:return: None
"""
vm = model.get_vm_from_id(vm_uuid)
instance = model.get_instance_from_id(instance_uuid)
vm_state_str = self.get_state_str(vm.state)
if vm_state_str != vm_state.VMState.ACTIVE.value:
instance_state_str = self.get_state_str(instance.state)
if instance_state_str != element.InstanceState.ACTIVE.value:
# Watcher currently only supports live VM migration and block live
# VM migration, which both require the migrated VM to be active.
# When supported, the cold migration may be used as a fallback
# migration mechanism to move non active VMs.
LOG.error(_LE('Cannot live migrate: vm_uuid=%(vm_uuid)s, '
'state=%(vm_state)s.'),
vm_uuid=vm_uuid,
vm_state=vm_state_str)
LOG.error(
_LE('Cannot live migrate: instance_uuid=%(instance_uuid)s, '
'state=%(instance_state)s.'),
{'instance_uuid': instance_uuid,
'instance_state': instance_state_str})
raise exception.WatcherException
migration_type = 'live'
dst_hyper_state_str = self.get_state_str(dst_hypervisor.state)
if dst_hyper_state_str == hyper_state.HypervisorState.DISABLED.value:
self.add_action_enable_hypervisor(dst_hypervisor)
model.get_mapping().unmap(src_hypervisor, vm)
model.get_mapping().map(dst_hypervisor, vm)
destination_node_state_str = self.get_state_str(destination_node.state)
if destination_node_state_str == element.ServiceState.DISABLED.value:
self.add_action_enable_compute_node(destination_node)
model.mapping.unmap(source_node, instance)
model.mapping.map(destination_node, instance)
params = {'migration_type': migration_type,
'src_hypervisor': src_hypervisor.uuid,
'dst_hypervisor': dst_hypervisor.uuid}
'source_node': source_node.uuid,
'destination_node': destination_node.uuid}
self.solution.add_action(action_type='migrate',
resource_id=vm.uuid,
resource_id=instance.uuid,
input_parameters=params)
self.number_of_migrations += 1
def disable_unused_hypervisors(self, model):
"""Generate actions for disablity of unused hypervisors.
def disable_unused_nodes(self, model):
"""Generate actions for disablity of unused nodes.
:param model: model_root object
:return: None
"""
for hypervisor in model.get_all_hypervisors().values():
if (len(model.get_mapping().get_node_vms(hypervisor)) == 0 and
hypervisor.status !=
hyper_state.HypervisorState.DISABLED.value):
self.add_action_disable_hypervisor(hypervisor)
for node in model.get_all_compute_nodes().values():
if (len(model.mapping.get_node_instances(node)) == 0 and
node.status !=
element.ServiceState.DISABLED.value):
self.add_action_disable_node(node)
def get_vm_utilization(self, vm_uuid, model, period=3600, aggr='avg'):
def get_instance_utilization(self, instance_uuid, model,
period=3600, aggr='avg'):
"""Collect cpu, ram and disk utilization statistics of a VM.
:param vm_uuid: vm object
:param instance_uuid: instance uuid
:param model: model_root object
:param period: seconds
:param aggr: string
:return: dict(cpu(number of vcpus used), ram(MB used), disk(B used))
"""
if vm_uuid in self.ceilometer_vm_data_cache.keys():
return self.ceilometer_vm_data_cache.get(vm_uuid)
if instance_uuid in self.ceilometer_instance_data_cache.keys():
return self.ceilometer_instance_data_cache.get(instance_uuid)
cpu_util_metric = 'cpu_util'
ram_util_metric = 'memory.usage'
ram_alloc_metric = 'memory'
disk_alloc_metric = 'disk.root.size'
vm_cpu_util = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid, meter_name=cpu_util_metric,
instance_cpu_util = self.ceilometer.statistic_aggregation(
resource_id=instance_uuid, meter_name=cpu_util_metric,
period=period, aggregate=aggr)
vm_cpu_cores = model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(
model.get_vm_from_id(vm_uuid))
instance_cpu_cores = model.get_resource_from_id(
element.ResourceType.cpu_cores).get_capacity(
model.get_instance_from_id(instance_uuid))
if vm_cpu_util:
total_cpu_utilization = vm_cpu_cores * (vm_cpu_util / 100.0)
if instance_cpu_util:
total_cpu_utilization = (
instance_cpu_cores * (instance_cpu_util / 100.0))
else:
total_cpu_utilization = vm_cpu_cores
total_cpu_utilization = instance_cpu_cores
vm_ram_util = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid, meter_name=ram_util_metric,
instance_ram_util = self.ceilometer.statistic_aggregation(
resource_id=instance_uuid, meter_name=ram_util_metric,
period=period, aggregate=aggr)
if not vm_ram_util:
vm_ram_util = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid, meter_name=ram_alloc_metric,
if not instance_ram_util:
instance_ram_util = self.ceilometer.statistic_aggregation(
resource_id=instance_uuid, meter_name=ram_alloc_metric,
period=period, aggregate=aggr)
vm_disk_util = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid, meter_name=disk_alloc_metric,
instance_disk_util = self.ceilometer.statistic_aggregation(
resource_id=instance_uuid, meter_name=disk_alloc_metric,
period=period, aggregate=aggr)
if not vm_ram_util or not vm_disk_util:
if not instance_ram_util or not instance_disk_util:
LOG.error(
_LE('No values returned by %(resource_id)s '
'for memory.usage or disk.root.size'),
resource_id=vm_uuid
{'resource_id': instance_uuid}
)
raise exception.NoDataFound
self.ceilometer_vm_data_cache[vm_uuid] = dict(
cpu=total_cpu_utilization, ram=vm_ram_util, disk=vm_disk_util)
return self.ceilometer_vm_data_cache.get(vm_uuid)
self.ceilometer_instance_data_cache[instance_uuid] = dict(
cpu=total_cpu_utilization, ram=instance_ram_util,
disk=instance_disk_util)
return self.ceilometer_instance_data_cache.get(instance_uuid)
def get_hypervisor_utilization(self, hypervisor, model, period=3600,
aggr='avg'):
"""Collect cpu, ram and disk utilization statistics of a hypervisor.
def get_node_utilization(self, node, model, period=3600, aggr='avg'):
"""Collect cpu, ram and disk utilization statistics of a node.
:param hypervisor: hypervisor object
:param node: node object
:param model: model_root object
:param period: seconds
:param aggr: string
:return: dict(cpu(number of cores used), ram(MB used), disk(B used))
"""
hypervisor_vms = model.get_mapping().get_node_vms_from_id(
hypervisor.uuid)
hypervisor_ram_util = 0
hypervisor_disk_util = 0
hypervisor_cpu_util = 0
for vm_uuid in hypervisor_vms:
vm_util = self.get_vm_utilization(vm_uuid, model, period, aggr)
hypervisor_cpu_util += vm_util['cpu']
hypervisor_ram_util += vm_util['ram']
hypervisor_disk_util += vm_util['disk']
node_instances = model.mapping.get_node_instances_from_id(
node.uuid)
node_ram_util = 0
node_disk_util = 0
node_cpu_util = 0
for instance_uuid in node_instances:
instance_util = self.get_instance_utilization(
instance_uuid, model, period, aggr)
node_cpu_util += instance_util['cpu']
node_ram_util += instance_util['ram']
node_disk_util += instance_util['disk']
return dict(cpu=hypervisor_cpu_util, ram=hypervisor_ram_util,
disk=hypervisor_disk_util)
return dict(cpu=node_cpu_util, ram=node_ram_util,
disk=node_disk_util)
def get_hypervisor_capacity(self, hypervisor, model):
"""Collect cpu, ram and disk capacity of a hypervisor.
def get_node_capacity(self, node, model):
"""Collect cpu, ram and disk capacity of a node.
:param hypervisor: hypervisor object
:param node: node object
:param model: model_root object
:return: dict(cpu(cores), ram(MB), disk(B))
"""
hypervisor_cpu_capacity = model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(hypervisor)
node_cpu_capacity = model.get_resource_from_id(
element.ResourceType.cpu_cores).get_capacity(node)
hypervisor_disk_capacity = model.get_resource_from_id(
resource.ResourceType.disk_capacity).get_capacity(hypervisor)
node_disk_capacity = model.get_resource_from_id(
element.ResourceType.disk_capacity).get_capacity(node)
hypervisor_ram_capacity = model.get_resource_from_id(
resource.ResourceType.memory).get_capacity(hypervisor)
return dict(cpu=hypervisor_cpu_capacity, ram=hypervisor_ram_capacity,
disk=hypervisor_disk_capacity)
node_ram_capacity = model.get_resource_from_id(
element.ResourceType.memory).get_capacity(node)
return dict(cpu=node_cpu_capacity, ram=node_ram_capacity,
disk=node_disk_capacity)
def get_relative_hypervisor_utilization(self, hypervisor, model):
"""Return relative hypervisor utilization (rhu).
def get_relative_node_utilization(self, node, model):
"""Return relative node utilization (rhu).
:param hypervisor: hypervisor object
:param node: node object
:param model: model_root object
:return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>}
"""
rhu = {}
util = self.get_hypervisor_utilization(hypervisor, model)
cap = self.get_hypervisor_capacity(hypervisor, model)
util = self.get_node_utilization(node, model)
cap = self.get_node_capacity(node, model)
for k in util.keys():
rhu[k] = float(util[k]) / float(cap[k])
return rhu
@ -320,18 +321,18 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
def get_relative_cluster_utilization(self, model):
"""Calculate relative cluster utilization (rcu).
RCU is an average of relative utilizations (rhu) of active hypervisors.
RCU is an average of relative utilizations (rhu) of active nodes.
:param model: model_root object
:return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>}
"""
hypervisors = model.get_all_hypervisors().values()
nodes = model.get_all_compute_nodes().values()
rcu = {}
counters = {}
for hypervisor in hypervisors:
hyper_state_str = self.get_state_str(hypervisor.state)
if hyper_state_str == hyper_state.HypervisorState.ENABLED.value:
rhu = self.get_relative_hypervisor_utilization(
hypervisor, model)
for node in nodes:
node_state_str = self.get_state_str(node.state)
if node_state_str == element.ServiceState.ENABLED.value:
rhu = self.get_relative_node_utilization(
node, model)
for k in rhu.keys():
if k not in rcu:
rcu[k] = 0
@ -343,42 +344,43 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
rcu[k] /= counters[k]
return rcu
def is_overloaded(self, hypervisor, model, cc):
"""Indicate whether a hypervisor is overloaded.
def is_overloaded(self, node, model, cc):
"""Indicate whether a node is overloaded.
This considers provided resource capacity coefficients (cc).
:param hypervisor: hypervisor object
:param node: node object
:param model: model_root object
:param cc: dictionary containing resource capacity coefficients
:return: [True, False]
"""
hypervisor_capacity = self.get_hypervisor_capacity(hypervisor, model)
hypervisor_utilization = self.get_hypervisor_utilization(
hypervisor, model)
node_capacity = self.get_node_capacity(node, model)
node_utilization = self.get_node_utilization(
node, model)
metrics = ['cpu']
for m in metrics:
if hypervisor_utilization[m] > hypervisor_capacity[m] * cc[m]:
if node_utilization[m] > node_capacity[m] * cc[m]:
return True
return False
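A self-contained restatement of this check, with plain dicts in place of the get_node_utilization and get_node_capacity lookups:

def is_overloaded(utilization, capacity, cc, metrics=('cpu',)):
    """True when any tracked metric exceeds the cc-scaled capacity."""
    return any(utilization[m] > capacity[m] * cc[m] for m in metrics)

cc = {'cpu': 1.0}
print(is_overloaded({'cpu': 35.2}, {'cpu': 32}, cc))  # True: 35.2 > 32 * 1.0
print(is_overloaded({'cpu': 20.0}, {'cpu': 32}, cc))  # False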
def vm_fits(self, vm_uuid, hypervisor, model, cc):
"""Indicate whether is a hypervisor able to accommodate a VM.
def instance_fits(self, instance_uuid, node, model, cc):
"""Indicate whether is a node able to accommodate a VM.
This considers provided resource capacity coefficients (cc).
:param vm_uuid: string
:param hypervisor: hypervisor object
:param instance_uuid: string
:param node: node object
:param model: model_root object
:param cc: dictionary containing resource capacity coefficients
:return: [True, False]
"""
hypervisor_capacity = self.get_hypervisor_capacity(hypervisor, model)
hypervisor_utilization = self.get_hypervisor_utilization(
hypervisor, model)
vm_utilization = self.get_vm_utilization(vm_uuid, model)
node_capacity = self.get_node_capacity(node, model)
node_utilization = self.get_node_utilization(
node, model)
instance_utilization = self.get_instance_utilization(
instance_uuid, model)
metrics = ['cpu', 'ram', 'disk']
for m in metrics:
if (vm_utilization[m] + hypervisor_utilization[m] >
hypervisor_capacity[m] * cc[m]):
if (instance_utilization[m] + node_utilization[m] >
node_capacity[m] * cc[m]):
return False
return True
@ -391,7 +393,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
* A->B, B->C => replace migrations A->B, B->C with
a single migration A->C, as both solutions result in the
VM running on hypervisor C which can be achieved with
VM running on node C which can be achieved with
one migration instead of two.
* A->B, B->A => remove A->B and B->A as they do not result
in a new VM placement.
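A minimal sketch of that collapsing rule over (instance, source, destination) tuples rather than real action dicts; only each instance's first source and last destination survive:

def collapse_migrations(migrations):
    """Collapse chained migrations; drop those that return to the start."""
    first_src, last_dst, order = {}, {}, []
    for instance, src, dst in migrations:
        if instance not in first_src:
            first_src[instance] = src
            order.append(instance)
        last_dst[instance] = dst
    return [(i, first_src[i], last_dst[i])
            for i in order if first_src[i] != last_dst[i]]

print(collapse_migrations([('vm1', 'A', 'B'), ('vm1', 'B', 'C')]))  # [('vm1', 'A', 'C')]
print(collapse_migrations([('vm2', 'A', 'B'), ('vm2', 'B', 'A')]))  # []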
@ -401,58 +403,59 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
migrate_actions = (
a for a in self.solution.actions if a[
'action_type'] == 'migrate')
vm_to_be_migrated = (a['input_parameters']['resource_id']
for a in migrate_actions)
vm_uuids = list(set(vm_to_be_migrated))
for vm_uuid in vm_uuids:
instance_to_be_migrated = (
a['input_parameters']['resource_id'] for a in migrate_actions)
instance_uuids = list(set(instance_to_be_migrated))
for instance_uuid in instance_uuids:
actions = list(
a for a in self.solution.actions if a[
'input_parameters'][
'resource_id'] == vm_uuid)
'resource_id'] == instance_uuid)
if len(actions) > 1:
src = actions[0]['input_parameters']['src_hypervisor']
dst = actions[-1]['input_parameters']['dst_hypervisor']
src = actions[0]['input_parameters']['source_node']
dst = actions[-1]['input_parameters']['destination_node']
for a in actions:
self.solution.actions.remove(a)
self.number_of_migrations -= 1
if src != dst:
self.add_migration(vm_uuid, src, dst, model)
self.add_migration(instance_uuid, src, dst, model)
def offload_phase(self, model, cc):
"""Perform offloading phase.
This considers provided resource capacity coefficients.
Offload phase performs first-fit based bin packing to offload
overloaded hypervisors. This is done in a fashion of moving
overloaded nodes. This is done in a fashion of moving
the least CPU-utilized VM first, as live migrating these
generally causes less trouble. This phase results in a cluster
with no overloaded hypervisors.
* This phase is be able to enable disabled hypervisors (if needed
with no overloaded nodes.
* This phase is able to enable disabled nodes (if needed
and any available) in case the resource capacity provided by
active hypervisors is not able to accomodate all the load.
active nodes is not able to accommodate all the load.
As the offload phase is later followed by the consolidation phase,
the hypervisor enabler in this phase doesn't necessarily results
in more enabled hypervisors in the final solution.
the node enabling in this phase doesn't necessarily result
in more enabled nodes in the final solution.
:param model: model_root object
:param cc: dictionary containing resource capacity coefficients
"""
sorted_hypervisors = sorted(
model.get_all_hypervisors().values(),
key=lambda x: self.get_hypervisor_utilization(x, model)['cpu'])
for hypervisor in reversed(sorted_hypervisors):
if self.is_overloaded(hypervisor, model, cc):
for vm in sorted(
model.get_mapping().get_node_vms(hypervisor),
key=lambda x: self.get_vm_utilization(
sorted_nodes = sorted(
model.get_all_compute_nodes().values(),
key=lambda x: self.get_node_utilization(x, model)['cpu'])
for node in reversed(sorted_nodes):
if self.is_overloaded(node, model, cc):
for instance in sorted(
model.mapping.get_node_instances(node),
key=lambda x: self.get_instance_utilization(
x, model)['cpu']
):
for dst_hypervisor in reversed(sorted_hypervisors):
if self.vm_fits(vm, dst_hypervisor, model, cc):
self.add_migration(vm, hypervisor,
dst_hypervisor, model)
for destination_node in reversed(sorted_nodes):
if self.instance_fits(
instance, destination_node, model, cc):
self.add_migration(instance, node,
destination_node, model)
break
if not self.is_overloaded(hypervisor, model, cc):
if not self.is_overloaded(node, model, cc):
break
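Since the interleaved hunk above is hard to read as a whole, here is a condensed standalone restatement of the offload loop, with plain dicts standing in for the model; all names and the bookkeeping are illustrative:

def offload(nodes, util, cap, placements, inst_util, cc):
    """nodes: node ids; placements: node id -> list of instance ids."""
    migrations = []
    ordered = sorted(nodes, key=lambda n: util[n]['cpu'])  # ascending cpu load

    def overloaded(n):
        return util[n]['cpu'] > cap[n]['cpu'] * cc['cpu']

    def fits(i, n):
        return all(inst_util[i][m] + util[n][m] <= cap[n][m] * cc[m]
                   for m in ('cpu', 'ram', 'disk'))

    for node in reversed(ordered):              # most loaded node first
        if not overloaded(node):
            continue
        for inst in sorted(placements[node], key=lambda i: inst_util[i]['cpu']):
            for dest in reversed(ordered):      # first fit over candidates
                if dest != node and fits(inst, dest):
                    migrations.append((inst, node, dest))
                    for m in ('cpu', 'ram', 'disk'):
                        util[node][m] -= inst_util[inst][m]
                        util[dest][m] += inst_util[inst][m]
                    break
            if not overloaded(node):
                break
    return migrations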
def consolidation_phase(self, model, cc):
@ -460,8 +463,8 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
This considers provided resource capacity coefficients.
Consolidation phase performs first-fit based bin packing.
First, hypervisors with the lowest cpu utilization are consolidated
by moving their load to hypervisors with the highest cpu utilization
First, nodes with the lowest cpu utilization are consolidated
by moving their load to nodes with the highest cpu utilization
which can accommodate the load. In this phase the most cpu-utilized
VMs are prioritized as their load is more difficult to accommodate
in the system than less cpu-utilized VMs, which can be later used
@ -470,22 +473,23 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
:param model: model_root object
:param cc: dictionary containing resource capacity coefficients
"""
sorted_hypervisors = sorted(
model.get_all_hypervisors().values(),
key=lambda x: self.get_hypervisor_utilization(x, model)['cpu'])
sorted_nodes = sorted(
model.get_all_compute_nodes().values(),
key=lambda x: self.get_node_utilization(x, model)['cpu'])
asc = 0
for hypervisor in sorted_hypervisors:
vms = sorted(model.get_mapping().get_node_vms(hypervisor),
key=lambda x: self.get_vm_utilization(x,
model)['cpu'])
for vm in reversed(vms):
dsc = len(sorted_hypervisors) - 1
for dst_hypervisor in reversed(sorted_hypervisors):
for node in sorted_nodes:
instances = sorted(
model.mapping.get_node_instances(node),
key=lambda x: self.get_instance_utilization(x, model)['cpu'])
for instance in reversed(instances):
dsc = len(sorted_nodes) - 1
for destination_node in reversed(sorted_nodes):
if asc >= dsc:
break
if self.vm_fits(vm, dst_hypervisor, model, cc):
self.add_migration(vm, hypervisor,
dst_hypervisor, model)
if self.instance_fits(
instance, destination_node, model, cc):
self.add_migration(instance, node,
destination_node, model)
break
dsc -= 1
asc += 1
@ -504,7 +508,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
* Offload phase - handling over-utilized resources
* Consolidation phase - handling under-utilized resources
* Solution optimization - reducing number of migrations
* Disability of unused hypervisors
* Disabling of unused nodes
:param original_model: root_model object
"""
@ -524,14 +528,14 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
# Optimize solution
self.optimize_solution(model)
# disable unused hypervisors
self.disable_unused_hypervisors(model)
# disable unused nodes
self.disable_unused_nodes(model)
rcu_after = self.get_relative_cluster_utilization(model)
info = {
'number_of_migrations': self.number_of_migrations,
'number_of_released_hypervisors':
self.number_of_released_hypervisors,
'number_of_released_nodes':
self.number_of_released_nodes,
'relative_cluster_utilization_before': str(rcu),
'relative_cluster_utilization_after': str(rcu_after)
}
@ -542,5 +546,5 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
# self.solution.efficacy = rcu_after['cpu']
self.solution.set_efficacy_indicators(
released_compute_nodes_count=self.number_of_released_nodes,
vm_migrations_count=self.number_of_released_hypervisors,
instance_migrations_count=self.number_of_migrations,
)

View File

@ -21,8 +21,7 @@ from oslo_log import log
from watcher._i18n import _, _LE, _LI, _LW
from watcher.common import exception as wexc
from watcher.decision_engine.cluster.history import ceilometer as ceil
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@ -37,7 +36,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
servers. It generates solutions to move a workload whenever a server's
CPU utilization % is higher than the specified threshold.
The VM to be moved should bring the host close to the average workload
of all hypervisors.
of all compute nodes.
*Requirements*
@ -115,78 +114,83 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
},
}
def calculate_used_resource(self, hypervisor, cap_cores, cap_mem,
def calculate_used_resource(self, node, cap_cores, cap_mem,
cap_disk):
"""Calculate the used vcpus, memory and disk based on VM flavors"""
vms = self.compute_model.get_mapping().get_node_vms(hypervisor)
instances = self.compute_model.mapping.get_node_instances(node)
vcpus_used = 0
memory_mb_used = 0
disk_gb_used = 0
for vm_id in vms:
vm = self.compute_model.get_vm_from_id(vm_id)
vcpus_used += cap_cores.get_capacity(vm)
memory_mb_used += cap_mem.get_capacity(vm)
disk_gb_used += cap_disk.get_capacity(vm)
for instance_id in instances:
instance = self.compute_model.get_instance_from_id(instance_id)
vcpus_used += cap_cores.get_capacity(instance)
memory_mb_used += cap_mem.get_capacity(instance)
disk_gb_used += cap_disk.get_capacity(instance)
return vcpus_used, memory_mb_used, disk_gb_used
def choose_vm_to_migrate(self, hosts, avg_workload, workload_cache):
"""Pick up an active vm instance to migrate from provided hosts
def choose_instance_to_migrate(self, hosts, avg_workload, workload_cache):
"""Pick up an active instance instance to migrate from provided hosts
:param hosts: the array of dict which contains hypervisor object
:param avg_workload: the average workload value of all hypervisors
:param workload_cache: the map contains vm to workload mapping
:param hosts: a list of dicts, each containing a node object
:param avg_workload: the average workload value of all nodes
:param workload_cache: a map of instance uuid to workload
"""
for hvmap in hosts:
source_hypervisor = hvmap['hv']
source_vms = self.compute_model.get_mapping().get_node_vms(
source_hypervisor)
if source_vms:
delta_workload = hvmap['workload'] - avg_workload
for instance_data in hosts:
source_node = instance_data['node']
source_instances = self.compute_model.mapping.get_node_instances(
source_node)
if source_instances:
delta_workload = instance_data['workload'] - avg_workload
min_delta = 1000000
instance_id = None
for vm_id in source_vms:
for inst_id in source_instances:
try:
# consider only active instances as migration candidates
vm = self.compute_model.get_vm_from_id(vm_id)
if vm.state != vm_state.VMState.ACTIVE.value:
LOG.debug("VM not active; skipped: %s",
vm.uuid)
instance = self.compute_model.get_instance_from_id(
inst_id)
if (instance.state !=
element.InstanceState.ACTIVE.value):
LOG.debug("Instance not active, skipped: %s",
instance.uuid)
continue
current_delta = delta_workload - workload_cache[vm_id]
current_delta = (
delta_workload - workload_cache[inst_id])
if 0 <= current_delta < min_delta:
min_delta = current_delta
instance_id = vm_id
instance_id = inst_id
except wexc.InstanceNotFound:
LOG.error(_LE("VM not found; error: %s"), vm_id)
LOG.error(_LE("Instance not found; error: %s"),
inst_id)
if instance_id:
return (source_hypervisor,
self.compute_model.get_vm_from_id(instance_id))
return (source_node,
self.compute_model.get_instance_from_id(
instance_id))
else:
LOG.info(_LI("VM not found on hypervisor: %s"),
source_hypervisor.uuid)
LOG.info(_LI("VM not found from node: %s"),
source_node.uuid)
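The selection rule above, restated as a standalone sketch: among active instances on an overloaded node, pick the one whose cached workload comes closest to the node's excess over the cluster average without overshooting it (inputs here are plain dicts, not model objects):

def pick_instance(node_workload, avg_workload, instance_workloads):
    """Return the instance id whose workload best matches the excess."""
    delta = node_workload - avg_workload   # load the node should shed
    best, min_delta = None, float('inf')
    for inst_id, workload in instance_workloads.items():
        current = delta - workload
        if 0 <= current < min_delta:       # do not drop below the average
            min_delta, best = current, inst_id
    return best

# Node is 1.5 cores above average; vm-b (1.4) is the closest fit that
# does not overshoot, so it wins over vm-a (0.5) and vm-c (2.0).
print(pick_instance(5.5, 4.0, {'vm-a': 0.5, 'vm-b': 1.4, 'vm-c': 2.0}))  # vm-b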
def filter_destination_hosts(self, hosts, vm_to_migrate,
def filter_destination_hosts(self, hosts, instance_to_migrate,
avg_workload, workload_cache):
'''Only return hosts with sufficient available resources'''
cap_cores = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores)
element.ResourceType.cpu_cores)
cap_disk = self.compute_model.get_resource_from_id(
resource.ResourceType.disk)
element.ResourceType.disk)
cap_mem = self.compute_model.get_resource_from_id(
resource.ResourceType.memory)
element.ResourceType.memory)
required_cores = cap_cores.get_capacity(vm_to_migrate)
required_disk = cap_disk.get_capacity(vm_to_migrate)
required_mem = cap_mem.get_capacity(vm_to_migrate)
required_cores = cap_cores.get_capacity(instance_to_migrate)
required_disk = cap_disk.get_capacity(instance_to_migrate)
required_mem = cap_mem.get_capacity(instance_to_migrate)
# filter hypervisors without enough resource
# filter nodes without enough resources
destination_hosts = []
src_vm_workload = workload_cache[vm_to_migrate.uuid]
for hvmap in hosts:
host = hvmap['hv']
workload = hvmap['workload']
src_instance_workload = workload_cache[instance_to_migrate.uuid]
for instance_data in hosts:
host = instance_data['node']
workload = instance_data['workload']
# calculate the available resources
cores_used, mem_used, disk_used = self.calculate_used_resource(
host, cap_cores, cap_mem, cap_disk)
@ -197,29 +201,29 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
cores_available >= required_cores and
disk_available >= required_disk and
mem_available >= required_mem and
(src_vm_workload + workload) < self.threshold / 100 *
(src_instance_workload + workload) < self.threshold / 100 *
cap_cores.get_capacity(host)
):
destination_hosts.append(hvmap)
destination_hosts.append(instance_data)
return destination_hosts
def group_hosts_by_cpu_util(self):
"""Calculate the workloads of each hypervisor
"""Calculate the workloads of each node
try to find out the hypervisors which have reached threshold
and the hypervisors which are under threshold.
and also calculate the average workload value of all hypervisors.
and also generate the VM workload map.
try to find out the nodes which have reached the threshold
and the nodes which are under it,
calculate the average workload value of all nodes,
and generate the instance workload map.
"""
hypervisors = self.compute_model.get_all_hypervisors()
cluster_size = len(hypervisors)
if not hypervisors:
nodes = self.compute_model.get_all_compute_nodes()
cluster_size = len(nodes)
if not nodes:
raise wexc.ClusterEmpty()
# get cpu cores capacity of hypervisors and vms
# get cpu cores capacity of nodes and instances
cap_cores = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores)
element.ResourceType.cpu_cores)
overload_hosts = []
nonoverload_hosts = []
# total workload of cluster
@ -227,16 +231,16 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
cluster_workload = 0.0
# use workload_cache to store the workload of VMs for reuse
workload_cache = {}
for hypervisor_id in hypervisors:
hypervisor = self.compute_model.get_hypervisor_from_id(
hypervisor_id)
vms = self.compute_model.get_mapping().get_node_vms(hypervisor)
hypervisor_workload = 0.0
for vm_id in vms:
vm = self.compute_model.get_vm_from_id(vm_id)
for node_id in nodes:
node = self.compute_model.get_node_from_id(
node_id)
instances = self.compute_model.mapping.get_node_instances(node)
node_workload = 0.0
for instance_id in instances:
instance = self.compute_model.get_instance_from_id(instance_id)
try:
cpu_util = self.ceilometer.statistic_aggregation(
resource_id=vm_id,
resource_id=instance_id,
meter_name=self._meter,
period=self._period,
aggregate='avg')
@ -245,24 +249,25 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
LOG.error(_LE("Can not get cpu_util from Ceilometer"))
continue
if cpu_util is None:
LOG.debug("VM (%s): cpu_util is None", vm_id)
LOG.debug("Instance (%s): cpu_util is None", instance_id)
continue
vm_cores = cap_cores.get_capacity(vm)
workload_cache[vm_id] = cpu_util * vm_cores / 100
hypervisor_workload += workload_cache[vm_id]
LOG.debug("VM (%s): cpu_util %f", vm_id, cpu_util)
hypervisor_cores = cap_cores.get_capacity(hypervisor)
hy_cpu_util = hypervisor_workload / hypervisor_cores * 100
instance_cores = cap_cores.get_capacity(instance)
workload_cache[instance_id] = cpu_util * instance_cores / 100
node_workload += workload_cache[instance_id]
LOG.debug("VM (%s): cpu_util %f", instance_id, cpu_util)
node_cores = cap_cores.get_capacity(node)
hy_cpu_util = node_workload / node_cores * 100
cluster_workload += hypervisor_workload
cluster_workload += node_workload
hvmap = {'hv': hypervisor, "cpu_util": hy_cpu_util, 'workload':
hypervisor_workload}
instance_data = {
'node': node, "cpu_util": hy_cpu_util,
'workload': node_workload}
if hy_cpu_util >= self.threshold:
# mark the hypervisor to release resources
overload_hosts.append(hvmap)
# mark the node to release resources
overload_hosts.append(instance_data)
else:
nonoverload_hosts.append(hvmap)
nonoverload_hosts.append(instance_data)
avg_workload = cluster_workload / cluster_size
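A standalone sketch of the grouping arithmetic, with made-up numbers: per-instance workload is cpu_util scaled by vcpus, a node's cpu_util is its summed workload over its core count, and nodes are split on the threshold:

threshold = 25.0
nodes = {
    'node-1': {'cores': 8, 'instances': {'vm-1': (100.0, 2), 'vm-2': (50.0, 1)}},
    'node-2': {'cores': 8, 'instances': {'vm-3': (25.0, 1)}},
}
overloaded, underloaded, cluster_workload = [], [], 0.0
for name, node in nodes.items():
    # (cpu_util percent, vcpus) -> cores' worth of workload
    workload = sum(util * vcpus / 100 for util, vcpus in node['instances'].values())
    cpu_util = workload / node['cores'] * 100
    cluster_workload += workload
    (overloaded if cpu_util >= threshold else underloaded).append((name, cpu_util))
print(overloaded)    # [('node-1', 31.25)]  since (2.0 + 0.5) / 8 = 31.25 percent
print(underloaded)   # [('node-2', 3.125)]
print(cluster_workload / len(nodes))  # average workload: 1.375 cores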
@ -285,52 +290,52 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
"""
self.threshold = self.input_parameters.threshold
self._period = self.input_parameters.period
src_hypervisors, target_hypervisors, avg_workload, workload_cache = (
source_nodes, target_nodes, avg_workload, workload_cache = (
self.group_hosts_by_cpu_util())
if not src_hypervisors:
if not source_nodes:
LOG.debug("No hosts require optimization")
return self.solution
if not target_hypervisors:
if not target_nodes:
LOG.warning(_LW("No hosts current have CPU utilization under %s "
"percent, therefore there are no possible target "
"hosts for any migrations"),
"hosts for any migration"),
self.threshold)
return self.solution
# choose the server with largest cpu_util
src_hypervisors = sorted(src_hypervisors,
reverse=True,
key=lambda x: (x[self.METER_NAME]))
source_nodes = sorted(source_nodes,
reverse=True,
key=lambda x: (x[self.METER_NAME]))
vm_to_migrate = self.choose_vm_to_migrate(
src_hypervisors, avg_workload, workload_cache)
if not vm_to_migrate:
instance_to_migrate = self.choose_instance_to_migrate(
source_nodes, avg_workload, workload_cache)
if not instance_to_migrate:
return self.solution
source_hypervisor, vm_src = vm_to_migrate
source_node, instance_src = instance_to_migrate
# find the hosts that have enough resources for the VM to be migrated
destination_hosts = self.filter_destination_hosts(
target_hypervisors, vm_src, avg_workload, workload_cache)
target_nodes, instance_src, avg_workload, workload_cache)
# sort the filtered result by workload
# pick up the lowest one as dest server
if not destination_hosts:
LOG.warning(_LW("No target host could be found; it might "
"be because there is not enough CPU, memory "
"or disk"))
# for the instance.
LOG.warning(_LW("No proper target host could be found; it might "
"be because there is not enough CPU, memory or disk"))
return self.solution
destination_hosts = sorted(destination_hosts,
key=lambda x: (x["cpu_util"]))
# always use the host with lowest CPU utilization
mig_dst_hypervisor = destination_hosts[0]['hv']
# generate solution to migrate the vm to the dest server,
if self.compute_model.get_mapping().migrate_vm(
vm_src, source_hypervisor, mig_dst_hypervisor):
mig_destination_node = destination_hosts[0]['node']
# generate solution to migrate the instance to the dest server
if self.compute_model.mapping.migrate_instance(
instance_src, source_node, mig_destination_node):
parameters = {'migration_type': 'live',
'src_hypervisor': source_hypervisor.uuid,
'dst_hypervisor': mig_dst_hypervisor.uuid}
'source_node': source_node.uuid,
'destination_node': mig_destination_node.uuid}
self.solution.add_action(action_type=self.MIGRATION,
resource_id=vm_src.uuid,
resource_id=instance_src.uuid,
input_parameters=parameters)
def post_execute(self):

View File

@ -30,8 +30,7 @@ from watcher._i18n import _LI, _
from watcher.common import exception
from watcher.decision_engine.cluster.history import ceilometer as \
ceilometer_cluster_history
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@ -39,8 +38,8 @@ LOG = log.getLogger(__name__)
metrics = ['cpu_util', 'memory.resident']
thresholds_dict = {'cpu_util': 0.2, 'memory.resident': 0.2}
weights_dict = {'cpu_util_weight': 1.0, 'memory.resident_weight': 1.0}
vm_host_measures = {'cpu_util': 'hardware.cpu.util',
'memory.resident': 'hardware.memory.used'}
instance_host_measures = {'cpu_util': 'hardware.cpu.util',
'memory.resident': 'hardware.memory.used'}
ws_opts = [
cfg.ListOpt('metrics',
@ -154,73 +153,75 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
def ceilometer(self, c):
self._ceilometer = c
def transform_vm_cpu(self, vm_load, host_vcpus):
"""This method transforms vm cpu utilization to overall host cpu utilization.
def transform_instance_cpu(self, instance_load, host_vcpus):
"""Transform instance cpu utilization to overall host cpu utilization.
:param vm_load: dict that contains vm uuid and utilization info.
:param instance_load: dict that contains instance uuid and
utilization info.
:param host_vcpus: int
:return: float value
"""
return vm_load['cpu_util'] * (vm_load['vcpus'] / float(host_vcpus))
return (instance_load['cpu_util'] *
(instance_load['vcpus'] / float(host_vcpus)))
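A worked example of this scaling, with illustrative numbers: the instance's own cpu_util is converted into the share of the whole host it represents:

def transform_instance_cpu(instance_load, host_vcpus):
    """Scale an instance's cpu_util by its share of the host's vcpus."""
    return instance_load['cpu_util'] * (instance_load['vcpus'] / float(host_vcpus))

# A 2-vcpu instance running at 50 percent of its own cpus on a 16-vcpu host
# contributes 50 * (2 / 16) = 6.25 percentage points of host cpu_util.
print(transform_instance_cpu({'cpu_util': 50.0, 'vcpus': 2}, 16))  # 6.25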
@MEMOIZE
def get_vm_load(self, vm_uuid):
"""Gathering vm load through ceilometer statistic.
def get_instance_load(self, instance_uuid):
"""Gathering instance load through ceilometer statistic.
:param vm_uuid: vm for which statistic is gathered.
:param instance_uuid: instance for which statistic is gathered.
:return: dict
"""
LOG.debug('get_vm_load started')
vm_vcpus = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(
self.compute_model.get_vm_from_id(vm_uuid))
vm_load = {'uuid': vm_uuid, 'vcpus': vm_vcpus}
LOG.debug('get_instance_load started')
instance_vcpus = self.compute_model.get_resource_from_id(
element.ResourceType.cpu_cores).get_capacity(
self.compute_model.get_instance_from_id(instance_uuid))
instance_load = {'uuid': instance_uuid, 'vcpus': instance_vcpus}
for meter in self.metrics:
avg_meter = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid,
resource_id=instance_uuid,
meter_name=meter,
period="120",
aggregate='min'
)
if avg_meter is None:
raise exception.NoMetricValuesForVM(resource_id=vm_uuid,
metric_name=meter)
vm_load[meter] = avg_meter
return vm_load
raise exception.NoMetricValuesForInstance(
resource_id=instance_uuid, metric_name=meter)
instance_load[meter] = avg_meter
return instance_load
def normalize_hosts_load(self, hosts):
normalized_hosts = deepcopy(hosts)
for host in normalized_hosts:
if 'memory.resident' in normalized_hosts[host]:
h_memory = self.compute_model.get_resource_from_id(
resource.ResourceType.memory).get_capacity(
self.compute_model.get_hypervisor_from_id(host))
element.ResourceType.memory).get_capacity(
self.compute_model.get_node_from_id(host))
normalized_hosts[host]['memory.resident'] /= float(h_memory)
return normalized_hosts
def get_hosts_load(self):
"""Get load of every host by gathering vms load"""
"""Get load of every host by gathering instances load"""
hosts_load = {}
for hypervisor_id in self.compute_model.get_all_hypervisors():
hosts_load[hypervisor_id] = {}
for node_id in self.compute_model.get_all_compute_nodes():
hosts_load[node_id] = {}
host_vcpus = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(
self.compute_model.get_hypervisor_from_id(hypervisor_id))
hosts_load[hypervisor_id]['vcpus'] = host_vcpus
element.ResourceType.cpu_cores).get_capacity(
self.compute_model.get_node_from_id(node_id))
hosts_load[node_id]['vcpus'] = host_vcpus
for metric in self.metrics:
avg_meter = self.ceilometer.statistic_aggregation(
resource_id=hypervisor_id,
meter_name=vm_host_measures[metric],
resource_id=node_id,
meter_name=instance_host_measures[metric],
period="60",
aggregate='avg'
)
if avg_meter is None:
raise exception.NoSuchMetricForHost(
metric=vm_host_measures[metric],
host=hypervisor_id)
hosts_load[hypervisor_id][metric] = avg_meter
metric=instance_host_measures[metric],
host=node_id)
hosts_load[node_id][metric] = avg_meter
return hosts_load
def get_sd(self, hosts, meter_name):
@ -249,33 +250,34 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
" for %s in weight dict.") % metric)
return weighted_sd
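For reference, a minimal sketch of the objective being minimized: the per-metric standard deviation of normalized host loads, combined through the configured weights (names and numbers are illustrative):

import math

def get_sd(hosts, metric):
    """Population standard deviation of one metric across hosts."""
    values = [load[metric] for load in hosts.values()]
    mean = sum(values) / len(values)
    return math.sqrt(sum((v - mean) ** 2 for v in values) / len(values))

def calculate_weighted_sd(hosts, metrics, weights):
    return sum(get_sd(hosts, m) * weights[m + '_weight'] for m in metrics)

hosts = {'node-1': {'cpu_util': 0.75}, 'node-2': {'cpu_util': 0.25}}
print(calculate_weighted_sd(hosts, ['cpu_util'], {'cpu_util_weight': 1.0}))  # 0.25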
def calculate_migration_case(self, hosts, vm_id, src_hp_id, dst_hp_id):
def calculate_migration_case(self, hosts, instance_id,
src_node_id, dst_node_id):
"""Calculate migration case
Return the list of standard deviation values that would appear in case of
migration of vm from source host to destination host
migration of an instance from the source host to the destination host
:param hosts: hosts with their workload
:param vm_id: the virtual machine
:param src_hp_id: the source hypervisor id
:param dst_hp_id: the destination hypervisor id
:param instance_id: the virtual machine uuid
:param src_node_id: the source node id
:param dst_node_id: the destination node id
:return: list of standard deviation values
"""
migration_case = []
new_hosts = deepcopy(hosts)
vm_load = self.get_vm_load(vm_id)
d_host_vcpus = new_hosts[dst_hp_id]['vcpus']
s_host_vcpus = new_hosts[src_hp_id]['vcpus']
instance_load = self.get_instance_load(instance_id)
d_host_vcpus = new_hosts[dst_node_id]['vcpus']
s_host_vcpus = new_hosts[src_node_id]['vcpus']
for metric in self.metrics:
if metric == 'cpu_util':
new_hosts[src_hp_id][metric] -= self.transform_vm_cpu(
vm_load,
new_hosts[src_node_id][metric] -= self.transform_instance_cpu(
instance_load,
s_host_vcpus)
new_hosts[dst_hp_id][metric] += self.transform_vm_cpu(
vm_load,
new_hosts[dst_node_id][metric] += self.transform_instance_cpu(
instance_load,
d_host_vcpus)
else:
new_hosts[src_hp_id][metric] -= vm_load[metric]
new_hosts[dst_hp_id][metric] += vm_load[metric]
new_hosts[src_node_id][metric] -= instance_load[metric]
new_hosts[dst_node_id][metric] += instance_load[metric]
normalized_hosts = self.normalize_hosts_load(new_hosts)
for metric in self.metrics:
migration_case.append(self.get_sd(normalized_hosts, metric))
@ -283,45 +285,46 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
return migration_case
def simulate_migrations(self, hosts):
"""Make sorted list of pairs vm:dst_host"""
def yield_hypervisors(hypervisors):
"""Make sorted list of pairs instance:dst_host"""
def yield_nodes(nodes):
ct = CONF['watcher_strategies.workload_stabilization'].retry_count
if self.host_choice == 'cycle':
for i in itertools.cycle(hypervisors):
for i in itertools.cycle(nodes):
yield [i]
if self.host_choice == 'retry':
while True:
yield random.sample(hypervisors, ct)
yield random.sample(nodes, ct)
if self.host_choice == 'fullsearch':
while True:
yield hypervisors
yield nodes
vm_host_map = []
for source_hp_id in self.compute_model.get_all_hypervisors():
hypervisors = list(self.compute_model.get_all_hypervisors())
hypervisors.remove(source_hp_id)
hypervisor_list = yield_hypervisors(hypervisors)
vms_id = self.compute_model.get_mapping(). \
get_node_vms_from_id(source_hp_id)
for vm_id in vms_id:
instance_host_map = []
for source_hp_id in self.compute_model.get_all_compute_nodes():
nodes = list(self.compute_model.get_all_compute_nodes())
nodes.remove(source_hp_id)
node_list = yield_nodes(nodes)
instances_id = self.compute_model.get_mapping(). \
get_node_instances_from_id(source_hp_id)
for instance_id in instances_id:
min_sd_case = {'value': len(self.metrics)}
vm = self.compute_model.get_vm_from_id(vm_id)
if vm.state not in [vm_state.VMState.ACTIVE.value,
vm_state.VMState.PAUSED.value]:
instance = self.compute_model.get_instance_from_id(instance_id)
if instance.state not in [element.InstanceState.ACTIVE.value,
element.InstanceState.PAUSED.value]:
continue
for dst_hp_id in next(hypervisor_list):
sd_case = self.calculate_migration_case(hosts, vm_id,
for dst_node_id in next(node_list):
sd_case = self.calculate_migration_case(hosts, instance_id,
source_hp_id,
dst_hp_id)
dst_node_id)
weighted_sd = self.calculate_weighted_sd(sd_case[:-1])
if weighted_sd < min_sd_case['value']:
min_sd_case = {'host': dst_hp_id, 'value': weighted_sd,
's_host': source_hp_id, 'vm': vm_id}
vm_host_map.append(min_sd_case)
min_sd_case = {
'host': dst_node_id, 'value': weighted_sd,
's_host': source_hp_id, 'instance': instance_id}
instance_host_map.append(min_sd_case)
break
return sorted(vm_host_map, key=lambda x: x['value'])
return sorted(instance_host_map, key=lambda x: x['value'])
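The destination-choice generator above, as a standalone sketch: 'cycle' walks candidate nodes one at a time, 'retry' draws a random sample per attempt, and 'fullsearch' offers all of them every time (the retry_count parameter here stands in for the config option):

import itertools
import random

def yield_nodes(nodes, host_choice, retry_count=1):
    """Yield lists of candidate destination nodes per host_choice mode."""
    if host_choice == 'cycle':
        for node in itertools.cycle(nodes):
            yield [node]
    elif host_choice == 'retry':
        while True:
            yield random.sample(nodes, retry_count)
    elif host_choice == 'fullsearch':
        while True:
            yield nodes

gen = yield_nodes(['node-1', 'node-2', 'node-3'], 'cycle')
print(next(gen), next(gen))  # ['node-1'] ['node-2']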
def check_threshold(self):
"""Check if cluster is needed in balancing"""
@ -335,32 +338,32 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
def add_migration(self,
resource_id,
migration_type,
src_hypervisor,
dst_hypervisor):
source_node,
destination_node):
parameters = {'migration_type': migration_type,
'src_hypervisor': src_hypervisor,
'dst_hypervisor': dst_hypervisor}
'source_node': source_node,
'destination_node': destination_node}
self.solution.add_action(action_type=self.MIGRATION,
resource_id=resource_id,
input_parameters=parameters)
def create_migration_vm(self, mig_vm, mig_src_hypervisor,
mig_dst_hypervisor):
def create_migration_instance(self, mig_instance, mig_source_node,
mig_destination_node):
"""Create migration VM """
if self.compute_model.get_mapping().migrate_vm(
mig_vm, mig_src_hypervisor, mig_dst_hypervisor):
self.add_migration(mig_vm.uuid, 'live',
mig_src_hypervisor.uuid,
mig_dst_hypervisor.uuid)
if self.compute_model.get_mapping().migrate_instance(
mig_instance, mig_source_node, mig_destination_node):
self.add_migration(mig_instance.uuid, 'live',
mig_source_node.uuid,
mig_destination_node.uuid)
def migrate(self, vm_uuid, src_host, dst_host):
mig_vm = self.compute_model.get_vm_from_id(vm_uuid)
mig_src_hypervisor = self.compute_model.get_hypervisor_from_id(
def migrate(self, instance_uuid, src_host, dst_host):
mig_instance = self.compute_model.get_instance_from_id(instance_uuid)
mig_source_node = self.compute_model.get_node_from_id(
src_host)
mig_dst_hypervisor = self.compute_model.get_hypervisor_from_id(
mig_destination_node = self.compute_model.get_node_from_id(
dst_host)
self.create_migration_vm(mig_vm, mig_src_hypervisor,
mig_dst_hypervisor)
self.create_migration_instance(mig_instance, mig_source_node,
mig_destination_node)
def fill_solution(self):
self.solution.model = self.compute_model
@ -378,28 +381,29 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
hosts_load = self.get_hosts_load()
min_sd = 1
balanced = False
for vm_host in migration:
for instance_host in migration:
dst_hp_disk = self.compute_model.get_resource_from_id(
resource.ResourceType.disk).get_capacity(
self.compute_model.get_hypervisor_from_id(
vm_host['host']))
vm_disk = self.compute_model.get_resource_from_id(
resource.ResourceType.disk).get_capacity(
self.compute_model.get_vm_from_id(vm_host['vm']))
if vm_disk > dst_hp_disk:
element.ResourceType.disk).get_capacity(
self.compute_model.get_node_from_id(
instance_host['host']))
instance_disk = self.compute_model.get_resource_from_id(
element.ResourceType.disk).get_capacity(
self.compute_model.get_instance_from_id(
instance_host['instance']))
if instance_disk > dst_hp_disk:
continue
vm_load = self.calculate_migration_case(hosts_load,
vm_host['vm'],
vm_host['s_host'],
vm_host['host'])
weighted_sd = self.calculate_weighted_sd(vm_load[:-1])
instance_load = self.calculate_migration_case(
hosts_load, instance_host['instance'],
instance_host['s_host'], instance_host['host'])
weighted_sd = self.calculate_weighted_sd(instance_load[:-1])
if weighted_sd < min_sd:
min_sd = weighted_sd
hosts_load = vm_load[-1]
self.migrate(vm_host['vm'],
vm_host['s_host'], vm_host['host'])
hosts_load = instance_load[-1]
self.migrate(instance_host['instance'],
instance_host['s_host'],
instance_host['host'])
for metric, value in zip(self.metrics, vm_load[:-1]):
for metric, value in zip(self.metrics, instance_load[:-1]):
if value < float(self.thresholds[metric]):
balanced = True
break


@ -23,7 +23,7 @@ from watcher.applier.actions import base as baction
from watcher.applier.actions import change_nova_service_state
from watcher.common import clients
from watcher.common import nova_helper
from watcher.decision_engine.model import hypervisor_state as hstate
from watcher.decision_engine.model import element
from watcher.tests import base
@ -52,7 +52,7 @@ class TestChangeNovaServiceState(base.TestCase):
self.input_parameters = {
baction.BaseAction.RESOURCE_ID: "compute-1",
"state": hstate.HypervisorState.ENABLED.value,
"state": element.ServiceState.ENABLED.value,
}
self.action = change_nova_service_state.ChangeNovaServiceState(
mock.Mock())
@ -61,13 +61,13 @@ class TestChangeNovaServiceState(base.TestCase):
def test_parameters_down(self):
self.action.input_parameters = {
baction.BaseAction.RESOURCE_ID: "compute-1",
self.action.STATE: hstate.HypervisorState.DISABLED.value}
self.action.STATE: element.ServiceState.DISABLED.value}
self.assertTrue(self.action.validate_parameters())
def test_parameters_up(self):
self.action.input_parameters = {
baction.BaseAction.RESOURCE_ID: "compute-1",
self.action.STATE: hstate.HypervisorState.ENABLED.value}
self.action.STATE: element.ServiceState.ENABLED.value}
self.assertTrue(self.action.validate_parameters())
def test_parameters_exception_wrong_state(self):
@ -82,7 +82,7 @@ class TestChangeNovaServiceState(base.TestCase):
def test_parameters_resource_id_empty(self):
self.action.input_parameters = {
self.action.STATE: hstate.HypervisorState.ENABLED.value,
self.action.STATE: element.ServiceState.ENABLED.value,
}
exc = self.assertRaises(
voluptuous.Invalid, self.action.validate_parameters)
@ -123,7 +123,7 @@ class TestChangeNovaServiceState(base.TestCase):
def test_execute_change_service_state_with_disable_target(self):
self.action.input_parameters["state"] = (
hstate.HypervisorState.DISABLED.value)
element.ServiceState.DISABLED.value)
self.action.execute()
self.m_helper_cls.assert_called_once_with(osc=self.m_osc)
@ -139,7 +139,7 @@ class TestChangeNovaServiceState(base.TestCase):
def test_revert_change_service_state_with_disable_target(self):
self.action.input_parameters["state"] = (
hstate.HypervisorState.DISABLED.value)
element.ServiceState.DISABLED.value)
self.action.revert()
self.m_helper_cls.assert_called_once_with(osc=self.m_osc)


@ -54,8 +54,8 @@ class TestMigration(base.TestCase):
self.input_parameters = {
"migration_type": "live",
"src_hypervisor": "hypervisor1-hostname",
"dst_hypervisor": "hypervisor2-hostname",
"source_node": "compute1-hostname",
"destination_node": "compute2-hostname",
baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID,
}
self.action = migration.Migrate(mock.Mock())
@ -63,8 +63,8 @@ class TestMigration(base.TestCase):
self.input_parameters_cold = {
"migration_type": "cold",
"src_hypervisor": "hypervisor1-hostname",
"dst_hypervisor": "hypervisor2-hostname",
"source_node": "compute1-hostname",
"destination_node": "compute2-hostname",
baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID,
}
self.action_cold = migration.Migrate(mock.Mock())
@ -74,8 +74,8 @@ class TestMigration(base.TestCase):
params = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID,
self.action.MIGRATION_TYPE: 'live',
self.action.DST_HYPERVISOR: 'compute-2',
self.action.SRC_HYPERVISOR: 'compute-3'}
self.action.DESTINATION_NODE: 'compute-2',
self.action.SOURCE_NODE: 'compute-3'}
self.action.input_parameters = params
self.assertTrue(self.action.validate_parameters())
@ -83,31 +83,31 @@ class TestMigration(base.TestCase):
params = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID,
self.action.MIGRATION_TYPE: 'cold',
self.action.DST_HYPERVISOR: 'compute-2',
self.action.SRC_HYPERVISOR: 'compute-3'}
self.action.DESTINATION_NODE: 'compute-2',
self.action.SOURCE_NODE: 'compute-3'}
self.action_cold.input_parameters = params
self.assertTrue(self.action_cold.validate_parameters())
def test_parameters_exception_empty_fields(self):
parameters = {baction.BaseAction.RESOURCE_ID: None,
'migration_type': None,
'src_hypervisor': None,
'dst_hypervisor': None}
'source_node': None,
'destination_node': None}
self.action.input_parameters = parameters
exc = self.assertRaises(
voluptuous.MultipleInvalid, self.action.validate_parameters)
self.assertEqual(
sorted([(['migration_type'], voluptuous.ScalarInvalid),
(['src_hypervisor'], voluptuous.TypeInvalid),
(['dst_hypervisor'], voluptuous.TypeInvalid)]),
(['source_node'], voluptuous.TypeInvalid),
(['destination_node'], voluptuous.TypeInvalid)]),
sorted([(e.path, type(e)) for e in exc.errors]))
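# A standalone sketch (assumes only the voluptuous package; this toy schema
# is an illustration, not the action's real one) of how the renamed keys
# surface as per-field errors, as the test above checks:
import voluptuous

toy_schema = voluptuous.Schema({'source_node': str, 'destination_node': str})
try:
    toy_schema({'source_node': None, 'destination_node': None})
except voluptuous.MultipleInvalid as exc:
    paths = sorted(str(e.path[0]) for e in exc.errors)
    assert paths == ['destination_node', 'source_node']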
def test_parameters_exception_migration_type(self):
parameters = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID,
'migration_type': 'unknown',
'src_hypervisor': 'compute-2',
'dst_hypervisor': 'compute-3'}
'source_node': 'compute-2',
'destination_node': 'compute-3'}
self.action.input_parameters = parameters
exc = self.assertRaises(
voluptuous.Invalid, self.action.validate_parameters)
@ -115,37 +115,37 @@ class TestMigration(base.TestCase):
[(['migration_type'], voluptuous.ScalarInvalid)],
[(e.path, type(e)) for e in exc.errors])
def test_parameters_exception_src_hypervisor(self):
def test_parameters_exception_source_node(self):
parameters = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID,
'migration_type': 'live',
'src_hypervisor': None,
'dst_hypervisor': 'compute-3'}
'source_node': None,
'destination_node': 'compute-3'}
self.action.input_parameters = parameters
exc = self.assertRaises(
voluptuous.MultipleInvalid, self.action.validate_parameters)
self.assertEqual(
[(['src_hypervisor'], voluptuous.TypeInvalid)],
[(['source_node'], voluptuous.TypeInvalid)],
[(e.path, type(e)) for e in exc.errors])
def test_parameters_exception_dst_hypervisor(self):
def test_parameters_exception_destination_node(self):
parameters = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID,
'migration_type': 'live',
'src_hypervisor': 'compute-1',
'dst_hypervisor': None}
'source_node': 'compute-1',
'destination_node': None}
self.action.input_parameters = parameters
exc = self.assertRaises(
voluptuous.MultipleInvalid, self.action.validate_parameters)
self.assertEqual(
[(['dst_hypervisor'], voluptuous.TypeInvalid)],
[(['destination_node'], voluptuous.TypeInvalid)],
[(e.path, type(e)) for e in exc.errors])
def test_parameters_exception_resource_id(self):
parameters = {baction.BaseAction.RESOURCE_ID: "EFEF",
'migration_type': 'live',
'src_hypervisor': 'compute-2',
'dst_hypervisor': 'compute-3'}
'source_node': 'compute-2',
'destination_node': 'compute-3'}
self.action.input_parameters = parameters
exc = self.assertRaises(
voluptuous.MultipleInvalid, self.action.validate_parameters)
@ -189,7 +189,7 @@ class TestMigration(base.TestCase):
self.m_helper.live_migrate_instance.assert_called_once_with(
instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname")
dest_hostname="compute2-hostname")
def test_execute_cold_migration(self):
self.m_helper.find_instance.return_value = self.INSTANCE_UUID
@ -202,7 +202,7 @@ class TestMigration(base.TestCase):
self.m_helper.watcher_non_live_migrate_instance.\
assert_called_once_with(
instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname"
dest_hostname="compute2-hostname"
)
def test_revert_live_migration(self):
@ -213,7 +213,7 @@ class TestMigration(base.TestCase):
self.m_helper_cls.assert_called_once_with(osc=self.m_osc)
self.m_helper.live_migrate_instance.assert_called_once_with(
instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor1-hostname"
dest_hostname="compute1-hostname"
)
def test_revert_cold_migration(self):
@ -225,7 +225,7 @@ class TestMigration(base.TestCase):
self.m_helper.watcher_non_live_migrate_instance.\
assert_called_once_with(
instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor1-hostname"
dest_hostname="compute1-hostname"
)
def test_live_migrate_non_shared_storage_instance(self):
@ -241,16 +241,16 @@ class TestMigration(base.TestCase):
self.m_helper.live_migrate_instance.assert_has_calls([
mock.call(instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname"),
dest_hostname="compute2-hostname"),
mock.call(instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname",
dest_hostname="compute2-hostname",
block_migration=True)
])
])
expected = [mock.call(instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname"),
dest_hostname="compute2-hostname"),
mock.call(instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname",
dest_hostname="compute2-hostname",
block_migration=True)
]
self.assertEqual(expected, self.m_helper.live_migrate_instance.mock_calls)


@ -56,7 +56,7 @@ class TestCeilometerHelper(base.BaseTestCase):
mock_ceilometer.return_value = ceilometer
cm = ceilometer_helper.CeilometerHelper()
val = cm.statistic_aggregation(
resource_id="VM_ID",
resource_id="INSTANCE_ID",
meter_name="cpu_util",
period="7300"
)


@ -26,7 +26,7 @@ from watcher.common import clients
from watcher.tests import base
class TestClients(base.BaseTestCase):
class TestClients(base.TestCase):
def setUp(self):
super(TestClients, self).setUp()


@ -36,8 +36,8 @@ class TestNovaHelper(base.TestCase):
def setUp(self):
super(TestNovaHelper, self).setUp()
self.instance_uuid = "fb5311b7-37f3-457e-9cde-6494a3c59bfe"
self.source_hypervisor = "ldev-indeedsrv005"
self.destination_hypervisor = "ldev-indeedsrv006"
self.source_node = "ldev-indeedsrv005"
self.destination_node = "ldev-indeedsrv006"
def test_stop_instance(self, mock_glance, mock_cinder, mock_neutron,
mock_nova):
@ -71,7 +71,7 @@ class TestNovaHelper(base.TestCase):
nova_util.nova.servers = mock.MagicMock()
nova_util.nova.servers.list.return_value = [server]
instance = nova_util.live_migrate_instance(
self.instance_uuid, self.destination_hypervisor
self.instance_uuid, self.destination_node
)
self.assertIsNotNone(instance)
@ -83,7 +83,7 @@ class TestNovaHelper(base.TestCase):
is_success = nova_util.watcher_non_live_migrate_instance(
self.instance_uuid,
self.destination_hypervisor)
self.destination_node)
self.assertFalse(is_success)
@ -92,12 +92,12 @@ class TestNovaHelper(base.TestCase):
self, mock_glance, mock_cinder, mock_neutron, mock_nova):
nova_util = nova_helper.NovaHelper()
instance = mock.MagicMock(id=self.instance_uuid)
setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_hypervisor)
setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node)
nova_util.nova.servers.list.return_value = [instance]
nova_util.nova.servers.find.return_value = instance
instance = nova_util.watcher_non_live_migrate_instance(
self.instance_uuid,
self.destination_hypervisor)
self.destination_node)
self.assertIsNotNone(instance)
@mock.patch.object(time, 'sleep', mock.Mock())
@ -105,7 +105,7 @@ class TestNovaHelper(base.TestCase):
self, mock_glance, mock_cinder, mock_neutron, mock_nova):
nova_util = nova_helper.NovaHelper()
instance = mock.MagicMock(id=self.instance_uuid)
setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_hypervisor)
setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node)
addresses = mock.MagicMock()
network_type = mock.MagicMock()
networks = []
@ -119,7 +119,7 @@ class TestNovaHelper(base.TestCase):
nova_util.nova.servers.find.return_value = instance
instance = nova_util.watcher_non_live_migrate_instance(
self.instance_uuid,
self.destination_hypervisor, keep_original_image_name=False)
self.destination_node, keep_original_image_name=False)
self.assertIsNotNone(instance)
@mock.patch.object(time, 'sleep', mock.Mock())
@ -128,7 +128,7 @@ class TestNovaHelper(base.TestCase):
nova_util = nova_helper.NovaHelper()
instance = mock.MagicMock()
image = mock.MagicMock()
setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_hypervisor)
setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node)
nova_util.nova.servers.list.return_value = [instance]
nova_util.nova.servers.find.return_value = instance
image_uuid = 'fake-image-uuid'


@ -35,7 +35,7 @@ class TestNovaClusterDataModelCollector(base.TestCase):
def test_nova_cdmc_execute(self, m_nova_helper_cls):
m_nova_helper = mock.Mock()
m_nova_helper_cls.return_value = m_nova_helper
fake_hypervisor = mock.Mock(
fake_compute_node = mock.Mock(
service={'id': 123},
hypervisor_hostname='test_hostname',
memory_mb=333,
@ -45,19 +45,19 @@ class TestNovaClusterDataModelCollector(base.TestCase):
state='TEST_STATE',
status='TEST_STATUS',
)
fake_vm = mock.Mock(
fake_instance = mock.Mock(
id='ef500f7e-dac8-470f-960c-169486fce71b',
state=mock.Mock(**{'OS-EXT-STS:vm_state': 'VM_STATE'}),
state=mock.Mock(**{'OS-EXT-STS:instance_state': 'VM_STATE'}),
flavor={'ram': 333, 'disk': 222, 'vcpus': 4},
)
m_nova_helper.get_hypervisors_list.return_value = [fake_hypervisor]
m_nova_helper.get_vms_by_hypervisor.return_value = [fake_vm]
m_nova_helper.get_compute_node_list.return_value = [fake_compute_node]
m_nova_helper.get_instances_by_node.return_value = [fake_instance]
m_nova_helper.nova.services.find.return_value = mock.Mock(
host='test_hostname')
def m_get_flavor_instance(vm, cache):
vm.flavor = {'ram': 333, 'disk': 222, 'vcpus': 4}
return vm
def m_get_flavor_instance(instance, cache):
instance.flavor = {'ram': 333, 'disk': 222, 'vcpus': 4}
return instance
m_nova_helper.get_flavor_instance.side_effect = m_get_flavor_instance
@ -69,14 +69,14 @@ class TestNovaClusterDataModelCollector(base.TestCase):
model = nova_cdmc.execute()
hypervisors = model.get_all_hypervisors()
vms = model.get_all_vms()
compute_nodes = model.get_all_compute_nodes()
instances = model.get_all_instances()
self.assertEqual(1, len(hypervisors))
self.assertEqual(1, len(vms))
self.assertEqual(1, len(compute_nodes))
self.assertEqual(1, len(instances))
hypervisor = list(hypervisors.values())[0]
vm = list(vms.values())[0]
node = list(compute_nodes.values())[0]
instance = list(instances.values())[0]
self.assertEqual(hypervisor.uuid, 'test_hostname')
self.assertEqual(vm.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')
self.assertEqual(node.uuid, 'test_hostname')
self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')


@ -17,13 +17,13 @@
# limitations under the License.
#
from watcher.decision_engine.model import disk_info
from watcher.decision_engine.model import element
from watcher.tests import base
class TestDiskInfo(base.BaseTestCase):
class TestDiskInfo(base.TestCase):
def test_all(self):
disk_information = disk_info.DiskInfo()
disk_information = element.DiskInfo()
disk_information.set_size(1024)
self.assertEqual(1024, disk_information.get_size())


@ -16,15 +16,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from watcher.decision_engine.model import vm as vm_model
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.tests import base
class TestVm(base.BaseTestCase):
class TestInstance(base.TestCase):
def test_namedelement(self):
vm = vm_model.VM()
vm.state = vm_state.VMState.ACTIVE
self.assertEqual(vm_state.VMState.ACTIVE, vm.state)
vm.human_id = "human_05"
self.assertEqual("human_05", vm.human_id)
instance = element.Instance()
instance.state = element.InstanceState.ACTIVE
self.assertEqual(element.InstanceState.ACTIVE, instance.state)
instance.human_id = "human_05"
self.assertEqual("human_05", instance.human_id)


@ -18,92 +18,99 @@
#
import uuid
from watcher.decision_engine.model import hypervisor as modelhyp
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies import \
faker_cluster_state
class TestMapping(base.BaseTestCase):
class TestMapping(base.TestCase):
VM1_UUID = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
VM2_UUID = "a4cab39b-9828-413a-bf88-f76921bf1517"
INST1_UUID = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
INST2_UUID = "a4cab39b-9828-413a-bf88-f76921bf1517"
def setUp(self):
super(TestMapping, self).setUp()
self.fake_cluster = faker_cluster_state.FakerModelCollector()
def test_get_node_from_vm(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
def test_get_node_from_instance(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
vms = model.get_all_vms()
keys = list(vms.keys())
vm = vms[keys[0]]
if vm.uuid != self.VM1_UUID:
vm = vms[keys[1]]
node = model.mapping.get_node_from_vm(vm)
instances = model.get_all_instances()
keys = list(instances.keys())
instance = instances[keys[0]]
if instance.uuid != self.INST1_UUID:
instance = instances[keys[1]]
node = model.mapping.get_node_from_instance(instance)
self.assertEqual('Node_0', node.uuid)
def test_get_node_from_vm_id(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
def test_get_node_from_instance_id(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
hyps = model.mapping.get_node_vms_from_id("BLABLABLA")
self.assertEqual(0, hyps.__len__())
nodes = model.mapping.get_node_instances_from_id("BLABLABLA")
self.assertEqual(0, len(nodes))
def test_get_all_vms(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
def test_get_all_instances(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
vms = model.get_all_vms()
self.assertEqual(2, vms.__len__())
self.assertEqual(vm_state.VMState.ACTIVE.value,
vms[self.VM1_UUID].state)
self.assertEqual(self.VM1_UUID, vms[self.VM1_UUID].uuid)
self.assertEqual(vm_state.VMState.ACTIVE.value,
vms[self.VM2_UUID].state)
self.assertEqual(self.VM2_UUID, vms[self.VM2_UUID].uuid)
instances = model.get_all_instances()
self.assertEqual(2, len(instances))
self.assertEqual(element.InstanceState.ACTIVE.value,
instances[self.INST1_UUID].state)
self.assertEqual(self.INST1_UUID, instances[self.INST1_UUID].uuid)
self.assertEqual(element.InstanceState.ACTIVE.value,
instances[self.INST2_UUID].state)
self.assertEqual(self.INST2_UUID, instances[self.INST2_UUID].uuid)
def test_get_mapping(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
mapping_vm = model.mapping.get_mapping_vm()
self.assertEqual(2, mapping_vm.__len__())
self.assertEqual('Node_0', mapping_vm[self.VM1_UUID])
self.assertEqual('Node_1', mapping_vm[self.VM2_UUID])
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
instance_mapping = model.mapping.instance_mapping
self.assertEqual(2, len(instance_mapping))
self.assertEqual('Node_0', instance_mapping[self.INST1_UUID])
self.assertEqual('Node_1', instance_mapping[self.INST2_UUID])
def test_migrate_vm(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
vms = model.get_all_vms()
keys = list(vms.keys())
vm0 = vms[keys[0]]
hyp0 = model.mapping.get_node_from_vm_id(vm0.uuid)
vm1 = vms[keys[1]]
hyp1 = model.mapping.get_node_from_vm_id(vm1.uuid)
def test_migrate_instance(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
instances = model.get_all_instances()
keys = list(instances.keys())
instance0 = instances[keys[0]]
node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
instance1 = instances[keys[1]]
node1 = model.mapping.get_node_from_instance_id(instance1.uuid)
self.assertEqual(False, model.mapping.migrate_vm(vm1, hyp1, hyp1))
self.assertEqual(False, model.mapping.migrate_vm(vm1, hyp0, hyp0))
self.assertEqual(True, model.mapping.migrate_vm(vm1, hyp1, hyp0))
self.assertEqual(True, model.mapping.migrate_vm(vm1, hyp0, hyp1))
self.assertEqual(
False,
model.mapping.migrate_instance(instance1, node1, node1))
self.assertEqual(
False,
model.mapping.migrate_instance(instance1, node0, node0))
self.assertEqual(
True,
model.mapping.migrate_instance(instance1, node1, node0))
self.assertEqual(
True,
model.mapping.migrate_instance(instance1, node0, node1))
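# A minimal sketch, assuming a plain dict-based instance mapping, of the
# semantics the four assertions above exercise: moving an instance onto the
# node it already occupies, or away from a node it does not occupy, is a
# no-op that returns False; only a genuine source-to-destination move
# returns True and updates the mapping.
def migrate_instance(instance_mapping, instance, source, destination):
    if source == destination or instance_mapping.get(instance) != source:
        return False
    instance_mapping[instance] = destination
    return True

toy_mapping = {'INST_2': 'Node_1'}
assert migrate_instance(toy_mapping, 'INST_2', 'Node_1', 'Node_1') is False
assert migrate_instance(toy_mapping, 'INST_2', 'Node_0', 'Node_0') is False
assert migrate_instance(toy_mapping, 'INST_2', 'Node_1', 'Node_0') is True
assert migrate_instance(toy_mapping, 'INST_2', 'Node_0', 'Node_1') is True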
def test_unmap_from_id_log_warning(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
vms = model.get_all_vms()
keys = list(vms.keys())
vm0 = vms[keys[0]]
id = "{0}".format(uuid.uuid4())
hypervisor = modelhyp.Hypervisor()
hypervisor.uuid = id
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
instances = model.get_all_instances()
keys = list(instances.keys())
instance0 = instances[keys[0]]
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.mapping.unmap_from_id(hypervisor.uuid, vm0.uuid)
# self.assertEqual(len(model.mapping.get_node_vms_from_id(
# hypervisor.uuid)), 1)
model.mapping.unmap_from_id(node.uuid, instance0.uuid)
# self.assertEqual(len(model.mapping.get_node_instances_from_id(
# node.uuid)), 1)
def test_unmap_from_id(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
vms = model.get_all_vms()
keys = list(vms.keys())
vm0 = vms[keys[0]]
hyp0 = model.mapping.get_node_from_vm_id(vm0.uuid)
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
instances = model.get_all_instances()
keys = list(instances.keys())
instance0 = instances[keys[0]]
node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
model.mapping.unmap_from_id(hyp0.uuid, vm0.uuid)
self.assertEqual(0, len(model.mapping.get_node_vms_from_id(
hyp0.uuid)))
model.mapping.unmap_from_id(node0.uuid, instance0.uuid)
self.assertEqual(0, len(model.mapping.get_node_instances_from_id(
node0.uuid)))


@ -19,120 +19,107 @@
import uuid
from watcher.common import exception
from watcher.decision_engine.model import hypervisor as hypervisor_model
from watcher.decision_engine.model import hypervisor_state
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \
import faker_cluster_state
class TestModel(base.BaseTestCase):
class TestModel(base.TestCase):
def test_model(self):
fake_cluster = faker_cluster_state.FakerModelCollector()
model = fake_cluster.generate_scenario_1()
self.assertEqual(5, len(model._hypervisors))
self.assertEqual(35, len(model._vms))
self.assertEqual(5, len(model.get_mapping().get_mapping()))
self.assertEqual(5, len(model._nodes))
self.assertEqual(35, len(model._instances))
self.assertEqual(5, len(model.mapping.get_mapping()))
def test_add_hypervisor(self):
def test_add_node(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
self.assertEqual(hypervisor, model.get_hypervisor_from_id(id))
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
self.assertEqual(node, model.get_node_from_id(id_))
def test_delete_hypervisor(self):
def test_delete_node(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
self.assertEqual(hypervisor, model.get_hypervisor_from_id(id))
model.remove_hypervisor(hypervisor)
self.assertRaises(exception.HypervisorNotFound,
model.get_hypervisor_from_id, id)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
self.assertEqual(node, model.get_node_from_id(id_))
model.remove_node(node)
self.assertRaises(exception.ComputeNodeNotFound,
model.get_node_from_id, id_)
def test_get_all_hypervisors(self):
def test_get_all_compute_nodes(self):
model = model_root.ModelRoot()
for i in range(10):
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
all_hypervisors = model.get_all_hypervisors()
for id in all_hypervisors:
hyp = model.get_hypervisor_from_id(id)
model.assert_hypervisor(hyp)
for _ in range(10):
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
all_nodes = model.get_all_compute_nodes()
for id_ in all_nodes:
node = model.get_node_from_id(id_)
model.assert_node(node)
def test_set_get_state_hypervisors(self):
def test_set_get_state_nodes(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
self.assertIsInstance(hypervisor.state,
hypervisor_state.HypervisorState)
self.assertIsInstance(node.state, element.ServiceState)
hyp = model.get_hypervisor_from_id(id)
hyp.state = hypervisor_state.HypervisorState.OFFLINE
self.assertIsInstance(hyp.state, hypervisor_state.HypervisorState)
node = model.get_node_from_id(id_)
node.state = element.ServiceState.OFFLINE
self.assertIsInstance(node.state, element.ServiceState)
# /watcher/decision_engine/framework/model/hypervisor.py
# set_state accept any char chain.
# verification (IsInstance) should be used in the function
# hyp.set_state('blablabla')
# self.assertEqual(hyp.get_state(), 'blablabla')
# self.assertIsInstance(hyp.get_state(), HypervisorState)
# def test_get_all_vms(self):
# model = ModelRoot()
# vms = model.get_all_vms()
# self.assert(len(model._vms))
def test_hypervisor_from_id_raise(self):
def test_node_from_id_raise(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
id2 = "{0}".format(uuid.uuid4())
self.assertRaises(exception.HypervisorNotFound,
model.get_hypervisor_from_id, id2)
self.assertRaises(exception.ComputeNodeNotFound,
model.get_node_from_id, id2)
def test_remove_hypervisor_raise(self):
def test_remove_node_raise(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
id2 = "{0}".format(uuid.uuid4())
hypervisor2 = hypervisor_model.Hypervisor()
hypervisor2.uuid = id2
node2 = element.ComputeNode()
node2.uuid = id2
self.assertRaises(exception.HypervisorNotFound,
model.remove_hypervisor, hypervisor2)
self.assertRaises(exception.ComputeNodeNotFound,
model.remove_node, node2)
def test_assert_hypervisor_raise(self):
def test_assert_node_raise(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
self.assertRaises(exception.IllegalArgumentException,
model.assert_hypervisor, "objet_qcq")
model.assert_node, "objet_qcq")
def test_vm_from_id_raise(self):
def test_instance_from_id_raise(self):
fake_cluster = faker_cluster_state.FakerModelCollector()
model = fake_cluster.generate_scenario_1()
self.assertRaises(exception.InstanceNotFound,
model.get_vm_from_id, "valeur_qcq")
model.get_instance_from_id, "valeur_qcq")
def test_assert_vm_raise(self):
def test_assert_instance_raise(self):
model = model_root.ModelRoot()
self.assertRaises(exception.IllegalArgumentException,
model.assert_vm, "valeur_qcq")
model.assert_instance, "valeur_qcq")


@ -1,32 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from watcher.decision_engine.model import compute_resource
from watcher.tests import base
class TestNamedElement(base.BaseTestCase):
def test_namedelement(self):
id = compute_resource.ComputeResource()
id.uuid = "BLABLABLA"
self.assertEqual("BLABLABLA", id.uuid)
def test_set_get_human_id(self):
id = compute_resource.ComputeResource()
id.human_id = "BLABLABLA"
self.assertEqual("BLABLABLA", id.human_id)


@ -50,7 +50,7 @@ class SolutionFakerSingleHyp(object):
current_state_cluster = faker_cluster_state.FakerModelCollector()
sercon = strategies.BasicConsolidation(config=mock.Mock())
sercon._compute_model = (
current_state_cluster.generate_scenario_3_with_2_hypervisors())
current_state_cluster.generate_scenario_3_with_2_nodes())
sercon.ceilometer = mock.MagicMock(
get_statistics=metrics.mock_get_statistics)
@ -66,8 +66,8 @@ class TestActionScheduling(base.DbTestCase):
goal=mock.Mock(), strategy=mock.Mock())
parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"source_node": "server1",
"destination_node": "server2",
}
solution.add_action(action_type="migrate",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
@ -93,8 +93,8 @@ class TestActionScheduling(base.DbTestCase):
goal=mock.Mock(), strategy=mock.Mock())
parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"source_node": "server1",
"destination_node": "server2",
}
solution.add_action(action_type="migrate",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
@ -125,8 +125,8 @@ class TestActionScheduling(base.DbTestCase):
goal=mock.Mock(), strategy=mock.Mock())
parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"src_uuid_node": "server1",
"dst_uuid_node": "server2",
}
solution.add_action(action_type="migrate",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",


@ -20,13 +20,14 @@ from watcher.decision_engine.solution import default
from watcher.tests import base
class TestDefaultSolution(base.BaseTestCase):
class TestDefaultSolution(base.TestCase):
def test_default_solution(self):
solution = default.DefaultSolution(
goal=mock.Mock(), strategy=mock.Mock())
parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"source_node": "server1",
"destination_node": "server2",
}
solution.add_action(action_type="nop",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
@ -34,8 +35,8 @@ class TestDefaultSolution(base.BaseTestCase):
self.assertEqual(1, len(solution.actions))
expected_action_type = "nop"
expected_parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"source_node": "server1",
"destination_node": "server2",
"resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36"
}
self.assertEqual(expected_action_type,


@ -20,11 +20,8 @@
import mock
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import hypervisor
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root as modelroot
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm as modelvm
from watcher.decision_engine.model import vm_state
class FakerModelCollector(base.BaseClusterDataModelCollector):
@ -38,17 +35,17 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
return self.generate_scenario_1()
def generate_scenario_1(self):
"""Simulates cluster with 2 hypervisors and 2 VMs using 1:1 mapping"""
"""Simulates cluster with 2 nodes and 2 instances using 1:1 mapping"""
current_state_cluster = modelroot.ModelRoot()
count_node = 2
count_vm = 2
count_instance = 2
mem = resource.Resource(resource.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
mem = element.Resource(element.ResourceType.memory)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
disk_capacity =\
resource.Resource(resource.ResourceType.disk_capacity)
element.Resource(element.ResourceType.disk_capacity)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
@ -57,7 +54,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
node.state = 'enabled'
@ -65,43 +62,43 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
mem.set_capacity(node, 64)
disk_capacity.set_capacity(node, 250)
num_cores.set_capacity(node, 40)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(0, count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
vm.state = vm_state.VMState.ACTIVE
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 20)
num_cores.set_capacity(vm, 10)
current_state_cluster.add_vm(vm)
for i in range(0, count_instance):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
instance.state = element.InstanceState.ACTIVE.value
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 20)
num_cores.set_capacity(instance, 10)
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_0"))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_0"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_1"),
current_state_cluster.get_vm_from_id("VM_1"))
current_state_cluster.get_node_from_id("Node_1"),
current_state_cluster.get_instance_from_id("INSTANCE_1"))
return current_state_cluster
def generate_scenario_2(self):
"""Simulates a cluster
With 4 hypervisors and 6 VMs all mapped to one hypervisor
With 4 nodes and 6 instances all mapped to a single node
"""
current_state_cluster = modelroot.ModelRoot()
count_node = 4
count_vm = 6
count_instance = 6
mem = resource.Resource(resource.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
mem = element.Resource(element.ResourceType.memory)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
disk_capacity =\
resource.Resource(resource.ResourceType.disk_capacity)
element.Resource(element.ResourceType.disk_capacity)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
@ -110,7 +107,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
node.state = 'up'
@ -118,39 +115,39 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
mem.set_capacity(node, 64)
disk_capacity.set_capacity(node, 250)
num_cores.set_capacity(node, 16)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(0, count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
vm.state = vm_state.VMState.ACTIVE
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 20)
num_cores.set_capacity(vm, 10)
current_state_cluster.add_vm(vm)
for i in range(0, count_instance):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
instance.state = element.InstanceState.ACTIVE.value
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 20)
num_cores.set_capacity(instance, 10)
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_%s" % str(i)))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
return current_state_cluster
def generate_scenario_3(self):
"""Simulates a cluster
With 4 hypervisors and 6 VMs all mapped to one hypervisor
With 2 nodes and 4 instances all mapped to one node
"""
current_state_cluster = modelroot.ModelRoot()
count_node = 2
count_vm = 4
count_instance = 4
mem = resource.Resource(resource.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
mem = element.Resource(element.ResourceType.memory)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
disk_capacity =\
resource.Resource(resource.ResourceType.disk_capacity)
element.Resource(element.ResourceType.disk_capacity)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
@ -159,7 +156,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
node.state = 'up'
@ -167,21 +164,21 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
mem.set_capacity(node, 64)
disk_capacity.set_capacity(node, 250)
num_cores.set_capacity(node, 10)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(6, 6 + count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
vm.state = vm_state.VMState.ACTIVE
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 20)
num_cores.set_capacity(vm, 2 ** (i-6))
current_state_cluster.add_vm(vm)
for i in range(6, 6 + count_instance):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
instance.state = element.InstanceState.ACTIVE.value
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 20)
num_cores.set_capacity(instance, 2 ** (i-6))
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_%s" % str(i)))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
return current_state_cluster
@ -193,76 +190,77 @@ class FakeCeilometerMetrics(object):
def mock_get_statistics(self, resource_id, meter_name, period=3600,
aggregate='avg'):
if meter_name == "compute.node.cpu.percent":
return self.get_hypervisor_cpu_util(resource_id)
return self.get_node_cpu_util(resource_id)
elif meter_name == "cpu_util":
return self.get_vm_cpu_util(resource_id)
return self.get_instance_cpu_util(resource_id)
elif meter_name == "memory.usage":
return self.get_vm_ram_util(resource_id)
return self.get_instance_ram_util(resource_id)
elif meter_name == "disk.root.size":
return self.get_vm_disk_root_size(resource_id)
return self.get_instance_disk_root_size(resource_id)
def get_hypervisor_cpu_util(self, r_id):
"""Calculates hypervisor utilization dynamicaly.
def get_node_cpu_util(self, r_id):
"""Calculates node utilization dynamicaly.
Hypervisor CPU utilization should consider
and corelate with actual VM-hypervisor mappings
node CPU utilization should consider
and corelate with actual instance-node mappings
provided within a cluster model.
Returns relative hypervisor CPU utilization <0, 100>.
Returns relative node CPU utilization <0, 100>.
:param r_id: resource id
"""
id = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
vms = self.model.get_mapping().get_node_vms_from_id(id)
instances = self.model.get_mapping().get_node_instances_from_id(id)
util_sum = 0.0
hypervisor_cpu_cores = self.model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity_from_id(id)
for vm_uuid in vms:
vm_cpu_cores = self.model.get_resource_from_id(
resource.ResourceType.cpu_cores).\
get_capacity(self.model.get_vm_from_id(vm_uuid))
total_cpu_util = vm_cpu_cores * self.get_vm_cpu_util(vm_uuid)
node_cpu_cores = self.model.get_resource_from_id(
element.ResourceType.cpu_cores).get_capacity_from_id(id)
for instance_uuid in instances:
instance_cpu_cores = self.model.get_resource_from_id(
element.ResourceType.cpu_cores).\
get_capacity(self.model.get_instance_from_id(instance_uuid))
total_cpu_util = instance_cpu_cores * self.get_instance_cpu_util(
instance_uuid)
util_sum += total_cpu_util / 100.0
util_sum /= hypervisor_cpu_cores
util_sum /= node_cpu_cores
return util_sum * 100.0
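# Worked example with the numbers from scenario 1 above (illustration
# only): Node_0 has 40 cores and hosts INSTANCE_0, whose 10 cores run at
# 10% utilization.
node_cpu_cores = 40
instance_cpu_cores = 10
instance_cpu_util = 10.0  # percent, per get_instance_cpu_util('INSTANCE_0')
util_sum = (instance_cpu_cores * instance_cpu_util / 100.0) / node_cpu_cores
assert abs(util_sum * 100.0 - 2.5) < 1e-12  # 2.5% relative node CPU util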
def get_vm_cpu_util(self, r_id):
vm_cpu_util = dict()
vm_cpu_util['VM_0'] = 10
vm_cpu_util['VM_1'] = 30
vm_cpu_util['VM_2'] = 60
vm_cpu_util['VM_3'] = 20
vm_cpu_util['VM_4'] = 40
vm_cpu_util['VM_5'] = 50
vm_cpu_util['VM_6'] = 100
vm_cpu_util['VM_7'] = 100
vm_cpu_util['VM_8'] = 100
vm_cpu_util['VM_9'] = 100
return vm_cpu_util[str(r_id)]
def get_instance_cpu_util(self, r_id):
instance_cpu_util = dict()
instance_cpu_util['INSTANCE_0'] = 10
instance_cpu_util['INSTANCE_1'] = 30
instance_cpu_util['INSTANCE_2'] = 60
instance_cpu_util['INSTANCE_3'] = 20
instance_cpu_util['INSTANCE_4'] = 40
instance_cpu_util['INSTANCE_5'] = 50
instance_cpu_util['INSTANCE_6'] = 100
instance_cpu_util['INSTANCE_7'] = 100
instance_cpu_util['INSTANCE_8'] = 100
instance_cpu_util['INSTANCE_9'] = 100
return instance_cpu_util[str(r_id)]
def get_vm_ram_util(self, r_id):
vm_ram_util = dict()
vm_ram_util['VM_0'] = 1
vm_ram_util['VM_1'] = 2
vm_ram_util['VM_2'] = 4
vm_ram_util['VM_3'] = 8
vm_ram_util['VM_4'] = 3
vm_ram_util['VM_5'] = 2
vm_ram_util['VM_6'] = 1
vm_ram_util['VM_7'] = 2
vm_ram_util['VM_8'] = 4
vm_ram_util['VM_9'] = 8
return vm_ram_util[str(r_id)]
def get_instance_ram_util(self, r_id):
instance_ram_util = dict()
instance_ram_util['INSTANCE_0'] = 1
instance_ram_util['INSTANCE_1'] = 2
instance_ram_util['INSTANCE_2'] = 4
instance_ram_util['INSTANCE_3'] = 8
instance_ram_util['INSTANCE_4'] = 3
instance_ram_util['INSTANCE_5'] = 2
instance_ram_util['INSTANCE_6'] = 1
instance_ram_util['INSTANCE_7'] = 2
instance_ram_util['INSTANCE_8'] = 4
instance_ram_util['INSTANCE_9'] = 8
return instance_ram_util[str(r_id)]
def get_vm_disk_root_size(self, r_id):
vm_disk_util = dict()
vm_disk_util['VM_0'] = 10
vm_disk_util['VM_1'] = 15
vm_disk_util['VM_2'] = 30
vm_disk_util['VM_3'] = 35
vm_disk_util['VM_4'] = 20
vm_disk_util['VM_5'] = 25
vm_disk_util['VM_6'] = 25
vm_disk_util['VM_7'] = 25
vm_disk_util['VM_8'] = 25
vm_disk_util['VM_9'] = 25
return vm_disk_util[str(r_id)]
def get_instance_disk_root_size(self, r_id):
instance_disk_util = dict()
instance_disk_util['INSTANCE_0'] = 10
instance_disk_util['INSTANCE_1'] = 15
instance_disk_util['INSTANCE_2'] = 30
instance_disk_util['INSTANCE_3'] = 35
instance_disk_util['INSTANCE_4'] = 20
instance_disk_util['INSTANCE_5'] = 25
instance_disk_util['INSTANCE_6'] = 25
instance_disk_util['INSTANCE_7'] = 25
instance_disk_util['INSTANCE_8'] = 25
instance_disk_util['INSTANCE_9'] = 25
return instance_disk_util[str(r_id)]


@ -19,10 +19,8 @@
import mock
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import hypervisor
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root as modelroot
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm as modelvm
class FakerModelCollector(base.BaseClusterDataModelCollector):
@ -36,292 +34,292 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
return self.generate_scenario_1()
def generate_scenario_1(self):
vms = []
instances = []
current_state_cluster = modelroot.ModelRoot()
# number of nodes
count_node = 5
# max number of VMs per node
node_count_vm = 7
node_count = 5
# max number of instances per node
node_instance_count = 7
# total number of instances
count_vm = (count_node * node_count_vm)
instance_count = (node_count * node_instance_count)
# define resources (CPU, MEM, disk, ...)
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 MHz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
current_state_cluster.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132)
disk.set_capacity(node, 250)
num_cores.set_capacity(node, 40)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(0, count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 20)
num_cores.set_capacity(vm, 10)
vms.append(vm)
current_state_cluster.add_vm(vm)
for i in range(0, instance_count):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 20)
num_cores.set_capacity(instance, 10)
instances.append(instance)
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_0"))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_0"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_1"))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_1"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_1"),
current_state_cluster.get_vm_from_id("VM_2"))
current_state_cluster.get_node_from_id("Node_1"),
current_state_cluster.get_instance_from_id("INSTANCE_2"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_2"),
current_state_cluster.get_vm_from_id("VM_3"))
current_state_cluster.get_node_from_id("Node_2"),
current_state_cluster.get_instance_from_id("INSTANCE_3"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_2"),
current_state_cluster.get_vm_from_id("VM_4"))
current_state_cluster.get_node_from_id("Node_2"),
current_state_cluster.get_instance_from_id("INSTANCE_4"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_2"),
current_state_cluster.get_vm_from_id("VM_5"))
current_state_cluster.get_node_from_id("Node_2"),
current_state_cluster.get_instance_from_id("INSTANCE_5"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_3"),
current_state_cluster.get_vm_from_id("VM_6"))
current_state_cluster.get_node_from_id("Node_3"),
current_state_cluster.get_instance_from_id("INSTANCE_6"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_4"),
current_state_cluster.get_vm_from_id("VM_7"))
current_state_cluster.get_node_from_id("Node_4"),
current_state_cluster.get_instance_from_id("INSTANCE_7"))
return current_state_cluster
def map(self, model, h_id, vm_id):
def map(self, model, h_id, instance_id):
model.get_mapping().map(
model.get_hypervisor_from_id(h_id),
model.get_vm_from_id(vm_id))
model.get_node_from_id(h_id),
model.get_instance_from_id(instance_id))
def generate_scenario_3_with_2_hypervisors(self):
vms = []
def generate_scenario_3_with_2_nodes(self):
instances = []
root = modelroot.ModelRoot()
# number of nodes
count_node = 2
node_count = 2
# define resources (CPU, MEM, disk, ...)
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 MHz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
root.create_resource(mem)
root.create_resource(num_cores)
root.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132)
disk.set_capacity(node, 250)
num_cores.set_capacity(node, 40)
root.add_hypervisor(node)
root.add_node(node)
vm1 = modelvm.VM()
vm1.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(vm1, 2)
disk.set_capacity(vm1, 20)
num_cores.set_capacity(vm1, 10)
vms.append(vm1)
root.add_vm(vm1)
instance1 = element.Instance()
instance1.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(instance1, 2)
disk.set_capacity(instance1, 20)
num_cores.set_capacity(instance1, 10)
instances.append(instance1)
root.add_instance(instance1)
vm2 = modelvm.VM()
vm2.uuid = "a4cab39b-9828-413a-bf88-f76921bf1517"
mem.set_capacity(vm2, 2)
disk.set_capacity(vm2, 20)
num_cores.set_capacity(vm2, 10)
vms.append(vm2)
root.add_vm(vm2)
instance2 = element.Instance()
instance2.uuid = "a4cab39b-9828-413a-bf88-f76921bf1517"
mem.set_capacity(instance2, 2)
disk.set_capacity(instance2, 20)
num_cores.set_capacity(instance2, 10)
instances.append(instance2)
root.add_instance(instance2)
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm1.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance1.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm2.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance2.uuid)))
return root
def generate_scenario_4_with_1_hypervisor_no_vm(self):
def generate_scenario_4_with_1_node_no_instance(self):
current_state_cluster = modelroot.ModelRoot()
# number of nodes
count_node = 1
node_count = 1
# define resources (CPU, MEM, disk, ...)
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 MHz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
current_state_cluster.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 1)
disk.set_capacity(node, 1)
num_cores.set_capacity(node, 1)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
return current_state_cluster
def generate_scenario_5_with_vm_disk_0(self):
vms = []
def generate_scenario_5_with_instance_disk_0(self):
instances = []
current_state_cluster = modelroot.ModelRoot()
# number of nodes
count_node = 1
# number of vms
count_vm = 1
node_count = 1
# number of instances
instance_count = 1
# define resources (CPU, MEM, disk, ...)
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 MHz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
current_state_cluster.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 4)
disk.set_capacity(node, 4)
num_cores.set_capacity(node, 4)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(0, count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 0)
num_cores.set_capacity(vm, 4)
vms.append(vm)
current_state_cluster.add_vm(vm)
for i in range(0, instance_count):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 0)
num_cores.set_capacity(instance, 4)
instances.append(instance)
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_0"))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_0"))
return current_state_cluster
def generate_scenario_6_with_2_hypervisors(self):
vms = []
def generate_scenario_6_with_2_nodes(self):
instances = []
root = modelroot.ModelRoot()
# number of nodes
count_node = 2
node_count = 2
# define resources (CPU, MEM, disk, ...)
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 MHz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
root.create_resource(mem)
root.create_resource(num_cores)
root.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132)
disk.set_capacity(node, 250)
num_cores.set_capacity(node, 40)
root.add_hypervisor(node)
root.add_node(node)
vm1 = modelvm.VM()
vm1.uuid = "VM_1"
mem.set_capacity(vm1, 2)
disk.set_capacity(vm1, 20)
num_cores.set_capacity(vm1, 10)
vms.append(vm1)
root.add_vm(vm1)
instance1 = element.Instance()
instance1.uuid = "INSTANCE_1"
mem.set_capacity(instance1, 2)
disk.set_capacity(instance1, 20)
num_cores.set_capacity(instance1, 10)
instances.append(instance1)
root.add_instance(instance1)
vm11 = modelvm.VM()
vm11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(vm11, 2)
disk.set_capacity(vm11, 20)
num_cores.set_capacity(vm11, 10)
vms.append(vm11)
root.add_vm(vm11)
instance11 = element.Instance()
instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(instance11, 2)
disk.set_capacity(instance11, 20)
num_cores.set_capacity(instance11, 10)
instances.append(instance11)
root.add_instance(instance11)
vm2 = modelvm.VM()
vm2.uuid = "VM_3"
mem.set_capacity(vm2, 2)
disk.set_capacity(vm2, 20)
num_cores.set_capacity(vm2, 10)
vms.append(vm2)
root.add_vm(vm2)
instance2 = element.Instance()
instance2.uuid = "INSTANCE_3"
mem.set_capacity(instance2, 2)
disk.set_capacity(instance2, 20)
num_cores.set_capacity(instance2, 10)
instances.append(instance2)
root.add_instance(instance2)
vm21 = modelvm.VM()
vm21.uuid = "VM_4"
mem.set_capacity(vm21, 2)
disk.set_capacity(vm21, 20)
num_cores.set_capacity(vm21, 10)
vms.append(vm21)
root.add_vm(vm21)
instance21 = element.Instance()
instance21.uuid = "INSTANCE_4"
mem.set_capacity(instance21, 2)
disk.set_capacity(instance21, 20)
num_cores.set_capacity(instance21, 10)
instances.append(instance21)
root.add_instance(instance21)
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm1.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm11.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance1.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance11.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm2.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm21.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance2.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance21.uuid)))
return root
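# Resulting scenario-6 topology, for reference: Node_0 hosts INSTANCE_1
# and 73b09e16-35b7-4922-804e-e8f5d9b740fc; Node_1 hosts INSTANCE_3 and
# INSTANCE_4 (see the mapping calls above).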
def generate_scenario_7_with_2_hypervisors(self):
vms = []
def generate_scenario_7_with_2_nodes(self):
instances = []
root = modelroot.ModelRoot()
# number of nodes
count_node = 2
# define resources (CPU, MEM, disk, ...)
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 MHz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
root.create_resource(mem)
root.create_resource(num_cores)
@ -329,54 +327,54 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132)
disk.set_capacity(node, 250)
num_cores.set_capacity(node, 50)
root.add_hypervisor(node)
root.add_node(node)
vm1 = modelvm.VM()
vm1.uuid = "cae81432-1631-4d4e-b29c-6f3acdcde906"
mem.set_capacity(vm1, 2)
disk.set_capacity(vm1, 20)
num_cores.set_capacity(vm1, 15)
vms.append(vm1)
root.add_vm(vm1)
instance1 = element.Instance()
instance1.uuid = "cae81432-1631-4d4e-b29c-6f3acdcde906"
mem.set_capacity(instance1, 2)
disk.set_capacity(instance1, 20)
num_cores.set_capacity(instance1, 15)
instances.append(instance1)
root.add_instance(instance1)
vm11 = modelvm.VM()
vm11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(vm11, 2)
disk.set_capacity(vm11, 20)
num_cores.set_capacity(vm11, 10)
vms.append(vm11)
root.add_vm(vm11)
instance11 = element.Instance()
instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(instance11, 2)
disk.set_capacity(instance11, 20)
num_cores.set_capacity(instance11, 10)
instances.append(instance11)
root.add_instance(instance11)
vm2 = modelvm.VM()
vm2.uuid = "VM_3"
mem.set_capacity(vm2, 2)
disk.set_capacity(vm2, 20)
num_cores.set_capacity(vm2, 10)
vms.append(vm2)
root.add_vm(vm2)
instance2 = element.Instance()
instance2.uuid = "INSTANCE_3"
mem.set_capacity(instance2, 2)
disk.set_capacity(instance2, 20)
num_cores.set_capacity(instance2, 10)
instances.append(instance2)
root.add_instance(instance2)
vm21 = modelvm.VM()
vm21.uuid = "VM_4"
mem.set_capacity(vm21, 2)
disk.set_capacity(vm21, 20)
num_cores.set_capacity(vm21, 10)
vms.append(vm21)
root.add_vm(vm21)
instance21 = element.Instance()
instance21.uuid = "INSTANCE_4"
mem.set_capacity(instance21, 2)
disk.set_capacity(instance21, 20)
num_cores.set_capacity(instance21, 10)
instances.append(instance21)
root.add_instance(instance21)
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm1.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm11.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance1.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance11.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm2.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm21.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance2.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance21.uuid)))
return root

View File

@ -15,7 +15,6 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
@ -37,9 +36,9 @@ class FakerMetricsCollector(object):
elif meter_name == "hardware.memory.used":
result = self.get_usage_node_ram(resource_id)
elif meter_name == "cpu_util":
result = self.get_average_usage_vm_cpu(resource_id)
result = self.get_average_usage_instance_cpu(resource_id)
elif meter_name == "memory.resident":
result = self.get_average_usage_vm_memory(resource_id)
result = self.get_average_usage_instance_memory(resource_id)
elif meter_name == "hardware.ipmi.node.outlet_temperature":
result = self.get_average_outlet_temperature(resource_id)
elif meter_name == "hardware.ipmi.node.airflow":
@ -54,7 +53,7 @@ class FakerMetricsCollector(object):
aggregate='avg'):
result = 0
if meter_name == "cpu_util":
result = self.get_average_usage_vm_cpu_wb(resource_id)
result = self.get_average_usage_instance_cpu_wb(resource_id)
return result
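# A short wiring sketch, mirroring how the strategy tests below inject
# this fake collector (mock.MagicMock and the ceilometer attribute name
# are taken from those tests):
#
#   fake_metrics = FakerMetricsCollector()
#   strategy.ceilometer = mock.MagicMock(
#       statistic_aggregation=fake_metrics.mock_get_statistics_wb)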
def get_average_outlet_temperature(self, uuid):
@ -135,7 +134,7 @@ class FakerMetricsCollector(object):
mock['Node_6_hostname_6'] = 8
mock['Node_19_hostname_19'] = 10
# node 4
mock['VM_7_hostname_7'] = 4
mock['INSTANCE_7_hostname_7'] = 4
mock['Node_0'] = 0.07
mock['Node_1'] = 0.05
@ -149,7 +148,7 @@ class FakerMetricsCollector(object):
return float(mock[str(uuid)])
def get_average_usage_vm_cpu_wb(self, uuid):
def get_average_usage_instance_cpu_wb(self, uuid):
"""The last VM CPU usage values to average
:param uuid:00
@ -162,14 +161,14 @@ class FakerMetricsCollector(object):
# Normalize
mock = {}
# node 0
mock['VM_1'] = 80
mock['INSTANCE_1'] = 80
mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50
# node 1
mock['VM_3'] = 20
mock['VM_4'] = 10
mock['INSTANCE_3'] = 20
mock['INSTANCE_4'] = 10
return float(mock[str(uuid)])
def get_average_usage_vm_cpu(self, uuid):
def get_average_usage_instance_cpu(self, uuid):
"""The last VM CPU usage values to average
:param uuid:00
@ -182,66 +181,66 @@ class FakerMetricsCollector(object):
# Normalize
mock = {}
# node 0
mock['VM_0'] = 7
mock['VM_1'] = 7
mock['INSTANCE_0'] = 7
mock['INSTANCE_1'] = 7
# node 1
mock['VM_2'] = 10
mock['INSTANCE_2'] = 10
# node 2
mock['VM_3'] = 5
mock['VM_4'] = 5
mock['VM_5'] = 10
mock['INSTANCE_3'] = 5
mock['INSTANCE_4'] = 5
mock['INSTANCE_5'] = 10
# node 3
mock['VM_6'] = 8
mock['INSTANCE_6'] = 8
# node 4
mock['VM_7'] = 4
mock['INSTANCE_7'] = 4
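# Any uuid missing from the fixture falls back to a fixed default below.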
if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4)
mock[uuid] = 8
return mock[str(uuid)]
def get_average_usage_vm_memory(self, uuid):
def get_average_usage_instance_memory(self, uuid):
mock = {}
# node 0
mock['VM_0'] = 2
mock['VM_1'] = 5
mock['INSTANCE_0'] = 2
mock['INSTANCE_1'] = 5
# node 1
mock['VM_2'] = 5
mock['INSTANCE_2'] = 5
# node 2
mock['VM_3'] = 8
mock['VM_4'] = 5
mock['VM_5'] = 16
mock['INSTANCE_3'] = 8
mock['INSTANCE_4'] = 5
mock['INSTANCE_5'] = 16
# node 3
mock['VM_6'] = 8
mock['INSTANCE_6'] = 8
# node 4
mock['VM_7'] = 4
mock['INSTANCE_7'] = 4
if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4)
mock[uuid] = 10
return mock[str(uuid)]
def get_average_usage_vm_disk(self, uuid):
def get_average_usage_instance_disk(self, uuid):
mock = {}
# node 0
mock['VM_0'] = 2
mock['VM_1'] = 2
mock['INSTANCE_0'] = 2
mock['INSTANCE_1'] = 2
# node 1
mock['VM_2'] = 2
mock['INSTANCE_2'] = 2
# node 2
mock['VM_3'] = 10
mock['VM_4'] = 15
mock['VM_5'] = 20
mock['INSTANCE_3'] = 10
mock['INSTANCE_4'] = 15
mock['INSTANCE_5'] = 20
# node 3
mock['VM_6'] = 8
mock['INSTANCE_6'] = 8
# node 4
mock['VM_7'] = 4
mock['INSTANCE_7'] = 4
if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4)
@ -249,7 +248,7 @@ class FakerMetricsCollector(object):
return mock[str(uuid)]
def get_virtual_machine_capacity(self, vm_uuid):
def get_virtual_machine_capacity(self, instance_uuid):
return random.randint(1, 4)
def get_average_network_incomming(self, node):

View File

@ -30,7 +30,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestBasicConsolidation(base.BaseTestCase):
class TestBasicConsolidation(base.TestCase):
def setUp(self):
super(TestBasicConsolidation, self).setUp()
@ -58,63 +58,75 @@ class TestBasicConsolidation(base.BaseTestCase):
def test_cluster_size(self):
size_cluster = len(
self.fake_cluster.generate_scenario_1().get_all_hypervisors())
self.fake_cluster.generate_scenario_1().get_all_compute_nodes())
size_cluster_assert = 5
self.assertEqual(size_cluster_assert, size_cluster)
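# Note: get_all_compute_nodes() returns a mapping keyed by node uuid (the
# tests below index it via list(all_nodes.keys())), so len() gives the
# number of nodes in the fake cluster.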
def test_basic_consolidation_score_hypervisor(self):
def test_basic_consolidation_score_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
node_1_score = 0.023333333333333317
self.assertEqual(node_1_score, self.strategy.calculate_score_node(
model.get_hypervisor_from_id("Node_1")))
model.get_node_from_id("Node_1")))
node_2_score = 0.26666666666666666
self.assertEqual(node_2_score, self.strategy.calculate_score_node(
model.get_hypervisor_from_id("Node_2")))
model.get_node_from_id("Node_2")))
node_0_score = 0.023333333333333317
self.assertEqual(node_0_score, self.strategy.calculate_score_node(
model.get_hypervisor_from_id("Node_0")))
model.get_node_from_id("Node_0")))
def test_basic_consolidation_score_vm(self):
def test_basic_consolidation_score_instance(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
vm_0 = model.get_vm_from_id("VM_0")
vm_0_score = 0.023333333333333317
self.assertEqual(vm_0_score, self.strategy.calculate_score_vm(vm_0))
instance_0 = model.get_instance_from_id("INSTANCE_0")
instance_0_score = 0.023333333333333317
self.assertEqual(
instance_0_score,
self.strategy.calculate_score_instance(instance_0))
vm_1 = model.get_vm_from_id("VM_1")
vm_1_score = 0.023333333333333317
self.assertEqual(vm_1_score, self.strategy.calculate_score_vm(vm_1))
vm_2 = model.get_vm_from_id("VM_2")
vm_2_score = 0.033333333333333326
self.assertEqual(vm_2_score, self.strategy.calculate_score_vm(vm_2))
vm_6 = model.get_vm_from_id("VM_6")
vm_6_score = 0.02666666666666669
self.assertEqual(vm_6_score, self.strategy.calculate_score_vm(vm_6))
vm_7 = model.get_vm_from_id("VM_7")
vm_7_score = 0.013333333333333345
self.assertEqual(vm_7_score, self.strategy.calculate_score_vm(vm_7))
instance_1 = model.get_instance_from_id("INSTANCE_1")
instance_1_score = 0.023333333333333317
self.assertEqual(
instance_1_score,
self.strategy.calculate_score_instance(instance_1))
instance_2 = model.get_instance_from_id("INSTANCE_2")
instance_2_score = 0.033333333333333326
self.assertEqual(
instance_2_score,
self.strategy.calculate_score_instance(instance_2))
instance_6 = model.get_instance_from_id("INSTANCE_6")
instance_6_score = 0.02666666666666669
self.assertEqual(
instance_6_score,
self.strategy.calculate_score_instance(instance_6))
instance_7 = model.get_instance_from_id("INSTANCE_7")
instance_7_score = 0.013333333333333345
self.assertEqual(
instance_7_score,
self.strategy.calculate_score_instance(instance_7))
def test_basic_consolidation_score_vm_disk(self):
model = self.fake_cluster.generate_scenario_5_with_vm_disk_0()
def test_basic_consolidation_score_instance_disk(self):
model = self.fake_cluster.generate_scenario_5_with_instance_disk_0()
self.m_model.return_value = model
vm_0 = model.get_vm_from_id("VM_0")
vm_0_score = 0.023333333333333355
self.assertEqual(vm_0_score, self.strategy.calculate_score_vm(vm_0, ))
instance_0 = model.get_instance_from_id("INSTANCE_0")
instance_0_score = 0.023333333333333355
self.assertEqual(
instance_0_score,
self.strategy.calculate_score_instance(instance_0, ))
def test_basic_consolidation_weight(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
vm_0 = model.get_vm_from_id("VM_0")
instance_0 = model.get_instance_from_id("INSTANCE_0")
cores = 16
# 80 GB
disk = 80
# mem 8 GB
mem = 8
vm_0_weight_assert = 3.1999999999999997
instance_0_weight_assert = 3.1999999999999997
self.assertEqual(
vm_0_weight_assert,
self.strategy.calculate_weight(vm_0, cores, disk, mem))
instance_0_weight_assert,
self.strategy.calculate_weight(instance_0, cores, disk, mem))
def test_calculate_migration_efficacy(self):
self.strategy.calculate_migration_efficacy()
@ -130,28 +142,28 @@ class TestBasicConsolidation(base.BaseTestCase):
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_check_migration(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
all_vms = model.get_all_vms()
all_hyps = model.get_all_hypervisors()
vm0 = all_vms[list(all_vms.keys())[0]]
hyp0 = all_hyps[list(all_hyps.keys())[0]]
all_instances = model.get_all_instances()
all_nodes = model.get_all_compute_nodes()
instance0 = all_instances[list(all_instances.keys())[0]]
node0 = all_nodes[list(all_nodes.keys())[0]]
self.strategy.check_migration(hyp0, hyp0, vm0)
self.strategy.check_migration(node0, node0, instance0)
def test_threshold(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
all_hyps = model.get_all_hypervisors()
hyp0 = all_hyps[list(all_hyps.keys())[0]]
all_nodes = model.get_all_compute_nodes()
node0 = all_nodes[list(all_nodes.keys())[0]]
self.assertFalse(self.strategy.check_threshold(
hyp0, 1000, 1000, 1000))
node0, 1000, 1000, 1000))
def test_basic_consolidation_migration(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
@ -163,10 +175,10 @@ class TestBasicConsolidation(base.BaseTestCase):
expected_power_state = 0
num_migrations = actions_counter.get("migrate", 0)
num_hypervisor_state_change = actions_counter.get(
"change_hypervisor_state", 0)
num_node_state_change = actions_counter.get(
"change_node_state", 0)
self.assertEqual(expected_num_migrations, num_migrations)
self.assertEqual(expected_power_state, num_hypervisor_state_change)
self.assertEqual(expected_power_state, num_node_state_change)
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()
@ -180,7 +192,7 @@ class TestBasicConsolidation(base.BaseTestCase):
def test_execute_no_workload(self):
model = (
self.fake_cluster
.generate_scenario_4_with_1_hypervisor_no_vm())
.generate_scenario_4_with_1_node_no_instance())
self.m_model.return_value = model
with mock.patch.object(
@ -191,7 +203,7 @@ class TestBasicConsolidation(base.BaseTestCase):
self.assertEqual(0, solution.efficacy.global_efficacy.value)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()

View File

@ -52,7 +52,7 @@ class TestDummyStrategy(base.TestCase):
self.assertEqual(3, len(solution.actions))
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
self.strategy.input_parameters = utils.Struct()
self.strategy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'})

View File

@ -22,8 +22,8 @@ import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \
@ -32,7 +32,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestOutletTempControl(base.BaseTestCase):
class TestOutletTempControl(base.TestCase):
def setUp(self):
super(TestOutletTempControl, self).setUp()
@ -63,41 +63,41 @@ class TestOutletTempControl(base.BaseTestCase):
self.strategy.threshold = 34.3
def test_calc_used_res(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
hypervisor = model.get_hypervisor_from_id('Node_0')
cap_cores = model.get_resource_from_id(resource.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(resource.ResourceType.memory)
cap_disk = model.get_resource_from_id(resource.ResourceType.disk)
node = model.get_node_from_id('Node_0')
cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(element.ResourceType.memory)
cap_disk = model.get_resource_from_id(element.ResourceType.disk)
cores_used, mem_used, disk_used = self.strategy.calc_used_res(
hypervisor, cap_cores, cap_mem, cap_disk)
node, cap_cores, cap_mem, cap_disk)
self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used))
def test_group_hosts_by_outlet_temp(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
h1, h2 = self.strategy.group_hosts_by_outlet_temp()
self.assertEqual('Node_1', h1[0]['hv'].uuid)
self.assertEqual('Node_0', h2[0]['hv'].uuid)
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
self.assertEqual('Node_1', n1[0]['node'].uuid)
self.assertEqual('Node_0', n2[0]['node'].uuid)
def test_choose_vm_to_migrate(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
h1, h2 = self.strategy.group_hosts_by_outlet_temp()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
self.assertEqual('Node_1', vm_to_mig[0].uuid)
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual('Node_1', instance_to_mig[0].uuid)
self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517',
vm_to_mig[1].uuid)
instance_to_mig[1].uuid)
def test_filter_dest_servers(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
h1, h2 = self.strategy.group_hosts_by_outlet_temp()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
dest_hosts = self.strategy.filter_dest_servers(h2, vm_to_mig[1])
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1])
self.assertEqual(1, len(dest_hosts))
self.assertEqual('Node_0', dest_hosts[0]['hv'].uuid)
self.assertEqual('Node_0', dest_hosts[0]['node'].uuid)
def test_exception_model(self):
self.m_model.return_value = None
@ -123,14 +123,14 @@ class TestOutletTempControl(base.BaseTestCase):
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self):
model = self.fake_cluster.generate_scenario_4_with_1_hypervisor_no_vm()
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
def test_execute(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
@ -140,7 +140,7 @@ class TestOutletTempControl(base.BaseTestCase):
self.assertEqual(1, num_migrations)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()

View File

@ -22,8 +22,8 @@ import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \
@ -32,7 +32,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestUniformAirflow(base.BaseTestCase):
class TestUniformAirflow(base.TestCase):
def setUp(self):
super(TestUniformAirflow, self).setUp()
@ -68,72 +68,73 @@ class TestUniformAirflow(base.BaseTestCase):
self._period = 300
def test_calc_used_res(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
hypervisor = model.get_hypervisor_from_id('Node_0')
cap_cores = model.get_resource_from_id(resource.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(resource.ResourceType.memory)
cap_disk = model.get_resource_from_id(resource.ResourceType.disk)
node = model.get_node_from_id('Node_0')
cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(element.ResourceType.memory)
cap_disk = model.get_resource_from_id(element.ResourceType.disk)
cores_used, mem_used, disk_used = self.\
strategy.calculate_used_resource(
hypervisor, cap_cores, cap_mem, cap_disk)
node, cap_cores, cap_mem, cap_disk)
self.assertEqual((cores_used, mem_used, disk_used), (25, 4, 40))
def test_group_hosts_by_airflow(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
h1, h2 = self.strategy.group_hosts_by_airflow()
# print h1, h2, avg, w_map
self.assertEqual(h1[0]['hv'].uuid, 'Node_0')
self.assertEqual(h2[0]['hv'].uuid, 'Node_1')
n1, n2 = self.strategy.group_hosts_by_airflow()
# print n1, n2, avg, w_map
self.assertEqual(n1[0]['node'].uuid, 'Node_0')
self.assertEqual(n2[0]['node'].uuid, 'Node_1')
def test_choose_vm_to_migrate(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22
h1, h2 = self.strategy.group_hosts_by_airflow()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
self.assertEqual(vm_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(vm_to_mig[1]), 1)
self.assertEqual(vm_to_mig[1][0].uuid,
n1, n2 = self.strategy.group_hosts_by_airflow()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(instance_to_mig[1]), 1)
self.assertEqual(instance_to_mig[1][0].uuid,
"cae81432-1631-4d4e-b29c-6f3acdcde906")
def test_choose_vm_to_migrate_all(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
def test_choose_instance_to_migrate_all(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25
h1, h2 = self.strategy.group_hosts_by_airflow()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
self.assertEqual(vm_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(vm_to_mig[1]), 2)
self.assertEqual(vm_to_mig[1][1].uuid,
n1, n2 = self.strategy.group_hosts_by_airflow()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(instance_to_mig[1]), 2)
self.assertEqual(instance_to_mig[1][1].uuid,
"73b09e16-35b7-4922-804e-e8f5d9b740fc")
def test_choose_vm_notfound(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
def test_choose_instance_notfound(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22
h1, h2 = self.strategy.group_hosts_by_airflow()
vms = model.get_all_vms()
vms.clear()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
self.assertIsNone(vm_to_mig)
n1, n2 = self.strategy.group_hosts_by_airflow()
instances = model.get_all_instances()
instances.clear()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertIsNone(instance_to_mig)
def test_filter_destination_hosts(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22
h1, h2 = self.strategy.group_hosts_by_airflow()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
dest_hosts = self.strategy.filter_destination_hosts(h2, vm_to_mig[1])
n1, n2 = self.strategy.group_hosts_by_airflow()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
dest_hosts = self.strategy.filter_destination_hosts(
n2, instance_to_mig[1])
self.assertEqual(len(dest_hosts), 1)
self.assertEqual(dest_hosts[0]['hv'].uuid, 'Node_1')
self.assertEqual(dest_hosts[0]['vm'].uuid,
self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1')
self.assertEqual(dest_hosts[0]['instance'].uuid,
'cae81432-1631-4d4e-b29c-6f3acdcde906')
def test_exception_model(self):
@ -163,7 +164,7 @@ class TestUniformAirflow(base.BaseTestCase):
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25
self.strategy.threshold_power = 300
model = self.fake_cluster.generate_scenario_4_with_1_hypervisor_no_vm()
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
@ -172,7 +173,7 @@ class TestUniformAirflow(base.BaseTestCase):
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25
self.strategy.threshold_power = 300
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
@ -182,7 +183,7 @@ class TestUniformAirflow(base.BaseTestCase):
self.assertEqual(num_migrations, 2)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()

View File

@ -28,7 +28,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_cluster_and_metrics
class TestVMWorkloadConsolidation(base.BaseTestCase):
class TestVMWorkloadConsolidation(base.TestCase):
def setUp(self):
super(TestVMWorkloadConsolidation, self).setUp()
@ -65,41 +65,42 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
exception.ClusterStateNotDefined,
self.strategy.execute)
def test_get_vm_utilization(self):
def test_get_instance_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
vm_0 = model.get_vm_from_id("VM_0")
vm_util = dict(cpu=1.0, ram=1, disk=10)
self.assertEqual(vm_util,
self.strategy.get_vm_utilization(vm_0.uuid, model))
instance_0 = model.get_instance_from_id("INSTANCE_0")
instance_util = dict(cpu=1.0, ram=1, disk=10)
self.assertEqual(
instance_util,
self.strategy.get_instance_utilization(instance_0.uuid, model))
def test_get_hypervisor_utilization(self):
def test_get_node_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
node_0 = model.get_hypervisor_from_id("Node_0")
node_0 = model.get_node_from_id("Node_0")
node_util = dict(cpu=1.0, ram=1, disk=10)
self.assertEqual(
node_util,
self.strategy.get_hypervisor_utilization(node_0, model))
self.strategy.get_node_utilization(node_0, model))
def test_get_hypervisor_capacity(self):
def test_get_node_capacity(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
node_0 = model.get_hypervisor_from_id("Node_0")
node_0 = model.get_node_from_id("Node_0")
node_util = dict(cpu=40, ram=64, disk=250)
self.assertEqual(node_util,
self.strategy.get_hypervisor_capacity(node_0, model))
self.strategy.get_node_capacity(node_0, model))
def test_get_relative_hypervisor_utilization(self):
def test_get_relative_node_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
hypervisor = model.get_hypervisor_from_id('Node_0')
rhu = self.strategy.get_relative_hypervisor_utilization(
hypervisor, model)
node = model.get_node_from_id('Node_0')
rhu = self.strategy.get_relative_node_utilization(
node, model)
expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025}
self.assertEqual(expected_rhu, rhu)
@ -115,85 +116,85 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1')
vm_uuid = 'VM_0'
self.strategy.add_migration(vm_uuid, h1, h2, model)
n1 = model.get_node_from_id('Node_0')
n2 = model.get_node_from_id('Node_1')
instance_uuid = 'INSTANCE_0'
self.strategy.add_migration(instance_uuid, n1, n2, model)
self.assertEqual(1, len(self.strategy.solution.actions))
expected = {'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'src_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n2.uuid,
'source_node': n1.uuid,
'migration_type': 'live',
'resource_id': vm_uuid}}
'resource_id': instance_uuid}}
self.assertEqual(expected, self.strategy.solution.actions[0])
def test_is_overloaded(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
n1 = model.get_node_from_id('Node_0')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc)
res = self.strategy.is_overloaded(n1, model, cc)
self.assertFalse(res)
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc)
res = self.strategy.is_overloaded(n1, model, cc)
self.assertFalse(res)
cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc)
res = self.strategy.is_overloaded(n1, model, cc)
self.assertTrue(res)
def test_vm_fits(self):
def test_instance_fits(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h = model.get_hypervisor_from_id('Node_1')
vm_uuid = 'VM_0'
n = model.get_node_from_id('Node_1')
instance_uuid = 'INSTANCE_0'
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.vm_fits(vm_uuid, h, model, cc)
res = self.strategy.instance_fits(instance_uuid, n, model, cc)
self.assertTrue(res)
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.vm_fits(vm_uuid, h, model, cc)
res = self.strategy.instance_fits(instance_uuid, n, model, cc)
self.assertFalse(res)
def test_add_action_enable_hypervisor(self):
def test_add_action_enable_compute_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h = model.get_hypervisor_from_id('Node_0')
self.strategy.add_action_enable_hypervisor(h)
n = model.get_node_from_id('Node_0')
self.strategy.add_action_enable_compute_node(n)
expected = [{'action_type': 'change_nova_service_state',
'input_parameters': {'state': 'enabled',
'resource_id': 'Node_0'}}]
self.assertEqual(expected, self.strategy.solution.actions)
def test_add_action_disable_hypervisor(self):
def test_add_action_disable_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h = model.get_hypervisor_from_id('Node_0')
self.strategy.add_action_disable_hypervisor(h)
n = model.get_node_from_id('Node_0')
self.strategy.add_action_disable_node(n)
expected = [{'action_type': 'change_nova_service_state',
'input_parameters': {'state': 'disabled',
'resource_id': 'Node_0'}}]
self.assertEqual(expected, self.strategy.solution.actions)
def test_disable_unused_hypervisors(self):
def test_disable_unused_nodes(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1')
vm_uuid = 'VM_0'
self.strategy.disable_unused_hypervisors(model)
n1 = model.get_node_from_id('Node_0')
n2 = model.get_node_from_id('Node_1')
instance_uuid = 'INSTANCE_0'
self.strategy.disable_unused_nodes(model)
self.assertEqual(0, len(self.strategy.solution.actions))
# Migrate VM to free the hypervisor
self.strategy.add_migration(vm_uuid, h1, h2, model)
# Migrate instance to free the node
self.strategy.add_migration(instance_uuid, n1, n2, model)
self.strategy.disable_unused_hypervisors(model)
self.strategy.disable_unused_nodes(model)
expected = {'action_type': 'change_nova_service_state',
'input_parameters': {'state': 'disabled',
'resource_id': 'Node_0'}}
@ -213,39 +214,39 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1')
vm_uuid = 'VM_0'
n1 = model.get_node_from_id('Node_0')
n2 = model.get_node_from_id('Node_1')
instance_uuid = 'INSTANCE_0'
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.consolidation_phase(model, cc)
expected = [{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'src_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n2.uuid,
'source_node': n1.uuid,
'migration_type': 'live',
'resource_id': vm_uuid}}]
'resource_id': instance_uuid}}]
self.assertEqual(expected, self.strategy.solution.actions)
def test_strategy(self):
model = self.fake_cluster.generate_scenario_2()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
n1 = model.get_node_from_id('Node_0')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.offload_phase(model, cc)
self.strategy.consolidation_phase(model, cc)
self.strategy.optimize_solution(model)
h2 = self.strategy.solution.actions[0][
'input_parameters']['dst_hypervisor']
n2 = self.strategy.solution.actions[0][
'input_parameters']['destination_node']
expected = [{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2,
'src_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n2,
'source_node': n1.uuid,
'migration_type': 'live',
'resource_id': 'VM_3'}},
'resource_id': 'INSTANCE_3'}},
{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2,
'src_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n2,
'source_node': n1.uuid,
'migration_type': 'live',
'resource_id': 'VM_1'}}]
'resource_id': 'INSTANCE_1'}}]
self.assertEqual(expected, self.strategy.solution.actions)
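# A hedged sketch of the three-phase flow this test drives (method names
# from the test; phase semantics inferred from the assertions):
#
#   strategy.offload_phase(model, cc)        # move load off overloaded nodes
#   strategy.consolidation_phase(model, cc)  # pack instances onto fewer nodes
#   strategy.optimize_solution(model)        # drop migrations that cancel out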
@ -253,32 +254,32 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
model = self.fake_cluster.generate_scenario_3()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1')
n1 = model.get_node_from_id('Node_0')
n2 = model.get_node_from_id('Node_1')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.offload_phase(model, cc)
expected = [{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'input_parameters': {'destination_node': n2.uuid,
'migration_type': 'live',
'resource_id': 'VM_6',
'src_hypervisor': h1.uuid}},
'resource_id': 'INSTANCE_6',
'source_node': n1.uuid}},
{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'input_parameters': {'destination_node': n2.uuid,
'migration_type': 'live',
'resource_id': 'VM_7',
'src_hypervisor': h1.uuid}},
'resource_id': 'INSTANCE_7',
'source_node': n1.uuid}},
{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'input_parameters': {'destination_node': n2.uuid,
'migration_type': 'live',
'resource_id': 'VM_8',
'src_hypervisor': h1.uuid}}]
'resource_id': 'INSTANCE_8',
'source_node': n1.uuid}}]
self.assertEqual(expected, self.strategy.solution.actions)
self.strategy.consolidation_phase(model, cc)
expected.append({'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n1.uuid,
'migration_type': 'live',
'resource_id': 'VM_7',
'src_hypervisor': h2.uuid}})
'resource_id': 'INSTANCE_7',
'source_node': n2.uuid}})
self.assertEqual(expected, self.strategy.solution.actions)
self.strategy.optimize_solution(model)
del expected[3]

View File

@ -22,8 +22,8 @@ import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \
@ -32,7 +32,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestWorkloadBalance(base.BaseTestCase):
class TestWorkloadBalance(base.TestCase):
def setUp(self):
super(TestWorkloadBalance, self).setUp()
@ -59,59 +59,64 @@ class TestWorkloadBalance(base.BaseTestCase):
self.strategy = strategies.WorkloadBalance(config=mock.Mock())
self.strategy.input_parameters = utils.Struct()
self.strategy.input_parameters.update({'threshold': 25.0,
'period': 300})
'period': 300})
self.strategy.threshold = 25.0
self.strategy._period = 300
def test_calc_used_res(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
hypervisor = model.get_hypervisor_from_id('Node_0')
cap_cores = model.get_resource_from_id(resource.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(resource.ResourceType.memory)
cap_disk = model.get_resource_from_id(resource.ResourceType.disk)
node = model.get_node_from_id('Node_0')
cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(element.ResourceType.memory)
cap_disk = model.get_resource_from_id(element.ResourceType.disk)
cores_used, mem_used, disk_used = (
self.strategy.calculate_used_resource(
hypervisor, cap_cores, cap_mem, cap_disk))
node, cap_cores, cap_mem, cap_disk))
self.assertEqual((cores_used, mem_used, disk_used), (20, 4, 40))
def test_group_hosts_by_cpu_util(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold = 30
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
self.assertEqual(h1[0]['hv'].uuid, 'Node_0')
self.assertEqual(h2[0]['hv'].uuid, 'Node_1')
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
self.assertEqual(n1[0]['node'].uuid, 'Node_0')
self.assertEqual(n2[0]['node'].uuid, 'Node_1')
self.assertEqual(avg, 8.0)
def test_choose_vm_to_migrate(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1, avg, w_map)
self.assertEqual(vm_to_mig[0].uuid, 'Node_0')
self.assertEqual(vm_to_mig[1].uuid,
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
instance_to_mig = self.strategy.choose_instance_to_migrate(
n1, avg, w_map)
self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
self.assertEqual(instance_to_mig[1].uuid,
"73b09e16-35b7-4922-804e-e8f5d9b740fc")
def test_choose_vm_notfound(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
def test_choose_instance_notfound(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
vms = model.get_all_vms()
vms.clear()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1, avg, w_map)
self.assertIsNone(vm_to_mig)
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
instances = model.get_all_instances()
instances.clear()
instance_to_mig = self.strategy.choose_instance_to_migrate(
n1, avg, w_map)
self.assertIsNone(instance_to_mig)
def test_filter_destination_hosts(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1, avg, w_map)
self.strategy.ceilometer = mock.MagicMock(
statistic_aggregation=self.fake_metrics.mock_get_statistics_wb)
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
instance_to_mig = self.strategy.choose_instance_to_migrate(
n1, avg, w_map)
dest_hosts = self.strategy.filter_destination_hosts(
h2, vm_to_mig[1], avg, w_map)
n2, instance_to_mig[1], avg, w_map)
self.assertEqual(len(dest_hosts), 1)
self.assertEqual(dest_hosts[0]['hv'].uuid, 'Node_1')
self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1')
def test_exception_model(self):
self.m_model.return_value = None
@ -137,13 +142,13 @@ class TestWorkloadBalance(base.BaseTestCase):
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self):
model = self.fake_cluster.generate_scenario_4_with_1_hypervisor_no_vm()
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
def test_execute(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
@ -153,7 +158,7 @@ class TestWorkloadBalance(base.BaseTestCase):
self.assertEqual(num_migrations, 1)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()

View File

@ -28,7 +28,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestWorkloadStabilization(base.BaseTestCase):
class TestWorkloadStabilization(base.TestCase):
def setUp(self):
super(TestWorkloadStabilization, self).setUp()
@ -63,11 +63,13 @@ class TestWorkloadStabilization(base.BaseTestCase):
statistic_aggregation=self.fake_metrics.mock_get_statistics)
self.strategy = strategies.WorkloadStabilization(config=mock.Mock())
def test_get_vm_load(self):
def test_get_instance_load(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
vm_0_dict = {'uuid': 'VM_0', 'vcpus': 10,
'cpu_util': 7, 'memory.resident': 2}
self.assertEqual(vm_0_dict, self.strategy.get_vm_load("VM_0"))
instance_0_dict = {
'uuid': 'INSTANCE_0', 'vcpus': 10,
'cpu_util': 7, 'memory.resident': 2}
self.assertEqual(
instance_0_dict, self.strategy.get_instance_load("INSTANCE_0"))
def test_normalize_hosts_load(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
@ -109,7 +111,7 @@ class TestWorkloadStabilization(base.BaseTestCase):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.assertEqual(
self.strategy.calculate_migration_case(
self.hosts_load_assert, "VM_5",
self.hosts_load_assert, "INSTANCE_5",
"Node_2", "Node_1")[-1]["Node_1"],
{'cpu_util': 2.55, 'memory.resident': 21, 'vcpus': 40})
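# Shape of the hosts-load mapping assumed by this assertion (keys taken
# from the expected dict above):
#
#   {node_uuid: {'cpu_util': ..., 'memory.resident': ..., 'vcpus': ...}}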
@ -131,20 +133,25 @@ class TestWorkloadStabilization(base.BaseTestCase):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2}
self.strategy.simulate_migrations = mock.Mock(
return_value=[{'vm': 'VM_4', 's_host': 'Node_2', 'host': 'Node_1'}]
return_value=[
{'instance': 'INSTANCE_4', 's_host': 'Node_2',
'host': 'Node_1'}]
)
with mock.patch.object(self.strategy, 'migrate') as mock_migration:
self.strategy.execute()
mock_migration.assert_called_once_with(
'VM_4', 'Node_2', 'Node_1')
'INSTANCE_4', 'Node_2', 'Node_1')
def test_execute_multiply_migrations(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.00001,
'memory.resident': 0.0001}
self.strategy.simulate_migrations = mock.Mock(
return_value=[{'vm': 'VM_4', 's_host': 'Node_2', 'host': 'Node_1'},
{'vm': 'VM_3', 's_host': 'Node_2', 'host': 'Node_3'}]
return_value=[
{'instance': 'INSTANCE_4', 's_host': 'Node_2',
'host': 'Node_1'},
{'instance': 'INSTANCE_3', 's_host': 'Node_2',
'host': 'Node_3'}]
)
with mock.patch.object(self.strategy, 'migrate') as mock_migrate:
self.strategy.execute()