Diffstat (limited to 'watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py')
-rw-r--r--  watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py  198
1 file changed, 0 insertions(+), 198 deletions(-)
diff --git a/watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py b/watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py
deleted file mode 100644
index 8594e94..0000000
--- a/watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py
+++ /dev/null
@@ -1,198 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import unicode_literals

import functools

from oslo_log import log
from tempest import config
from tempest.lib.common.utils import test_utils

from watcher_tempest_plugin.tests.scenario import base

CONF = config.CONF
LOG = log.getLogger(__name__)

class TestExecuteWorkloadBalancingStrategy(base.BaseInfraOptimScenarioTest):
    """Tests for action plans"""

    GOAL = "workload_balancing"

    @classmethod
    def skip_checks(cls):
        super(TestExecuteWorkloadBalancingStrategy, cls).skip_checks()

    @classmethod
    def resource_setup(cls):
        super(TestExecuteWorkloadBalancingStrategy, cls).resource_setup()
        if CONF.compute.min_compute_nodes < 2:
            raise cls.skipException(
                "Less than 2 compute nodes, skipping multinode tests.")
        if not CONF.compute_feature_enabled.live_migration:
            raise cls.skipException("Live migration is not enabled")

        cls.initial_compute_nodes_setup = cls.get_compute_nodes_setup()
        enabled_compute_nodes = [cn for cn in cls.initial_compute_nodes_setup
                                 if cn.get('status') == 'enabled']

        cls.wait_for_compute_node_setup()

        if len(enabled_compute_nodes) < 2:
            raise cls.skipException(
                "Less than 2 compute nodes are enabled, "
                "skipping multinode tests.")

    @classmethod
    def get_hypervisors_setup(cls):
        """Return the detailed list of hypervisors."""
        hypervisors_client = cls.mgr.hypervisor_client
        hypervisors = hypervisors_client.list_hypervisors(
            detail=True)['hypervisors']
        return hypervisors

    @classmethod
    def get_compute_nodes_setup(cls):
        """Return the list of nova-compute services."""
        services_client = cls.mgr.services_client
        available_services = services_client.list_services()['services']

        return [srv for srv in available_services
                if srv.get('binary') == 'nova-compute']

    def _migrate_server_to(self, server_id, dest_host, volume_backed=False):
        """Live migrate a server to dest_host.

        Block migration is used when it is enabled in the configuration and
        the server is not volume-backed.
        """
        kwargs = dict()
        kwargs['disk_over_commit'] = False
        block_migration = (CONF.compute_feature_enabled.
                           block_migration_for_live_migration and
                           not volume_backed)
        body = self.mgr.servers_client.live_migrate_server(
            server_id, host=dest_host, block_migration=block_migration,
            **kwargs)
        return body

    @classmethod
    def wait_for_compute_node_setup(cls):
        """Wait until the compute nodes are ready.

        Ready means the number of 'up' hypervisors equals the number of
        'up' nova-compute services and at least two hypervisors exist.
        """

        def _are_compute_nodes_setup():
            try:
                hypervisors = cls.get_hypervisors_setup()
                available_hypervisors = set(
                    hyp['hypervisor_hostname'] for hyp in hypervisors
                    if hyp['state'] == 'up')
                available_services = set(
                    service['host']
                    for service in cls.get_compute_nodes_setup()
                    if service['state'] == 'up')
                return (
                    len(available_hypervisors) == len(available_services) and
                    len(hypervisors) >= 2)
            except Exception as exc:
                LOG.exception(exc)
                return False

        assert test_utils.call_until_true(
            func=_are_compute_nodes_setup,
            duration=600,
            sleep_for=2
        )

    @classmethod
    def rollback_compute_nodes_status(cls):
        """Restore each nova-compute service to its initial status."""
        current_compute_nodes_setup = cls.get_compute_nodes_setup()
        for cn_setup in current_compute_nodes_setup:
            cn_hostname = cn_setup.get('host')
            matching_cns = [
                cns for cns in cls.initial_compute_nodes_setup
                if cns.get('host') == cn_hostname
            ]
            initial_cn_setup = matching_cns[0]  # Should return a single result
            if cn_setup.get('status') != initial_cn_setup.get('status'):
                if initial_cn_setup.get('status') == 'enabled':
                    rollback_func = cls.mgr.services_client.enable_service
                else:
                    rollback_func = cls.mgr.services_client.disable_service
                rollback_func(binary='nova-compute', host=cn_hostname)

    def _create_one_instance_per_host(self):
        """Create 1 instance per compute node

        This goes up to the min_compute_nodes threshold so that things don't
        get crazy if you have 1000 compute nodes but set min to 3.
        """
        host_client = self.mgr.hosts_client
        all_hosts = host_client.list_hosts()['hosts']
        compute_nodes = [x for x in all_hosts if x['service'] == 'compute']

        created_instances = []
        for _ in compute_nodes[:CONF.compute.min_compute_nodes]:
            # by getting to active state here, this means this has
            # landed on the host in question.
            created_instances.append(
                self.create_server(image_id=CONF.compute.image_ref,
                                   wait_until='ACTIVE', clients=self.mgr))
        return created_instances

    def _pack_all_created_instances_on_one_host(self, instances):
        hypervisors = [
            hyp['hypervisor_hostname'] for hyp in self.get_hypervisors_setup()
            if hyp['state'] == 'up']
        node = hypervisors[0]
        for instance in instances:
            if instance.get('OS-EXT-SRV-ATTR:hypervisor_hostname') != node:
                self._migrate_server_to(instance['id'], node)

    def test_execute_workload_stabilization(self):
        """Execute an action plan using the workload_stabilization strategy"""
        self.addCleanup(self.rollback_compute_nodes_status)
        instances = self._create_one_instance_per_host()
        self._pack_all_created_instances_on_one_host(instances)

        audit_parameters = {
            "metrics": ["cpu_util"],
            "thresholds": {"cpu_util": 0.2},
            "weights": {"cpu_util_weight": 1.0},
            "instance_metrics": {"cpu_util": "compute.node.cpu.percent"}}

        _, goal = self.client.show_goal(self.GOAL)
        _, strategy = self.client.show_strategy("workload_stabilization")
        _, audit_template = self.create_audit_template(
            goal['uuid'], strategy=strategy['uuid'])
        _, audit = self.create_audit(
            audit_template['uuid'], parameters=audit_parameters)

        try:
            self.assertTrue(test_utils.call_until_true(
                func=functools.partial(
                    self.has_audit_finished, audit['uuid']),
                duration=600,
                sleep_for=2
            ))
        except ValueError:
            self.fail("The audit has failed!")

        _, finished_audit = self.client.show_audit(audit['uuid'])
        if finished_audit.get('state') in ('FAILED', 'CANCELLED'):
            self.fail("The audit ended in unexpected state: %s!" %
                      finished_audit.get('state'))

        _, action_plans = self.client.list_action_plans(
            audit_uuid=audit['uuid'])
        action_plan = action_plans['action_plans'][0]

        _, action_plan = self.client.show_action_plan(action_plan['uuid'])
        _, action_list = self.client.list_actions(
            action_plan_uuid=action_plan["uuid"])