RST directive to discover and generate drivers doc

This patchset introduces a new custom directive called 'drivers-doc'
which loads all available drivers under a given namespace and imports
their respective docstrings into the .rst document.

This patchset also contains some modifications and additions to the
docstrings of these drivers to make the final document complete.
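
For reference, the directive relies on stevedore entry-point discovery to find these drivers; a minimal sketch of that lookup (not part of this patch, namespace name taken from the docs below):

    # Sketch only: list every plugin registered under a given namespace.
    from stevedore import extension

    manager = extension.ExtensionManager(namespace='watcher_strategies')
    for ext in manager.extensions:
        # ext.name is the entry point name, ext.plugin the loaded class
        print(ext.name, ext.plugin.__doc__)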

Change-Id: Ib3df59fa45cea9d11d20fb73a5f0f1d564135bca
Closes-Bug: #1536218
Closes-Bug: #1536735
Vincent Françoise 2016-02-11 14:49:19 +01:00
parent 02f0f8e70a
commit 98a65efb16
12 changed files with 323 additions and 69 deletions

View File

@@ -0,0 +1,38 @@
..
Except where otherwise noted, this document is licensed under Creative
Commons Attribution 3.0 License. You can view the license at:
https://creativecommons.org/licenses/by/3.0/
=================
Available Plugins
=================
.. _watcher_strategies:
Strategies
==========
.. drivers-doc:: watcher_strategies
.. _watcher_actions:
Actions
=======
.. drivers-doc:: watcher_actions
.. _watcher_workflow_engines:
Workflow Engines
================
.. drivers-doc:: watcher_workflow_engines
.. _watcher_planners:
Planners
========
.. drivers-doc:: watcher_planners

View File

@@ -70,6 +70,7 @@ Plugins
:maxdepth: 1
dev/strategy-plugin
dev/plugins
Admin Guide

View File

@@ -27,6 +27,24 @@ from watcher.decision_engine.model import hypervisor_state as hstate
class ChangeNovaServiceState(base.BaseAction):
"""Disables or enables the nova-compute service, deployed on a host
By using this action, you will be able to update the state of a
nova-compute service. A disabled nova-compute service cannot be selected
by the nova scheduler for future deployments of servers.
The action schema is::
schema = Schema({
'resource_id': str,
'state': str,
})
The `resource_id` references a nova-compute service name (list of available
nova-compute services is returned by this command: ``nova service-list
--binary nova-compute``).
The `state` value should either be `ONLINE` or `OFFLINE`.
"""
STATE = 'state'
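
As an illustration, an input matching this schema could look as follows (the service name and state values are made up):

    # Hypothetical input parameters for ChangeNovaServiceState.
    input_parameters = {
        'resource_id': 'compute-node-01',  # a nova-compute service name
        'state': 'OFFLINE',                # or 'ONLINE'
    }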

View File

@@ -31,6 +31,32 @@ LOG = log.getLogger(__name__)
class Migrate(base.BaseAction):
"""Live-Migrates a server to a destination nova-compute host
This action will allow you to migrate a server to another compute
destination host. As of now, only live migration can be performed using
this action.
.. If either host uses shared storage, you can use ``live``
.. as ``migration_type``. If both source and destination hosts provide
.. local disks, you can set the block_migration parameter to True (not
.. supported yet).
The action schema is::
schema = Schema({
'resource_id': str, # should be a UUID
'migration_type': str, # choices -> "live" only
'dst_hypervisor': str,
'src_hypervisor': str,
})
The `resource_id` is the UUID of the server to migrate. Only live migration
is supported.
The `src_hypervisor` and `dst_hypervisor` parameters are respectively the
source and the destination compute hostname (list of available compute
hosts is returned by this command: ``nova service-list --binary
nova-compute``).
"""
# input parameters constants
MIGRATION_TYPE = 'migration_type'
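
As an illustration, an input matching this schema could look as follows (the UUID and hostnames are made up):

    # Hypothetical input parameters for the Migrate action.
    input_parameters = {
        'resource_id': '8f3a5c2e-aaaa-bbbb-cccc-000000000000',  # server UUID
        'migration_type': 'live',        # only 'live' is supported
        'src_hypervisor': 'compute-01',  # source compute hostname
        'dst_hypervisor': 'compute-02',  # destination compute hostname
    }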

View File

@@ -28,6 +28,16 @@ LOG = log.getLogger(__name__)
class Nop(base.BaseAction):
"""logs a message
The action schema is::
schema = Schema({
'message': str,
})
The `message` is the actual message that will be logged.
"""
MESSAGE = 'message'

View File

@@ -28,6 +28,16 @@ LOG = log.getLogger(__name__)
class Sleep(base.BaseAction):
"""Makes the executor of the action plan wait for a given duration
The action schema is::
schema = Schema({
'duration': float,
})
The `duration` is expressed in seconds.
"""
DURATION = 'duration'

View File

@@ -28,6 +28,12 @@ LOG = log.getLogger(__name__)
class DefaultWorkFlowEngine(base.BaseWorkFlowEngine):
"""Taskflow as a workflow engine for Watcher
Full documentation on taskflow is available at
http://docs.openstack.org/developer/taskflow/
"""
def decider(self, history):
# FIXME(jed) not possible with the current Watcher Planner
#
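
For readers unfamiliar with taskflow, a minimal standalone sketch of building and running a flow (unrelated to Watcher's actual wiring) looks like this:

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow


    class Nop(task.Task):
        """A task that does nothing, mirroring Watcher's 'nop' action."""
        def execute(self):
            pass


    # Build a linear flow of two tasks and run it with the default engine.
    flow = linear_flow.Flow("example-action-plan")
    flow.add(Nop("nop-1"), Nop("nop-2"))
    engines.run(flow)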

View File

@@ -28,6 +28,13 @@ LOG = log.getLogger(__name__)
class DefaultPlanner(base.BasePlanner):
"""Default planner implementation
This implementation comes with basic rules and a fixed set of weighted action
types. An action with a lower weight will be scheduled before the others.
"""
priorities = {
'nop': 0,
'sleep': 1,
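
To make the weighting rule concrete, a small illustrative sketch (the 'migrate' weight and the action dicts are made up, not the planner's real data structures):

    priorities = {'nop': 0, 'sleep': 1, 'migrate': 2}

    actions = [
        {'action_type': 'migrate'},
        {'action_type': 'nop'},
        {'action_type': 'sleep'},
    ]

    # Lower weight first: 'nop' is scheduled before 'sleep', then 'migrate'.
    ordered = sorted(actions, key=lambda a: priorities[a['action_type']])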

View File

@@ -16,6 +16,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
*Good server consolidation strategy*
Consolidation of VMs is essential to achieve energy optimization in cloud
environments such as OpenStack. As VMs are spun up and/or moved over time,
it becomes necessary to migrate VMs among servers to lower the costs. However,
migration of VMs introduces runtime overheads and consumes extra energy, thus
a good server consolidation strategy should carefully plan for migration in
order to both minimize energy consumption and comply with the various SLAs.
"""
from oslo_log import log
@@ -32,6 +42,29 @@ LOG = log.getLogger(__name__)
class BasicConsolidation(base.BaseStrategy):
"""Basic offline consolidation using live migration
*Description*
This is a server consolidation algorithm which not only minimizes the overall
number of used servers, but also minimizes the number of migrations.
*Requirements*
* You must have at least 2 physical compute nodes to run this strategy.
*Limitations*
- It has been developed only for tests.
- It assumes that the virtual machine and the compute node are on the same
private network.
- It assumes that live migrations are possible.
*Spec URL*
<None>
"""
DEFAULT_NAME = "basic"
DEFAULT_DESCRIPTION = "Basic offline consolidation"
@@ -45,31 +78,11 @@ class BasicConsolidation(base.BaseStrategy):
osc=None):
"""Basic offline Consolidation using live migration
The basic consolidation algorithm has several limitations.
It has been developed only for tests.
eg: The BasicConsolidation assumes that the virtual machine and
the compute node are on the same private network.
Good Strategy :
The workloads of the VMs are changing over the time
and often tend to migrate from one physical machine to another.
Hence, the traditional and offline heuristics such as bin packing
are not applicable for VM placement in cloud computing.
So, the decision Engine optimizer provides placement strategy considering
not only the performance effects but also the workload characteristics of
VMs and others metrics like the power consumption and
the tenants constraints (SLAs).
The watcher optimizer uses an online VM placement technique
based on machine learning and meta-heuristics that must handle :
- multi-objectives
- Contradictory objectives
- Adapt to changes dynamically
- Fast convergence
:param name: the name of the strategy
:param description: a description of the strategy
:param osc: an OpenStackClients object
:param name: The name of the strategy (Default: "basic")
:param description: The description of the strategy
(Default: "Basic offline consolidation")
:param osc: An :py:class:`~watcher.common.clients.OpenStackClients`
instance
"""
super(BasicConsolidation, self).__init__(name, description, osc)

View File

@@ -24,6 +24,26 @@ LOG = log.getLogger(__name__)
class DummyStrategy(base.BaseStrategy):
"""Dummy strategy used for integration testing via Tempest
*Description*
This strategy does not provide any useful optimization. Indeed, its only
purpose is to be used by Tempest tests.
*Requirements*
<None>
*Limitations*
Do not use in production.
*Spec URL*
<None>
"""
DEFAULT_NAME = "dummy"
DEFAULT_DESCRIPTION = "Dummy Strategy"

View File

@@ -16,6 +16,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
*Good Thermal Strategy*:
As infrastructure becomes software defined, power and thermal intelligence
is being adopted to optimize workloads, which can help improve efficiency,
reduce power consumption, improve datacenter PUE and lower operation costs
in the data center.
Outlet (Exhaust Air) Temperature is one of the important thermal
telemetries used to measure the thermal/workload status of a server.
"""
from oslo_log import log
from watcher._i18n import _LE
@@ -30,6 +42,34 @@ LOG = log.getLogger(__name__)
class OutletTempControl(base.BaseStrategy):
"""[PoC] Outlet temperature control using live migration
*Description*
It is a migration strategy based on the outlet temperature of compute
hosts. It generates solutions to move a workload whenever a server's
outlet temperature is higher than the specified threshold.
*Requirements*
* Hardware: All compute hosts should support IPMI and PTAS technology
* Software: The Ceilometer component ceilometer-agent-ipmi must be running
on each compute host, and the Ceilometer API must be able to report the
``hardware.ipmi.node.outlet_temperature`` telemetry successfully.
* You must have at least 2 physical compute hosts to run this strategy.
*Limitations*
- This is a proof of concept that is not meant to be used in production
- We cannot forecast how many servers should be migrated. This is the
reason why we only plan a single virtual machine migration at a time.
So it's better to use this algorithm with `CONTINUOUS` audits.
- It assumes that live migrations are possible.
*Spec URL*
https://github.com/openstack/watcher-specs/blob/master/specs/mitaka/approved/outlet-temperature-based-strategy.rst
""" # noqa
DEFAULT_NAME = "outlet_temp_control"
DEFAULT_DESCRIPTION = "outlet temperature based migration strategy"
@@ -42,29 +82,7 @@ class OutletTempControl(base.BaseStrategy):
def __init__(self, name=DEFAULT_NAME, description=DEFAULT_DESCRIPTION,
osc=None):
"""[PoC]Outlet temperature control using live migration
It is a migration strategy based on the Outlet Temperature of physical
servers. It generates solutions to move a workload whenever a server's
outlet temperature is higher than the specified threshold. As of now,
we cannot forecast how many instances should be migrated. This is the
reason why we simply plan a single virtual machine migration.
So it's better to use this algorithm with CONTINUOUS audits.
Requirements:
* Hardware: compute nodes should support IPMI and PTAS technology
* Software: Ceilometer component ceilometer-agent-ipmi running
in each compute node, and Ceilometer API can report such telemetry
"hardware.ipmi.node.outlet_temperature" successfully.
* You must have at least 2 physical compute nodes to run this strategy.
Good Strategy:
Towards to software defined infrastructure, the power and thermal
intelligences is being adopted to optimize workload, which can help
improve efficiency, reduce power, as well as to improve datacenter PUE
and lower down operation cost in data center.
Outlet(Exhaust Air) Temperature is one of the important thermal
telemetries to measure thermal/workload status of server.
"""Outlet temperature control using live migration
:param name: the name of the strategy
:param description: a description of the strategy
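
A heavily simplified sketch of the decision rule described above; every name here, including the metric lookup helper, is hypothetical and not part of Watcher's API:

    THRESHOLD = 35.0  # degrees Celsius, illustrative value


    def plan_migrations(hosts, get_outlet_temp):
        """Return at most one (vm, source_host) migration candidate.

        `hosts` maps a hostname to the VMs it runs; `get_outlet_temp`
        stands in for a Ceilometer lookup of the
        `hardware.ipmi.node.outlet_temperature` meter.
        """
        for host, vms in hosts.items():
            if get_outlet_temp(host) > THRESHOLD and vms:
                # Only a single VM migration is planned per audit run.
                return [(vms[0], host)]
        return []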

View File

@@ -17,21 +17,56 @@
from __future__ import unicode_literals
import importlib
import inspect
from docutils import nodes
from docutils.parsers import rst
from docutils import statemachine as sm
from docutils import statemachine
from stevedore import extension
from watcher.version import version_info
import textwrap
class BaseWatcherDirective(rst.Directive):
def __init__(self, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
super(BaseWatcherDirective, self).__init__(
name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine)
self.result = statemachine.ViewList()
def run(self):
raise NotImplementedError('Must override run() in subclass.')
def add_line(self, line, *lineno):
"""Append one line of generated reST to the output."""
self.result.append(line, rst.directives.unchanged, *lineno)
def add_textblock(self, textblock):
for line in textblock.splitlines():
self.add_line(line)
def add_object_docstring(self, obj):
obj_raw_docstring = obj.__doc__ or ""
# Maybe it's within the __init__
if not obj_raw_docstring and hasattr(obj, "__init__"):
if obj.__init__.__doc__:
obj_raw_docstring = obj.__init__.__doc__
if not obj_raw_docstring:
# Raise an error to make the tests fail with doc8
raise self.error("No docstring available for this plugin!")
obj_docstring = inspect.cleandoc(obj_raw_docstring)
self.add_textblock(obj_docstring)
class WatcherTerm(rst.Directive):
class WatcherTerm(BaseWatcherDirective):
"""Directive to import an RST formatted docstring into the Watcher glossary
How to use it
-------------
**How to use it**
# inside your .py file
class DocumentedObject(object):
@@ -47,17 +82,7 @@ class WatcherTerm(rst.Directive):
# You need to put an import path as an argument for this directive to work
required_arguments = 1
def add_textblock(self, textblock):
for line in textblock.splitlines():
self.add_line(line)
def add_line(self, line, *lineno):
"""Append one line of generated reST to the output."""
self.result.append(line, rst.directives.unchanged, *lineno)
def run(self):
self.result = sm.ViewList()
cls_path = self.arguments[0]
try:
@@ -65,20 +90,82 @@ class WatcherTerm(rst.Directive):
except Exception as exc:
raise self.error(exc)
self.add_class_docstring(cls)
self.add_object_docstring(cls)
node = nodes.paragraph()
node.document = self.state.document
self.state.nested_parse(self.result, 0, node)
return node.children
def add_class_docstring(self, cls):
# Added 4 spaces to align the first line with the rest of the text
# to be able to dedent it correctly
cls_docstring = textwrap.dedent("%s%s" % (" " * 4, cls.__doc__))
self.add_textblock(cls_docstring)
class DriversDoc(BaseWatcherDirective):
"""Directive to import an RST formatted docstring into the Watcher doc
This directive imports the RST formatted docstring of every driver declared
within an entry point namespace provided as an argument.
**How to use it**
# inside your .py file
class DocumentedClassReferencedInEntrypoint(object):
'''My *.rst* docstring'''
def foo(self):
'''Foo docstring'''
# Inside your .rst file
.. drivers-doc:: entrypoint_namespace
:append_methods_doc: foo
This directive will then import the docstring and interpret it.
Note that no section/sub-section can be imported via this directive as this
is a Sphinx restriction.
"""
# You need to put an import path as an argument for this directive to work
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
has_content = False
option_spec = dict(
# CSV-formatted list of method names whose docstrings will be appended,
# in the given order, after each driver docstring
append_methods_doc=lambda opts: [
opt.strip() for opt in opts.split(",") if opt.strip()],
# By default, we always start by adding the driver object docstring
exclude_driver_docstring=rst.directives.flag,
)
def run(self):
ext_manager = extension.ExtensionManager(namespace=self.arguments[0])
extensions = ext_manager.extensions
# Collect each driver's entry point name along with its plugin class
classes = [(ext.name, ext.plugin) for ext in extensions]
for name, cls in classes:
self.add_line(".. rubric:: %s" % name)
self.add_line("")
if "exclude_driver_docstring" not in self.options:
self.add_object_docstring(cls)
self.add_line("")
for method_name in self.options.get("append_methods_doc", []):
if hasattr(cls, method_name):
method = getattr(cls, method_name)
method_result = inspect.cleandoc(method.__doc__ or "")
self.add_textblock(method_result)
self.add_line("")
node = nodes.paragraph()
node.document = self.state.document
self.state.nested_parse(self.result, 0, node)
return node.children
def setup(app):
app.add_directive('drivers-doc', DriversDoc)
app.add_directive('watcher-term', WatcherTerm)
return {'version': version_info.version_string()}
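
For completeness, a hedged sketch of how such an extension is usually enabled in a Sphinx conf.py; the module name 'term' and the 'ext' directory are assumptions, not necessarily Watcher's actual layout:

    # conf.py (sketch)
    import os
    import sys

    # Make the local extension importable; the directory name is illustrative.
    sys.path.insert(0, os.path.abspath('ext'))

    extensions = [
        'sphinx.ext.autodoc',
        'term',  # the module defining setup(), drivers-doc and watcher-term
    ]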