Refactor global manager

Change-Id: I9e3fa343e9974576f816c5d4f625064b304969fa
kong committed 2015-05-01 23:57:25 +08:00
parent 1d285abb17
commit 149b1ca0f7
65 changed files with 1931 additions and 2450 deletions

.gitignore (vendored, 50 lines changed)

@@ -1,7 +1,45 @@
*.pyc
.ropeproject
openstack_neat.egg-info
build
*.py[cod]
*.sqlite
# C extensions
*.so
# Packages
*.egg
*.egg-info
dist
distribute-*
.idea/
build
.venv
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
.coverage
.tox
nosetests.xml
cover/*
.testrepository/
subunit.log
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
.idea
.DS_Store
etc/*.conf
tools/lintstack.head.py
tools/pylint_exceptions

.pylintrc (new file, 49 lines)

@@ -0,0 +1,49 @@
# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add <file or directory> to the black list. It should be a base name, not a
# path. You may set this option multiple times.
ignore=tests
ignore=openstack
[Messages Control]
# NOTE(justinsb): We might want to have a 2nd strict pylintrc in future
# C0111: Don't require docstrings on every method
# W0511: TODOs in code comments are fine.
# W0142: *args and **kwargs are fine.
# W0622: Redefining id is fine.
# W0703: Catch "Exception".
disable=C0111,W0511,W0142,W0622,W0703
[Basic]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$
# Type attribute names can be 2 to 31 characters long, with lowercase and underscores
attr-rgx=[a-z_][a-z0-9_]{1,30}$
# Method names should be at least 3 characters long, lowercase with underscores
method-rgx=([a-z_][a-z0-9_]{1,30}|setUp|tearDown)$
# Module names matching savanna-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(savanna-[a-z0-9_-]+))$
# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
[Design]
max-public-methods=100
min-public-methods=0
max-args=6
[Variables]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
# _ is used by our localization
additional-builtins=_
[TYPECHECK]
generated-members=query,node_template,status_code,data

.testr.conf (new file, 9 lines)

@@ -0,0 +1,9 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \
${PYTHON:-python} -m subunit.run discover -t ./ ./mistral/tests/unit $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
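For reference, the test_command template above is what testr expands for each worker. A rough Python equivalent of a single worker invocation (a sketch; the real call also appends $LISTOPT and $IDOPTION):

import os
import subprocess

# Mirror the environment defaults from the test_command template.
env = dict(os.environ,
           OS_STDOUT_CAPTURE='1', OS_STDERR_CAPTURE='1',
           OS_LOG_CAPTURE='1', OS_TEST_TIMEOUT='160')
subprocess.check_call(
    ['python', '-m', 'subunit.run', 'discover',
     '-t', './', './mistral/tests/unit'],
    env=env)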


@@ -1,11 +0,0 @@
language: python
python:
- 2.7
install:
- sudo apt-get update
- sudo apt-get install -qq python-libvirt python-numpy python-scipy
- cp /usr/lib/python2.7/dist-packages/libvirt* ~/virtualenv/python2.7/lib/python2.7/site-packages/
- cp -r /usr/lib/python2.7/dist-packages/numpy* ~/virtualenv/python2.7/lib/python2.7/site-packages/
- cp -r /usr/lib/python2.7/dist-packages/scipy* ~/virtualenv/python2.7/lib/python2.7/site-packages/
- pip install --use-mirrors pyqcy mocktest PyContracts nose SQLAlchemy bottle requests python-novaclient
script: nosetests


@@ -1,6 +0,0 @@
include README.rst
include LICENSE
include NOTICE
include distribute_setup.py
include init.d/*
include neat.conf

TODO (deleted, 16 lines)

@@ -1,16 +0,0 @@
RPM package
1. python2 setup.py bdist_rpm
2. Added #!/usr/bin/python2 to start-*.py
3. cp start-data-collector.py /usr/bin/neat-data-collector
4. cp initscripts/* /etc/init.d/
5. cp neat.conf /etc/neat/neat.conf
RPM manuals:
https://fedoraproject.org/wiki/How_to_create_an_RPM_package
https://fedoraproject.org/wiki/Packaging:Guidelines
https://fedoraproject.org/wiki/Packaging:Python
http://fedoraproject.org/wiki/Packaging:SysVInitScript
http://docs.python.org/distutils/builtdist.html
http://stackoverflow.com/questions/2324933/creating-python-rpm

etc/logging.conf.sample (new file, 32 lines)

@@ -0,0 +1,32 @@
[loggers]
keys=root
[handlers]
keys=consoleHandler, fileHandler
[formatters]
keys=verboseFormatter, simpleFormatter
[logger_root]
level=DEBUG
handlers=consoleHandler, fileHandler
[handler_consoleHandler]
class=StreamHandler
level=INFO
formatter=simpleFormatter
args=(sys.stdout,)
[handler_fileHandler]
class=FileHandler
level=INFO
formatter=verboseFormatter
args=("/var/log/mistral.log",)
[formatter_verboseFormatter]
format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s
datefmt=
[formatter_simpleFormatter]
format=%(asctime)s %(levelname)s [-] %(message)s
datefmt=
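A minimal sketch of consuming this sample: copy it to etc/logging.conf (the path is an assumption) and hand it to the standard library. Note the fileHandler requires /var/log/mistral.log to be writable.

import logging
import logging.config

# Load the INI-style config above; keep loggers created before this call.
logging.config.fileConfig('etc/logging.conf',
                          disable_existing_loggers=False)
logging.getLogger(__name__).info('logging configured')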


@@ -1,19 +1,3 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is the default configuration file for OpenStack Neat
[DEFAULT]
# The directory where log files will be created by the Neat services
@@ -66,15 +50,6 @@ local_data_directory = /var/lib/neat
# manager in seconds
local_manager_interval = 300
# The time interval between subsequent invocations of the data
# collector in seconds
data_collector_interval = 300
# The number of the latest data values stored locally by the data
# collector and passed to the underload / overload detection and VM
# placement algorithms
data_collector_data_length = 100
# The threshold on the overall (all cores) utilization of the physical
# CPU of a host, above which the host is considered to be overloaded.
# This is used for logging host overloads into the database.
@@ -148,3 +123,17 @@ algorithm_vm_placement_factory = neat.globals.vm_placement.bin_packing.best_fit_
# JSON-encoded parameters, which will be parsed and passed to the
# specified VM placement algorithm factory
algorithm_vm_placement_parameters = {"cpu_threshold": 0.8, "ram_threshold": 0.95, "last_n_vm_cpu": 2}
[global_manager]
[local_manager]
[collector]
# The time interval between subsequent invocations of the data
# collector in seconds
data_collector_interval = 300
# The number of the latest data values stored locally by the data
# collector and passed to the underload / overload detection and VM
# placement algorithms
data_collector_data_length = 100
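A sketch of reading the restructured file with the Python 2 stdlib used elsewhere in this codebase. The path matches the /etc/neat/neat.conf install location; the algorithm options are assumed to remain in [DEFAULT].

import json
import ConfigParser  # Python 2, as used by the existing config module

parser = ConfigParser.ConfigParser()
parser.read('/etc/neat/neat.conf')

# Options moved into the new [collector] section.
interval = parser.getint('collector', 'data_collector_interval')        # 300
data_length = parser.getint('collector', 'data_collector_data_length')  # 100
# The JSON-encoded algorithm parameters parse into a plain dict.
placement_params = json.loads(
    parser.get('DEFAULT', 'algorithm_vm_placement_parameters'))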

openstack-common.conf (new file, 17 lines)

@@ -0,0 +1,17 @@
[DEFAULT]
# The list of modules to copy from oslo-incubator.git
module=config.generator
module=log
module=jsonutils
module=lockutils
module=loopingcall
module=periodic_task
module=threadgroup
module=timeutils
module=importutils
module=strutils
module=uuidutils
# The base module to hold the copy of openstack.common
base=mistral

requirements.txt (new file, 33 lines)

@@ -0,0 +1,33 @@
alembic>=0.7.2
pbr>=0.6,!=0.7,<1.0
eventlet>=0.15.0
PyYAML>=3.1.0
pecan>=0.8.0
WSME>=0.6
amqplib>=0.6.1 # This is not in global requirements (master branch)
argparse
Babel>=1.3
iso8601>=0.1.9
posix_ipc
croniter>=0.3.4 # MIT License
requests>=1.2.1,!=2.4.0
kombu>=2.4.8
oslo.config>=1.4.0,<1.10.0 # Apache-2.0
oslo.db>=1.0.0 # Apache-2.0
oslo.messaging>=1.4.0
oslo.utils>=1.2.0,<1.5.0 # Apache-2.0
paramiko>=1.13.0
python-cinderclient>=1.1.0
python-heatclient>=0.2.9
python-keystoneclient>=0.10.0
python-neutronclient>=2.3.6,<3
python-novaclient>=2.18.0
python-glanceclient>=0.14.0
networkx>=1.8
six>=1.7.0
SQLAlchemy>=0.9.7,<=0.9.99
stevedore>=1.0.0 # Apache-2.0
yaql==0.2.4 # This is not in global requirements
jsonschema>=2.0.0,<3.0.0
mock>=1.0
keystonemiddleware>=1.0.0

run_tests.sh (new executable file, 226 lines)

@@ -0,0 +1,226 @@
#!/bin/bash
set -eu
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Mistral's test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
echo " -P, --no-pep8 Don't run static code checks"
echo " -c, --coverage Generate coverage report"
echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger."
echo " -h, --help Print this usage message"
echo " --virtual-env-path <path> Location of the virtualenv directory"
echo " Default: \$(pwd)"
echo " --virtual-env-name <name> Name of the virtualenv directory"
echo " Default: .venv"
echo " --tools-path <dir> Location of the tools directory"
echo " Default: \$(pwd)"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
function process_options {
i=1
while [ $i -le $# ]; do
case "${!i}" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-s|--no-site-packages) no_site_packages=1;;
-r|--recreate-db) recreate_db=1;;
-n|--no-recreate-db) recreate_db=0;;
-f|--force) force=1;;
-u|--update) update=1;;
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
-d|--debug) debug=1;;
--virtual-env-path)
(( i++ ))
venv_path=${!i}
;;
--virtual-env-name)
(( i++ ))
venv_dir=${!i}
;;
--tools-path)
(( i++ ))
tools_path=${!i}
;;
-*) testropts="$testropts ${!i}";;
*) testrargs="$testrargs ${!i}"
esac
(( i++ ))
done
}
tool_path=${tools_path:-$(pwd)}
venv_path=${venv_path:-$(pwd)}
venv_dir=${venv_name:-.venv}
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
no_site_packages=0
installvenvopts=
testrargs=
testropts=
wrapper=""
just_pep8=0
no_pep8=0
coverage=0
debug=0
recreate_db=1
update=0
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
process_options $@
# Make our paths available to other scripts we call
export venv_path
export venv_dir
export venv_name
export tools_dir
export venv=${venv_path}/${venv_dir}
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
function run_tests {
# Cleanup *pyc
${wrapper} find . -type f -name "*.pyc" -delete
if [ $debug -eq 1 ]; then
if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then
# Default to running all tests if specific test is not
# provided.
testrargs="discover ./mistral/tests/unit"
fi
${wrapper} python -m testtools.run $testropts $testrargs
# Short circuit because all of the testr and coverage stuff
# below does not make sense when running testtools.run for
# debugging purposes.
return $?
fi
if [ $coverage -eq 1 ]; then
TESTRTESTS="$TESTRTESTS --coverage"
else
TESTRTESTS="$TESTRTESTS --slowest"
fi
# Just run the test suites in current environment
set +e
testrargs=$(echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/')
TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testropts $testrargs'"
OS_TEST_PATH=$(echo $testrargs|grep -o 'mistral\.tests[^[:space:]:]*\+'|tr . /)
if [ -d "$OS_TEST_PATH" ]; then
wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper"
elif [ -d "$(dirname $OS_TEST_PATH)" ]; then
wrapper="OS_TEST_PATH=$(dirname $OS_TEST_PATH) $wrapper"
fi
echo "Running ${wrapper} $TESTRTESTS"
bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit"
RESULT=$?
set -e
copy_subunit_log
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
# Don't compute coverage for common code, which is tested elsewhere
${wrapper} coverage combine
${wrapper} coverage html --include='mistral/*' --omit='mistral/openstack/common/*' -d covhtml -i
fi
return $RESULT
}
function copy_subunit_log {
LOGNAME=$(cat .testrepository/next-stream)
LOGNAME=$(($LOGNAME - 1))
LOGNAME=".testrepository/${LOGNAME}"
cp $LOGNAME subunit.log
}
function run_pep8 {
echo "Running flake8 ..."
${wrapper} flake8
}
TESTRTESTS="python -m mistral.openstack.common.lockutils python setup.py testr"
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ $update -eq 1 ]; then
echo "Updating virtualenv..."
python tools/install_venv.py $installvenvopts
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py $installvenvopts
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py $installvenvopts
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi
if [ $recreate_db -eq 1 ]; then
rm -f tests.sqlite
fi
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (testropts), which begin with a '-', and
# arguments (testrargs).
if [ -z "$testrargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
fi
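The NOTE above hinges on the option/argument split made in process_options; in isolation the classification is just (a sketch):

def split_testr_args(argv):
    """Tokens starting with '-' are options; everything else is a test id."""
    opts, args = [], []
    for token in argv:
        (opts if token.startswith('-') else args).append(token)
    return opts, args

assert split_testr_args(['--parallel', 'tests.test_db']) == \
    (['--parallel'], ['tests.test_db'])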

setup.cfg (new file, 34 lines)

@@ -0,0 +1,34 @@
[metadata]
name = terracotta
summary = Dynamic Scheduling Service for OpenStack Cloud
description-file =
README.rst
license = Apache License, Version 2.0
home-page = https://launchpad.net/terracotta
classifiers =
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
#License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
author = OpenStack Terracotta Team
author-email = openstack-dev@lists.openstack.org
[files]
packages =
terracotta
[build_sphinx]
source-dir = doc/source
build-dir = doc/build
all_files = 1
[upload_sphinx]
upload-dir = doc/build/html
[entry_points]
console_scripts =
terracotta-server = terracotta.cmd.launch:main
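The console_scripts entry point means pbr/setuptools generate a terracotta-server launcher at install time. A direct-import equivalent of that generated wrapper (a sketch, not the literal generated file):

#!/usr/bin/env python
import sys

# Resolve the declared entry point terracotta.cmd.launch:main directly.
from terracotta.cmd.launch import main

if __name__ == '__main__':
    sys.exit(main())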

setup.py (104 lines changed)

@@ -1,100 +1,30 @@
# Copyright 2012 Anton Beloglazov
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The OpenStack Neat Project
==========================
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
OpenStack Neat is a project intended to provide an extension to
OpenStack implementing dynamic consolidation of Virtual Machines (VMs)
using live migration. The major objective of dynamic VM consolidation
is to improve the utilization of physical resources and reduce energy
consumption by re-allocating VMs using live migration according to
their real-time resource demand and switching idle hosts to the sleep
mode. Apart from consolidating VMs, the system should be able to react
to increases in the resource demand and deconsolidate VMs when
necessary to avoid performance degradation. In general, the problem of
dynamic VM consolidation includes 4 sub-problems: host underload /
overload detection, VM selection, and VM placement.
# In Python < 2.7.4, lazy loading of the `pbr` package will break
# setuptools if some other modules have registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
This work is conducted within the Cloud Computing and Distributed
Systems (CLOUDS) Laboratory (http://www.cloudbus.org/) at the
University of Melbourne. The problem of dynamic VM consolidation
considering Quality of Service (QoS) constraints has been studied from
the theoretical perspective and algorithms addressing the sub-problems
listed above have been proposed [1], [2]. The algorithms have been
evaluated using CloudSim (http://code.google.com/p/cloudsim/) and
real-world workload traces collected from more than a thousand
PlanetLab VMs hosted on servers located in more than 500 places around
the world.
The aim of the OpenStack Neat project is to provide an extensible
framework for dynamic consolidation of VMs based on the OpenStack
platform. The framework should provide an infrastructure enabling the
interaction of components implementing the decision-making algorithms.
The framework should allow configuration-driven switching of different
implementations of the decision-making algorithms. The implementation
of the framework will include the algorithms proposed in our previous
works [1], [2].
[1] Anton Beloglazov and Rajkumar Buyya, "Optimal Online Deterministic
Algorithms and Adaptive Heuristics for Energy and Performance
Efficient Dynamic Consolidation of Virtual Machines in Cloud Data
Centers", Concurrency and Computation: Practice and Experience (CCPE),
Volume 24, Issue 13, Pages: 1397-1420, John Wiley & Sons, Ltd, New
York, USA, 2012. Download:
http://beloglazov.info/papers/2012-optimal-algorithms-ccpe.pdf
[2] Anton Beloglazov and Rajkumar Buyya, "Managing Overloaded Hosts
for Dynamic Consolidation of Virtual Machines in Cloud Data Centers
Under Quality of Service Constraints", IEEE Transactions on Parallel
and Distributed Systems (TPDS), IEEE CS Press, USA, 2012 (in press,
accepted on August 2, 2012). Download:
http://beloglazov.info/papers/2012-host-overload-detection-tpds.pdf
"""
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name='openstack-neat',
version='0.1',
description='The OpenStack Neat Project',
long_description=__doc__,
author='Anton Beloglazov',
author_email='anton.beloglazov@gmail.com',
url='https://github.com/beloglazov/openstack-neat',
platforms='any',
include_package_data=True,
license='LICENSE',
packages=find_packages(),
test_suite='tests',
tests_require=['pyqcy', 'mocktest', 'PyContracts'],
entry_points = {
'console_scripts': [
'neat-data-collector = neat.locals.collector:start',
'neat-local-manager = neat.locals.manager:start',
'neat-global-manager = neat.globals.manager:start',
'neat-db-cleaner = neat.globals.db_cleaner:start',
]
},
data_files = [('/etc/init.d', ['init.d/openstack-neat-data-collector',
'init.d/openstack-neat-local-manager',
'init.d/openstack-neat-global-manager',
'init.d/openstack-neat-db-cleaner']),
('/etc/neat', ['neat.conf'])],
)
setuptools.setup(
setup_requires=['pbr'],
pbr=True)


@@ -1,7 +0,0 @@
1. Create a MySQL database and user for OpenStack Neat:
```
CREATE DATABASE neat;
GRANT ALL ON neat.* TO 'neat'@'controller' IDENTIFIED BY 'neatpassword';
GRANT ALL ON neat.* TO 'neat'@'%' IDENTIFIED BY 'neatpassword';
```


@@ -1,18 +0,0 @@
#!/bin/sh
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sudo pip2 install --upgrade pyqcy mocktest PyContracts SQLAlchemy bottle requests Sphinx python-novaclient
sudo pacman -S python2-numpy python2-scipy


@@ -1,19 +0,0 @@
#!/bin/sh
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sudo yum install -y python-pip numpy scipy libvirt-python
sudo pip install --upgrade pyqcy PyContracts SQLAlchemy bottle requests Sphinx python-novaclient
sudo pip install mocktest


@@ -1,6 +0,0 @@
#!/bin/sh
chkconfig --add openstack-neat-data-collector
chkconfig --add openstack-neat-db-cleaner
chkconfig --add openstack-neat-global-manager
chkconfig --add openstack-neat-local-manager


@@ -25,22 +25,20 @@ eventlet.monkey_patch(
import os
# If ../mistral/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'mistral', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from wsgiref import simple_server
from mistral.api import app
from terracotta import config
from terracotta import rpc
from terracotta.locals import collector
from terracotta.locals import manager as local_mgr
from terracotta.globals import manager as global_mgr
from mistral import context as ctx
@@ -55,41 +53,14 @@ from mistral import version
LOG = logging.getLogger(__name__)
def launch_executor(transport):
def launch_lm(transport):
target = messaging.Target(
topic=cfg.CONF.executor.topic,
server=cfg.CONF.executor.host
topic=cfg.CONF.local_manager.topic,
server=cfg.CONF.local_manager.host
)
executor_v2 = def_executor.DefaultExecutor(rpc.get_engine_client())
endpoints = [rpc.ExecutorServer(executor_v2)]
server = messaging.get_rpc_server(
transport,
target,
endpoints,
executor='eventlet',
serializer=ctx.RpcContextSerializer(ctx.JsonPayloadSerializer())
)
server.start()
server.wait()
def launch_engine(transport):
target = messaging.Target(
topic=cfg.CONF.engine.topic,
server=cfg.CONF.engine.host
)
engine_v2 = def_eng.DefaultEngine(rpc.get_engine_client())
endpoints = [rpc.EngineServer(engine_v2)]
# Setup scheduler in engine.
db_api.setup_db()
scheduler.setup()
local_manager = local_mgr.LocalManager()
endpoints = [rpc.LocalManagerServer(local_manager)]
server = messaging.get_rpc_server(
transport,
@@ -109,13 +80,8 @@ def launch_gm(transport):
server=cfg.CONF.global_manager.host
)
engine_v2 = def_eng.DefaultEngine(rpc.get_engine_client())
endpoints = [rpc.EngineServer(engine_v2)]
# Setup scheduler in engine.
db_api.setup_db()
scheduler.setup()
global_manager = global_mgr.GlobalManager()
endpoints = [rpc.GlobalManagerServer(global_manager)]
server = messaging.get_rpc_server(
transport,
@@ -129,24 +95,28 @@ def launch_gm(transport):
server.wait()
def launch_api(transport):
host = cfg.CONF.api.host
port = cfg.CONF.api.port
server = simple_server.make_server(
host,
port,
app.setup_app()
def launch_collector(transport):
target = messaging.Target(
topic=cfg.CONF.collector.topic,
server=cfg.CONF.collector.host
)
LOG.info("Mistral API is serving on http://%s:%s (PID=%s)" %
(host, port, os.getpid()))
collector_service = collector.Collector()
endpoints = [rpc.GlobalManagerServer(collector_service)]
server.serve_forever()
server = messaging.get_rpc_server(
transport,
target,
endpoints,
executor='eventlet',
serializer=ctx.RpcContextSerializer(ctx.JsonPayloadSerializer())
)
server.start()
server.wait()
def launch_any(transport, options):
# Launch the servers on different threads.
threads = [eventlet.spawn(LAUNCH_OPTIONS[option], transport)
for option in options]
@@ -156,7 +126,6 @@ def launch_any(transport, options):
LAUNCH_OPTIONS = {
# 'api': launch_api,
'global-manager': launch_gm,
'local-collector': launch_collector,
'local-manager': launch_lm
@@ -175,7 +144,7 @@ Terracotta Dynamic Scheduling Service, version %s
""" % version.version_string()
def print_server_info():
def print_service_info():
print(TERRACOTTA_TITLE)
comp_str = ("[%s]" % ','.join(LAUNCH_OPTIONS)
@@ -187,16 +156,10 @@ def print_server_info():
def main():
try:
config.parse_args()
print_server_info()
print_service_info()
logging.setup(cfg.CONF, 'Terracotta')
transport = rpc.get_transport()
# Validate launch option.
if set(cfg.CONF.server) - set(LAUNCH_OPTIONS.keys()):
raise Exception('Valid options are all or any combination of '
'global-manager, local-collector, and local-manager.')
# Launch distinct set of server(s).
launch_any(transport, set(cfg.CONF.server))
except RuntimeError as excp:


@@ -1,10 +1,11 @@
# Copyright 2012 Anton Beloglazov
# Copyright 2015 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,60 +15,17 @@
""" The functions from this module are shared by other components.
"""
import json
import numpy
import os
import re
import subprocess
import time
from contracts import contract
from neat.contracts_primitive import *
from neat.contracts_extra import *
import os
import time
import json
import re
import numpy
import subprocess
from neat.config import *
from neat.db_utils import *
import logging
log = logging.getLogger(__name__)
@contract
def start(init_state, execute, config, time_interval, iterations=-1):
""" Start the processing loop.
:param init_state: A function accepting a config and
returning a state dictionary.
:type init_state: function
:param execute: A function performing the processing at each iteration.
:type execute: function
:param config: A config dictionary.
:type config: dict(str: *)
:param time_interval: The time interval to wait between iterations.
:type time_interval: int
:param iterations: The number of iterations to perform, -1 for infinite.
:type iterations: int
:return: The final state.
:rtype: dict(str: *)
"""
state = init_state(config)
if iterations == -1:
while True:
state = execute(config, state)
time.sleep(time_interval)
else:
for _ in xrange(iterations):
state = execute(config, state)
time.sleep(time_interval)
return state
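An illustrative driver for the loop above, with hypothetical init_state/execute callables: five iterations one second apart, returning the final state.

# Hypothetical callables; any pair matching the contract above works.
def init_state(config):
    return {'iteration': 0}

def execute(config, state):
    state['iteration'] += 1
    print('iteration %d' % state['iteration'])
    return state

# Five iterations, one second apart; returns the final state dict.
final_state = start(init_state, execute, {}, 1, iterations=5)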
from terracotta.contracts_primitive import *
from terracotta.contracts_extra import *
@contract
@@ -133,7 +91,7 @@ def physical_cpu_mhz_total(vir_connection):
:rtype: int
"""
return physical_cpu_count(vir_connection) * \
physical_cpu_mhz(vir_connection)
physical_cpu_mhz(vir_connection)
@contract
@@ -154,57 +112,6 @@ def frange(start, end, step):
start += step
@contract
def init_logging(log_directory, log_file, log_level):
""" Initialize the logging system.
:param log_directory: The directory to store log files.
:type log_directory: str
:param log_file: The file name to store log messages.
:type log_file: str
:param log_level: The level of emitted log messages.
:type log_level: int
:return: Whether the logging system has been initialized.
:rtype: bool
"""
if log_level == 0:
logging.disable(logging.CRITICAL)
return True
if not os.access(log_file, os.F_OK):
if not os.access(log_directory, os.F_OK):
os.makedirs(log_directory)
elif not os.access(log_directory, os.W_OK):
raise IOError(
'Cannot write to the log directory: ' + log_directory)
elif not os.access(log_file, os.W_OK):
raise IOError('Cannot write to the log file: ' + log_file)
if log_level == 3:
level = logging.DEBUG
elif log_level == 2:
level = logging.INFO
else:
level = logging.WARNING
logger = logging.root
logger.handlers = []
logger.filters = []
logger.setLevel(level)
handler = logging.FileHandler(
os.path.join(log_directory, log_file))
handler.setFormatter(
logging.Formatter(
'%(asctime)s %(levelname)-8s %(name)s %(message)s'))
logger.addHandler(handler)
return True
@contract
def call_function_by_name(name, args):
""" Call a function specified by a fully qualified name.


@ -1,4 +1,5 @@
# Copyright 2012 Anton Beloglazov
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,119 +22,6 @@ from oslo_log import log as logging
from terracotta import version
from contracts import contract
import os
import ConfigParser
log = logging.getLogger(__name__)
# This is the default config, which should not be modified
DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(__file__),
'..',
'neat.conf')
# This is the custom config, which may override the defaults
CONFIG_PATH = "/etc/neat/neat.conf"
# The following value is used for testing purposes
#CONFIG_PATH = os.path.join(os.path.dirname(__file__),
# '..',
# 'neat.conf')
# These fields must be present in the configuration file
REQUIRED_FIELDS = [
'log_directory',
'log_level',
'vm_instance_directory',
'sql_connection',
'os_admin_tenant_name',
'os_admin_user',
'os_admin_password',
'os_auth_url',
'compute_hosts',
'global_manager_host',
'global_manager_port',
'db_cleaner_interval',
'local_data_directory',
'local_manager_interval',
'data_collector_interval',
'data_collector_data_length',
'host_cpu_overload_threshold',
'host_cpu_usable_by_vms',
'compute_user',
'compute_password',
'sleep_command',
'ether_wake_interface',
'block_migration',
'network_migration_bandwidth',
'algorithm_underload_detection_factory',
'algorithm_underload_detection_parameters',
'algorithm_overload_detection_factory',
'algorithm_overload_detection_parameters',
'algorithm_vm_selection_factory',
'algorithm_vm_selection_parameters',
'algorithm_vm_placement_factory',
'algorithm_vm_placement_parameters',
]
@contract
def read_config(paths):
""" Read the configuration files and return the options.
:param paths: A list of required configuration file paths.
:type paths: list(str)
:return: A dictionary of the configuration options.
:rtype: dict(str: str)
"""
configParser = ConfigParser.ConfigParser()
for path in paths:
configParser.read(path)
return dict(configParser.items("DEFAULT"))
@contract
def validate_config(config, required_fields):
""" Check that the config contains all the required fields.
:param config: A config dictionary to check.
:type config: dict(str: str)
:param required_fields: A list of required fields.
:type required_fields: list(str)
:return: Whether the config is valid.
:rtype: bool
"""
for field in required_fields:
if field not in config:
return False
return True
@contract
def read_and_validate_config(paths, required_fields):
""" Read the configuration files, validate and return the options.
:param paths: A list of required configuration file paths.
:type paths: list(str)
:param required_fields: A list of required fields.
:type required_fields: list(str)
:return: A dictionary of the configuration options.
:rtype: dict(str: str)
"""
config = read_config(paths)
if not validate_config(config, required_fields):
message = 'The config dictionary does not contain ' + \
'all the required fields'
log.critical(message)
raise KeyError(message)
return config
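Putting the three helpers together (paths and field list are the module-level constants defined above):

config = read_and_validate_config([DEFAULT_CONFIG_PATH, CONFIG_PATH],
                                  REQUIRED_FIELDS)
# A missing required field raises KeyError after a critical log message.
interval = int(config['local_manager_interval'])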
launch_opt = cfg.ListOpt(
'server',
@@ -171,7 +59,7 @@ use_debugger = cfg.BoolOpt(
'Use at your own risk.'
)
engine_opts = [
global_manager_opts = [
cfg.StrOpt('engine', default='default',
help='Mistral engine plugin'),
cfg.StrOpt('host', default='0.0.0.0',
@@ -184,7 +72,7 @@ engine_opts = [
help='The version of the engine.')
]
executor_opts = [
local_manager_opts = [
cfg.StrOpt('host', default='0.0.0.0',
help='Name of the local manager node. This can be an opaque '
'identifier. It is not necessarily a hostname, '
@@ -195,60 +83,28 @@ executor_opts = [
help='The version of the local manager.')
]
wf_trace_log_name_opt = cfg.StrOpt(
'workflow_trace_log_name',
default='workflow_trace',
help='Logger name for pretty '
'workflow trace output.'
)
collector_opts = [
cfg.StrOpt('host', default='0.0.0.0',
help='Name of the collector node. This can be an opaque '
'identifier. It is not necessarily a hostname, '
'FQDN, or IP address.'),
cfg.StrOpt('topic', default='collector',
help='The message topic that the collector listens on.'),
cfg.StrOpt('version', default='1.0',
help='The version of the collector.')
]
CONF = cfg.CONF
CONF.register_opts(api_opts, group='api')
CONF.register_opts(engine_opts, group='engine')
CONF.register_opts(pecan_opts, group='pecan')
CONF.register_opts(executor_opts, group='executor')
CONF.register_opt(wf_trace_log_name_opt)
CONF.register_opts(api_opts, group='api')
CONF.register_opts(global_manager_opts, group='global_manager')
CONF.register_opts(local_manager_opts, group='local_manager')
CONF.register_opts(collector_opts, group='collector')
CONF.register_cli_opt(use_debugger)
CONF.register_cli_opt(launch_opt)
CONF.import_opt('verbose', 'mistral.openstack.common.log')
CONF.set_default('verbose', True)
CONF.import_opt('debug', 'mistral.openstack.common.log')
CONF.import_opt('log_dir', 'mistral.openstack.common.log')
CONF.import_opt('log_file', 'mistral.openstack.common.log')
CONF.import_opt('log_config_append', 'mistral.openstack.common.log')
CONF.import_opt('log_format', 'mistral.openstack.common.log')
CONF.import_opt('log_date_format', 'mistral.openstack.common.log')
CONF.import_opt('use_syslog', 'mistral.openstack.common.log')
CONF.import_opt('syslog_log_facility', 'mistral.openstack.common.log')
# Extend oslo default_log_levels to include some that are useful for mistral.
# Some are in oslo logging already; this just makes sure they stay that way.
default_log_levels = cfg.CONF.default_log_levels
logs_to_quieten = [
'sqlalchemy=WARN',
'oslo.messaging=INFO',
'iso8601=WARN',
'eventlet.wsgi.server=WARN',
'stevedore=INFO',
'mistral.openstack.common.loopingcall=INFO',
'mistral.openstack.common.periodic_task=INFO',
'mistral.services.periodic=INFO'
]
for chatty in logs_to_quieten:
if chatty not in default_log_levels:
default_log_levels.append(chatty)
cfg.set_defaults(
log.log_opts,
default_log_levels=default_log_levels
)
def parse_args(args=None, usage=None, default_config_files=None):
CONF(


@@ -1,10 +1,11 @@
# Copyright 2012 Anton Beloglazov
# Copyright 2015 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,29 +13,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from contracts import contract
from neat.contracts_primitive import *
import datetime
from sqlalchemy import *
from sqlalchemy.engine.base import Connection
import logging
log = logging.getLogger(__name__)
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class Database(object):
""" A class representing the database, where fields are tables.
"""
@contract(connection=Connection,
hosts=Table,
host_resource_usage=Table,
vms=Table,
vm_resource_usage=Table,
vm_migrations=Table,
host_states=Table,
host_overload=Table)
def __init__(self, connection, hosts, host_resource_usage, vms,
vm_resource_usage, vm_migrations, host_states, host_overload):
""" Initialize the database.
@@ -56,36 +48,28 @@ class Database(object):
self.vm_migrations = vm_migrations
self.host_states = host_states
self.host_overload = host_overload
log.debug('Instantiated a Database object')
LOG.debug('Instantiated a Database object')
@contract
def select_cpu_mhz_for_vm(self, uuid, n):
""" Select n last values of CPU MHz for a VM UUID.
:param uuid: The UUID of a VM.
:type uuid: str[36]
:param n: The number of last values to select.
:type n: int,>0
:return: The list of n last CPU MHz values.
:rtype: list(int)
"""
sel = select([self.vm_resource_usage.c.cpu_mhz]). \
where(and_(
self.vms.c.id == self.vm_resource_usage.c.vm_id,
self.vms.c.uuid == uuid)). \
self.vms.c.id == self.vm_resource_usage.c.vm_id,
self.vms.c.uuid == uuid)). \
order_by(self.vm_resource_usage.c.id.desc()). \
limit(n)
res = self.connection.execute(sel).fetchall()
return list(reversed([int(x[0]) for x in res]))
@contract
def select_last_cpu_mhz_for_vms(self):
""" Select the last value of CPU MHz for all the VMs.
:return: A dict of VM UUIDs to the last CPU MHz values.
:rtype: dict(str: int)
"""
vru1 = self.vm_resource_usage
vru2 = self.vm_resource_usage.alias()
@@ -105,31 +89,25 @@ class Database(object):
vms_last_mhz[str(uuid)] = 0
return vms_last_mhz
@contract
def select_vm_id(self, uuid):
""" Select the ID of a VM by the VM UUID, or insert a new record.
:param uuid: The UUID of a VM.
:type uuid: str[36]
:return: The ID of the VM.
:rtype: int
"""
sel = select([self.vms.c.id]).where(self.vms.c.uuid == uuid)
row = self.connection.execute(sel).fetchone()
if row is None:
id = self.vms.insert().execute(uuid=uuid).inserted_primary_key[0]
log.info('Created a new DB record for a VM %s, id=%d', uuid, id)
LOG.info('Created a new DB record for a VM %s, id=%d', uuid, id)
return int(id)
else:
return int(row['id'])
@contract
def insert_vm_cpu_mhz(self, data):
""" Insert a set of CPU MHz values for a set of VMs.
:param data: A dictionary of VM UUIDs and CPU MHz values.
:type data: dict(str : int)
"""
if data:
query = []
@@ -139,24 +117,14 @@ class Database(object):
'cpu_mhz': cpu_mhz})
self.vm_resource_usage.insert().execute(query)
@contract
def update_host(self, hostname, cpu_mhz, cpu_cores, ram):
""" Insert new or update the corresponding host record.
:param hostname: A host name.
:type hostname: str
:param cpu_mhz: The total CPU frequency of the host in MHz.
:type cpu_mhz: int,>0
:param cpu_cores: The number of physical CPU cores.
:type cpu_cores: int,>0
:param ram: The total amount of RAM of the host in MB.
:type ram: long,>0
:return: The ID of the host.
:rtype: int
"""
sel = select([self.hosts.c.id]). \
where(self.hosts.c.hostname == hostname)
@@ -167,7 +135,7 @@ class Database(object):
cpu_mhz=cpu_mhz,
cpu_cores=cpu_cores,
ram=ram).inserted_primary_key[0]
log.info('Created a new DB record for a host %s, id=%d',
LOG.info('Created a new DB record for a host %s, id=%d',
hostname, id)
return int(id)
else:
@@ -178,48 +146,36 @@ class Database(object):
ram=ram))
return int(row['id'])
@contract
def insert_host_cpu_mhz(self, hostname, cpu_mhz):
""" Insert a CPU MHz value for a host.
:param hostname: A host name.
:type hostname: str
:param cpu_mhz: The CPU usage of the host in MHz.
:type cpu_mhz: int
"""
self.host_resource_usage.insert().execute(
host_id=self.select_host_id(hostname),
cpu_mhz=cpu_mhz)
@contract
def select_cpu_mhz_for_host(self, hostname, n):
""" Select n last values of CPU MHz for a host.
:param hostname: A host name.
:type hostname: str
:param n: The number of last values to select.
:type n: int,>0
:return: The list of n last CPU MHz values.
:rtype: list(int)
"""
sel = select([self.host_resource_usage.c.cpu_mhz]). \
where(and_(
self.hosts.c.id == self.host_resource_usage.c.host_id,
self.hosts.c.hostname == hostname)). \
self.hosts.c.id == self.host_resource_usage.c.host_id,
self.hosts.c.hostname == hostname)). \
order_by(self.host_resource_usage.c.id.desc()). \
limit(n)
res = self.connection.execute(sel).fetchall()
return list(reversed([int(x[0]) for x in res]))
@contract
def select_last_cpu_mhz_for_hosts(self):
""" Select the last value of CPU MHz for all the hosts.
:return: A dict of host names to the last CPU MHz values.
:rtype: dict(str: int)
"""
hru1 = self.host_resource_usage
hru2 = self.host_resource_usage.alias()
@@ -227,7 +183,7 @@ class Database(object):
hru1.outerjoin(hru2, and_(
hru1.c.host_id == hru2.c.host_id,
hru1.c.id < hru2.c.id))]). \
where(hru2.c.id == None)
where(hru2.c.id == None)
hosts_cpu_mhz = dict(self.connection.execute(sel).fetchall())
sel = select([self.hosts.c.id, self.hosts.c.hostname])
@@ -241,12 +197,10 @@ class Database(object):
hosts_last_mhz[str(hostname)] = 0
return hosts_last_mhz
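The queries above use the classic "latest row per group" anti-join: outer-join the table to an alias of itself on (same group, larger id) and keep rows with no larger-id partner. In isolation, against a minimal table definition (a sketch in the SQLAlchemy 0.9 style this module uses):

from sqlalchemy import (MetaData, Table, Column, Integer,
                        and_, select)

metadata = MetaData()
usage = Table('host_resource_usage', metadata,
              Column('id', Integer, primary_key=True),
              Column('host_id', Integer),
              Column('cpu_mhz', Integer))

u2 = usage.alias()
latest = select([usage.c.host_id, usage.c.cpu_mhz],
                from_obj=[usage.outerjoin(u2, and_(
                    usage.c.host_id == u2.c.host_id,
                    usage.c.id < u2.c.id))]). \
    where(u2.c.id == None)  # rows with no newer sample per host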
@contract
def select_host_characteristics(self):
""" Select the characteristics of all the hosts.
:return: Three dicts of hostnames to CPU MHz, cores, and RAM.
:rtype: tuple(dict(str: int), dict(str: int), dict(str: int))
"""
hosts_cpu_mhz = {}
hosts_cpu_cores = {}
@@ -258,15 +212,11 @@ class Database(object):
hosts_ram[hostname] = int(x[4])
return hosts_cpu_mhz, hosts_cpu_cores, hosts_ram
@contract
def select_host_id(self, hostname):
""" Select the ID of a host.
:param hostname: A host name.
:type hostname: str
:return: The ID of the host.
:rtype: int
"""
sel = select([self.hosts.c.id]). \
where(self.hosts.c.hostname == hostname)
@@ -275,44 +225,36 @@ class Database(object):
raise LookupError('No host found for hostname: %s' % hostname)
return int(row['id'])
@contract
def select_host_ids(self):
""" Select the IDs of all the hosts.
:return: A dict of host names to IDs.
:rtype: dict(str: int)
"""
return dict((str(x[1]), int(x[0]))
for x in self.hosts.select().execute().fetchall())
@contract(datetime_threshold=datetime.datetime)
def cleanup_vm_resource_usage(self, datetime_threshold):
""" Delete VM resource usage data older than the threshold.
:param datetime_threshold: A datetime threshold.
:type datetime_threshold: datetime.datetime
"""
self.connection.execute(
self.vm_resource_usage.delete().where(
self.vm_resource_usage.c.timestamp < datetime_threshold))
@contract(datetime_threshold=datetime.datetime)
def cleanup_host_resource_usage(self, datetime_threshold):
""" Delete host resource usage data older than the threshold.
:param datetime_threshold: A datetime threshold.
:type datetime_threshold: datetime.datetime
"""
self.connection.execute(
self.host_resource_usage.delete().where(
self.host_resource_usage.c.timestamp < datetime_threshold))
@contract
def insert_host_states(self, hosts):
""" Insert host states for a set of hosts.
:param hosts: A dict of hostnames to states (0, 1).
:type hosts: dict(str: int)
"""
host_ids = self.select_host_ids()
to_insert = [{'host_id': host_ids[k],
@@ -321,12 +263,10 @@ class Database(object):
self.connection.execute(
self.host_states.insert(), to_insert)
@contract
def select_host_states(self):
""" Select the current states of all the hosts.
:return: A dict of host names to states.
:rtype: dict(str: int)
"""
hs1 = self.host_states
hs2 = self.host_states.alias()
@@ -334,7 +274,7 @@ class Database(object):
hs1.outerjoin(hs2, and_(
hs1.c.host_id == hs2.c.host_id,
hs1.c.id < hs2.c.id))]). \
where(hs2.c.id == None)
where(hs2.c.id == None)
data = dict(self.connection.execute(sel).fetchall())
host_ids = self.select_host_ids()
host_states = {}
@@ -345,51 +285,39 @@ class Database(object):
host_states[str(host)] = 1
return host_states
@contract
def select_active_hosts(self):
""" Select the currently active hosts.
:return: A list of host names.
:rtype: list(str)
"""
return [host
for host, state in self.select_host_states().items()
if state == 1]
@contract
def select_inactive_hosts(self):
""" Select the currently inactive hosts.
:return: A list of host names.
:rtype: list(str)
"""
return [host
for host, state in self.select_host_states().items()
if state == 0]
@contract
def insert_host_overload(self, hostname, overload):
""" Insert whether a host is overloaded.
:param hostname: A host name.
:type hostname: str
:param overload: Whether the host is overloaded.
:type overload: bool
"""
self.host_overload.insert().execute(
host_id=self.select_host_id(hostname),
overload=int(overload))
@contract
def insert_vm_migration(self, vm, hostname):
""" Insert a VM migration.
:param vm: A VM UUID.
:type vm: str[36]
:param hostname: A host name.
:type hostname: str
"""
self.vm_migrations.insert().execute(
vm_id=self.select_vm_id(vm),

terracotta/exceptions.py (new, empty file)

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -113,215 +113,177 @@ from neat.config import *
from neat.db_utils import *
import logging
from terracotta.openstack.common import service
log = logging.getLogger(__name__)
@contract
def start():
""" Start the local manager loop.
class LocalManager(service.Service):
:return: The final state.
:rtype: dict(str: *)
"""
config = read_and_validate_config([DEFAULT_CONFIG_PATH, CONFIG_PATH],
REQUIRED_FIELDS)
def __init__(self):
super(LocalManager, self).__init__()
self.state = self.init_state()
common.init_logging(
config['log_directory'],
'local-manager.log',
int(config['log_level']))
self.tg.add_dynamic_timer(
self.execute,
self.state,
initial_delay=initial_delay,
periodic_interval_max=self.periodic_interval_max
)
interval = config['local_manager_interval']
if log.isEnabledFor(logging.INFO):
log.info('Starting the local manager, ' +
'iterations every %s seconds', interval)
return common.start(
init_state,
execute,
config,
int(interval))
def init_state(self):
""" Initialize a dict for storing the state of the local manager.
:param config: A config dictionary.
:type config: dict(str: *)
@contract
def init_state(config):
""" Initialize a dict for storing the state of the local manager.
:return: A dictionary containing the initial state of the local manager.
:rtype: dict
"""
vir_connection = libvirt.openReadOnly(None)
if vir_connection is None:
message = 'Failed to open a connection to the hypervisor'
log.critical(message)
raise OSError(message)
:param config: A config dictionary.
:type config: dict(str: *)
physical_cpu_mhz_total = int(
common.physical_cpu_mhz_total(vir_connection) *
float(config['host_cpu_usable_by_vms']))
return {'previous_time': 0.,
'vir_connection': vir_connection,
'db': init_db(config['sql_connection']),
'physical_cpu_mhz_total': physical_cpu_mhz_total,
'hostname': vir_connection.getHostname(),
'hashed_username': sha1(config['os_admin_user']).hexdigest(),
'hashed_password': sha1(config['os_admin_password']).hexdigest()}
:return: A dictionary containing the initial state of the local manager.
:rtype: dict
"""
vir_connection = libvirt.openReadOnly(None)
if vir_connection is None:
message = 'Failed to open a connection to the hypervisor'
log.critical(message)
raise OSError(message)
def execute(self, state):
""" Execute an iteration of the local manager.
physical_cpu_mhz_total = int(
common.physical_cpu_mhz_total(vir_connection) *
float(config['host_cpu_usable_by_vms']))
return {'previous_time': 0.,
'vir_connection': vir_connection,
'db': init_db(config['sql_connection']),
'physical_cpu_mhz_total': physical_cpu_mhz_total,
'hostname': vir_connection.getHostname(),
'hashed_username': sha1(config['os_admin_user']).hexdigest(),
'hashed_password': sha1(config['os_admin_password']).hexdigest()}
1. Read the data on resource usage by the VMs running on the host from
the <local_data_directory>/vm directory.
2. Call the function specified in the algorithm_underload_detection
configuration option and pass the data on the resource usage by the
VMs, as well as the frequency of the CPU as arguments.
@contract
def execute(config, state):
""" Execute an iteration of the local manager.
3. If the host is underloaded, send a request to the REST API of the
global manager and pass a list of the UUIDs of all the VMs
currently running on the host in the vm_uuids parameter, as well as
the reason for migration as being 0.
1. Read the data on resource usage by the VMs running on the host from
the <local_data_directory>/vm directory.
4. If the host is not underloaded, call the function specified in the
algorithm_overload_detection configuration option and pass the data
on the resource usage by the VMs, as well as the frequency of the
host's CPU as arguments.
2. Call the function specified in the algorithm_underload_detection
configuration option and pass the data on the resource usage by the
VMs, as well as the frequency of the CPU as arguments.
5. If the host is overloaded, call the function specified in the
algorithm_vm_selection configuration option and pass the data on
the resource usage by the VMs, as well as the frequency of the
host's CPU as arguments
3. If the host is underloaded, send a request to the REST API of the
global manager and pass a list of the UUIDs of all the VMs
currently running on the host in the vm_uuids parameter, as well as
the reason for migration as being 0.
6. If the host is overloaded, send a request to the REST API of the
global manager and pass a list of the UUIDs of the VMs selected by
the VM selection algorithm in the vm_uuids parameter, as well as
the reason for migration as being 1.
4. If the host is not underloaded, call the function specified in the
algorithm_overload_detection configuration option and pass the data
on the resource usage by the VMs, as well as the frequency of the
host's CPU as arguments.
:param config: A config dictionary.
:type config: dict(str: *)
5. If the host is overloaded, call the function specified in the
algorithm_vm_selection configuration option and pass the data on
the resource usage by the VMs, as well as the frequency of the
host's CPU as arguments
:param state: A state dictionary.
:type state: dict(str: *)
6. If the host is overloaded, send a request to the REST API of the
global manager and pass a list of the UUIDs of the VMs selected by
the VM selection algorithm in the vm_uuids parameter, as well as
the reason for migration as being 1.
:return: The updated state dictionary.
:rtype: dict(str: *)
"""
log.info('Started an iteration')
vm_path = common.build_local_vm_path(config['local_data_directory'])
vm_cpu_mhz = get_local_vm_data(vm_path)
vm_ram = get_ram(state['vir_connection'], vm_cpu_mhz.keys())
vm_cpu_mhz = cleanup_vm_data(vm_cpu_mhz, vm_ram.keys())
:param config: A config dictionary.
:type config: dict(str: *)
if not vm_cpu_mhz:
if log.isEnabledFor(logging.INFO):
log.info('The host is idle')
log.info('Skipped an iteration')
return state
:param state: A state dictionary.
:type state: dict(str: *)
host_path = common.build_local_host_path(config['local_data_directory'])
host_cpu_mhz = get_local_host_data(host_path)
:return: The updated state dictionary.
:rtype: dict(str: *)
"""
log.info('Started an iteration')
vm_path = common.build_local_vm_path(config['local_data_directory'])
vm_cpu_mhz = get_local_vm_data(vm_path)
vm_ram = get_ram(state['vir_connection'], vm_cpu_mhz.keys())
vm_cpu_mhz = cleanup_vm_data(vm_cpu_mhz, vm_ram.keys())
host_cpu_utilization = vm_mhz_to_percentage(
vm_cpu_mhz.values(),
host_cpu_mhz,
state['physical_cpu_mhz_total'])
if log.isEnabledFor(logging.DEBUG):
log.debug('The total physical CPU MHz: %s', str(state['physical_cpu_mhz_total']))
log.debug('VM CPU MHz: %s', str(vm_cpu_mhz))
log.debug('Host CPU MHz: %s', str(host_cpu_mhz))
log.debug('CPU utilization: %s', str(host_cpu_utilization))
if not host_cpu_utilization:
log.info('Not enough data yet - skipping to the next iteration')
log.info('Skipped an iteration')
return state
time_step = int(config['data_collector_interval'])
migration_time = common.calculate_migration_time(
vm_ram, float(config['network_migration_bandwidth']))
if 'underload_detection' not in state:
underload_detection_params = common.parse_parameters(
config['algorithm_underload_detection_parameters'])
underload_detection = common.call_function_by_name(
config['algorithm_underload_detection_factory'],
[time_step,
migration_time,
underload_detection_params])
state['underload_detection'] = underload_detection
state['underload_detection_state'] = {}
overload_detection_params = common.parse_parameters(
config['algorithm_overload_detection_parameters'])
overload_detection = common.call_function_by_name(
config['algorithm_overload_detection_factory'],
[time_step,
migration_time,
overload_detection_params])
state['overload_detection'] = overload_detection
state['overload_detection_state'] = {}
vm_selection_params = common.parse_parameters(
config['algorithm_vm_selection_parameters'])
vm_selection = common.call_function_by_name(
config['algorithm_vm_selection_factory'],
[time_step,
migration_time,
vm_selection_params])
state['vm_selection'] = vm_selection
state['vm_selection_state'] = {}
else:
underload_detection = state['underload_detection']
overload_detection = state['overload_detection']
vm_selection = state['vm_selection']
if not vm_cpu_mhz:
if log.isEnabledFor(logging.INFO):
log.info('The host is idle')
log.info('Skipped an iteration')
return state
host_path = common.build_local_host_path(config['local_data_directory'])
host_cpu_mhz = get_local_host_data(host_path)
host_cpu_utilization = vm_mhz_to_percentage(
vm_cpu_mhz.values(),
host_cpu_mhz,
state['physical_cpu_mhz_total'])
if log.isEnabledFor(logging.DEBUG):
log.debug('The total physical CPU MHz: %s', str(state['physical_cpu_mhz_total']))
log.debug('VM CPU MHz: %s', str(vm_cpu_mhz))
log.debug('Host CPU MHz: %s', str(host_cpu_mhz))
log.debug('CPU utilization: %s', str(host_cpu_utilization))
if not host_cpu_utilization:
log.info('Not enough data yet - skipping to the next iteration')
log.info('Skipped an iteration')
return state
time_step = int(config['data_collector_interval'])
migration_time = common.calculate_migration_time(
vm_ram, float(config['network_migration_bandwidth']))
if 'underload_detection' not in state:
underload_detection_params = common.parse_parameters(
config['algorithm_underload_detection_parameters'])
underload_detection = common.call_function_by_name(
config['algorithm_underload_detection_factory'],
[time_step,
migration_time,
underload_detection_params])
state['underload_detection'] = underload_detection
state['underload_detection_state'] = {}
overload_detection_params = common.parse_parameters(
config['algorithm_overload_detection_parameters'])
overload_detection = common.call_function_by_name(
config['algorithm_overload_detection_factory'],
[time_step,
migration_time,
overload_detection_params])
state['overload_detection'] = overload_detection
state['overload_detection_state'] = {}
vm_selection_params = common.parse_parameters(
config['algorithm_vm_selection_parameters'])
vm_selection = common.call_function_by_name(
config['algorithm_vm_selection_factory'],
[time_step,
migration_time,
vm_selection_params])
state['vm_selection'] = vm_selection
state['vm_selection_state'] = {}
else:
underload_detection = state['underload_detection']
overload_detection = state['overload_detection']
vm_selection = state['vm_selection']
if log.isEnabledFor(logging.INFO):
log.info('Started underload detection')
underload, state['underload_detection_state'] = underload_detection(
host_cpu_utilization, state['underload_detection_state'])
if log.isEnabledFor(logging.INFO):
log.info('Completed underload detection')
if log.isEnabledFor(logging.INFO):
log.info('Started overload detection')
overload, state['overload_detection_state'] = overload_detection(
host_cpu_utilization, state['overload_detection_state'])
if log.isEnabledFor(logging.INFO):
log.info('Completed overload detection')
if underload:
log.info('Started underload detection')
underload, state['underload_detection_state'] = underload_detection(
host_cpu_utilization, state['underload_detection_state'])
if log.isEnabledFor(logging.INFO):
log.info('Underload detected')
try:
r = requests.put('http://' + config['global_manager_host'] +
':' + config['global_manager_port'],
{'username': state['hashed_username'],
'password': state['hashed_password'],
'time': time.time(),
'host': state['hostname'],
'reason': 0})
if log.isEnabledFor(logging.INFO):
log.info('Received response: [%s] %s',
r.status_code, r.content)
except requests.exceptions.ConnectionError:
log.exception('Exception at underload request:')
log.info('Completed underload detection')
else:
if overload:
if log.isEnabledFor(logging.INFO):
log.info('Overload detected')
log.info('Started VM selection')
vm_uuids, state['vm_selection_state'] = vm_selection(
vm_cpu_mhz, vm_ram, state['vm_selection_state'])
log.info('Completed VM selection')
if log.isEnabledFor(logging.INFO):
log.info('Started overload detection')
overload, state['overload_detection_state'] = overload_detection(
host_cpu_utilization, state['overload_detection_state'])
if log.isEnabledFor(logging.INFO):
log.info('Completed overload detection')
if underload:
if log.isEnabledFor(logging.INFO):
log.info('Selected VMs to migrate: %s', str(vm_uuids))
log.info('Underload detected')
try:
r = requests.put('http://' + config['global_manager_host'] +
':' + config['global_manager_port'],
@ -329,137 +291,163 @@ def execute(config, state):
'password': state['hashed_password'],
'time': time.time(),
'host': state['hostname'],
'reason': 1,
'vm_uuids': ','.join(vm_uuids)})
'reason': 0})
if log.isEnabledFor(logging.INFO):
log.info('Received response: [%s] %s',
r.status_code, r.content)
except requests.exceptions.ConnectionError:
log.exception('Exception at overload request:')
log.exception('Exception at underload request:')
else:
if log.isEnabledFor(logging.INFO):
log.info('No underload or overload detected')
if overload:
if log.isEnabledFor(logging.INFO):
log.info('Overload detected')
if log.isEnabledFor(logging.INFO):
log.info('Completed an iteration')
log.info('Started VM selection')
vm_uuids, state['vm_selection_state'] = vm_selection(
vm_cpu_mhz, vm_ram, state['vm_selection_state'])
log.info('Completed VM selection')
return state
if log.isEnabledFor(logging.INFO):
log.info('Selected VMs to migrate: %s', str(vm_uuids))
try:
r = requests.put('http://' + config['global_manager_host'] +
':' + config['global_manager_port'],
{'username': state['hashed_username'],
'password': state['hashed_password'],
'time': time.time(),
'host': state['hostname'],
'reason': 1,
'vm_uuids': ','.join(vm_uuids)})
if log.isEnabledFor(logging.INFO):
log.info('Received response: [%s] %s',
r.status_code, r.content)
except requests.exceptions.ConnectionError:
log.exception('Exception at overload request:')
else:
if log.isEnabledFor(logging.INFO):
log.info('No underload or overload detected')
if log.isEnabledFor(logging.INFO):
log.info('Completed an iteration')
return state
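For readers unfamiliar with the algorithm-factory convention used by execute() above, here is a minimal sketch of the contract that call_function_by_name() appears to rely on: the factory receives [time_step, migration_time, params] and returns a closure mapping (utilization history, algorithm state) to (decision, new state). The threshold function below is purely illustrative, not one of the project's actual algorithms, and 'threshold' is an assumed parameter name.
def example_overload_factory(time_step, migration_time, params):
    # Hypothetical factory; reads an assumed 'threshold' parameter.
    threshold = params.get('threshold', 0.8)
    def detector(utilization, state):
        # With no data yet, report no overload and keep the state unchanged.
        if not utilization:
            return False, state
        # Overload if the most recent utilization sample exceeds the threshold.
        return utilization[-1] > threshold, state
    return detector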
@contract
def get_local_vm_data(self, path):
    """ Read the data about VMs from the local storage.
    :param path: A path to read VM UUIDs from.
    :type path: str
    :return: A map of VM UUIDs onto the corresponding CPU MHz values.
    :rtype: dict(str : list(int))
    """
    result = {}
    for uuid in os.listdir(path):
        with open(os.path.join(path, uuid), 'r') as f:
            result[uuid] = [int(x) for x in f.read().strip().splitlines()]
    return result
@contract
def get_local_host_data(self, path):
    """ Read the data about the host from the local storage.
    :param path: A path to read the host data from.
    :type path: str
    :return: A history of the host CPU usage in MHz.
    :rtype: list(int)
    """
    if not os.access(path, os.F_OK):
        return []
    with open(path, 'r') as f:
        result = [int(x) for x in f.read().strip().splitlines()]
    return result
@contract
def cleanup_vm_data(self, vm_data, uuids):
    """ Remove records for the VMs that are not in the list of UUIDs.
    :param vm_data: A map of VM UUIDs to some data.
    :type vm_data: dict(str: *)
    :param uuids: A list of VM UUIDs.
    :type uuids: list(str)
    :return: The cleaned up map of VM UUIDs to data.
    :rtype: dict(str: *)
    """
    for uuid in list(vm_data.keys()):
        if uuid not in uuids:
            del vm_data[uuid]
    return vm_data
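A quick illustration of cleanup_vm_data() with assumed data, using the pre-refactor, module-level form of the helper (no self): records for VMs that are no longer reported are dropped in place.
data = {'uuid-a': [1500, 1600], 'uuid-b': [900]}
print(cleanup_vm_data(data, ['uuid-a']))
# -> {'uuid-a': [1500, 1600]}: 'uuid-b' is absent from the UUID list, so removed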
@contract
def get_ram(self, vir_connection, vms):
    """ Get the maximum RAM for a set of VM UUIDs.
    :param vir_connection: A libvirt connection object.
    :type vir_connection: virConnect
    :param vms: A list of VM UUIDs.
    :type vms: list(str)
    :return: The maximum RAM for the VM UUIDs.
    :rtype: dict(str : long)
    """
    vms_ram = {}
    for uuid in vms:
        ram = get_max_ram(vir_connection, uuid)
        if ram:
            vms_ram[uuid] = ram
    return vms_ram
@contract
def get_max_ram(self, vir_connection, uuid):
    """ Get the max RAM allocated to a VM UUID using libvirt.
    :param vir_connection: A libvirt connection object.
    :type vir_connection: virConnect
    :param uuid: The UUID of a VM.
    :type uuid: str[36]
    :return: The maximum RAM of the VM in MB.
    :rtype: long|None
    """
    try:
        domain = vir_connection.lookupByUUIDString(uuid)
        return domain.maxMemory() / 1024
    except libvirt.libvirtError:
        return None
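A sketch of how the two RAM helpers might be exercised against a live hypervisor, again using their pre-refactor module-level form; the 'qemu:///system' URI is an assumption, not taken from this change.
import libvirt
conn = libvirt.open('qemu:///system')  # assumed URI; any libvirt URI works
uuids = [dom.UUIDString() for dom in conn.listAllDomains()]
print(get_ram(conn, uuids))  # e.g. {'<uuid>': 2048, ...}, max RAM per VM in MB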
@contract
def vm_mhz_to_percentage(self, vm_mhz_history, host_mhz_history,
                         physical_cpu_mhz):
    """ Convert VM CPU utilization to the host's CPU utilization.
    :param vm_mhz_history: A list of CPU utilization histories of VMs in MHz.
    :type vm_mhz_history: list(list(int))
    :param host_mhz_history: A history of the CPU usage by the host in MHz.
    :type host_mhz_history: list(int)
    :param physical_cpu_mhz: The total frequency of the physical CPU in MHz.
    :type physical_cpu_mhz: int,>0
    :return: The history of the host's CPU utilization in percentages.
    :rtype: list(float)
    """
    max_len = max(len(x) for x in vm_mhz_history)
    if len(host_mhz_history) > max_len:
        host_mhz_history = host_mhz_history[-max_len:]
    mhz_history = [[0] * (max_len - len(x)) + x
                   for x in vm_mhz_history + [host_mhz_history]]
    return [float(sum(x)) / physical_cpu_mhz for x in zip(*mhz_history)]
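A worked example with assumed numbers, using the pre-refactor module-level form of the helper: two VMs plus the hypervisor's own overhead on a 4000 MHz host. Shorter histories are left-padded with zeros before each time step is summed.
vm_hist = [[500, 1000], [1000, 1000]]  # per-VM CPU usage histories in MHz
host_hist = [200, 200]                 # hypervisor's own CPU usage in MHz
print(vm_mhz_to_percentage(vm_hist, host_hist, 4000))
# -> [0.425, 0.55]: (500 + 1000 + 200) / 4000, then (1000 + 1000 + 200) / 4000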

View File

@@ -1,5 +1,4 @@
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -71,116 +70,11 @@ def get_executor_client():
return _EXECUTOR_CLIENT
class EngineServer(object):
class GlobalManagerServer(object):
"""RPC Engine server."""
def __init__(self, engine):
self._engine = engine
def start_workflow(self, rpc_ctx, workflow_name, workflow_input, params):
"""Receives calls over RPC to start workflows on engine.
:param rpc_ctx: RPC request context.
:return: Workflow execution.
"""
LOG.info(
"Received RPC request 'start_workflow'[rpc_ctx=%s,"
" workflow_name=%s, workflow_input=%s, params=%s]"
% (rpc_ctx, workflow_name, workflow_input, params)
)
return self._engine.start_workflow(
workflow_name,
workflow_input,
**params
)
def on_task_state_change(self, rpc_ctx, task_ex_id, state):
return self._engine.on_task_state_change(task_ex_id, state)
def on_action_complete(self, rpc_ctx, action_ex_id, result_data,
result_error):
"""Receives RPC calls to communicate action result to engine.
:param rpc_ctx: RPC request context.
:param action_ex_id: Action execution id.
:return: Action execution.
"""
result = wf_utils.Result(result_data, result_error)
LOG.info(
"Received RPC request 'on_action_complete'[rpc_ctx=%s,"
" action_ex_id=%s, result=%s]" % (rpc_ctx, action_ex_id, result)
)
return self._engine.on_action_complete(action_ex_id, result)
def pause_workflow(self, rpc_ctx, execution_id):
"""Receives calls over RPC to pause workflows on engine.
:param rpc_ctx: Request context.
:return: Workflow execution.
"""
LOG.info(
"Received RPC request 'pause_workflow'[rpc_ctx=%s,"
" execution_id=%s]" % (rpc_ctx, execution_id)
)
return self._engine.pause_workflow(execution_id)
def resume_workflow(self, rpc_ctx, execution_id):
"""Receives calls over RPC to resume workflows on engine.
:param rpc_ctx: RPC request context.
:return: Workflow execution.
"""
LOG.info(
"Received RPC request 'resume_workflow'[rpc_ctx=%s,"
" execution_id=%s]" % (rpc_ctx, execution_id)
)
return self._engine.resume_workflow(execution_id)
def stop_workflow(self, rpc_ctx, execution_id, state, message=None):
"""Receives calls over RPC to stop workflows on engine.
Sets execution state to SUCCESS or ERROR. No more tasks will be
scheduled. Running tasks won't be killed, but their results
will be ignored.
:param rpc_ctx: RPC request context.
:param execution_id: Workflow execution id.
:param state: State assigned to the workflow. Permitted states are
SUCCESS or ERROR.
:param message: Optional information string.
:return: Workflow execution.
"""
LOG.info(
"Received RPC request 'stop_workflow'[rpc_ctx=%s, execution_id=%s,"
" state=%s, message=%s]" % (rpc_ctx, execution_id, state, message)
)
return self._engine.stop_workflow(execution_id, state, message)
def rollback_workflow(self, rpc_ctx, execution_id):
"""Receives calls over RPC to rollback workflows on engine.
:param rpc_ctx: RPC request context.
:return: Workflow execution.
"""
LOG.info(
"Received RPC request 'rollback_workflow'[rpc_ctx=%s,"
" execution_id=%s]" % (rpc_ctx, execution_id)
)
return self._engine.rollback_workflow(execution_id)
def __init__(self, manager):
self._manager = manager
def wrap_messaging_exception(method):
@@ -330,11 +224,11 @@ class EngineClient(base.Engine):
)
class ExecutorServer(object):
class LocalManagerServer(object):
"""RPC Executor server."""
def __init__(self, executor):
self._executor = executor
def __init__(self, manager):
self._executor = manager
def run_action(self, rpc_ctx, action_ex_id, action_class_str,
attributes, params):
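For context, a minimal sketch of how a server class like GlobalManagerServer is typically exposed over oslo.messaging; the topic and server names are assumptions, not taken from this change.
import oslo_messaging as messaging
from oslo_config import cfg
def launch_global_manager_server(manager):
    # Hypothetical launcher; topic and server names are illustrative only.
    transport = messaging.get_transport(cfg.CONF)
    target = messaging.Target(topic='terracotta_global_manager',
                              server='global-manager-0')
    server = messaging.get_rpc_server(transport, target,
                                      [GlobalManagerServer(manager)],
                                      executor='blocking')
    server.start()
    return server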

View File

@@ -1,10 +1,11 @@
# Copyright 2012 Anton Beloglazov
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,30 +13,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import *
from sqlalchemy.sql import func
from oslo_config import cfg
from oslo_log import log as logging
from terracotta import db as database
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def init_db():
    """ Initialize the database.
    :return: The initialized database.
    :rtype: Database
    """
    engine = create_engine(CONF.sql_connection)
    metadata = MetaData()
    metadata.bind = engine
@@ -49,7 +45,8 @@ def init_db(sql_connection):
host_resource_usage = \
Table('host_resource_usage', metadata,
Column('id', Integer, primary_key=True),
Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
Column('host_id', Integer, ForeignKey('hosts.id'),
nullable=False),
Column('timestamp', DateTime, default=func.now()),
Column('cpu_mhz', Integer, nullable=False))
@@ -68,27 +65,31 @@ def init_db(sql_connection):
Table('vm_migrations', metadata,
Column('id', Integer, primary_key=True),
Column('vm_id', Integer, ForeignKey('vms.id'), nullable=False),
Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
Column('host_id', Integer, ForeignKey('hosts.id'),
nullable=False),
Column('timestamp', DateTime, default=func.now()))
host_states = \
Table('host_states', metadata,
Column('id', Integer, primary_key=True),
Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
Column('host_id', Integer, ForeignKey('hosts.id'),
nullable=False),
Column('timestamp', DateTime, default=func.now()),
Column('state', Integer, nullable=False))
host_overload = \
Table('host_overload', metadata,
Column('id', Integer, primary_key=True),
Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
Column('host_id', Integer, ForeignKey('hosts.id'),
nullable=False),
Column('timestamp', DateTime, default=func.now()),
Column('overload', Integer, nullable=False))
metadata.create_all()
connection = engine.connect()
db = Database(connection, hosts, host_resource_usage, vms,
vm_resource_usage, vm_migrations, host_states, host_overload)
db = database.Database(connection, hosts, host_resource_usage, vms,
vm_resource_usage, vm_migrations, host_states,
host_overload)
log.debug('Initialized a DB connection to %s', sql_connection)
LOG.debug('Initialized a DB connection to %s', CONF.sql_connection)
return db
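Since init_db() now reads CONF.sql_connection instead of taking an argument, the option has to be registered somewhere at startup. A minimal sketch, assuming the option lives in the default option group; the actual commit may define it elsewhere.
from oslo_config import cfg
CONF = cfg.CONF
CONF.register_opts([
    cfg.StrOpt('sql_connection',
               default='sqlite:///:memory:',  # illustrative default only
               help='Database connection URL.'),
])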

18
terracotta/version.py Normal file
View File

@@ -0,0 +1,18 @@
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pbr import version
version_info = version.VersionInfo('terracotta')
version_string = version_info.version_string

16
test-requirements.txt Normal file
View File

@@ -0,0 +1,16 @@
hacking>=0.9.2,<0.10
coverage>=3.6
pyflakes==0.8.1
pylint==0.25.2
sphinx>=1.1.2,!=1.2.0,<1.3
unittest2
oslotest
oslosphinx
sphinxcontrib-pecanwsme>=0.8
sphinxcontrib-httpdomain
docutils==0.9.1
fixtures>=0.3.14
nose
testrepository>=0.0.18
testtools>=0.9.34
lockfile>=0.8

46
tox.ini Normal file
View File

@@ -0,0 +1,46 @@
[tox]
envlist = py27,py33,py34,pep8
minversion = 1.6
skipsdist = True
[testenv]
sitepackages = True
usedevelop = True
install_command = pip install -U --force-reinstall {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
PYTHONDONTWRITEBYTECODE = 1
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
python -m mistral.openstack.common.lockutils python setup.py test --slowest --testr-args='{posargs}'
whitelist_externals = rm
[testenv:pep8]
commands = flake8 {posargs}
[testenv:cover]
# Also do not run test_coverage_ext tests while gathering coverage as those
# tests conflict with coverage.
setenv = VIRTUAL_ENV={envdir}
commands =
python setup.py testr --coverage \
--testr-args='^(?!.*test.*coverage).*$'
[testenv:venv]
commands = {posargs}
[testenv:docs]
commands =
rm -rf doc/html doc/build
rm -rf doc/source/apidoc doc/source/api
python setup.py build_sphinx
[testenv:pylint]
setenv = VIRTUAL_ENV={envdir}
commands = bash tools/lintstack.sh
[flake8]
show-source = true
ignore = H803,H305,H405
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tools,scripts

View File

@@ -1,196 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contracts import contract
import datetime
from sqlalchemy import *
from sqlalchemy.engine.base import Connection
class Database(object):
""" A class representing the database, where fields are tables.
"""
@contract(connection=Connection,
hosts=Table,
host_resource_usage=Table,
vms=Table,
vm_resource_usage=Table,
vm_migrations=Table,
host_states=Table,
host_overload=Table)
def __init__(self, connection, hosts, host_resource_usage, vms,
vm_resource_usage, vm_migrations, host_states, host_overload):
""" Initialize the database.
:param connection: A database connection object.
:param hosts: The hosts table.
:param host_resource_usage: The host_resource_usage table.
:param vms: The vms table.
:param vm_resource_usage: The vm_resource_usage table.
:param vm_migrations: The vm_migrations table.
:param host_states: The host_states table.
:param host_overload: The host_overload table.
"""
self.connection = connection
self.hosts = hosts
self.host_resource_usage = host_resource_usage
self.vms = vms
self.vm_resource_usage = vm_resource_usage
self.vm_migrations = vm_migrations
self.host_states = host_states
self.host_overload = host_overload
@contract
def select_host_ids(self):
""" Select the IDs of all the hosts.
:return: A dict of host names to IDs.
:rtype: dict(str: int)
"""
return dict((str(x[1]), int(x[0]))
for x in self.hosts.select().execute().fetchall())
@contract
def select_host_states(self, host_id, start_time, end_time):
""" Select the states of a host.
:param start_time: The start time to select host states.
:type start_time: *
:param end_time: The end time to select host states.
:type end_time: *
:return: A list of timestamps and host states.
:rtype: list(tuple(*, int))
"""
hs = self.host_states
sel = select([hs.c.timestamp, hs.c.state]). \
where(and_(hs.c.host_id == host_id,
hs.c.timestamp >= start_time,
hs.c.timestamp <= end_time)). \
order_by(hs.c.id.asc())
return [(x[0], int(x[1]))
for x in self.connection.execute(sel).fetchall()]
@contract
def select_host_overload(self, host_id, start_time, end_time):
""" Select the overload of a host.
:param start_time: The start time to select host overload.
:type start_time: *
:param end_time: The end time to select host overload.
:type end_time: *
:return: A list of timestamps and overloads.
:rtype: list(tuple(*, int))
"""
ho = self.host_overload
sel = select([ho.c.timestamp, ho.c.overload]). \
where(and_(ho.c.host_id == host_id,
ho.c.timestamp >= start_time,
ho.c.timestamp <= end_time)). \
order_by(ho.c.id.asc())
return [(x[0], int(x[1]))
for x in self.connection.execute(sel).fetchall()]
@contract
def select_vm_migrations(self, start_time, end_time):
""" Select VM migrations.
:param start_time: The start time to select data.
:type start_time: *
:param end_time: The end time to select data.
:type end_time: *
:return: A list of timestamps and VM IDs.
:rtype: list(tuple(*, int))
"""
vm = self.vm_migrations
sel = select([vm.c.timestamp, vm.c.vm_id]). \
where(and_(vm.c.timestamp >= start_time,
vm.c.timestamp <= end_time)). \
order_by(vm.c.id.asc())
return [(x[0], int(x[1]))
for x in self.connection.execute(sel).fetchall()]
@contract
def init_db(sql_connection):
""" Initialize the database.
:param sql_connection: A database connection URL.
:type sql_connection: str
:return: The initialized database.
:rtype: *
"""
engine = create_engine(sql_connection) # 'sqlite:///:memory:'
metadata = MetaData()
metadata.bind = engine
hosts = Table('hosts', metadata,
Column('id', Integer, primary_key=True),
Column('hostname', String(255), nullable=False),
Column('cpu_mhz', Integer, nullable=False),
Column('cpu_cores', Integer, nullable=False),
Column('ram', Integer, nullable=False))
host_resource_usage = \
Table('host_resource_usage', metadata,
Column('id', Integer, primary_key=True),
Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
Column('timestamp', DateTime, default=func.now()),
Column('cpu_mhz', Integer, nullable=False))
vms = Table('vms', metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(36), nullable=False))
vm_resource_usage = \
Table('vm_resource_usage', metadata,
Column('id', Integer, primary_key=True),
Column('vm_id', Integer, ForeignKey('vms.id'), nullable=False),
Column('timestamp', DateTime, default=func.now()),
Column('cpu_mhz', Integer, nullable=False))
vm_migrations = \
Table('vm_migrations', metadata,
Column('id', Integer, primary_key=True),
Column('vm_id', Integer, ForeignKey('vms.id'), nullable=False),
Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
Column('timestamp', DateTime, default=func.now()))
host_states = \
Table('host_states', metadata,
Column('id', Integer, primary_key=True),
Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
Column('timestamp', DateTime, default=func.now()),
Column('state', Integer, nullable=False))
host_overload = \
Table('host_overload', metadata,
Column('id', Integer, primary_key=True),
Column('host_id', Integer, ForeignKey('hosts.id'), nullable=False),
Column('timestamp', DateTime, default=func.now()),
Column('overload', Integer, nullable=False))
metadata.create_all()
connection = engine.connect()
db = Database(connection, hosts, host_resource_usage, vms,
vm_resource_usage, vm_migrations, host_states, host_overload)
return db