Retire stackforge/libra

This commit is contained in:
Monty Taylor 2015-10-17 16:03:27 -04:00
parent a0e8b91e16
commit c7082fa72a
232 changed files with 7 additions and 34123 deletions

19
.gitignore vendored
View File

@ -1,19 +0,0 @@
*.egg
*.egg-info
*.pyc
*.swp
.cache
.testrepository
.tox
AUTHORS
build
ChangeLog
debian/files
debian/libra
debian/libra.debhelper.log
debian/libra.postinst.debhelper
debian/libra.preinst.debhelper
debian/libra.prerm.debhelper
debian/libra.substvars
dist
doc/html

View File

@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=stackforge/libra.git

View File

@ -1 +0,0 @@
<shrewsbury.dave@gmail.com> <=>

View File

@ -1,4 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} ${PYTHON:-python} -m subunit.run discover -t ./ ./ $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

View File

@ -1,9 +0,0 @@
include README
exclude .gitignore
exclude .gitreview
global-exclude *.pyc
graft doc
graft etc

47
README
View File

@ -1,47 +0,0 @@
Description
-----------
Libra is a tool set to create and manage load balancers in an OpenStack
environment.
Tools
-----
* libra_pool_mgm
Python daemon that manages a pool of Nova instances.
* libra_worker
Python daemon that will receive messages from an API server via
a Gearman job server to create/modify load balancers on the local
machine.
* libra_api
Python daemon to act as the client API server.
* libra_admin_api
Python daemon providing an administrative API server primarily for
libra_pool_mgm and libra_statsd
Running Tests
-------------
Tox is the best way to run the tests. Tox, if unavailable, can be installed
via the Python pip command:
$ pip install tox
Once it is installed, run the tests:
$ tox
More Documentation
------------------
You can build the complete documentation with:
$ pip install Sphinx
$ python setup.py build_sphinx

7
README.rst Normal file
View File

@ -0,0 +1,7 @@
This project is no longer maintained.
The contents of this repository are still available in the Git source code
management system. To see the contents of this repository before it reached
its end of life, please check out the previous commit with
"git checkout HEAD^1".

View File

@ -1,88 +0,0 @@
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import json
import socket
from gearman import GearmanClient, DataEncoder, JOB_UNKNOWN
class JSONDataEncoder(DataEncoder):
    """Gearman data encoder that serializes job payloads as JSON."""

    @classmethod
    def encode(cls, encodable_object):
        # Serialize the payload and echo the wire form for debugging.
        encoded = json.dumps(encodable_object)
        print("Encoding JSON object to string: %s" % encoded)
        return encoded

    @classmethod
    def decode(cls, decodable_string):
        # Parse the wire form back into Python objects.
        decoded = json.loads(decodable_string)
        print("Decoding string (%s) to JSON object" % decoded)
        return decoded
class JSONGearmanClient(GearmanClient):
    # Gearman client whose submitted/received payloads are encoded as
    # JSON via JSONDataEncoder instead of raw strings.
    data_encoder = JSONDataEncoder
def check_request_status(job_request):
    """Print the outcome of a submitted gearman job request.

    Reports success (with the JSON-formatted result), a timeout, or a
    failed connection to stdout.  Returns None.
    """
    # Print statements converted to the function form for Python 3
    # compatibility; single-argument print() behaves identically on
    # Python 2, matching JSONDataEncoder above.
    if job_request.complete:
        print("Job %s finished! Result: %s -\n%s" % (job_request.job.unique,
                                                     job_request.state,
                                                     json.dumps(
                                                         job_request.result,
                                                         indent=2
                                                     )))
    elif job_request.timed_out:
        # NOTE(review): the complete branch reads job_request.job.unique but
        # these branches read job_request.unique -- confirm which attribute
        # the gearman request object actually exposes.
        print("Job %s timed out!" % job_request.unique)
    elif job_request.state == JOB_UNKNOWN:
        print("Job %s connection failed!" % job_request.unique)
def main():
    """Submit a sample 'update' job to a local gearman job server.

    The task name is this host's hostname (the worker registers under
    that name); the payload mirrors the JSON an API server would send
    for a load balancer update.
    """
    task_name = socket.gethostname()
    gearman_client = JSONGearmanClient(['localhost:4730'])
    payload = """
    {
        "hpcs_action": "update",
        "loadbalancers": [
            {
                "name": "a-new-loadbalancer",
                "protocol": "http",
                "nodes": [
                    {
                        "address": "10.1.1.1",
                        "port": "80"
                    },
                    {
                        "address": "10.1.1.2",
                        "port": "81"
                    }
                ]
            }
        ]
    }
    """
    # Worker class expects the data as a JSON object, not string
    job = gearman_client.submit_job(task_name, json.loads(payload))
    check_request_status(job)


if __name__ == "__main__":
    main()

View File

@ -1,54 +0,0 @@
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import logging as std_logging
import time
from oslo.config import cfg
from libra.openstack.common import log as logging
from libra.common.api.mnb import update_mnb
from libra import __version__
# Global oslo.config handle and module-level logger.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Register the test-only option controlling how many billing (MnB)
# notification messages the script emits.
CONF.register_opts([
    cfg.IntOpt('testcount',
               metavar='COUNT',
               default=1,
               help='Number of messages to send')
])
def main():
    """Entry point: emit test MnB (metering and billing) messages.

    Parses config/CLI options, sets up logging, calls update_mnb() with
    CONF.testcount messages, then sleeps so asynchronous delivery can
    complete before the process exits.
    """
    CONF(project='mnbtest', version=__version__)
    logging.setup('mnbtest')
    LOG.debug('Configuration:')
    # Print statements converted to the function form for Python 3
    # compatibility; single-argument print() is identical on Python 2.
    print("Starting Test")
    print("LOG FILE = {0}".format(CONF.log_file))
    LOG.info('STARTING MNBTEST')
    CONF.log_opt_values(LOG, std_logging.DEBUG)
    LOG.info("Calling update_mnb with {0} messages".format(CONF.testcount))
    update_mnb('lbaas.instance.test', CONF.testcount, 456)
    # Give the asynchronous notifier time to flush before exiting.
    time.sleep(30)


if __name__ == "__main__":
    main()

View File

@ -1,216 +0,0 @@
#!/bin/bash
##############################################################################
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
##############################################################################
# DESCRIPTION
# This script is used to manually upgrade a worker node running a 1.0
# version of Libra to the 2.0 version. This is specifically targeted to
# Ubuntu nodes, but may work on other distributions, though that is
# untested. It makes some assumptions about the current setup.
#
# This script is designed to be safe to run multiple times, in case an
# error is encountered and it must be run again.
#
# EXIT VALUES
# 0 on success, 1 on error
##############################################################################
# Refuse to run as a non-root user: everything below edits system files.
# $USER is quoted so the test cannot become a syntax error if it is
# unset or empty.
if [ "$USER" != "root" ]
then
    echo "Must be run as root user."
    exit 1
fi

# Start each run with a fresh log file.
LOG="/tmp/update_node.log"
if [ -e ${LOG} ]
then
    rm -f ${LOG}
fi
#################################################
# Update sudo privs by inserting '/usr/bin/chown'
#################################################
# Rewrite the %libra sudoers entry in place so the libra worker can run
# the service/file-management commands (including chown) passwordless.
file="/etc/sudoers"
echo "Updating SUDO file $file" | tee -a ${LOG}
# Uncomment below if you run the libra_worker process as the 'haproxy' user.
#sed -i.bak -e '/^%haproxy/ c\
#%haproxy ALL = NOPASSWD: /usr/sbin/service, /bin/cp, /bin/mv, /bin/rm, /usr/bin/socat, /bin/chown' ${file}
#if [ $? -ne 0 ]
#then
#    echo "1st edit of ${file} failed." | tee -a ${LOG}
#    exit 1
#fi
sed -i.bak -e '/^%libra/ c\
%libra ALL = NOPASSWD: /usr/sbin/service, /bin/cp, /bin/mv, /bin/rm, /usr/bin/socat, /bin/chown' ${file}
if [ $? -ne 0 ]
then
    echo "2nd edit of ${file} failed." | tee -a ${LOG}
    exit 1
fi
# Drop sed's backup copy once the edit has succeeded.
if [ -e ${file}.bak ]
then
    rm ${file}.bak
fi
########################
# Make new log directory
########################
# haproxy logs are redirected to /mnt/log (see the rsyslog config
# below), so make sure the directory exists first.
logdir="/mnt/log"
echo "Creating ${logdir}" | tee -a ${LOG}
if [ ! -e ${logdir} ]
then
    mkdir ${logdir}
    if [ $? -ne 0 ]
    then
        echo "Making log directory ${logdir} failed" | tee -a ${LOG}
        exit 1
    fi
fi
#######################################
# Create /etc/rsyslog.d/10-haproxy.conf
#######################################
# Route haproxy's local0 facility to /mnt/log/haproxy.log and stop it
# from also being written to the default syslog destinations.
haproxy_syslog="/etc/rsyslog.d/10-haproxy.conf"
echo "Creating ${haproxy_syslog}" | tee -a ${LOG}
# Quoted 'EOF' prevents shell expansion of rsyslog's $template syntax.
cat > ${haproxy_syslog} <<'EOF'
$template Haproxy,"%TIMESTAMP% %msg%\n"
local0.* -/mnt/log/haproxy.log;Haproxy
# don't log anywhere else
local0.* ~
EOF
if [ $? -ne 0 ]
then
    echo "Creating ${haproxy_syslog} failed." | tee -a ${LOG}
    exit 1
fi
#################################
# Create /etc/logrotate.d/haproxy
#################################
# Weekly rotation for the new haproxy log location, keeping 7
# compressed archives and reloading haproxy after each rotation.
haproxy_logrotate="/etc/logrotate.d/haproxy"
echo "Creating ${haproxy_logrotate}" | tee -a ${LOG}
cat > ${haproxy_logrotate} <<'EOF'
/mnt/log/haproxy.log {
    weekly
    missingok
    rotate 7
    compress
    delaycompress
    notifempty
    create 640 syslog adm
    sharedscripts
    postrotate
        /etc/init.d/haproxy reload > /dev/null
    endscript
}
EOF
if [ $? -ne 0 ]
then
    echo "Creating ${haproxy_logrotate} failed." | tee -a ${LOG}
    exit 1
fi
##########################
# Edit current haproxy.cfg
##########################
# Logging now goes only to the local0 facility, so strip the old
# 'local1 notice' log line if this node still carries one.
haproxycfg="/etc/haproxy/haproxy.cfg"
echo "Updating HAProxy config file ${haproxycfg}" | tee -a ${LOG}
if [ -e ${haproxycfg} ]
then
    sed -i.bak -e '/local1 notice/d' ${haproxycfg}
    if [ $? -ne 0 ]
    then
        echo "Editing ${haproxycfg} failed." | tee -a ${LOG}
        exit 1
    fi
fi
if [ -e ${haproxycfg}.bak ]
then
    rm -f ${haproxycfg}.bak
fi
##############
# Update Libra
##############
# Fetch the 2.0 release tarball and install it over the 1.0 install.
pkgversion="libra-2.0"
pkglocation="/tmp"
tarball="http://tarballs.openstack.org/libra/${pkgversion}.tar.gz"
echo "Downloading ${pkgversion} tarball to ${pkglocation}" | tee -a ${LOG}
cd $pkglocation
if [ $? -ne 0 ]; then echo "cd to ${pkglocation} failed" | tee -a ${LOG}; exit 1; fi
curl -Osf ${tarball}
if [ $? -ne 0 ]; then echo "Failed to download ${tarball}" | tee -a ${LOG}; exit 1; fi
echo "Updating Libra to ${pkgversion}" | tee -a ${LOG}
# Redirections fixed: '2>&1 >> file' points stderr at the terminal, not
# the log; '>> file 2>&1' captures both streams in ${LOG}.
tar zxf ${pkgversion}.tar.gz >> ${LOG} 2>&1
if [ $? -ne 0 ]; then echo "tar failed" | tee -a ${LOG}; exit 1; fi
cd ${pkgversion}
if [ $? -ne 0 ]; then echo "cd to ${pkgversion} failed" | tee -a ${LOG}; exit 1; fi
python setup.py install --install-layout=deb >> ${LOG} 2>&1
if [ $? -ne 0 ]; then echo "python install failed" | tee -a ${LOG}; exit 1; fi
##################
# Restart rsyslogd
##################
echo "Restarting rsyslogd" | tee -a ${LOG}
# Redirections fixed throughout: '>> file 2>&1' (not '2>&1 >>') so
# stderr is captured in the log as intended.
service rsyslog restart >> ${LOG} 2>&1
if [ $? -ne 0 ]; then echo "rsyslog restart failed" | tee -a ${LOG}; exit 1; fi
#################
# Restart haproxy
#################
echo "Restarting haproxy" | tee -a ${LOG}
service haproxy restart >> ${LOG} 2>&1
if [ $? -ne 0 ]; then echo "haproxy restart failed" | tee -a ${LOG}; exit 1; fi
######################
# Restart libra_worker
######################
echo "Stopping libra_worker" | tee -a ${LOG}
# killall may legitimately fail when no worker is running, so its exit
# status is deliberately not checked.
killall libra_worker >> ${LOG} 2>&1
#if [ $? -ne 0 ]; then echo "killing libra_worker failed" | tee -a ${LOG}; exit 1; fi
echo "Starting libra_worker" | tee -a ${LOG}
/usr/bin/libra_worker -c /etc/libra.cfg >> ${LOG} 2>&1
if [ $? -ne 0 ]; then echo "starting libra_worker failed" | tee -a ${LOG}; exit 1; fi
exit 0

View File

@ -1,114 +0,0 @@
#!/bin/bash
##############################################################################
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
##############################################################################
# DESCRIPTION
# This script is used to manually upgrade a worker node running a 2.0
# version of Libra to the 3.0 version. This is specifically targeted to
# Ubuntu nodes, but may work on other distributions, though that is
# untested. It makes some assumptions about the current setup.
#
# This script is designed to be safe to run multiple times, in case an
# error is encountered and it must be run again.
#
# EXIT VALUES
# 0 on success, 1 on error
##############################################################################
# Refuse to run as a non-root user: everything below edits system files.
# $USER is quoted so the test cannot become a syntax error if it is
# unset or empty.
if [ "$USER" != "root" ]
then
    echo "Must be run as root user."
    exit 1
fi

# Start each run with a fresh log file.
LOG="/tmp/update_node.log"
if [ -e ${LOG} ]
then
    rm -f ${LOG}
fi
#################################################
# Update sudo privs by inserting '/usr/bin/chown'
#################################################
# 3.0 workers run commands as the 'haproxy' user; rewrite that sudoers
# entry to the passwordless command list the new worker requires.
file="/etc/sudoers"
echo "Updating SUDO file $file" | tee -a ${LOG}
sed -i.bak -e '/^%haproxy/ c\
%haproxy ALL = NOPASSWD: /usr/sbin/service, /bin/cp, /bin/mv, /bin/rm, /bin/chown' ${file}
if [ $? -ne 0 ]
then
    echo "Edit of ${file} failed." | tee -a ${LOG}
    exit 1
fi
# Drop sed's backup copy once the edit has succeeded.
if [ -e ${file}.bak ]
then
    rm ${file}.bak
fi
#########################
# Chown on haproxy socket
#########################
# Hand the existing stats socket to the haproxy user so the worker can
# use it without root; skipped if the socket does not exist yet.
haproxysock="/var/run/haproxy-stats.socket"
echo "Doing chown of haproxy socket ${haproxysock}" | tee -a ${LOG}
if [ -e ${haproxysock} ]
then
    chown haproxy:haproxy ${haproxysock}
    if [ $? -ne 0 ]
    then
        echo "chown on ${haproxysock} failed." | tee -a ${LOG}
        exit 1
    fi
fi
##########################
# Edit current haproxy.cfg
##########################
# Point the 'stats socket' directive at the haproxy-owned socket so the
# ownership change above survives future haproxy restarts.
haproxycfg="/etc/haproxy/haproxy.cfg"
echo "Updating HAProxy config file ${haproxycfg}" | tee -a ${LOG}
if [ -e ${haproxycfg} ]
then
    sed -i.bak -e '/stats socket/ c\
stats socket /var/run/haproxy-stats.socket user haproxy group haproxy mode operator' ${haproxycfg}
    if [ $? -ne 0 ]
    then
        echo "Editing ${haproxycfg} failed." | tee -a ${LOG}
        exit 1
    fi
fi
if [ -e ${haproxycfg}.bak ]
then
    rm -f ${haproxycfg}.bak
fi
#################
# Restart haproxy
#################
echo "Restarting haproxy" | tee -a ${LOG}
# Redirection fixed: '>> file 2>&1' (not '2>&1 >>') so stderr is
# captured in the log as intended.
service haproxy restart >> ${LOG} 2>&1
if [ $? -ne 0 ]; then echo "haproxy restart failed" | tee -a ${LOG}; exit 1; fi
exit 0

View File

@ -1,6 +0,0 @@
#!/bin/bash
# Build the PDF documentation via Sphinx's LaTeX builder.
# Abort on the first failing command instead of running 'make' against
# partial or missing LaTeX output.
set -e
python setup.py build_sphinx_latex
# Fix option double dashes in latex output
perl -i -pe 's/\\bfcode\{--(.*)\}/\\bfcode\{-\{\}-\1\}/g' build/sphinx/latex/*.tex
perl -i -pe 's/\\index\{(.*?)--(.*?)\}/\\index\{\1-\{\}-\2\}/g' build/sphinx/latex/*.tex
make -C build/sphinx/latex all-pdf

View File

@ -1,22 +0,0 @@
Description
===========
Purpose
-------
The Admin API server listens for REST+JSON connections to provide information
about the state of Libra to external systems.
Additionally the Admin API has several schedulers which automatically maintain
the health of the Libra system and the connected Load Balancer devices.
Design
------
Similar to the main API server it uses an Eventlet WSGI web server frontend
with Pecan+WSME to process requests. SQLAlchemy+MySQL is used to access the
data store. The main internal difference (apart from the API itself) is the
Admin API server doesn't use keystone or gearman.
It spawns several scheduled threads to run tasks such as building new devices
for the pool, monitoring load balancer devices and maintaining IP addresses.

View File

@ -1,775 +0,0 @@
Admin API REST Interface (v2)
=============================
Introduction
------------
This is the new Admin API interface for the LBaaS system. It will allow the engineers as well as support teams to perform basic tasks on the LBaaS system without direct access using Salt, SSH or MySQL. It can also be used to automate tasks such as monitoring overall system health.
Authentication & Security
-------------------------
Authentication will be performed in a similar way to the main API server, via keystone, for anyone registered to our service. There will be, however, one crucial addition. The database will contain a list of tenant IDs that can actually use the Admin API; anyone else will get a 401 response. This will also have two levels of access, which for now we will call 'staff' (USER) and 'administrators' (ADMIN). In addition to this, the Admin API's port will be restricted to users on a VPN.
Since this is an Admin API all actions should be well logged along with the tenantID of the user who actioned them.
API Sections
------------
The Admin API will initially be divided into three distinct sections, Devices, LoadBalancers and Status. Once we have per-customer defined limits a new section should be added to support that. In the table below the following conventions are used:
{baseURI} - the endpoint address/IP for the Admin API server
{ver} - The version number (1.0 already exists as a system Admin API, 2.0 shall be the first version)
{lbID} - The load balancer ID
{deviceID} - The device ID
+---------------+----------------------------------+--------+---------------------------------------------+
| Resource | Operation | Method | Path |
+===============+==================================+========+=============================================+
| Devices | Get a list of devices | GET | {baseURI}/{ver}/devices |
+---------------+----------------------------------+--------+---------------------------------------------+
| Devices | Get a single device | GET | {baseURI}/{ver}/devices/{deviceID} |
+---------------+----------------------------------+--------+---------------------------------------------+
| Devices | Get a device version | GET | {baseURI}/{ver}/devices/{deviceID}/discover |
+---------------+----------------------------------+--------+---------------------------------------------+
| Devices | Deletes a device | DELETE | {baseURI}/{ver}/devices/{deviceID} |
+---------------+----------------------------------+--------+---------------------------------------------+
| LoadBalancers | Get a list of load balancers | GET | {baseURI}/{ver}/loadbalancers |
+---------------+----------------------------------+--------+---------------------------------------------+
| LoadBalancers | Gets a single load balancer | GET | {baseURI}/{ver}/loadbalancers/{lbID} |
+---------------+----------------------------------+--------+---------------------------------------------+
| LoadBalancers | Delete a single load balancer | DELETE | {baseURI}/{ver}/loadbalancers/{lbID} |
+---------------+----------------------------------+--------+---------------------------------------------+
| Status | Get a pool status | GET | {baseURI}/{ver}/status/pool |
+---------------+----------------------------------+--------+---------------------------------------------+
| Status | Get the counters | GET | {baseURI}/{ver}/status/counters |
+---------------+----------------------------------+--------+---------------------------------------------+
| Status | Get a service status | GET | {baseURI}/{ver}/status/service |
+---------------+----------------------------------+--------+---------------------------------------------+
| Status | Get the global service limits | GET | {baseURI}/{ver}/status/limits |
+---------------+----------------------------------+--------+---------------------------------------------+
| Status | Change the global service limits | PUT | {baseURI}/{ver}/status/limits |
+---------------+----------------------------------+--------+---------------------------------------------+
| Status | Get a tenant's service limits | GET | {baseURI}/{ver}/status/limits/{tenantID} |
+---------------+----------------------------------+--------+---------------------------------------------+
| Status | Change a tenant's service limits | PUT | {baseURI}/{ver}/status/limits/{tenantID} |
+---------------+----------------------------------+--------+---------------------------------------------+
| User | Get a list of Admin API users | GET | {baseURI}/{ver}/user |
+---------------+----------------------------------+--------+---------------------------------------------+
| User | Get an Admin API user | GET | {baseURI}/{ver}/user/{tenantID} |
+---------------+----------------------------------+--------+---------------------------------------------+
| User | Delete an Admin API user | DELETE | {baseURI}/{ver}/user/{tenantID} |
+---------------+----------------------------------+--------+---------------------------------------------+
| User | Add an Admin API user | POST | {baseURI}/{ver}/user |
+---------------+----------------------------------+--------+---------------------------------------------+
| User | Modify an Admin API user | PUT | {baseURI}/{ver}/user/{tenantID} |
+---------------+----------------------------------+--------+---------------------------------------------+
Get a list of devices
---------------------
This will be used to get either a whole list of devices or a filtered list given certain criteria. A future expansion to this would be to add pagination support.
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/devices
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Query parameters supported
^^^^^^^^^^^^^^^^^^^^^^^^^^
* status - A specified status type to filter by such as 'OFFLINE', 'ONLINE' or 'ERROR'
* name - A specified device name (in a future version we could accept wildcards)
* ip - A specified device ip address (in a future version we could accept ranges)
* vip - A specified floating ip address (in a future version we could accept ranges)
Response Example
^^^^^^^^^^^^^^^^
.. code-block:: json
{"devices": [
{
"id": 123,
"name": "7908c1f2-1bce-11e3-bcd3-fa163e9790b4",
"status": "OFFLINE",
"ip": "15.125.30.123",
"vip": null,
"created": "2013-05-12 12:13:54",
"updated": "2013-06-02 14:21:31"
}
]}
Get a single device
-------------------
This will be used to get details of a single device specified by its ID. This will contain additional information such as load balancers attached to a given device.
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/devices/{id}
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error), 404 (Not found)
Query parameters supported
^^^^^^^^^^^^^^^^^^^^^^^^^^
Not applicable
Response Example
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"id": 123,
"name": "7908c1f2-1bce-11e3-bcd3-fa163e9790b4",
"status": "ONLINE",
"ip": "15.125.30.123",
"vip": "15.125.50.45",
"created": "2013-05-12 12:13:54",
"updated": "2013-06-02 14:21:31",
"loadBalancers": [
{
"id": 5263
}
]
}
Get a device version
--------------------
This will be used to send a DISCOVER gearman message to a given device's worker and get its version response.
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/devices/{id}/discover
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error), 404 (Not found)
Query parameters supported
^^^^^^^^^^^^^^^^^^^^^^^^^^
Not applicable
Response Example
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"id": 123,
"version": "1.0",
"release": "1.0.alpha.3.gca84083"
}
Delete a device
---------------
This will be used to delete a device. If the device has load balancers attached, these will be moved to a new device. Typically this could be used for worker upgrades, going through each device and rebuilding it using a pool with newer workers. If there are no load balancers attached it should just mark the device for deletion; in this scenario a 204 with empty body will be returned.
Request type
^^^^^^^^^^^^
DELETE
Path
^^^^
/v2.0/devices/{id}
Access
^^^^^^
It should be available to 'administrators' only.
Response codes
^^^^^^^^^^^^^^
Success: 200 or 204
Failure: 400 (Bad request), 500 (Service error), 404 (Not found)
Query parameters supported
^^^^^^^^^^^^^^^^^^^^^^^^^^
Not applicable
Response Example
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"oldId": 123,
"newId": 148
}
Get a list of LoadBalancers
---------------------------
This will be used to get a list of all load balancers or a filtered list using given criteria. A future expansion to this would be to add pagination support.
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/loadbalancers
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Query parameters supported
^^^^^^^^^^^^^^^^^^^^^^^^^^
* status - A specified status type to filter by such as 'ACTIVE', 'DEGRADED' or 'ERROR'
* tenant - The tenant/project ID for a given customer
* name - A specified device name (in a future version we could accept wildcards)
* ip - A specified device ip address (in a future version we could accept ranges)
* vip - A specified floating ip address (in a future version we could accept ranges)
Response Example
^^^^^^^^^^^^^^^^
.. code-block:: json
{"loadBalancers": [
{
"id": 4561,
"name": "my load balancer",
"status": "ACTIVE",
"tenant": 8637027649,
"vip": "15.125.30.123",
"protocol": "HTTP",
"algorithm": "ROUND_ROBIN",
"port": 80,
"created": "2013-05-12 12:13:54",
"updated": "2013-06-02 14:21:31"
}
]}
Get a single LoadBalancer
-------------------------
This will be used to get details of a single load balancer specified by its ID. This will contain additional information such as nodes attached to the load balancer and which device is used.
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/loadbalancers/{id}
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error), 404 (Not found)
Query parameters supported
^^^^^^^^^^^^^^^^^^^^^^^^^^
Not applicable
Response Example
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"id": 4561,
"name": "my load balancer",
"status": "ACTIVE",
"tenant": 8637027649,
"vip": "15.125.30.123",
"protocol": "HTTP",
"algorithm": "ROUND_ROBIN",
"port": 80,
"device": 123,
"created": "2013-05-12 12:13:54",
"updated": "2013-06-02 14:21:31",
"nodes": [
{
"ip": "15.185.23.157",
"port": 80,
"weight": 1,
"enabled": true,
"status": "ONLINE"
}
],
"monitor": {
"type": "HTTP",
"delay": "30",
"timeout": "30",
"attemptsBeforeDeactivation": "2",
"path": "/healthcheck"
}
}
Delete a single LoadBalancer (NOT IMPLEMENTED!)
-----------------------------------------------
This will be used to delete a single load balancer in the same way a given user would.
Request type
^^^^^^^^^^^^
DELETE
Path
^^^^
/v2.0/loadbalancers/{id}
Access
^^^^^^
It should be available to 'administrators' only.
Response codes
^^^^^^^^^^^^^^
Success: 204
Failure: 400 (Bad request), 500 (Service error), 404 (Not found)
Query parameters supported
^^^^^^^^^^^^^^^^^^^^^^^^^^
Not applicable
Get pool status
---------------
This is used to get an overview of the current status of the load balancer pool
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/status/pool
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Query parameters supported
^^^^^^^^^^^^^^^^^^^^^^^^^^
Not applicable
Response Example
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"devices": {
"used": 325,
"available": 50,
"error": 3,
"pendingDelete": 2
},
"vips": {
"used": 325,
"available": 15,
"bad" 2
}
}
Get counters
------------
This is used to get the current counters from the API server. There is no reset for this at the moment so this is from the first installation of a version of the API supporting counters.
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/status/counters
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Response example
^^^^^^^^^^^^^^^^
.. code-block:: json
[
{
"name": "loadbalancers_rebuild",
"value": 10
},
{
"name": "loadbalancers_error",
"value": 0
}
]
Get service status
------------------
This is used to get the health of vital service components. It will initially test all MySQL and Gearman servers to see if they are online.
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/status/service
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Response example
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"mysql": [
{
"ip": "15.185.14.125",
"status": "ONLINE"
}
],
"gearman": [
{
"ip": "15.185.14.75",
"status": "OFFLINE"
}
]
}
Get global service limits
-------------------------
This is used to get the defined global limits (executed per-tenant) of the service.
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/status/limits
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Response example
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"maxLoadBalancerNameLength": 128,
"maxVIPsPerLoadBalancer": 1,
"maxNodesPerLoadBalancer": 50,
"maxLoadBalancers": 20
}
Change global service limits
----------------------------
This is used to modify the global limits of the service. It can be used to modify maxLoadBalancerNameLength, maxVIPsPerLoadBalancer, maxNodesPerLoadBalancer and/or maxLoadBalancers.
Request type
^^^^^^^^^^^^
PUT
Path
^^^^
/v2.0/status/limits
Access
^^^^^^
It should be available to 'administrators' only.
Request body example
^^^^^^^^^^^^^^^^^^^^
.. code-block:: json
{
"maxNodesPerLoadBalancer": 75
}
Response codes
^^^^^^^^^^^^^^
Success: 204
Failure: 400 (Bad request), 500 (Service error)
Get a tenant's service limits
-----------------------------
This is used to get individual tenant limits of the service (currently only maxLoadBalancers).
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/status/limits/{tenantID}
Access
^^^^^^
It should be available to both 'staff' and 'administrators'.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Response example
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"maxLoadBalancers": 20
}
Change a tenant's service limits
--------------------------------
This is used to modify a tenant's limits of the service, if there is no current individual tenant limit a new one will be set. It can currently be used to modify maxLoadBalancers only.
Request type
^^^^^^^^^^^^
PUT
Path
^^^^
/v2.0/status/limits/{tenantID}
Access
^^^^^^
It should be available to 'administrators' only.
Request body example
^^^^^^^^^^^^^^^^^^^^
.. code-block:: json
{
"maxLoadBalancers": 75
}
Response codes
^^^^^^^^^^^^^^
Success: 204
Failure: 400 (Bad request), 500 (Service error)
List Admin API users
--------------------
This is used to get a list of users for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/user
Access
^^^^^^
It should be available to 'administrators' only.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Response example
^^^^^^^^^^^^^^^^
.. code-block:: json
[
{
"tenant": "123456",
"level": "USER"
},
{
"tenant": "654321",
"level": "ADMIN"
}
]
Get an Admin API user
---------------------
This is used to get a single user for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN
Request type
^^^^^^^^^^^^
GET
Path
^^^^
/v2.0/user/{tenantID}
Access
^^^^^^
It should be available to 'administrators' only.
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Response example
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"tenant": "123456",
"level": "USER"
}
Delete an Admin API user
------------------------
This is used to delete a single user for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN
Request type
^^^^^^^^^^^^
DELETE
Path
^^^^
/v2.0/user/{tenantID}
Access
^^^^^^
It should be available to 'administrators' only.
Response codes
^^^^^^^^^^^^^^
Success: 204
Failure: 400 (Bad request), 500 (Service error)
Add an Admin API user
---------------------
This is used to add a single user for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN
Request type
^^^^^^^^^^^^
POST
Path
^^^^
/v2.0/user
Access
^^^^^^
It should be available to 'administrators' only.
Request body example
^^^^^^^^^^^^^^^^^^^^
.. code-block:: json
{
"tenant": 654321,
"level": "ADMIN"
}
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)
Modify an Admin API user
------------------------
This is used to modify a single user for the admin API with their access levels, USER (referred to as staff in this document) or ADMIN
Request type
^^^^^^^^^^^^
POST
Path
^^^^
/v2.0/user/{tenantID}
Access
^^^^^^
It should be available to 'administrators' only.
Request body example
^^^^^^^^^^^^^^^^^^^^
.. code-block:: json
{
"level": "ADMIN"
}
Response codes
^^^^^^^^^^^^^^
Success: 200
Failure: 400 (Bad request), 500 (Service error)

View File

@ -1,230 +0,0 @@
Admin API Configuration
=======================
These options are specific to the Admin API in addition to the
:doc:`common options </config>`.
Configuration File
------------------
The ``[admin_api]`` section is specific to the libra_admin_api utility.
Below is an example:
.. code-block:: ini
[admin_api]
db_section=mysql1
ssl_certfile=/opt/server.crt
ssl_keyfile=/opt/server.key
gearman=127.0.0.1:4730
keystone_module=keystoneclient.middleware.auth_token:AuthProtocol
[mysql1]
host=localhost
port=3306
username=root
password=
schema=lbaas
ssl_cert=/opt/mysql_cert.crt
ssl_key=/opt/mysql_key.key
ssl_ca=/opt/mysql_ca.ca
Command Line Options
--------------------
.. program:: libra_admin_api
.. option:: --host <IP ADDRESS>
The IP address to bind the frontend to, default is 0.0.0.0
.. option:: --port <PORT NUMBER>
The port number to listen on, default is 8889
.. option:: --disable_keystone
Do not use keystone authentication, for testing purposes only
.. option:: --db_sections <SECTIONNAME>
Config file sections that describe the MySQL servers. This option can
be specified multiple times for Galera or NDB clusters.
.. option:: --ssl_certfile <PATH>
The path for the SSL certificate file to be used for frontend of the API
server
.. option:: --ssl_keyfile <PATH>
The path for the SSL key file to be used for the frontend of the API
server
.. option:: --gearman_keepalive
Use TCP KEEPALIVE to the Gearman job server. Not supported on all
systems.
.. option:: --gearman_keepcnt <COUNT>
Maximum number of TCP KEEPALIVE probes to send before killing the
connection to the Gearman job server.
.. option:: --gearman_keepidle <SECONDS>
Seconds of idle time on the Gearman job server connection before
sending TCP KEEPALIVE probes.
.. option:: --gearman_keepintvl <SECONDS>
Seconds between TCP KEEPALIVE probes.
.. option:: --gearman_ssl_ca <PATH>
The path for the Gearman SSL Certificate Authority.
.. option:: --gearman_ssl_cert <PATH>
The path for the Gearman SSL certificate.
.. option:: --gearman_ssl_key <PATH>
The path for the Gearman SSL key.
.. option:: --gearman <HOST:PORT>
Used to specify the Gearman job server hostname and port. This option
can be used multiple times to specify multiple job servers
.. option:: --keystone_module <MODULE:CLASS>
A colon separated module and class to use as the keystone authentication
module. The class should be compatible with keystone's AuthProtocol
class.
.. option:: --stats_driver <DRIVER LIST>
The drivers to be used for alerting. This option can be used multiple
times to specify multiple drivers.
.. option:: --stats_ping_timeout <PING_INTERVAL>
How often to run a ping check of load balancers (in seconds), default 60
.. option:: --stats_poll_timer <POLL_INTERVAL>
How long to wait until we consider the initial ping check failed and
send a second ping. Default is 5 seconds.
.. option:: --stats_poll_timeout_retry <POLL_INTERVAL>
How long to wait until we consider the second and final ping check
failed. Default is 30 seconds.
.. option:: --stats_offline_ping_limit <COUNT>
How many times to ping an OFFLINE load balancer before considering
it unreachable and marking it for deletion.
.. option:: --stats_device_error_limit <COUNT>
Maximum number of simultaneous device failures to allow recovery on
.. option:: --number_of_servers <NUMBER_OF_SERVER>
The number of Admin API servers in the system.
Used to calculate which Admin API server should stats ping next
.. option:: --server_id <SERVER_ID>
The server ID of this server, used to calculate which Admin API
server should stats ping next (start at 0)
.. option:: --datadog_api_key <KEY>
The API key to be used for the datadog driver
.. option:: --datadog_app_key <KEY>
The Application key to be used for the datadog driver
.. option:: --datadog_message_tail <TEXT>
Some text to add at the end of an alerting message such as a list of
users to alert (using @user@email.com format), used for the datadog
driver.
.. option:: --datadog_tags <TAGS>
A list of tags to be used for the datadog driver
.. option:: --node_pool_size <SIZE>
The number of hot spare load balancer devices to keep in the pool,
default 10
.. option:: --vip_pool_size <SIZE>
The number of hot spare floating IPs to keep in the pool, default 10
.. option:: --expire_days <DAYS>
The number of days before DELETED load balancers are purged from the
database. The purge is run every 24 hours. Purge is not run if no
value is provided.
.. option:: --stats_enable <BOOL>
Enable / Disable usage statistics gathering
.. option:: --exists_freq <MINUTES>
Minutes between sending of billing exists messages
.. option:: --usage_freq <MINUTES>
Minutes between sending of billing usage messages
.. option:: --stats_freqs <MINUTES>
Minutes between collecting usage statistics
.. option:: --stats_purge_enable <BOOL>
Enable / Disable purging of usage statistics
.. option:: --stats_purge_days <DAYS>
Number of days to keep usage statistics
.. option:: --delete_timer_seconds <SECONDS>
Which second of each minute delete timer should run
.. option:: --ping_timer_seconds <SECONDS>
Which second of each minute ping timer should run
.. option:: --stats_timer_seconds <SECONDS>
Which second of each minute statistics timer should run
.. option:: --usage_timer_seconds <SECONDS>
Which second of each minute usage timer should run
.. option:: --probe_timer_seconds <SECONDS>
Which second of each minute probe timer should run
.. option:: --offline_timer_seconds <SECONDS>
Which second of each minute offline timer should run
.. option:: --vips_timer_seconds <SECONDS>
Which second of each minute vips timer should run
.. option:: --exists_timer_seconds <SECONDS>
Which second of each minute exists timer should run

View File

@ -1,14 +0,0 @@
.. _libra-admin-api:
Libra Admin API Server
======================
.. toctree::
:maxdepth: 2
about
config
schedulers
stats-drivers
api
v1api

View File

@ -1,93 +0,0 @@
================
Admin Schedulers
================
The Admin API has several schedulers to maintain the health of the Libra
system. This section of the document goes into detail about each one.
Each Admin API server takes it in-turn to run these tasks. Which server is
next is determined by the :option:`--number_of_servers` and
:option:`--server_id` options.
Stats Scheduler
---------------
This scheduler is actually a monitoring scheduler and at a later date will also
gather statistics for billing purposes. It is executed once a minute.
It sends a gearman 'ping' message to each active Load Balancer device. There are three
possible outcomes from the results:
It has support for multiple different :doc:`stats-drivers`.
#. If all is good, no action is taken
#. If a node connected to a load balancer has failed the node is marked as
ERROR and the load balancer is marked as DEGRADED
#. If a device has failed the device will automatically be rebuilt on a new
device and the associated floating IP will be re-pointed to that device. The
old device will be marked for deletion.
Rebuild (AutoFailover)
**********************
Libra LBaaS supports auto-failover or auto-rebuild of a broken :term:`device`.
This typically means re-allocating / re-building the failed :term:`device` onto a new :term:`device`.
1. A ping is sent to each :term:`device` (ping_lbs > _exec_ping)
2. Send failures to drivers (_exec_ping > _send_fails)
3. Driver does
#. Marks the :term:`device` as being in ERROR state.
#. Triggers a rebuild
#. Looks for a free :term:`device` that is in OFFLINE state in the db.
#. Assigns the failed :term:`device` to the OFFLINE :term:`device`
#. Assigns the :term:`vip` to the new :term:`device`
#. Marks :term:`device` as DELETED
#. Puts the new :term:`device` into ACTIVE in the db.
4. A scheduled function removes the :term:`device` from the DB and unconfigures it.
5. A scheduled function ensures that there are standby :term:`devices <device>` in the pool.
Delete Scheduler
----------------
This scheduler looks out for any devices marked for deletion after use or after
an error state. It is executed once a minute.
It sends a gearman message to the Pool Manager to delete any devices that are
to be deleted and removes them from the database.
Create Scheduler
----------------
This scheduler takes a look at the number of hot spare devices available. It
is executed once a minute (after the delete scheduler).
If the number of available hot spare devices falls below the value specified by
:option:`--node_pool_size` it will request that new devices are built and those
devices will be added to the database. It records how many are currently being
built so long build times don't mean multiple Admin APIs are trying to fulfil
the same quota.
VIP Scheduler
-------------
This scheduler takes a look at the number of hot spare floating IPs available.
It is executed once a minute.
If the number of available floating IP address falls below the value specified
by :option:`--vip_pool_size` it will request that new IPs are built and those
will be added to the database.
Expunge Scheduler
-----------------
This scheduler removes logical Load Balancers marked as DELETED from the
database. It is executed once a day.
The DELETED logical Load Balancers remain in the database mainly for billing
purposes. This clears out any that were deleted after the number of days
specified by :option:`--expire_days`.

View File

@ -1,67 +0,0 @@
.. stats-drivers:
=============
Stats Drivers
=============
The Stats scheduler has support for multiple different drivers.
A typical driver has support for 3 different things:
* Sending an alert
* Sending a change
* Sending a delete
One can divide what a driver does into different areas:
* Alerting - Example Datadog
* Remediation - example: Database
* Stats - Example Datadog
Dummy
-----
A dummy driver which simply logs the above actions.
Database
--------
This is not a typical driver. It provides functionality such as triggering
rebuilds of failed devices, marking devices as deleted and changing node states in
the db.
Alert
*****
When receiving an alert it does the following:
#. Marks the node with ERROR in the database
#. Triggers a rebuild of the device (AutoFailover / AF)
Delete
******
Marks the device as DELETED in the Database
Change
******
Change the state of the device in the database
Datadog
-------
A plugin to provide functionality towards http://www.datadoghq.com/ for alerting.
Alert
*****
Send a failure alert up to Datadog
Delete
******
Send a message about a device being down / unreachable.

View File

@ -1,378 +0,0 @@
LBaaS Device API (v1, DEPRECATED)
=================================
Description
-----------
The LBaaS service provides two classes of APIs including a tenant facing
API and admin API. The admin API is designed for internal usage to allow
administration of the LBaaS service itself. As part of this, the *Device
API* allows for managing devices which are the actual load balancer
devices used by LBaaS.
API Overview
------------
The device API is not visible to tenants thus it is designed to operate
on its own HTTPS port which is configurable. The device API only
supports a JSON resource representation for reading and writing. The API
is designed as a RESTful API including support of CRUD operations for
creating, reading, updating and deleting devices.
Base URL and port
^^^^^^^^^^^^^^^^^
All device API calls run on the same TCP port and require HTTPS for
access. The specific HTTPS port and certificate are configurable by the
LBaaS service and will comply with the Cloud security requirements
including the certificate signing. The API is versioned such that all
calls are prefixed with a version URI. For example,
``https://lbaas-service:8889/v1/devices/...``
would access the LBaaS system hosted on lbaas-service, using HTTPS on
port 8889 using version 1 of the API.
Exceptions
^^^^^^^^^^
As a RESTful service, the device API can return standard HTTP status
codes with each request including success and error codes mentioned
below. In the event a non 200 series status is returned, a JSON
formatted error body is provided with additional details. The format of
the JSON error body is as follows:
*Example of a bad request JSON error response body*
::
{
"message":"Bad Request",
"details":"device name : lbaas-10.5.251.48 already exists",
"code":400
}
Base URI
^^^^^^^^
All LBaaS Device API calls have a common base URI defined as follows:
``<baseURI> = https://<lbaas-system-addr>:<lbaas-device-port>/v1``
- *lbaas-system-addr* is the system name / address where the LBaaS API
service is running.
- *lbaas-device-port* is the TCP port in which the device service is
listening for HTTPS REST requests.
- */v1/devices* will prefix all REST calls.
Device Data Model
^^^^^^^^^^^^^^^^^
Device REST calls allow reading and writing device resources represented
in JSON. The data model for devices is defined as follows:
id
^^
*id* is an integer representing a unique id for the device. *id* is
created by the LBaaS service when devices are created. *id* is used to
reference devices as the REST collection id.
updated
^^^^^^^
*updated* is a text string representing the last time this device
resource was updated.
created
^^^^^^^
*created* is a text string representing when the device was created.
status
^^^^^^
*status* is a text string representing the status of the device as
reported by the device to the LBaaS service ( this is done through the
gearman client / worker interface ). Status values can be 'OFFLINE',
'ONLINE', 'ERROR'.
address
^^^^^^^
*address* is the IPv4 or IPv6 address of the device. This is the address
which will be used as the loadbalancer's address used by the customer.
Note, this should be a Nova floating IP address for usage with HAProxy
on Nova.
name
^^^^
*name* is the name of the device which is used internally by LBaaS as
the gearman worker name. Each device name is specified by the pool
manager and must be unique for each device. The format of the name is
``lbaas-<version>-<id>`` where ``<version>`` is the gearman worker
version e.g. *v1* and ``<id>`` is a unique UUID for the name.
loadbalancer
^^^^^^^^^^^^
*loadbalancer* are references to logical loadbalancers who are using
this device. This is a list of one or more integers. An empty or zero
value denotes that this device is not used and is free. Note, if the
device is not in use, it has no customer loadbalancer config and is in an
'OFFLINE' state.
type
^^^^
*type* is a text string describing the type of device. Currently only
'HAProxy' is supported.
Example of a single device
^^^^^^^^^^^^^^^^^^^^^^^^^^
::
{
"id": 1,
"updated": "2013-06-10T14:29:14",
"created": "2013-06-10T14:29:14",
"status": "OFFLINE",
"floatingIpAddress": "15.185.96.125",
"publicIpAddress": "15.185.96.125",
"name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
"loadBalancers": [{"id": 10313, "tenantid": "42374872347634"}],
"type": "basename: libra-haproxy, image: 12345",
"az": 2
}
Operations
==========
Get all Devices
---------------
Get all devices currently defined.
::
GET <baseURI>/devices
Return Status
^^^^^^^^^^^^^
200 on success, 500 for internal error
Example
^^^^^^^
::
curl -k https://15.185.107.220:8889/v1/devices
Response:
::
{
"devices": [
{
"id": 1,
"updated": "2013-06-10T14:29:14",
"created": "2013-06-10T14:29:14",
"status": "OFFLINE",
"floatingIpAddress ":"15.185.96.125",
"publicIpAddress": "15.185.96.125",
"name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
"loadBalancers": [{"id": 10313, "tenantid": "42374872347634"}],
"type": "basename: libra-haproxy, image: 12345",
"az": 2
}
]
}
Get a Device
------------
Get a specific device.
::
GET <baseURI>/devices/{deviceId}
Return Status
^^^^^^^^^^^^^
200 on success, 404 not found, 500 for internal error
Example
^^^^^^^
::
curl -k https://15.185.107.220:8889/v1/devices/1
Response:
::
{
"id": 1,
"updated": "2013-06-10T14:29:14",
"created": "2013-06-10T14:29:14",
"status": "OFFLINE",
"floatingIpAddress": "15.185.96.125",
"publicIpAddress": "15.185.96.125",
"name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
"loadBalancers": [{"id": 10313, "tenantid": "42374872347634"}],
"type": "basename: libra-haproxy, image: 12345",
"az": 2
}
Create a Device
---------------
Creating a new device will register an already deployed device with the
LBaaS service. In order to do so, LBaaS will need to know its name and
address. Returned will be the new device including its *id*.
::
POST <baseURI>/devices
Return Status
^^^^^^^^^^^^^
200 on success, 400 bad request, 500 for internal error
Request Body
^^^^^^^^^^^^
A JSON request body is required for this request.
::
{
"name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
"publicIpAddress": "15.185.96.125",
"floatingIpAddress": "15.185.96.125",
"az": 2,
"type": "basename: libra-haproxy, image: 12345"
}
Example
^^^^^^^
::
curl -X POST -H "Content-type:application/json" --data-binary "@device.json" -k https://15.185.107.220:8889/v1/devices
Response:
::
{
"id": 1,
"updated": "2013-06-10T14:29:14",
"created": "2013-06-10T14:29:14",
"status": "OFFLINE",
"floatingIpAddress": "15.185.96.125",
"publicIpAddress": "15.185.96.125",
"name": "lbaas-v1-067e6162-3b6f-4ae2-a171-2470b63dff00",
"loadBalancers": [{"id": 10313, "tenantid": "42374872347634"}],
"type": "basename: libra-haproxy, image: 12345",
"az": 2
}
Delete a Device
---------------
Deleting a device will remove it from the LBaaS service. Note, this
call can be dangerous and affect a customer's load balancer if it is in
use. *Please use this call with extreme caution!*
::
DELETE <baseURI>/devices/{deviceId}
Return Status
^^^^^^^^^^^^^
204 on success, 400 bad request, 500 for internal error
Example
^^^^^^^
::
curl -X DELETE -k https://15.185.107.220:8889/v1/devices/1
Update a Device
---------------
Update the status of a device, it can set the status to `ERROR` or `ONLINE`
and the statusDescription field. No other fields can be changed and will be
ignored.
::
PUT <baseURI>/devices/{deviceId}
Return Status
^^^^^^^^^^^^^
200 on success, 400 bad request, 500 for internal error
Request Body
^^^^^^^^^^^^
A JSON request body is required for this request.
::
{
"status": "ERROR",
"statusDescription": "Load Balancer has failed"
}
Example
^^^^^^^
::
curl -X PUT -H "Content-type:application/json" --data-binary "@device.json" -k https://15.185.107.220:8889/v1/devices/1
Get Usage of Devices
--------------------
This call allows obtaining usage summary information for all devices.
::
GET <baseURI>/devices/usage
Return Status
^^^^^^^^^^^^^
200 on success, 500 for internal error
Example
^^^^^^^
::
curl -k https://15.185.107.220:8889/v1/devices/usage
Response:
::
{
"total": 100,
"free" : 50,
"taken": 50
}

View File

@ -1,16 +0,0 @@
Description
===========
Purpose
-------
The API server listens for REST+JSON connections to interface the user with
the LBaaS system. Its API is based on the Atlas API with a few slight
modifications.
Design
------
It is designed to use Eventlet WSGI web server frontend and Pecan+WSME to
process the requests. SQLAlchemy+MySQL is used to store details of the load
balancers and Gearman is used to communicate to the workers.

View File

@ -1,635 +0,0 @@
Load Balancer as a Service (LBaaS) API Specification
====================================================
.. toctree::
:maxdepth: 2
:glob:
rest/*
1. Overview
-----------
This guide is intended for software developers who wish to create
applications using the Load Balancer as a Service (LBaaS) set
of APIs. It assumes the reader has a general understanding of cloud
APIs, load balancing concepts, RESTful web services, HTTP/1.1
conventions and JSON serialization formats. The LBaaS set of APIs
utilize and take advantage of a variety of Openstack cloud API patterns
which are described in detail.
1.1 API Maturity Level
~~~~~~~~~~~~~~~~~~~~~~
This API definition represents the Load Balancer as a Service
in Beta release form.
**Maturity Level**: *Experimental*
**Version API Status**: *BETA*
2. Architecture View
--------------------
2.1 Overview
~~~~~~~~~~~~
The Load Balancer as a Service (LBaaS) is a set of APIs that
provide a RESTful interface for the creation and management of load
balancers in the cloud. Load balancers created can be used for a variety
of purposes including load balancers for your external cloud hosted
services as well as internal load balancing needs. The load balancing
solution is meant to provide both load balancing and high availability
in an industry standard manner. The LBaaS APIs defined are integrated
within the API ecosystem including integration with the
identity management system, billing and monitoring systems.
2.2 Conceptual/Logical Architecture View
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To use the Load Balancers API effectively, you should
understand several key concepts.
2.2.1 Load Balancer
^^^^^^^^^^^^^^^^^^^
A load balancer is a logical device. It is used to distribute workloads
between multiple back-end systems or services called 'nodes', based on
the criteria defined as part of its configuration.
2.2.2 Virtual IP Address
^^^^^^^^^^^^^^^^^^^^^^^^
A virtual IP address is an Internet Protocol (IP) address configured on the
load balancer for use by clients connecting to a service that is load
balanced. Incoming connections and requests are distributed to back-end
nodes based on the configuration of the load balancer. The load balancer will
need to be registered with the appropriate DNS domain record in order for users
to access the nodes via a domain name-based URL.
2.2.3 Node
^^^^^^^^^^
A node is a back-end device providing a service, like a web server or file
server, on a specified IP and port.
The nodes defined by the load balancer are responsible for servicing the
requests received through the load balancer's virtual IP. By default, the
load balancer employs a basic health check that ensures the node is
listening on its defined port. The node is checked at the time of
addition and at regular intervals as defined by the load balancer health
check configuration. If a back-end node is not listening on its port or
does not meet the conditions of the defined active health check for the
load balancer, then the load balancer will not forward connections or
requests to it and its status will be listed as OFFLINE. Only nodes that
are in an ONLINE status will receive and be able to service traffic from
the load balancer.
Nodes can be assigned a weight attribute that determines the portion of
requests or connections it services compared to the other nodes of the load
balancer. For example, if node A has a weight of 2 and node B has a weight of 1,
then the loadbalancer will forward twice as many requests to node A than to
node B. If the weight attribute is not specified, then the node's weight is
implicitly set to "1". Weight values from 1 to 256 are allowed.
Nodes that are assigned to a load balancer that is delivering data to a Galera
database cluster may require a primary write node be specified to avoid
database locking problems that can occur. For this case, a load balancer can be
configured to use the special "GALERA" protocol type. When a "GALERA" protocol
is chosen, all of the specified nodes must use the node "backup" attribute to
specify whether it is a backup node or the primary node. There may only be a
single primary node specified by setting the "backup" attribute to FALSE. All
other nodes must have the "backup" attribute set to TRUE.
2.2.4 Health Monitors
~~~~~~~~~~~~~~~~~~~~
A health monitor is a configurable, active monitoring operation that exists for all load balancer nodes. In addition to the basic health checks, active health monitoring operations periodically check your back-end nodes to ensure they are responding correctly.
Active health monitoring offers two choices for the type of monitor it can provide; CONNECT or HTTP. CONNECT monitoring is the most basic type of health check and it does not perform post-processing or protocol specific health checks. HTTP monitoring, on the other hand, is more intelligent and it is capable of processing HTTP responses to determine the condition of a node. For both options, a user may configure the time delay between monitoring checks, the timeout period for a connection to a node, the number of attempts before removing a node from rotation and for HTTP monitoring, the HTTP path to test.
Active health monitoring, by default is configured to use CONNECT type monitoring with a 30 second delay, 30 second timeout, and 2 retries, and it can not be disabled. The caller may configure one health monitor per load balancer and the same configuration is used to monitor all of the back-end nodes.
2.3 Infrastructure Architecture View
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LBaaS fits into the ecosystem of APIs by utilizing the same common
authentication mechanisms as any other services. In order to
use LBaaS, a user account must have activated "Load Balancer" service.
All API calls require a valid authentication token.
3. Account-level View
---------------------
Once the account is activated, the LBaaS service will show up
in the service catalog returned during user login. In addition, LBaaS
endpoints to be used will also be presented. Availability zone
information may vary based on region.
3.1 Service Catalog
~~~~~~~~~~~~~~~~~~~
Once a user authenticates using RESTful API, a service
catalog will list the availability of the LBaaS service, roles and
endpoints for the region you have logged into and in which you are
activated for.
*The following is an example of LBaaS service information within the
service catalog including roles and endpoints:*
::
"user": {
"id": "59267322167978",
"name": "lbaas_user",
"roles": [
{
"id": "83241756956007",
"serviceId": "220",
"name": "lbaas-user",
"tenantId": "11223344556677"
},
{
"id": "00000000004024",
"serviceId": "140",
"name": "user",
"tenantId": "11223344556677"
},
{
"id": "00000000004013",
"serviceId": "130",
"name": "block-admin",
"tenantId": "11223344556677"
}
]
},
"serviceCatalog": [
{
"name": "Identity",
"type": "identity",
"endpoints": [{
"publicURL": "https:\/\/usa.region-b.geo-1.identity.hpcloudsvc.com:35357\/v2.0\/",
"region": "region-b.geo-1",
"versionId": "2.0",
"versionInfo": "https:\/\/usa.region-b.geo-1.identity-internal.hpcloudsvc.com:35357\/v2.0\/"
}]
},
{
"name": "Load Balancer",
"type": "hpext:lbaas",
"endpoints": [{
"tenantId": "11223344556677",
"publicURL": "https:\/\/usa.region-b.geo-1.lbaas.hpcloudsvc.com\/v1.1",
"publicURL2": "",
"region": "region-b.geo-1",
"versionId": "1.1",
"versionInfo": "https:\/\/usa.region-b.geo-1.lbaas.hpcloudsvc.com\/v1.1",
"versionList": "https:\/\/usa.region-b.geo-1.lbaas.hpcloudsvc.com"
}]
}
]
4. General API Information
--------------------------
This section describes operations and guidelines that are common to all
LBaaS APIs.
4.1 Authentication
~~~~~~~~~~~~~~~~~~
The LBaaS API uses standards defined by the OpenStack Keystone project
for authentication. Please refer to the
identity management system for more details on all authentication
methods currently supported.
4.2 Service Access/Endpoints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As shown in the example above, logging into your region will provide you
with the appropriate LBaaS endpoints to use. In addition, all supported
versions are published within the service catalog. A client may choose to
use any LBaaS API version listed.
4.3 Request/Response Types
~~~~~~~~~~~~~~~~~~~~~~~~~~
The LBaaS API currently only supports JSON data serialization formats
for request and response bodies. The request format is specified using
the 'Content-Type' header and is required for operations that have a
request body. The response format should be specified in requests using
the 'Accept' header. If no response format is specified, JSON is the
default.
4.4 Persistent Connections
~~~~~~~~~~~~~~~~~~~~~~~~~~
By default, the API supports persistent connections via HTTP/1.1
'keep-alives'. All connections will be kept alive unless the connection
header is set to close. In adherence with the IETF HTTP RFCs, the server
may close the connection at any time and clients should not rely on this
behavior.
4.5 Absolute Limits
~~~~~~~~~~~~~~~~~~~
Absolute limits are limits which prohibit a user from creating too many
LBaaS resources. For example, 'maxNodesPerLoadbalancer' identifies the
total number of nodes that may be associated with a given load balancer.
Limits for a specific tenant may be queried for using the 'GET /limits'
API. This will return the limit values which apply to the tenant who
made the request.
+-----------------------------+------------------------------------------------------------+
| Limited Resource | Description |
+=============================+============================================================+
| maxLoadBalancers | Maximum number of load balancers allowed for this tenant |
+-----------------------------+------------------------------------------------------------+
| maxNodesPerLoadBalancer | Maximum number of nodes allowed for each load balancer |
+-----------------------------+------------------------------------------------------------+
| maxLoadBalancerNameLength | Maximum length allowed for a load balancer name |
+-----------------------------+------------------------------------------------------------+
| maxVIPsPerLoadBalancer | Maximum number of Virtual IPs for each load balancer |
+-----------------------------+------------------------------------------------------------+
4.6 Faults
~~~~~~~~~~
When issuing a LBaaS API request, it is possible that an error can
occur. In these cases, the system will return an HTTP error response
code denoting the type of error and a LBaaS response body with
additional details regarding the error. Specific HTTP status codes
possible are listed in each API definition.
*The following JSON message represents the JSON response body used for
all faults:*
::
{
"message":"Description of fault",
"details":"Details of fault",
"code": HTTP standard error status
}
4.7 Specifying Tenant IDs
~~~~~~~~~~~~~~~~~~~~~~~~~
Tenant identifiers with LBaaS API URIs are not required. The tenant
identifier is derived from the Openstack Keystone authentication token
provided with each API call. This simplifies the REST URIs to only
include the base URI and the resource. All
LBaaS calls behave in this manner.
5. LBaaS API Resources and Methods
----------------------------------
The following is a summary of all supported LBaaS API resources and
methods. Each resource and method is defined in detail in the subsequent
sections.
**Derived resource identifiers:**
**{baseURI}** is the endpoint URI returned in the service catalog upon
logging in including the protocol, endpoint and base URI.
**{ver}** is the specific version URI returned as part of the service
catalog.
**{loadbalancerId}** is the unique identifier for a load balancer
returned by the LBaaS service.
**{nodeId}** is the unique identifier for a load balancer node returned
by the LBaaS service.
5.1 LBaaS API Summary Table
~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Resource | Operation | Method | Path |
+=================+============================================================+==========+=================================================================+
| Versions | :ref:`Get list of all API versions <api-versions>` | GET | {baseURI}/ |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Versions | :ref:`Get specific API version <api-version>` | GET | {baseURI}/{ver} |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Limits | :ref:`Get list of LBaaS limits <api-limits>` | GET | {baseURI}/{ver}/limits |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Protocols | :ref:`Get list of supported protocols <api-protocols>` | GET | {baseURI}/{ver}/protocols |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Algorithms | :ref:`Get list of supported algorithms <api-algorithms>` | GET | {baseURI}/{ver}/algorithms |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Load Balancer | :ref:`Get list of all load balancers <api-lb-list>` | GET | {baseURI}/{ver}/loadbalancers |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Load Balancer | :ref:`Get load balancer details <api-lb-status>` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId} |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Load Balancer | :ref:`Create a new load balancer <api-lb-create>` | POST | {baseURI}/{ver}/loadbalancers |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Load Balancer | :ref:`Update load balancer attributes <api-lb-modify>` | PUT | {baseURI}/{ver}/loadbalancers/{loadbalancerId} |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Load Balancer | :ref:`Delete an existing load balancer <api-lb-delete>` | DELETE | {baseURI}/{ver}/loadbalancers/{loadbalancerId} |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Node | :ref:`Get list of load balancer nodes <api-node-list>` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Node | :ref:`Get a specific load balancer node <api-node-status>` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Node | :ref:`Create a new load balancer node <api-node-create>` | POST | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Node | :ref:`Update a load balancer node <api-node-modify>` | PUT | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Node | :ref:`Delete a load balancer node <api-node-delete>` | DELETE | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Virtual IP | :ref:`Get list of virtual IPs <api-vips>` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/virtualips |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Logs | :ref:`Archive log file to Object Storage <api-logs>` | POST | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/logs |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Health Monitor | :ref:`Get a load balancer monitor <api-monitor-status>` | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Health Monitor | :ref:`Update a load balancer monitor <api-monitor-modify>` | PUT | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
| Health Monitor | :ref:`Reset a load balancer monitor <api-monitor-delete>` | DELETE | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor |
+-----------------+------------------------------------------------------------+----------+-----------------------------------------------------------------+
5.2 Common Request Headers
~~~~~~~~~~~~~~~~~~~~~~~~~~
*HTTP standard request headers*
**Accept** - Internet media types that are acceptable in the response.
LBaaS API supports the media type 'application/json'.
**Content-Length** - The length of the request body in octets (8-bit
bytes).
**Content-Type** - The Internet media type of the request body. Used
with POST and PUT requests. LBaaS API supports
'application/json'.
*Non-standard request headers*
**X-Auth-Token** - authorization token.
*Example*
::
GET /v1.0/loadbalancers HTTP/1.1
Host: system.hpcloudsvc.com
Content-Type: application/json
Accept: application/json
X-Auth-Token: TOKEN
Content-Length: 85
5.3 Common Response Headers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
*HTTP standard response headers*
**Content-Type** - Internet media type of the response body.
**Date** - The date and time that the response was sent.
*Example*
::
HTTP/1.1 200 OK
Content-Length: 1135
Content-Type: application/json; charset=UTF-8
Date: Tue, 30 Oct 2012 16:22:35 GMT
.. _api-versions:
6. Get a List of All LBaaS API Versions Supported
-------------------------------------------------
6.1 Operation
~~~~~~~~~~~~~
+------------+--------------------------------+----------+--------------+
| Resource | Operation | Method | Path |
+============+================================+==========+==============+
| Versions | Get list of all API versions | GET | {baseURI}/ |
+------------+--------------------------------+----------+--------------+
6.2 Description
~~~~~~~~~~~~~~~
This method allows querying the LBaaS service for all API versions
it supports. This method is also advertised within the Keystone service
catalog, which is presented upon user login. All versions listed can be
used for LBaaS.
6.3 Request Data
~~~~~~~~~~~~~~~~
None required.
6.4 Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
6.5 Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
6.6 Request Body
~~~~~~~~~~~~~~~~
None required.
6.7 Normal Response Code
~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
6.8 Response Body
~~~~~~~~~~~~~~~~~
The response body contains a list of all supported versions of LBaaS.
6.9 Error Response Codes
~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
6.10 Example
~~~~~~~~~~~~
**Curl Request**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com
**Response**
::
{
"versions": [
{
"id": "v1.1",
"links": [
{
"href": "http://api-docs.hpcloud.com",
"rel": "self"
}
],
"status": "CURRENT",
"updated": "2012-12-18T18:30:02.25Z"
}
]
}
.. _api-version:
7. Get Specific LBaaS API Version Information
---------------------------------------------
7.1 Operation
~~~~~~~~~~~~~
+------------+----------------------------+----------+-------------------+
| Resource | Operation | Method | Path |
+============+============================+==========+===================+
| Versions | Get specific API version | GET | {baseURI}/{ver} |
+------------+----------------------------+----------+-------------------+
7.2 Description
~~~~~~~~~~~~~~~
This method allows querying the LBaaS service for information regarding
a specific version of the LBaaS API. This method is also advertised
within the Keystone service catalog which is presented upon user login.
7.3 Request Data
~~~~~~~~~~~~~~~~
None required.
7.4 Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
7.5 Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
7.6 Request Body
~~~~~~~~~~~~~~~~
None required.
7.7 Normal Response Code
~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
7.8 Response Body
~~~~~~~~~~~~~~~~~
The response body contains information regarding a specific LBaaS API
version.
7.9 Error Response Codes
~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
7.10 Example
~~~~~~~~~~~~
**Curl Request**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1
**Response**
::
{
"version": {
"id": "v1.1",
"links": [
{
"href": "http://api-docs.hpcloud.com",
"rel": "self"
}
],
"media-types": [
{
"base": "application/json"
}
],
"status": "CURRENT",
"updated": "2012-12-18T18:30:02.25Z"
}
}
Features Currently Not Implemented or Supported
-----------------------------------------------
The following features are not supported.
* IPV6 address types are not supported.
SSL
---
Supported
~~~~~~~~~
End-to-end HTTPS protocol support is currently provided by the TCP load balancer option. HTTPS-based traffic will flow between end-users and application server nodes via the TCP load balancer connection.
* The same SSL certificate needs to be installed on each application server node.
* The same private key needs to be installed on each application server node.
* The SSL certificate needs to reference the load balancer fully qualified domain name (FQDN) or external IP address of the load balancer in the Subject CommonName(CN) or Subject Alternative
Name field of the certificate. The IP address of the servers behind the load balancer should not be used.
Not supported
~~~~~~~~~~~~~
* SSL certificate termination on the load balancer
* HTTPS/SSL session affinity or "stickiness"

View File

@ -1,126 +0,0 @@
API Configuration
=================
These options are specific to the API in addition to the
:doc:`common options </config>`.
Configuration File
------------------
The ``[api]`` section is specific to the libra_api utility. Below is an
example:
.. code-block:: ini
[api]
db_sections=mysql1
gearman=127.0.0.1:4730
keystone_module=keystoneclient.middleware.auth_token:AuthProtocol
swift_basepath=lbaaslogs
swift_endpoint=https://host.com:443/v1/
ssl_certfile=/opt/certfile.crt
ssl_keyfile=/opt/keyfile.key
[mysql1]
host=localhost
port=3306
username=root
password=
schema=lbaas
ssl_cert=/opt/mysql_cert.crt
ssl_key=/opt/mysql_key.key
ssl_ca=/opt/mysql_ca.ca
In addition to this any options that are specific to the given keystone
module should be stored in the ``[keystone]`` section.
Command Line Options
--------------------
.. program:: libra_api
.. option:: --host <IP ADDRESS>
The IP address to bind the frontend to, default is 0.0.0.0
.. option:: --port <PORT NUMBER>
The port number to listen on, default is 443
.. option:: --disable_keystone
Do not use keystone authentication, for testing purposes only
.. option:: --db_sections <SECTIONNAME>
Config file sections that describe the MySQL servers. This option can
be specified multiple times for Galera or NDB clusters.
.. option:: --gearman <HOST:PORT>
Used to specify the Gearman job server hostname and port. This option
can be used multiple times to specify multiple job servers.
.. option:: --gearman_keepalive
Use TCP KEEPALIVE to the Gearman job server. Not supported on all
systems.
.. option:: --gearman_keepcnt <COUNT>
Maximum number of TCP KEEPALIVE probes to send before killing the
connection to the Gearman job server.
.. option:: --gearman_keepidle <SECONDS>
Seconds of idle time on the Gearman job server connection before
sending TCP KEEPALIVE probes.
.. option:: --gearman_keepintvl <SECONDS>
Seconds between TCP KEEPALIVE probes.
.. option:: --gearman_ssl_ca <PATH>
The path for the Gearman SSL Certificate Authority
.. option:: --gearman_ssl_cert <PATH>
The path for the Gearman SSL certificate
.. option:: --gearman_ssl_key <PATH>
The path for the Gearman SSL key
.. option:: --keystone_module <MODULE:CLASS>
A colon separated module and class to use as the keystone authentication
module. The class should be compatible with keystone's AuthProtocol
class.
.. option:: --swift_basepath <CONTAINER>
The default container to be used for customer log uploads.
.. option:: --swift_endpoint <URL>
The default endpoint for swift. The user's tenant ID will automatically
be appended to this unless overridden at the log archive request.
.. option:: --ssl_certfile <PATH>
The path for the SSL certificate file to be used for frontend of the API
server
.. option:: --ssl_keyfile <PATH>
The path for the SSL key file to be used for the frontend of the API
server
.. option:: --ip_filters <FILTERS>
A mask of IP addresses to filter for backend nodes in the form
xxx.xxx.xxx.xxx/yy
Any backend node IP address supplied which falls outside these filters
will result in an error for the create or node add functions.
This option can be specified multiple times.

View File

@ -1,11 +0,0 @@
.. _libra-api:
Libra API Server
================
.. toctree::
:maxdepth: 2
about
config
api

View File

@ -1,116 +0,0 @@
.. _api-algorithms:
==========
Algorithms
==========
Get List Of Supported LBaaS Algorithms
--------------------------------------
Operation
~~~~~~~~~
+--------------+------------------------------------+----------+------------------------------+
| Resource | Operation | Method | Path |
+==============+====================================+==========+==============================+
| Algorithms | Get list of supported algorithms | GET | {baseURI}/{ver}/algorithms |
+--------------+------------------------------------+----------+------------------------------+
Description
~~~~~~~~~~~
All load balancers utilize an algorithm that defines how traffic should
be directed between back end nodes. The default algorithm for newly
created load balancers is ROUND\_ROBIN, which can be overridden at
creation time or changed after the load balancer has been initially
provisioned.
The algorithm name is to be constant within a major revision of the load
balancing API, though new algorithms may be created with a unique
algorithm name within a given major revision of this API.
**Supported Algorithms**
+----------------------+-------------------------------------------------------------------------+
| Name | Description |
+======================+=========================================================================+
| LEAST\_CONNECTIONS | The node with the lowest number of connections will receive requests. |
+----------------------+-------------------------------------------------------------------------+
| ROUND\_ROBIN | Connections are routed to each of the back-end servers in turn. |
+----------------------+-------------------------------------------------------------------------+
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
The response body contains the currently supported algorithms.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Request**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/algorithms
**Response**
::
{
"algorithms": [
{
"name": "ROUND_ROBIN"
},
{
"name": "LEAST_CONNECTIONS"
}
]
}

View File

@ -1,360 +0,0 @@
.. _api-monitor:
===============
Health Monitors
===============
.. _api-monitor-status:
Get Load Balancer Health Monitor
--------------------------------
Operation
~~~~~~~~~
+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
|Resource |Operation |Method |Path |
+====================+==========================================+=======+==============================================================+
|Health Monitor |Get a load balancer health monitor |GET |{baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor |
+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
Description
~~~~~~~~~~~
This operation retrieves the current configuration of a load balancer health monitor.
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+------------------+---------------------+
| HTTP Status Code | Description |
+==================+=====================+
|200 |OK |
+------------------+---------------------+
Response Body
~~~~~~~~~~~~~
The response body contains the health monitor for the requested load balancer or 404, if not found.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+------------------+---------------------+
| HTTP Status Code | Description |
+==================+=====================+
|400 |Bad Request |
+------------------+---------------------+
|401 |Unauthorized |
+------------------+---------------------+
|404 |Not Found |
+------------------+---------------------+
|405 |Not Allowed |
+------------------+---------------------+
|500 |LBaaS Fault |
+------------------+---------------------+
Example
~~~~~~~
**Curl Example**
::
curl -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token:HPAuth_d17efd" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/healthmonitor
**Response**
::
{
"type": "CONNECT",
"delay": "30",
"timeout": "30",
"attemptsBeforeDeactivation": "2"
}
or..
::
{
"type": "HTTP",
"delay": "30",
"timeout": "30",
"attemptsBeforeDeactivation": "2",
"path": "/healthcheck"
}
.. _api-monitor-modify:
Update Load Balancer Health Monitor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Operation
~~~~~~~~~
+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
|Resource |Operation |Method |Path |
+====================+==========================================+=======+==============================================================+
|Health Monitor |Update a load balancer health monitor |PUT |{baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor |
+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
Description
~~~~~~~~~~~
Active health monitoring provides two types of health monitors, CONNECT or HTTP. The caller can configure one health monitor per load balancer.
The health monitor has a type attribute to signify which type it is. The required attributes for each type are as follows:
**CONNECT Monitor**
The monitor connects to each node on its defined port to ensure that the node is listening properly.
The CONNECT monitor is the most basic type of health check and does not perform post-processing or protocol specific health checks. It includes several configurable properties:
- delay: This is the minimum time in seconds between regular calls to a monitor. The default is 30 seconds.
- timeout: Maximum number of seconds for a monitor to wait for a connection to be established to the node before it times out. The value cannot be greater than the delay value. The default is 30 seconds.
- attemptsBeforeDeactivation: Number of permissible monitor failures before removing a node from rotation. Must be a number between 1 and 10. The default is 2 attempts.
**HTTP Monitor**
The HTTP monitor is more intelligent than the CONNECT monitor. It is capable of processing an HTTP response to determine the condition of a node. It supports the same basic properties as the CONNECT monitor and includes the additional attribute of path that is used to evaluate the HTTP response to a monitor probe.
- path: The HTTP path used in the HTTP request by the monitor. This must be a string beginning with a / (forward slash). The monitor expects a response from the node with an HTTP status code of 200.
The default Health Monitor Configuration, when a load balancer is created is:
::
{
"type": "CONNECT",
"delay": "30",
"timeout": "30",
"attemptsBeforeDeactivation": "2"
}
Request Data
~~~~~~~~~~~~
Request data includes the desired configuration attributes of the health monitor.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
The request body includes the health monitor attributes.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+------------------+---------------------+
| HTTP Status Code | Description |
+==================+=====================+
|202 |Accepted |
+------------------+---------------------+
Response Body
~~~~~~~~~~~~~
The response body contains the health monitor requested
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+------------------+---------------------+
| HTTP Status Code | Description |
+==================+=====================+
|400 |Bad Request |
+------------------+---------------------+
|401 |Unauthorized |
+------------------+---------------------+
|404 |Not Found |
+------------------+---------------------+
|405 |Not Allowed |
+------------------+---------------------+
|500 |LBaaS Fault |
+------------------+---------------------+
Example
~~~~~~~
**Contents of Request file node.json**
**Request**
::
{
"type": "CONNECT",
"delay": "30",
"timeout": "30",
"attemptsBeforeDeactivation": "2"
}
or..
::
{
"type": "HTTP",
"delay": "30",
"timeout": "30",
"attemptsBeforeDeactivation": "2",
"path": "/healthcheck"
}
**Curl Request**
curl -X PUT -H "X-Auth-Token:HPAuth_d17efd" --data-binary "@node.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/healthmonitor
**Response**
202 status with the following response body.
::
{
"type": "CONNECT",
"delay": "30",
"timeout": "30",
"attemptsBeforeDeactivation": "2"
}
or..
::
{
"type": "HTTP",
"delay": "30",
"timeout": "30",
"attemptsBeforeDeactivation": "2",
"path": "/healthcheck"
}
.. _api-monitor-delete:
Reset Load Balancer Health Monitor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Operation
~~~~~~~~~
+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
|Resource |Operation |Method |Path |
+====================+==========================================+=======+==============================================================+
|Health Monitor |Reset a load balancer health monitor |DELETE |{baseURI}/{ver}/loadbalancers/{loadbalancerId}/healthmonitor |
+--------------------+------------------------------------------+-------+--------------------------------------------------------------+
Description
~~~~~~~~~~~
Reset health monitor settings for a load balancer back to the following default configuration.
::
{
"type": "CONNECT",
"delay": "30",
"timeout": "30",
"attemptsBeforeDeactivation": "2"
}
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+------------------+---------------------+
| HTTP Status Code | Description |
+==================+=====================+
|202 |Accepted |
+------------------+---------------------+
Response Body
~~~~~~~~~~~~~
None.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+------------------+---------------------+
| HTTP Status Code | Description |
+==================+=====================+
|400 |Bad Request |
+------------------+---------------------+
|401 |Unauthorized |
+------------------+---------------------+
|404 |Not Found |
+------------------+---------------------+
|405 |Not Allowed |
+------------------+---------------------+
|500 |LBaaS Fault |
+------------------+---------------------+
Example
~~~~~~~
**Curl Request**
::
curl -X DELETE -H "X-Auth-Token:HPAuth_d17efd" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/healthmonitor
**Response**
202 status with no response body.

View File

@ -1,117 +0,0 @@
.. _api-limits:
======
Limits
======
Get List of LBaaS API Limits
----------------------------
Operation
~~~~~~~~~~
+------------+----------------------------+----------+--------------------------+
| Resource | Operation | Method | Path |
+============+============================+==========+==========================+
| Limits | Get list of LBaaS limits | GET | {baseURI}/{ver}/limits |
+------------+----------------------------+----------+--------------------------+
Description
~~~~~~~~~~~
This method allows querying the LBaaS service for a list of API limits
which apply on a tenant basis. Each tenant may not utilize LBaaS API
resources exceeding these limits and will receive an over limit error
if attempted (413).
+-----------------------------+------------------------------------------------------------+
| Returned Limit Name | Value |
+=============================+============================================================+
| maxLoadBalancers | Maximum number of load balancers allowed for this tenant |
+-----------------------------+------------------------------------------------------------+
| maxNodesPerLoadBalancer | Maximum number of nodes allowed for each load balancer |
+-----------------------------+------------------------------------------------------------+
| maxLoadBalancerNameLength | Maximum length allowed for a load balancer name |
+-----------------------------+------------------------------------------------------------+
| maxVIPsPerLoadBalancer | Maximum number of Virtual IPs for each load balancer |
+-----------------------------+------------------------------------------------------------+
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
The response body contains information regarding limits imposed for the
tenant making the request.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Request**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/limits
**Response**
::
{
"limits": {
"absolute": {
"values": {
"maxLoadBalancerNameLength": 128,
"maxLoadBalancers": 20,
"maxNodesPerLoadBalancer": 5,
"maxVIPsPerLoadBalancer": 1
}
}
}
}

View File

@ -1,817 +0,0 @@
.. _api-lb:
=============
Load Balancer
=============
.. _api-lb-list:
Get List Of All Load Balancers
------------------------------
Operation
~~~~~~~~~
+-----------------+----------------------------------+----------+---------------------------------+
| Resource | Operation | Method | Path |
+=================+==================================+==========+=================================+
| Load Balancer | Get list of all load balancers | GET | {baseURI}/{ver}/loadbalancers |
+-----------------+----------------------------------+----------+---------------------------------+
Description
~~~~~~~~~~~
This operation provides a list of all load balancers configured and
associated with your account. This includes a summary of attributes for
each load balancer. In order to retrieve all the details for a load
balancer, an individual request for the load balancer must be made.
This operation returns the following attributes for each load balancer:
**id :** Unique identifier for the load balancer
**name :** Creator-assigned name for the load balancer
**algorithm :** Creator-specified algorithm for the load balancer
**protocol :** Creator-specified protocol for the load balancer
**port :** Creator-specified port for the load balancer
**status :** Current status, see section on load balancer status within
load balancer create
**created :** When the load balancer was created
**updated :** When the load balancer was last updated
**nodeCount :** The number of backend servers attached to this load balancer
**options :** Current options are timeout (30 sec) and retries (3) for each load balancer
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~~~~~~
The response body contains a list of load balancers for the tenant
making the request.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Request**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers
**Response**
::
{
"loadBalancers":[
{
"name":"lb-site1",
"id":"71",
"protocol":"HTTP",
"port":"80",
"algorithm":"LEAST_CONNECTIONS",
"status":"ACTIVE",
"created":"2010-11-30T03:23:42Z",
"updated":"2010-11-30T03:23:44Z",
"options": {"timeout": 30000, "retries": 3}
},
{
"name":"lb-site2",
"id":"166",
"protocol":"TCP",
"port":"9123",
"algorithm":"ROUND_ROBIN",
"status":"ACTIVE",
"created":"2010-11-30T03:23:42Z",
"updated":"2010-11-30T03:23:44Z",
"options": {"timeout": 30000, "retries": 3}
}
]
}
.. _api-lb-status:
Get Load Balancer Details
-------------------------
Operation
~~~~~~~~~
+-----------------+--------------------------------+----------+--------------------------------------------------+
| Resource | Operation | Method | Path |
+=================+================================+==========+==================================================+
| Load Balancer | Get a specific load balancer | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId} |
+-----------------+--------------------------------+----------+--------------------------------------------------+
Description
~~~~~~~~~~~
This operation provides detailed description for a specific load
balancer configured and associated with your account. This operation is
not capable of returning details for a load balancer which has been
deleted. Details include load balancer virtual IP and node information.
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
The response body contains the load balancer requested or 404, if not
found.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Request**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/2000
**Response**
::
{
"id": "2000",
"name":"sample-loadbalancer",
"protocol":"HTTP",
"port": "80",
"algorithm":"ROUND_ROBIN",
"status":"ACTIVE",
"created":"2010-11-30T03:23:42Z",
"updated":"2010-11-30T03:23:44Z",
"options": {"timeout": 30000, "retries": 3},
"virtualIps":[
{
"id": "1000",
"address":"192.168.1.1",
"type":"PUBLIC",
"ipVersion":"IPV4"
}
],
"nodes": [
{
"id": "1041",
"address":"10.1.1.1",
"port": "80",
"condition":"ENABLED",
"status":"ONLINE"
},
{
"id": "1411",
"address":"10.1.1.2",
"port": "80",
"condition":"ENABLED",
"status":"ONLINE"
}
]
}
.. _api-lb-create:
Create a New Load Balancer
--------------------------
Operation
~~~~~~~~~
+-----------------+------------------------------+----------+---------------------------------+
| Resource | Operation | Method | Path |
+=================+==============================+==========+=================================+
| Load Balancer | Create a new load balancer | POST | {baseURI}/{ver}/loadbalancers |
+-----------------+------------------------------+----------+---------------------------------+
Description
~~~~~~~~~~~
This operation provisions a new load balancer based on the configuration
defined in the request object. Once the request is validated and
progress has started on the provisioning process, a response object will
be returned. The object will contain a unique identifier and status of
the request.
If the status returned is set to 'BUILD', then using the identifier of
the load balancer, the caller can check on the progress of the creation
operation by performing a GET on loadbalancers/{loadbalancerId}. When
the status of the load balancer returned changes to 'ACTIVE', then the
load balancer has been successfully provisioned and is now operational.
**Load Balancer Status Values**
+-------------------+----------------------------------------------------------------+
| Status Name | Description |
+===================+================================================================+
| BUILD | Load balancer is in a building state and not yet operational |
+-------------------+----------------------------------------------------------------+
| ACTIVE | Load balancer is in an operational state |
+-------------------+----------------------------------------------------------------+
| PENDING\_UPDATE | Load balancer is in the process of an update |
+-------------------+----------------------------------------------------------------+
| ERROR | Load balancer is in an error state and not operational |
+-------------------+----------------------------------------------------------------+
The caller of this operation must specify at least the following
attributes of the load balancer:
\*name
\*at least one node
If the request cannot be fulfilled due to insufficient or invalid data,
an HTTP 400 (Bad Request) error response will be returned with
information regarding the nature of the failure in the body of the
response. Failures in the validation process are non-recoverable and
require the caller to correct the cause of the failure and POST the
request again.
By default, the system will create a load balancer with protocol set to
HTTP, port set to 80 (or 443 if protocol is TCP), and assign a public
IPV4 address to the load balancer. There is also a third special-case
protocol "GALERA" that can be used to choose a primary write node when
the load balancer is being used to deliver data to a Galera database
cluster. The default load balancing algorithm used is set to ROUND\_ROBIN.
The load balancer options consist of a 30 second timeout for client
connections (30,000ms) and 3 retries. Valid timeout values range from
0 to 1000 seconds (1,000,000 ms) with 0 indicating no timeout. retries can
range from 0 to 256.
A load balancer name has a max length that can be determined by querying
limits.
Users may configure all documented features of the load balancer at
creation time by simply providing the additional elements or attributes
in the request. This document provides an overview of all the features
the load balancing service supports.
If you have at least one load balancer, you may create subsequent load
balancers that share a single virtual IP by issuing a POST and supplying
a virtual IP ID instead of a type. Additionally, this feature is highly
desirable if you wish to load balance both an unsecured and secure
protocol using one IP address. For example, this method makes it
possible to use the same load balancing configuration to support an HTTP
and a TCP load balancer. Load balancers sharing a virtual IP must
utilize a unique port.
Relevant weights can be assigned to nodes using the weight attribute of the
node element. The weight of a node determines the portion of requests or
connections it services compared to the other nodes of the load balancer. For
example, if node A has a weight of 2 and node B has a weight of 1, then the
loadbalancer will forward twice as many requests to node A than to node B. If
the weight attribute is not specified, then the node's weight is implicitly
set to "1". Weight values from 1 to 256 are allowed.
Note that nodes that are assigned to a load balancer that is delivering data to
a Galera database cluster may require a primary write node be specified to avoid
database locking problems that can occur. For this case, a load balancer can be
configured to use the special "GALERA" protocol type. When a "GALERA" protocol
is chosen, all of the specified nodes must use the node "backup" attribute to
specify whether it is a backup node or the primary node. There may only be a
single primary node specified by setting the "backup" attribute to FALSE. All
other nodes must have the "backup" attribute set to TRUE.
Request Data
~~~~~~~~~~~~
The caller is required to provide a request data with the POST which
includes the appropriate information to create a new load balancer.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
- **X-Auth-Token**
- **Accept: application/json**
- **Content-Type: application/json**
Request Body
~~~~~~~~~~~~
The request body must follow the correct format for new load balancer
creation, examples....
**Request body example to create a load balancer with two nodes and an
optional "weight" assigned. Note that a default weight of 1 does not
have to be explicitly assigned**
::
{
"name": "a-new-loadbalancer",
"nodes": [
{
"address": "10.1.1.1",
"port": "80"
"weight": "2"
},
{
"address": "10.1.1.2",
"port": "81"
}
]
}
**Request body example to create a load balancer using existing load
balancer virtual IP**
::
{
"name":"a-new-loadbalancer",
"port":"80",
"protocol":"HTTP",
"options": {"timeout": 30000, "retries": 3},
"virtualIps": [
{
"id":"39"
}
],
"nodes": [
{
"address":"10.1.1.1",
"port":"80",
"condition":"ENABLED"
}
]
}
**Request body example to create a load balancer that specifies a
single primary write node for a Galera cluster**
::
{
"name":"a-new-loadbalancer",
"port":"83",
"protocol":"GALERA",
"options": {"timeout": 30000, "retries": 3},
"virtualIps": [
{
"id":"39"
}
],
"nodes": [
{
"address": "10.1.1.1",
"port": "3306",
"backup": "TRUE"
},
{
"address": "10.1.1.2",
"port": "3306",
"backup": "FALSE"
}
]
}
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 202 | Accepted |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
The response body contains the load balancer requested or appropriate
error.
**Create Load Balancer (Required Attributes with Shared IP) Response:
JSON**
::
{
"name": "a-new-loadbalancer",
"id": "144",
"protocol": "HTTP",
"port": "83",
"algorithm": "ROUND_ROBIN",
"status": "BUILD",
"created": "2011-04-13T14:18:07Z",
"updated":"2011-04-13T14:18:07Z",
"options": {"timeout": 30000, "retries": 3},
"virtualIps": [
{
"address": "3ffe:1900:4545:3:200:f8ff:fe21:67cf",
"id": "39",
"type": "PUBLIC",
"ipVersion": "IPV6"
}
],
"nodes": [
{
"address": "10.1.1.1",
"id": "653",
"port": "80",
"status": "ONLINE",
"condition": "ENABLED"
}
]
}
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+-----------------------+
| HTTP Status Code | Description |
+====================+=======================+
| 400 | Bad Request |
+--------------------+-----------------------+
| 401 | Unauthorized |
+--------------------+-----------------------+
| 404 | Not Found |
+--------------------+-----------------------+
| 405 | Not Allowed |
+--------------------+-----------------------+
| 413 | Over Limit |
+--------------------+-----------------------+
| 500 | LBaaS Fault |
+--------------------+-----------------------+
| 503 | Service Unavailable |
+--------------------+-----------------------+
Example
~~~~~~~
**Contents of Request file lb.json**
::
{
"name": "lb #1",
"protocol":"tcp",
"nodes": [
{
"address": "15.185.229.153",
"port": "443"
},
{
"address": "15.185.226.163",
"port": "443"
}
]
}
**Curl Request**
::
curl -X POST -H "X-Auth-Token: TOKEN" --data-binary "@lb.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers
**Response**
::
{
"port":"443",
"id":"10",
"protocol":"tcp",
"updated":"2013-02-10T18:20Z",
"created":"2013-02-10T18:20Z",
"status":"BUILD",
"nodes":[
{
"port":"443",
"id":"19",
"condition":"ENABLED",
"status":"ONLINE",
"address":"15.185.229.153"
},
{
"port":"443",
"id":"20",
"condition":"ENABLED",
"status":"ONLINE",
"address":"15.185.226.163"
}
],
"name":"lb #1",
"virtualIps":[
{
"id":"5",
"address":"15.185.96.125",
"ipVersion":"IPV_4",
"type":"PUBLIC"
}
],
"algorithm":"ROUND_ROBIN",
"options": {"timeout": 30000, "retries": 3},
}
.. _api-lb-modify:
Update an Existing Load Balancer
--------------------------------
Operation
~~~~~~~~~
+-----------------+-----------------------------------+----------+--------------------------------------------------+
| Resource | Operation | Method | Path |
+=================+===================================+==========+==================================================+
| Load Balancer | Update load balancer attributes | PUT | {baseURI}/{ver}/loadbalancers/{loadbalancerId} |
+-----------------+-----------------------------------+----------+--------------------------------------------------+
Description
~~~~~~~~~~~
This operation updates the attributes of the specified load balancer.
Upon successful validation of the request, the service will return a 202
(Accepted) response code. A caller should check that the load balancer
status is ACTIVE to confirm that the update has taken effect. If the
load balancer status is 'PENDING\_UPDATE' then the caller can poll the
load balancer with its ID (using a GET operation) to wait for the
changes to be applied and the load balancer to return to an ACTIVE
status.
This operation allows the caller to change one or more of the following
attributes:
\*name
\*algorithm
\*options
This operation does not return a response body.
.. note::
The load balancer ID, status, port and protocol are immutable
attributes and cannot be modified by the caller. Supplying an
unsupported attribute will result in a 400 (badRequest) fault.
Request Data
~~~~~~~~~~~~
Load balancer body with attributes to be updated.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
**Example**
::
{
"name": "newname-loadbalancer",
"algorithm": "LEAST_CONNECTIONS",
"options": {"timeout": 30000, "retries": 3}
}
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 202 | Accepted |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
None.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Contents of Request file lb.json**
::
{
"name": "newname-loadbalancer",
"algorithm": "LEAST_CONNECTIONS",
"options": {"timeout": 30000, "retries": 3}
}
**Curl Request**
::
curl -X PUT -H "X-Auth-Token: TOKEN" --data-binary "@lb.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100
**Response**
202 status with no response body.
.. _api-lb-delete:
Delete Load Balancer
--------------------
Operation
~~~~~~~~~
+-----------------+------------------------------------+----------+--------------------------------------------------+
| Resource | Operation | Method | Path |
+=================+====================================+==========+==================================================+
| Load Balancer | Delete an existing load balancer | DELETE | {baseURI}/{ver}/loadbalancers/{loadbalancerId} |
+-----------------+------------------------------------+----------+--------------------------------------------------+
Description
~~~~~~~~~~~
Delete load balancer removes the specified load balancer and its
associated configuration from the account. Any and all configuration
data is immediately purged and is not recoverable.
This operation does not require a request body.
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 202 | Accepted |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
None.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Example**
::
curl -X DELETE -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100
**Response**
202 status with no response body.

View File

@ -1,70 +0,0 @@
.. _api-logs:
====
Logs
====
Archive log file to Object Storage
----------------------------------
Operation
~~~~~~~~~
+----------+------------------------------------+--------+-----------------------------------------------------+
| Resource | Operation | Method | Path |
+==========+====================================+========+=====================================================+
| Logs | Archive log file to Object Storage | POST | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/logs |
+----------+------------------------------------+--------+-----------------------------------------------------+
Description
~~~~~~~~~~~
The operation tells the load balancer to push the current log file into an HP Cloud Object Storage container. The status of the load balancer will be set to 'PENDING_UPDATE' during the operation and back to 'ACTIVE' upon success or failure. A success/failure message can be found in the 'statusDescription' field when getting the load balancer details.
**Load Balancer Status Values**
+----------------+-----------------------------------------------+
| Status Name    | Description                                   |
+================+===============================================+
| ACTIVE         | Load balancer is in an operational state      |
+----------------+-----------------------------------------------+
| PENDING_UPDATE | Load balancer is in the process of an update  |
+----------------+-----------------------------------------------+
By default with empty POST data the load balancer will upload to the swift account owned by the same tenant as the load balancer in a container called 'lbaaslogs'. To change this the following optional parameters need to be provided in the POST body:
**objectStoreBasePath** : the object store container to use
**objectStoreEndpoint** : the object store endpoint to use including tenantID, for example: https://region-b.geo-1.objects.hpcloudsvc.com:443/v1/1234567890123
**authToken** : an authentication token to the object store for the load balancer to use
Request Data
~~~~~~~~~~~~
The caller is required to provide a request data with the POST which includes the appropriate information to upload logs.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
The request body must follow the correct format for new load balancer creation, examples....
A request that uploads the logs to a different object store
::
{
"objectStoreBasePath": "mylblogs",
"objectStoreEndpoint": "https://region-b.geo-1.objects.hpcloudsvc.com:443/v1/1234567890123",
"authToken": "HPAuth_d17efd"
}

View File

@ -1,566 +0,0 @@
.. _api-node:
=====
Nodes
=====
.. _api-node-list:
List All Load Balancer Nodes
----------------------------
Operation
~~~~~~~~~
+------------+-----------------------------------+----------+--------------------------------------------------------+
| Resource | Operation | Method | Path |
+============+===================================+==========+========================================================+
| Node | Get list of load balancer nodes | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes |
+------------+-----------------------------------+----------+--------------------------------------------------------+
Description
~~~~~~~~~~~
List all nodes for a specified load balancer.
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
The response body contains the load balancer nodes requested or 404, if
not found.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Example**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes
**Response**
::
{
"nodes" : [
{
"id":"410",
"address":"10.1.1.1",
"port":"80",
"condition":"ENABLED",
"status":"ONLINE"
},
{
"id":"236",
"address":"10.1.1.2",
"port":"80",
"condition":"ENABLED",
"status":"ONLINE"
},
{
"id":"2815",
"address":"10.1.1.3",
"port":"83",
"condition":"DISABLED",
"status":"OFFLINE"
}
]
}
.. _api-node-status:
Get Load Balancer Node
----------------------
Operation
~~~~~~~~~~~~~~
+------------+-------------------------------------+----------+-----------------------------------------------------------------+
| Resource | Operation | Method | Path |
+============+=====================================+==========+=================================================================+
| Node | Get a specific load balancer node | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} |
+------------+-------------------------------------+----------+-----------------------------------------------------------------+
Description
~~~~~~~~~~~
This operation retrieves the configuration of a node.
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
The response body contains the load balancer node requested or 404, if
not found.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Example**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes/410
**Response**
::
{
"id":"410",
"address":"10.1.1.2",
"port":"80",
"condition":"ENABLED",
"status":"ONLINE"
}
.. _api-node-create:
Create Load Balancer Node
-------------------------
Operation
~~~~~~~~~
+------------+-----------------------------------+----------+--------------------------------------------------------+
| Resource | Operation | Method | Path |
+============+===================================+==========+========================================================+
| Node | Create a new load balancer node | POST | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes |
+------------+-----------------------------------+----------+--------------------------------------------------------+
Description
~~~~~~~~~~~
Add a new node to any existing load balancer. When a node is added, it is
assigned a unique identifier that can be used for mutating operations
such as changing the condition, or removing the node from the load
balancer. When a node is added to a load balancer it is enabled by
default.
Relevant weights can be assigned to nodes using the weight attribute of the
node element. The weight of a node determines the portion of requests or
connections it services compared to the other nodes of the load balancer. For
example, if node A has a weight of 2 and node B has a weight of 1, then the
loadbalancer will forward twice as many requests to node A than to node B. If
the weight attribute is not specified, then the node's weight is implicitly
set to "1". Weight values from 1 to 256 are allowed.
Request Data
~~~~~~~~~~~~
The request must contain information regarding the new node to be added.
More than one node can be added at a time.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
The request body defines the attributes of the new node to be created.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 202 | Accepted |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
The response body contains the load balancer requested or 404, if not
found.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 413 | Over Limit |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Contents of Request file nodes.json**
::
{
"nodes": [
{
"address": "10.1.1.1",
"port": "80",
"weight": "2"
},
{
"address": "10.2.2.1",
"port": "80",
"weight": "4"
},
{
"address": "10.2.2.2",
"port": "88",
"condition": "DISABLED"
}
]
}
**Curl Request**
::
curl -X POST -H "X-Auth-Token: TOKEN" --data-binary "@nodes.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes
**Response**
::
{
"nodes": [
{
"id": "7298",
"address": "10.1.1.1",
"port": "80",
"condition": "ENABLED",
"status": "ONLINE",
"weight": "2"
},
{
"id": "293",
"address": "10.2.2.1",
"port": "80",
"condition": "ENABLED",
"status": "OFFLINE",
"weight": "4"
},
{
"id": "183",
"address": "10.2.2.2",
"port": "88",
"condition": "DISABLED",
"status": "OFFLINE"
}
]
}
.. _api-node-modify:
Update Load Balancer Node Condition
-----------------------------------
Operation
~~~~~~~~~
+------------+-------------------------------+----------+-----------------------------------------------------------------+
| Resource | Operation | Method | Path |
+============+===============================+==========+=================================================================+
| Node | Update a load balancer node | PUT | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} |
+------------+-------------------------------+----------+-----------------------------------------------------------------+
Description
~~~~~~~~~~~
Every node in the load balancer is either enabled or disabled which
determines its role within the load balancer. When the node has
condition='ENABLED' the node is permitted to accept new connections. Its
status will eventually become 'ONLINE' to reflect this configuration.
When the node has condition='DISABLED' the node is not permitted to
accept any new connections. Existing connections to the node are
forcibly terminated. The node's status changes to OFFLINE once the
configuration has been successfully applied.
Relevant weights can be assigned to nodes using the weight attribute of the
node element. The weight of a node determines the portion of requests or
connections it services compared to the other nodes of the load balancer. For
example, if node A has a weight of 2 and node B has a weight of 1, then the
load balancer will forward twice as many requests to node A as to node B. If
the weight attribute is not specified, then the node's weight is implicitly
set to "1". Weight values from 1 to 256 are allowed.
The node IP and port are immutable attributes and cannot be modified
with a PUT request. Supplying an unsupported attribute will result in a
fault. A load balancer supports a maximum number of nodes. The
maximum number of nodes per load balancer is returned when querying the
limits of the load balancer service.
Request Data
~~~~~~~~~~~~
Request data includes the desired condition of the node as well as the
optional weight of the node.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
The request body includes the node 'condition' attribute and its desired
state.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 202 | Accepted |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
None.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Contents of Request file node.json**
::
{
"condition": "DISABLED",
}
OR
{
"condition": "ENABLED",
"weight": "2"
}
**Curl Request**
::
curl -X PUT -H "X-Auth-Token: TOKEN" --data-binary "@node.json" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes/100
**Response**
Returns a 202 Accepted status with no response body.
.. _api-node-delete:
Delete Load Balancer Node
-------------------------
Operation
~~~~~~~~~~~~~~
+------------+-------------------------------+----------+-----------------------------------------------------------------+
| Resource | Operation | Method | Path |
+============+===============================+==========+=================================================================+
| Node | Delete a load balancer node | DELETE | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/nodes/{nodeId} |
+------------+-------------------------------+----------+-----------------------------------------------------------------+
Description
~~~~~~~~~~~
Delete node for a load balancer.
.. note::
A load balancer must have at least one node. Attempting to remove the last
node of a load balancer will result in a 401 error.
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 202 | Accepted |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
None.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Request**
::
curl -X DELETE -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/nodes/100
**Response**
Returns a 202 Accepted status with no response body.

View File

@ -1,107 +0,0 @@
.. _api-protocols:
=========
Protocols
=========
Get List of Supported LBaaS Protocols
-------------------------------------
Operation
~~~~~~~~~
+-------------+-----------------------------------+----------+-----------------------------+
| Resource | Operation | Method | Path |
+=============+===================================+==========+=============================+
| Protocols | Get list of supported protocols | GET | {baseURI}/{ver}/protocols |
+-------------+-----------------------------------+----------+-----------------------------+
Description
~~~~~~~~~~~
All load balancers must be configured with the protocol of the service which is
being load balanced. The protocol selection should be based on the protocol of
the back-end nodes. The current specification supports HTTP (port 80) and TCP
(port 443) services. HTTPS traffic is supported currently via the TCP
connection. Support for SSL termination on the load balancer is not
currently supported.
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
The response body contains the currently supported protocols and port
numbers.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Request**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/protocols
**Response**
::
{
"protocols": [
{
"name": "HTTP",
"port": 80
},
{
"name": "TCP",
"port": 443
}
]
}

View File

@ -1,101 +0,0 @@
.. _api-vips:
===========
Virtual IPs
===========
Get List of Virtual IPs
-----------------------
Operation
~~~~~~~~~
+--------------+---------------------------+----------+-------------------------------------------------------------+
| Resource | Operation | Method | Path |
+==============+===========================+==========+=============================================================+
| Virtual IP | Get list of virtual IPs | GET | {baseURI}/{ver}/loadbalancers/{loadbalancerId}/virtualips |
+--------------+---------------------------+----------+-------------------------------------------------------------+
Description
~~~~~~~~~~~
This operation lists all the virtual IP addresses of a load balancer. The
maximum number of VIPs that can be configured when creating a load
balancer can be discovered by querying the limits of the load balancer service.
Request Data
~~~~~~~~~~~~
None required.
Query Parameters Supported
~~~~~~~~~~~~~~~~~~~~~~~~~~
None required.
Required HTTP Header Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
**X-Auth-Token**
Request Body
~~~~~~~~~~~~
None required.
Normal Response Code
~~~~~~~~~~~~~~~~~~~~
+--------------------+---------------+
| HTTP Status Code | Description |
+====================+===============+
| 200 | OK |
+--------------------+---------------+
Response Body
~~~~~~~~~~~~~
The response body contains the load balancer VIP list requested or 404,
if not found.
Error Response Codes
~~~~~~~~~~~~~~~~~~~~
+--------------------+----------------+
| HTTP Status Code | Description |
+====================+================+
| 400 | Bad Request |
+--------------------+----------------+
| 401 | Unauthorized |
+--------------------+----------------+
| 404 | Not Found |
+--------------------+----------------+
| 405 | Not Allowed |
+--------------------+----------------+
| 500 | LBaaS Fault |
+--------------------+----------------+
Example
~~~~~~~
**Curl Request**
::
curl -H "X-Auth-Token: TOKEN" https://uswest.region-b.geo-1.lbaas.hpcloudsvc.com/v1.1/loadbalancers/100/virtualips
**Response**
::
{
"virtualIps": [
{
"id": "1021",
"address": "206.10.10.210",
"type": "PUBLIC",
"ipVersion": "IPV4"
}
]
}

View File

@ -1,10 +0,0 @@
.. _architecture:
Architecture
============
.. toctree::
:maxdepth: 2
production
logical

View File

@ -1,32 +0,0 @@
====================
Logical architecture
====================
See information for each component for more information.
* :ref:`libra-pool-mgm` - A node pool manager to keep a warm spare pool of load balancers ready
* :ref:`libra-worker` - A node worker to asynchronously communicate to the API server
* :ref:`libra-api` - A customer API server
* :ref:`libra-admin-api` - An administrative API server
The API server is based on a modified version of the `Atlas API specification
<https://wiki.openstack.org/wiki/Atlas-LB>`_.
High level overview
-------------------
.. image:: /img/libralayout.png
Here you can see that the pool manager spins up the required Nova nodes with
the load balancer image. It then hands the details of these nodes over to the
Admin API server.
The client sends an API request to the API server, which in turn sends the
configuration information to the worker on the load balancer node. The worker
has a plugin system to speak to multiple load balancer types but is currently
designed to use HAProxy.
The statsd monitoring system routinely probes the workers and can alert on as
well as disable faulty nodes.
The parts of this diagram in orange are provided by the Libra codebase.

View File

@ -1,36 +0,0 @@
.. _architecture-production:
=======================
Production Architecture
=======================
See information for each component for more information.
* :ref:`libra-pool-mgm` - A node pool manager to keep a warm spare pool of load balancers ready
* :ref:`libra-worker` - A node worker to asynchronously communicate to the API server
* :ref:`libra-api` - A customer API server
* :ref:`libra-admin-api` - An administrative API server
High level overview
-------------------
* Some cloud or virtualization system.
* User and/or Tenant with required privileges / resources.
* Ubuntu 12.04 Precise x86_64 image for :term:`instance`.
* HAProxy for LoadBalancers.
* Gearman for Libra service communication.
* MySQL Galera Multi-master cluster for HA databases.
Think of each service as a :term:`instance`, for each service or :term:`instance`
running services, we create one per :term:`az`.
Diagram
-------
In the case below the setup is
* 1 gearman :term:`instance` per :term:`az`.
* 1 MySQL Galera :term:`instance` per :term:`az`.
* n+ workers running HAProxy across multiple :term:`az`
.. image:: /img/production.png

View File

@ -1,234 +0,0 @@
# -*- coding: utf-8 -*-
#
# OpenStack CI documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 18 13:42:23 2011.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import datetime
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['rst2pdf.pdfbuilder']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Libra LBaaS Toolset'
copyright = u'2013, Hewlett-Packard Development Company, L.P.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Capture "now" exactly once so that version and release always agree: the
# previous code called datetime.datetime.now() six separate times, which
# could yield inconsistent date components if a build straddled midnight.
_build_date = datetime.datetime.now()

# The short X.Y version.
version = "%d-%02d-%02d-beta" % (
    _build_date.year,
    _build_date.month,
    _build_date.day,
)

# The full version, including alpha/beta/rc tags. Identical to the short
# version for this project, so derive it rather than recompute.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'LBaaSdoc'

# -- Options for LaTeX output -------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
    ('index', 'Libra-{0}.tex'.format(version),
     u'Libra LBaaS Toolset Documentation',
     u'Hewlett-Packard Development Company, L.P.', 'manual'),
]

#pdf_documents = [('index', 'Libra-{0}'.format(version), u'Libra Client, Worker and Pool Manager Documentation', u'Andrew Hutchings and David Shrewsbury')]
#pdf_break_level = 1
#pdf_stylesheets = ['sphinx', 'libra']

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output -------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
#    ('index', 'lbaas', u'Libra LBaaS Toolset',
#     [u'Hewlett-Packard Development Company, L.P.'], 1)
#]

View File

@ -1,236 +0,0 @@
.. _configuration:
=============
Configuration
=============
Configuration of Services
=========================
Configuration File Format
-------------------------
Libra uses the `Oslo configuration library <https://wiki.openstack.org/wiki/Oslo/Config>`_
so its format is similar to other OpenStack programs.
DEFAULT Section
^^^^^^^^^^^^^^^
The ``[DEFAULT]`` section contains generic options common to the various
Libra utilities (worker, mgm, etc).
.. code-block:: ini
[DEFAULT]
daemon = true
user = libra
group = libra
verbose = false
debug = false
billing_enable = false
notification_driver = []
default_notification_level = INFO
default_publisher_id = None
host = localhost
kombu_ssl_version =
kombu_ssl_keyfile =
kombu_ssl_certfile =
kombu_ssl_ca_certs =
rabbit_use_ssl = false
rabbit_userid = guest
rabbit_password = guest
rabbit_host = localhost
rabbit_port = 5672
rabbit_hosts = []
rabbit_virtual_host = /
rabbit_retry_interval = 1
rabbit_retry_backoff = 2
rabbit_max_retries = 0
rabbit_ha_queues = false
control_exchange = openstack
amqp_durable_queues = false
Options supported in this section:
.. option:: daemon
Run as a daemon. Default is 'true'.
.. option:: user
Specifies the user for the process when in daemon mode. Default is the
current user.
.. option:: group
Specifies the group for the process when run in daemon mode.
.. option:: verbose
Prints more verbose output. Sets logging level to INFO from WARNING
.. option:: debug
Prints debug output. Sets logging level to DEBUG from WARNING
.. option:: billing_enable
Enables the sending of billing information to a rabbitMQ host. It sends
create and delete loadbalancer messages as well as exists and usage
messages on a periodic, configurable basis. See admin_api config.
.. option:: notification_driver
Driver or drivers to handle sending notifications for metering / billing.
For instance, the openstack rpc driver is
openstack.common.notifier.rpc_notifier.
.. option:: default_notification_level
Default notification level for outgoing notifications
.. option:: default_publisher_id
Default publisher_id for outgoing notifications
.. option:: host
Default host name to use in notifications. Will use default_publisher_id
or gethostname() if not set.
.. option:: host
Default host name to use in notifications. Will use default_publisher_id
or gethostname() if not set.
.. option:: kombu_ssl_version
SSL version to use (valid only if SSL enabled). valid values are TLSv1,
SSLv23 and SSLv3. SSLv2 may be available on some distributions
.. option:: kombu_ssl_keyfile
SSL key file (valid only if SSL enabled)
.. option:: kombu_ssl_certfile
SSL cert file (valid only if SSL enabled)
.. option:: kombu_ssl_ca_certs
SSL certification authority file (valid only if SSL enabled)
.. option:: rabbit_use_ssl
Connect over SSL for RabbitMQ
.. option:: rabbit_userid
The RabbitMQ userid
.. option:: rabbit_password
The RabbitMQ password
.. option:: rabbit_host
The RabbitMQ broker address where a single node is used
.. option:: rabbit_port
The RabbitMQ broker port where a single node is used
.. option:: rabbit_hosts
RabbitMQ HA cluster host:port pairs
.. option:: rabbit_virtual_host
The RabbitMQ virtual host
.. option:: rabbit_retry_interval
How frequently to retry connecting with RabbitMQ
.. option:: rabbit_retry_backoff
How long to backoff for between retries when connecting to RabbitMQ
.. option:: rabbit_max_retries
Maximum retries with trying to connect to RabbitMQ (the default of 0
implies an infinite retry count)
.. option:: rabbit_ha_queues
Use H/A queues in RabbitMQ (x-ha-policy: all). You need to wipe RabbitMQ
database when changing this option.
.. option:: control_exchange
AMQP exchange to connect to if using RabbitMQ or Qpid
.. option:: amqp_durable_queues
Use durable queues in amqp.
Gearman Section
^^^^^^^^^^^^^^^
The ``[gearman]`` section contains options specific to connecting to
a Gearman job server. All of the Libra utilities will read this section
since each connects to Gearman.
In order to support SSL connections, it is required that all three SSL
related options be supplied. Also, the user owning the process must be
able to read all SSL files.
.. code-block:: ini
[gearman]
servers = 10.0.0.1:4730, 10.0.0.2:4730
poll = 1
ssl_ca = /etc/ssl/gearman.ca
ssl_cert = /etc/ssl/gearman.cert
ssl_key = /etc/ssl/gearman.key
Options supported in this section:
.. option:: keepalive
Enable TCP KEEPALIVE pings. Default is 'false'.
.. option:: keepcnt
Max KEEPALIVE probes to send before killing connection.
.. option:: keepidle
Seconds of idle time before sending KEEPALIVE probes.
.. option:: keepintvl
Seconds between TCP KEEPALIVE probes.
.. option:: poll
Gearman worker polling timeout. Default is 1.
.. option:: reconnect_sleep
Seconds to sleep between job server reconnects. Default is 60.
.. option:: servers
Comma-separated list of Gearman job servers and port in HOST:PORT format.
.. option:: ssl_ca
Gearman SSL certificate authority.
.. option:: ssl_cert
Gearman SSL certificate.
.. option:: ssl_key
Gearman SSL key.

View File

@ -1,36 +0,0 @@
========
Glossary
========
.. glossary::
instance
A Virtual Machine in "Cloud" speak.
az
A logical grouping of resources typically used to provide HA.
database
A software that stores data like a SQL server or similar.
device
A Loadbalancer Device which either runs in Software aka
:ref:`libra-worker` with :term:`haproxy` or any other kind of
software / hardware.
vip
A virtual ip is a ip address which is assigned to the :term:`device`
and can be moved around if needed.
gearman
A job system. See http://gearman.org/ for more info.
haproxy
Software loadbalancer that runs typically on Linux. Used as the base
for the Libra LBaaS tools.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 64 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 47 KiB

View File

@ -1,14 +0,0 @@
Load Balancer as a Service Device Tools
=======================================
.. toctree::
:maxdepth: 2
install/index
architecture/index
worker/index
pool_mgm/index
api/index
admin_api/index
config
glossary

View File

@ -1,283 +0,0 @@
=======================
Development environment
=======================
Libra is a system to provide LoadBalancing as a Service on top of
various platforms. It is comprised of four components :ref:`libra-api`,
:ref:`libra-admin-api`, :ref:`libra-pool-mgm` and :ref:`libra-worker`,
supported by a few other open source components. For more information see
:doc:`/architecture/index`.
Development environment
+++++++++++++++++++++++
This guide will walk you through howto setup a development environment for Libra
using:
* 1 Node for the API, Admin API and Pool mgm with MySQL
* n+ Nodes for workers that is going to run HAProxy
* Ubuntu 12.04 as our OS of choice
Common steps
============
1. Install general system dependencies
::
$ sudo apt-get install -qy python-virtualenv python-pip python-dev git gcc
2. Clone the repo from Stackforge at GitHub
::
$ git clone https://github.com/stackforge/libra.git
$ cd libra
3. Create a libra user
::
$ sudo adduser --disabled-login libra
4. Create needed directories
::
$ sudo mkdir -p /var/run/libra /var/log/libra
$ sudo chown libra:libra /var/run/libra /var/log/libra
Installing
==========
.. index::
double: install; libra
1. Do steps in :doc:`ppa`
2. Do steps in 'Common steps'
3. Install dependencies
::
$ sudo apt-get install -qy gearman-job-server mysql-server
4. Setup a VirtualEnvironment
.. note::
This is to not interfere with systemwide libraries.
::
$ virtualenv .venv
$ . .venv/bin/activate
5. Install python-gearman
.. note::
This is a custom version with patches committed upstream but not yet released.
::
$ pip install https://launchpad.net/~libra-core/+archive/ppa/+files/gearman_2.0.2.git3.orig.tar.gz
6. Install dependencies using pip
::
$ pip install -r requirements.txt -r test-requirements.txt
7. Install python-keystoneclient
::
$ pip install python-keystoneclient
8. Install Libra in development mode
::
$ python setup.py develop
9. Copy the configuration file to /etc
::
$ sudo cp etc/sample_libra.cfg /etc/libra.cfg
10. Configure libra
::
$ sudo vi /etc/libra.cfg
.. note::
See :ref:`configuration` for how to proceed for various options.
You should at least configure the variables needed for your environment.
Setup database and gearman
==========================
1. Import the initial database
::
$ mysql -p < libra/common/api/lbaas.sql
2. Change the listening address of Gearman server
::
$ sudo vi /etc/default/gearman-job-server
3. Restart gearman
::
$ sudo service gearman-job-server restart
Bring up services
=================
1. Start the Pool Manager
::
$ libra_pool_mgm --config-file /etc/libra.cfg --log-dir /var/log/libra/
2. Start Admin API & API services
::
$ libra_admin_api --config-file /etc/libra.cfg --log-dir /var/log/libra/
$ libra_api --config-file /etc/libra.cfg --log-dir /var/log/libra/
Creating a Worker Image
=======================
.. note::
In this setup we'll be using OpenStack as the underlying provider for our Libra Worker nodes to run HAProxy on.
1. Boot a server using Nova
.. note::
You should at least open (for now at least) port 22 for ssh.
--nic argument is only needed if you have multiple networks.
--security-groups is not needed at the time if you have 22 in default
::
$ nova boot --flavor <flavour id or name> --image <image id of ubuntu precise> --key-name default --nic net-id=<network id> --security-groups=<your security groups> worker
2. Create a floating ip
::
$ neutron floatingip-create <external network name>
3. Assign a floating ip to the instance
.. note::
You can view all the ports by issuing `neutron port-list`.
::
$ neutron floatingip-associate <floating ip id> <port id>
4. Login to the instance
::
$ ssh root@<ip>
5. Do steps in 'Common steps'
6. Install HAProxy
::
$ apt-get install -qy haproxy socat
7. Install python-gearman
.. note::
This is a custom version with patches committed upstream but not yet released.
::
$ pip install https://launchpad.net/~libra-core/+archive/ppa/+files/gearman_2.0.2.git3.orig.tar.gz
8. Install dependencies using pip
::
$ pip install -r requirements.txt -r test-requirements.txt
9. Install Libra in development mode
::
$ python setup.py develop
10. Install an Upstart job
.. note::
You will also need to copy your libra.cnf to the worker machine, and update libra-worker.conf to use it (the default is /etc/libra/libra.cnf).
There is also an additional logging configuration file to install.
You may want to test that the service starts up appropriately before moving to the next step.
::
$ mkdir /etc/libra
$ wget https://raw2.github.com/pcrews/lbaas-salt/master/lbaas-haproxy-base/logging_worker.cfg -O /etc/libra/logging_worker.cfg
$ wget https://raw2.github.com/pcrews/lbaas-salt/master/lbaas-haproxy-base/libra-worker.conf -O /etc/init/libra_worker.conf
11. Make a snapshot of the worker image
::
$ nova image-create worker libra-worker
12. At the libra-pool-mgm node change the 'nova_image' setting to the value of your newly created snapshot
.. note::
To get the ID of the snapshot do
nova image-show libra-worker | grep -w id | cut -d '|' -f3
::
$ sudo vi /etc/libra.cfg
13. Restart libra_pool_mgm
::
$ killall -9 libra_pool_mgm
$ libra_pool_mgm --config-file /etc/libra.cfg --log-dir /var/log/libra/
Verifying that it works
=======================
If you have done all correctly you should be able to do something like the
below command on the node that has the :ref:`libra-pool-mgm`
::
$ less +F /var/log/libra/libra_pool_mgm.log

View File

@ -1,113 +0,0 @@
Diskimage Builder
=================
Building Libra Images using Diskimage Builder.
Setup the builder - Manual way
------------------------------
1. Set DIB path
::
$ echo 'export DIB_PATH=$HOME/diskimage-builder' >> ~/.bashrc
2. Clone the repository "git://github.com/openstack/diskimage-builder" locally.
::
$ git clone git://github.com/openstack/diskimage-builder $DIB_PATH
3. Add the DIB bin directory to your PATH and the DIB directory to your env.
::
$ echo 'export PATH=$PATH:$DIB_PATH/bin' >> ~/.bashrc
$ . ~/.bashrc
4. Setup some variables
::
$ echo 'export LIBRA_ELEMENTS=$HOME/libra-elements' >> ~/.bashrc
$ . ~/.bashrc
5. Clone the 'libra-elements' repository
::
$ git clone git://github.com/LBaaS/libra-elements $LIBRA_ELEMENTS
6. Export the following variable to your .bashrc. Then source it.
::
$ export ELEMENTS_PATH=$DIB_PATH/elements:$LIBRA_ELEMENTS/elements
Setup DIB using bootstrap.sh
----------------------------
bootstrap.sh is a script to bootstrap your environment for DIB and libra-elements.
It does:
#. Install deps
#. Add some vars to ~/.dib_profile and your ~/.bashrc
#. Clone / update the repos.
Simply run:
::
$ curl https://raw.github.com/LBaaS/libra-elements/master/bootstrap.sh | bash
Supported distros
-----------------
Currently the supported distributions for DIB are:
.. note::
There is no support in the elements or in the packages for anything else at this time
* precise
Worker image
------------
To generate a worker image, do
::
DIB_RELEASE=precise disk-image-create "libra-worker" -o libra-worker.qcow2
API node
--------
To generate an API image, do
::
DIB_RELEASE=precise disk-image-create "libra-api" -o libra-api.qcow2
Or to put both the API and Admin API on the same image
::
DIB_RELEASE=precise disk-image-create "libra-api libra-admin-api" -o libra-api.qcow2
Pool Manager image
------------------
To generate a Pool Manager image, do
::
DIB_RELEASE=precise disk-image-create "libra-pool-mgr" -o libra-pool-mgr.qcow2

View File

@ -1,13 +0,0 @@
.. _install:
Installing Libra LBaas
======================
.. toctree::
:maxdepth: 2
development
production
ppa
diskimage-builder
verify

View File

@ -1,298 +0,0 @@
.. _install-openstack:
=============================
Installing Libra on Openstack
=============================
Libra can utilize OpenStack as its platform to provide LBaaS either for instances
that run inside of an OpenStack environment or outside of it.
Architecture
^^^^^^^^^^^^
Please see :ref:`architecture-production` for understanding the general
production architecture.
Requirements
^^^^^^^^^^^^
* OpenStack cloud to provide the underlying IaaS functions for Libra.
* User and Tenant with required privileges / resources.
* Ubuntu 12.04 Precise x86_64 image for instances.
Instance flavors
----------------
* :ref:`libra-api` / :ref:`libra-admin-api` - m1.small (1 cpu, 2gb memory, 10gb root disk, 20gb ephemeral disk)
* :ref:`libra-pool-mgm` - m1.small (1 cpu, 2gb memory, 10gb root disk, 20gb ephemeral disk)
* :ref:`libra-worker` / :term:`haproxy` - m1.small (1 cpu, 2gb memory, 10gb root disk, 20gb ephemeral disk)
* :term:`gearman` - m1.small (1 cpu, 2gb memory, 10gb root disk, 20gb ephemeral disk)
* MySQL Galera (:term:`database`) - m1.medium (2 cpu, 4gb memory, 10gb root disk, 40gb ephemeral disk)
.. note::
The worker flavor needs to have unlimited or high bandwidth capabilities;
if not, traffic might not get through and it will suffer from network
congestion.
Commands / Tools
================
Nova Boot
---------
::
$ nova boot --image <ubuntu precise img id> --flavor <flavour name / id> --availability-zone <az> <instance name>-<az>
Example: nova boot --image ubuntu-precise-amd64 --flavor m1.small --availability-zone az1 libra-gearman-az1
PDSH
----
Use PDSH if you don't want to resort to shell for loops around SSH or a lot of manual SSH logins into boxes to perform steps.
1. Add the following to your ~/.ssh/config
.. note:: If you don't do this, pdsh will fail due to host keys that are not known.
::
Host *
StrictHostKeyChecking no
2. Create a file for the group of instances you want PDSH to target
Example contents: gearman
::
10.0.0.4
10.0.0.5
10.0.0.6
3. Run pdsh with ssh
::
$ WCOLL=<file> pdsh -R ssh <cmd>
Example: WCOLL=gearman pdsh -R ssh uptime
Installing pre-requisite services
=================================
We want to setup the services like Gearman and the Database instances before
installing the actual Libra system.
Gearman
-------
1. Create 3 instances for Gearman using the command in `Commands`
2. You will end up with something like
::
| aff72090-6f5e-44c7-9d35-674d92f0ba82 | libra-gearman-1 | ACTIVE | None | Running | os-net=10.255.255.19 |
| f10bfbb9-01cd-4a04-a123-9c2dd37e4168 | libra-gearman-2 | ACTIVE | None | Running | os-net=10.255.255.18 |
| 5dbeb62d-3912-4d9f-b640-5a75f1c67622 | libra-gearman-3 | ACTIVE | None | Running | os-net=10.255.255.15 |
2. Login / or script the next actions
3. Do steps in :doc:`ppa` for each instance
4. Install Gearman instance
::
$ sudo apt-get install -qy gearman-job-server
5. Change Gearman to listen on all addresses
::
$ sudo sed 's/127.0.0.1/0.0.0.0/g' -i /etc/default/gearman-job-server
$ sudo service gearman-job-server restart
Database
========
http://www.percona.com/doc/percona-xtradb-cluster/howtos/ubuntu_howto.html
1. Create 3 instances for the database
2. You will end up with something like
::
| 60b2d90a-a5a6-457b-8d4f-4b5575033c44 | libra-db-1 | ACTIVE | None | Running | os-net=10.255.255.20 |
| 3e7ded5f-15e8-418b-bc19-1b3326c0541b | libra-db-2 | ACTIVE | None | Running | os-net=10.255.255.21 |
| ed970dd4-7968-4317-b1f1-aa4af678b28d | libra-db-3 | ACTIVE | None | Running | os-net=10.255.255.22 |
3. Add the Percona PPA
::
$ sudo apt-key adv --keyserver keys.gnupg.net --recv-keys 1C4CBDCDCD2EFD2A
$ sudo sh -c 'echo "deb http://repo.percona.com/apt precise main" >> /etc/apt/sources.list.d/percona.list'
4. Install Percona instance on each instance
::
$ sudo debconf-set-selections <<< 'percona-xtradb-cluster-server-5.5 percona-server-server/root_password password your_password'
$ sudo debconf-set-selections <<< 'percona-xtradb-cluster-server-5.5 percona-server-server/root_password_again password your_password'
$ sudo DEBIAN_FRONTEND=noninteractive apt-get install -qy percona-xtradb-cluster-server-5.5
5. For setting up the Percona Cluster follow the guide linked above on the www.percona.com pages.
6. Create the Libra database and a user with grants to it
::
mysql > CREATE DATABASE lbaas CHARACTER SET utf8 COLLATE utf8_general_ci;
mysql > GRANT ALL ON lbaas.* TO 'lbaas'@'10.255.255.%' IDENTIFIED BY 'lbaas';
mysql > FLUSH PRIVILEGES;
Worker image
============
1. Create an instance that will become our template for workers.
..
$ nova boot ... worker
2. Login to the server
3. Do the steps in :doc:`ppa`.
4. Install the :ref:`libra-worker` package and dependencies.
::
$ sudo apt-get install -qy libra-worker socat haproxy
5. Configure the [worker] section in the configuration file.
.. note:: See :ref:`configuration` for information about options
::
$ sudo cp /usr/share/libra/sample_libra.cfg /etc/libra.cfg
$ sudo vi /etc/libra.cfg
6. Make a snapshot of the image and take note of the ID (We'll be needing it later)
::
$ nova image-create worker libra-worker
$ nova image-show libra-worker
7. Shutdown the instance
$ nova delete worker
Pool Manager instances
======================
1. Create 3 instances that will run :ref:`libra-pool-mgm`
2. You will end up with something like
::
| d4e21f7b-aa1b-4132-83e7-6cd5281adfb3 | libra-pool-mgm-1 | ACTIVE | None | Running | os-net=10.255.255.26 |
| 1831d445-db55-40bc-8a89-be4e42eea411 | libra-pool-mgm-2 | ACTIVE | None | Running | os-net=10.255.255.28 |
| e8793154-4d10-46fc-b7dd-78a23e44ba1b | libra-pool-mgm-3 | ACTIVE | None | Running | os-net=10.255.255.27 |
2. Login / or script the next actions
3. Do steps in :doc:`ppa` for each instance
4. Install :ref:`libra-pool-mgm`
::
$ sudo apt-get install -qy libra-pool-mgm
5. On the first instance configure settings to your env.
.. note::
We'll create a configuration file on the first :ref:`libra-pool-mgm`
instance and copy it to the rest of the API instances and later
:ref:`libra-pool-mgm` instances so we do less work :).
..
$ sudo cp /usr/share/libra/sample_libra.cfg /etc/libra.cfg
$ sudo vi /etc/libra.cfg
.. note::
See :ref:`configuration` for configuration options.
6. Copy the configuration file over to the rest of the instances.
7. Restart the :ref:`libra-pool-mgm` service on each instance.
8. Check the logs for errors.
API nodes
=========
1. Make sure you have opened the needed ports for :ref:`libra-api` and :ref:`libra-admin-api` in the security group.
2. Create 3 instances that will run the :ref:`libra-api` and :ref:`libra-admin-api`
3. Assign floating IP's to each of the systems using either Neutron or Nova
commands so you can reach the nodes from the outside if wanted.
4. You will end up with something like
::
| 27ae4d83-792a-4458-bdb0-4e13e8970a48 | libra-api-1 | ACTIVE | None | Running | os-net=10.255.255.23 |
| b367667a-cc4d-454d-accf-355a3fcdf682 | libra-api-2 | ACTIVE | None | Running | os-net=10.255.255.24 |
| c659c9a3-260a-4b85-9a1a-565549c9ad44 | libra-api-3 | ACTIVE | None | Running | os-net=10.255.255.25 |
5. Login / or script the next actions
6. Install python-keystoneclient
::
$ sudo apt-get install -qy python-keystoneclient
7. Do steps in :doc:`ppa` for each instance
8. Install latest version of Libra
::
$ sudo apt-get install -qy libra-api libra-admin-api
9. Copy the configuration file from one of the :ref:`libra-pool-mgm` instances
to each instance.
10. Restart :ref:`libra-api` and :ref:`libra-admin-api` on each instance.
::
$ for i in api admin-api; do sudo service libra-$i restart; done
11. Now you're done with the API services
12. Check that the logs have any errors.
13. See :ref:`install-verify` to verify that the system works!

View File

@ -1,32 +0,0 @@
.. _ppa:
=========
Libra PPA
=========
Currently we require a PPA that is provided by the HPCS LBaaS / Libra team in order
to get the right versions of the dependencies. So we'll need to setup a PPA.
To add it to your Ubuntu node follow the instructions below.
Adding the PPA
==============
1. Install a utility package
::
$ sudo apt-get install -qy python-software-properties
2. Add the PPA
::
$ sudo apt-add-repository ppa:libra-core/ppa
3. Update package indexes
::
$ sudo apt-get update -q

View File

@ -1,21 +0,0 @@
.. _install-production:
======================
Production environment
======================
Libra is a system to provide LoadBalancing as a Service on top of
various platforms. It is comprised of four components :ref:`libra-api`,
:ref:`libra-admin-api`, :ref:`libra-pool-mgm` and :ref:`libra-worker`,
supported by a few other open source components. For more information see
:doc:`/architecture/index`.
These guides will help you through the installation of a production setup of Libra.
Below you see the different systems that Libra can be installed / run upon.
.. toctree::
:maxdepth: 2
openstack

View File

@ -1,11 +0,0 @@
.. _install-verify:
=======================
Verifying functionality
=======================
.. note::
In order to do the following you need libra_client installed on your system.
libra_client --service_type=compute --insecure --bypass_url=http://<endpoint>/v1.1 list

View File

@ -1,19 +0,0 @@
Description
===========
Purpose
-------
The Libra Node Pool manager is designed to communicate with Openstack Nova or
any other compute API to provide nodes and floating IPs to the libra system
for use. It does this by providing a gearman worker interface to the Nova
API. This means you can have multiple pool managers running and gearman will
decide on the next available pool manager to take a job.
Design
------
It is designed to accept requests from the Libra components to manipulate Nova
instances and floating IPs. It is a daemon which is a gearman worker. Any
commands sent to that worker are converted into Nova commands and the results
are sent back to the client.

View File

@ -1,154 +0,0 @@
Gearman Commands
================
The Pool Manager registers as the worker name ``libra_pool_mgm`` on the gearman
servers. Using this it accepts the JSON requests outlined in this document.
In all cases it will return the original message along with the following for
success:
.. code-block:: json
{
"response": "PASS"
}
And this for failure:
.. code-block:: json
{
"response": "FAIL"
}
BUILD_DEVICE
------------
This command sends the Nova ``boot`` command using the Nova API and returns
details about the resulting new Nova instance. Details about which image and
other Nova settings to use are configured using the options or config file for
Pool Manager.
Example:
.. code-block:: json
{
"action": "BUILD_DEVICE"
}
Response:
.. code-block:: json
{
"action": "BUILD_DEVICE",
"response": "PASS",
"name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9",
"addr": "15.185.175.81",
"type": "basename: libra-stg-haproxy, image: 12345",
"az": "3"
}
DELETE_DEVICE
-------------
This command requests that a Nova instance be deleted.
Example:
.. code-block:: json
{
"action": "DELETE_DEVICE",
"name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9"
}
Response:
.. code-block:: json
{
"action": "DELETE_DEVICE",
"name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9",
"response": "PASS"
}
BUILD_IP
--------
This command requests a floating IP from Nova.
Example:
.. code-block:: json
{
"action": "BUILD_IP",
}
Response:
.. code-block:: json
{
"action": "BUILD_IP",
"response": "PASS",
"id": "12345",
"ip": "15.185.234.125"
}
ASSIGN_IP
---------
This command assigns floating IP addresses to Nova instances (by name of
instance).
Example:
.. code-block:: json
{
"action": "ASSIGN_IP",
"ip": "15.185.234.125",
"name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9"
}
Response:
.. code-block:: json
{
"action": "ASSIGN_IP",
"ip": "15.185.234.125",
"name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9",
"response": "PASS"
}
REMOVE_IP
---------
This command removes a floating IP address from a Nova instance, preserving
the IP address to be used another time.
Example:
.. code-block:: json
{
"action": "REMOVE_IP",
"ip": "15.185.234.125",
"name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9"
}
Response:
.. code-block:: json
{
"action": "REMOVE_IP",
"ip": "15.185.234.125",
"name": "libra-stg-haproxy-eaf1fef0-1584-11e3-b42b-02163e192df9",
"response": "PASS"
}

View File

@ -1,135 +0,0 @@
Pool Manager Configuration
==========================
These options are specific to the pool manager in addition to the
:doc:`common options </config>`.
Configuration File
------------------
The ``[mgm]`` section is specific to the libra_pool_mgm utility. Below is an
example:
.. code-block:: ini
[mgm]
pid = /var/run/libra/libra_mgm.pid
logfile = /var/log/libra/libra_mgm.log
datadir = /etc/libra/
nova_auth_url = https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
nova_user = username
nova_pass = password
nova_tenant = tenant
nova_region = region
nova_keyname = default
nova_secgroup = default
nova_image = 12345
nova_image_size = standard.medium
gearman=127.0.0.1:4730
node_basename = 'libra'
Command Line Options
--------------------
.. program:: libra_pool_mgm
.. option:: --datadir <DATADIR>
The data directory used to store things such as the failed node list.
.. option:: -n, --nodaemon
Do not run as a daemon. This option is useful for debugging purposes
only as the worker is intended to be run as a daemon normally.
.. option:: --node_basename <NODE_BASENAME>
A name to prefix the UUID name given to the nodes the pool manager
generates.
.. option:: --nova_auth_url <NOVA_AUTH_URL>
The URL used to authenticate for the Nova API
.. option:: --nova_user <NOVA_USER>
The username to authenticate for the Nova API
.. option:: --nova_pass <NOVA_PASS>
The password to authenticate for the Nova API
.. option:: --nova_tenant <NOVA_TENANT>
The tenant to use for the Nova API
.. option:: --nova_region <NOVA_REGION>
The region to use for the Nova API
.. option:: --nova_keyname <NOVA_KEYNAME>
The key name to use when spinning up nodes in the Nova API
.. option:: --nova_secgroup <NOVA_SECGROUP>
The security group to use when spinning up nodes in the Nova API
.. option:: --nova_image <NOVA_IMAGE>
The image ID or name to use on new nodes spun up in the Nova API
.. option:: --nova_net_id <Neutron Network ID>
Specify which Neutron Network ID workers should be started with.
.. option:: --nova_image_size <NOVA_IMAGE_SIZE>
The flavor ID (image size ID) or name to use for new nodes spun up in
the Nova API
.. option:: --gearman_keepalive
Use TCP KEEPALIVE to the Gearman job server. Not supported on all
systems.
.. option:: --gearman_keepcnt <COUNT>
Maximum number of TCP KEEPALIVE probes to send before killing the
connection to the Gearman job server.
.. option:: --gearman_keepidle <SECONDS>
Seconds of idle time on the Gearman job server connection before
sending TCP KEEPALIVE probes.
.. option:: --gearman_keepintvl <SECONDS>
Seconds between TCP KEEPALIVE probes.
.. option:: --gearman_ssl_ca <PATH>
The path for the Gearman SSL Certificate Authority.
.. option:: --gearman_ssl_cert <PATH>
The path for the Gearman SSL certificate.
.. option:: --gearman_ssl_key <PATH>
The path for the Gearman SSL key.
.. option:: --gearman <HOST:PORT>
Used to specify the Gearman job server hostname and port. This option
can be used multiple times to specify multiple job servers
.. option:: --rm_fip_ignore_500
When removing a floating IP, ignore the HTTP 500 error and treat it as
a successful response.
.. option:: --tcp_check_port <PORT>
After a floating IP has been assigned use this port to do a TCP connect
test to see if the assign was successful. If not specified the check
will not take place.

View File

@ -1,11 +0,0 @@
.. _libra-pool-mgm:
Libra Node Pool Manager
=======================
.. toctree::
:maxdepth: 2
about
config
commands

Binary file not shown.

View File

@ -1,86 +0,0 @@
Description
===========
Purpose
-------
A Python-based Gearman worker that handles messages for the Gearman job queue
sharing the same name as the local hostname. The messages that it receives are
JSON objects describing a load balancer, and returns this same JSON object, but
with status fields added to describe the state of the LB.
Installation
------------
Installing the Required Tools
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You must have Python setuptools installed. On Ubuntu::
$ sudo apt-get install python-setuptools
Now you may install the Libra toolset::
$ sudo python setup.py install
The worker also needs some packages installed in order to be used with
HAProxy. The commands below will install them on Ubuntu::
$ sudo apt-get install haproxy
$ sudo apt-get install socat
The Ubuntu default is to have HAProxy disabled. You will need to edit the
file */etc/default/haproxy* and set *ENABLED* to 1 if you want HAProxy to
actually start (hint: you do).
Edit /etc/sudoers
^^^^^^^^^^^^^^^^^
The worker needs to be able to run some commands as root without being
prompted for a password. It is suggested that you run the worker as
the `haproxy` user and `haproxy` group on Ubuntu systems. Then add the
following line to /etc/sudoers::
%haproxy ALL = NOPASSWD: /usr/sbin/service, /bin/cp, /bin/mv, /bin/rm, /bin/chown
The above lets everyone in the *haproxy* group run those commands
as root without being prompted for a password.
Configuration File
------------------
It can be easier to give options via a configuration file. See the sample
configuration file etc/sample_libra.cfg for an example and further
documentation. Use the :option:`--config <libra_worker.py -c>` option
to specify the configuration file to read.
Running the Worker
------------------
The worker can run in either daemon or non-daemon mode. Daemon mode requires
escalated privileges so that it can behave like a proper daemon. Non-daemon
mode (:option:`--nodaemon <libra_worker.py -n>` option) is useful for testing.
Basic commands::
# Getting help
$ libra_worker -h
# Start up as a daemon running as the `haproxy` user and
# connecting to the local Gearman job server.
$ sudo libra_worker --user haproxy --group haproxy --server 127.0.0.1:4730
# Start up with debugging output in non-daemon mode
$ libra_worker --debug --nodaemon
NOTE: When running the worker in daemon mode, you must make sure that the
directory where the PID file will be (:option:`--pid <libra_worker.py -p>`
option) and the directory where the log files will be written
(:option:`--logfile <libra_worker.py -l>` option) exists and is writable
by the user/group specified with the :option:`--user <libra_worker.py --user>`
and :option:`--group <libra_worker.py --group>` options.
You can verify that the worker is running by using the sample Gearman
client in the bin/ directory::
$ bin/client.py

View File

@ -1,111 +0,0 @@
Code Walkthrough
================
Here we'll highlight some of the more important code aspects.
Gearman Worker Thread
---------------------
.. py:module:: libra.worker.worker
.. py:function:: config_thread(logger, driver, servers, reconnect_sleep)
This function encapsulates the functionality for the Gearman worker thread
that will be started by the :py:class:`~libra.worker.main.EventServer`
class. It should never exit.
This function connects to the Gearman job server(s) and runs the Gearman
worker task, which itself is another function that is called for each
message retrieved from the Gearman job servers.
If all Gearman job servers become unavailable, the worker would
normally exit. This function identifies that situation and periodically
attempts to restart the worker in an endless loop.
EventServer Class
-----------------
.. py:module:: libra.worker.main
.. py:class:: EventServer(logger)
This class encapsulates the server activity once it starts in either
daemon or non-daemon mode and all configuration options are read. It
uses the `eventlet <http://eventlet.net/doc/>`_ Python module to start
tasks that it will be supplied.
.. py:method:: main(tasks)
The one and only method in the class and represents the primary
function of the program. A list of functions and their parameters
is supplied as the only argument. Each function will be started in
its own Green Thread.
LBaaSController Class
---------------------
.. py:module:: libra.worker.controller
.. py:class:: LBaaSController(logger, driver, json_msg)
This class is used by the Gearman task started within the worker thread
(the :py:func:`~libra.worker.worker.config_thread` function) to drive the
Gearman message handling.
.. py:method:: run()
This is the only method that should be called directly. It parses the
JSON message given during object instantiation and determines the action
to perform based on the contents. It returns another JSON message that
should then be returned to the Gearman client.
LoadBalancerDriver Class
------------------------
See :ref:`libra-worker-driver` for information
Relationship Diagram
--------------------
Below is a conceptual diagram that shows the basic relationships between
the items described above::
+-------------+ JSON request +-------------------+
| Gearman | --------------------> | |
| worker | | LBaaSController |
| task | <-------------------- | |
+-------------+ JSON response +-------------------+
| ^
| |
API call | | (Optional Exception)
| |
V |
+----------------------+
| |
| LoadBalancerDriver |
| |
+----------------------+
The steps shown above are:
.. py:module:: libra.worker
* The Gearman worker task used in the worker thread (see the
:py:func:`~worker.config_thread` function), is run when the worker
receives a message from the Gearman job server (not represented above).
* This task then uses the :py:class:`~controller.LBaaSController` to process
the message that it received.
* Based on the contents of the message, the controller then makes the relevant
driver API calls using the :py:class:`~drivers.LoadBalancerDriver` driver
that was selected via the :option:`--driver <libra_worker.py --driver>`
option.
* The driver executes the API call. If the driver encounters an error during
execution, an exception is thrown that should be handled by the
:py:class:`~controller.LBaaSController` object. Otherwise, nothing is
returned, indicating success.
* The :py:class:`~controller.LBaaSController` object then creates a response
message and returns this message back to the Gearman worker task.
* The Gearman worker task sends the response message back through the Gearman
job server to the originating client (not represented above).

View File

@ -1,40 +0,0 @@
Worker Configuration
====================
These options are specific to the worker in addition to the
:doc:`common options </config>`.
Configuration File
------------------
The ``[worker]`` section is specific to the libra_worker utility. Below
is an example:
.. code-block:: ini
[worker]
driver = haproxy
pid = /var/run/libra/libra_worker.pid
Note that drivers supported by the worker may add additional subsections
to the configuration file for their configuration needs. See the
:doc:`haproxy driver documentation <drivers/haproxy>` for an example.
Options supported in this section:
.. option:: driver <DRIVER>
Load balancer driver to use. Valid driver options are:
* *haproxy* - `HAProxy <http://haproxy.1wt.eu>`_ software load balancer.
This is the default driver.
.. option:: pid <FILE>
Location for the process PID file.
Command Line Options
--------------------
Some options can be specified via the command line. Run with the
-h or --help option for a full listing.

View File

@ -1,71 +0,0 @@
.. _libra-worker-driver:
Drivers
=======
The driver is the part of the Worker which is responsible for doing actions
towards the underlying service like HAProxy or other.
It's a plugin based python class that has a generic API for configuring up
:term:`device`.
LoadBalancerDriver Class
------------------------
See Drivers for driver documentation
.. py:module:: libra.worker.drivers
.. py:class:: LoadBalancerDriver
This defines the API for interacting with various load balancing
appliances. Drivers for these appliances should inherit from this
class and implement the relevant API methods that it can support.
`This is an abstract class and is not meant to be instantiated directly.`
Generally, an appliance driver should queue up any configuration changes
made via these API calls until the :py:meth:`create` method is called.
The :py:meth:`suspend`, :py:meth:`enable`, :py:meth:`delete`,
:py:meth:`get_stats()` and :py:meth:`archive` methods should take
immediate action.
.. py:method:: init()
.. py:method:: add_server(host, port)
.. py:method:: set_protocol(protocol, port)
.. py:method:: set_algorithm(algorithm)
.. py:method:: create()
.. py:method:: suspend()
.. py:method:: enable()
.. py:method:: delete()
.. py:method:: get_stats()
.. py:method:: archive()
Known Load Balancer Drivers Dictionary
--------------------------------------
.. py:data:: known_drivers
This is the dictionary that maps values for the
:option:`--driver <libra_worker.py --driver>` option
to a class implementing the driver :py:class:`~LoadBalancerDriver` API
for that appliance. After implementing a new driver class, you simply add
a new entry to this dictionary to plug in the new driver.
.. note::
See below for driver specific documentation
.. toctree::
:maxdepth: 2
:glob:
drivers/*

View File

@ -1,64 +0,0 @@
.. _libra-worker-driver-haproxy:
HAProxy driver
==============
Configuration File
------------------
The ``[worker:haproxy]`` section is read by the HAProxy driver.
.. code-block:: ini
[worker:haproxy]
service = ubuntu
logfile = /var/log/haproxy.log
Options supported in this section:
.. option:: logfile
Path where HAProxy will store its logs. Note that this file is not
created by the worker, but rather by the haproxy process itself. Its
contents will be delivered in response to an ARCHIVE request from the
API server.
.. note::
See :ref:`libra-worker-driver-haproxy-archiving` for information on
archiving.
.. option:: statsfile
Location of the HAProxy statistics cache file. This file needs to be
placed in a location where the worker has write access and where it
will not be deleted by external processes (so don't place it in /tmp).
This is used to deliver usage reports to the API server in response to
a STATS requests.
.. option:: service
The underlying OS Service implementation to use. Default is 'ubuntu'.
.. _libra-worker-driver-haproxy-archiving:
Log archiving
-------------
In order to support log-archiving with haproxy you need to redirect
the rsyslog feed from local0 to a dedicated file
.. note::
Change the /var/log/haproxy.log to the path you have set in the worker
section of the config.
::
cat >/etc/rsyslog.d/10-haproxy.conf<<EOF
$template Haproxy,"%TIMESTAMP% %msg%\n"
local0.* -/var/log/haproxy.log;Haproxy
# don't log anywhere else
local0.* ~
EOF

View File

@ -1,14 +0,0 @@
.. _libra-worker:
Libra Gearman Worker
====================
.. toctree::
:maxdepth: 2
:glob:
about
config
code
messages
driver

View File

@ -1,394 +0,0 @@
Worker Messages
===============
.. py:module:: libra.worker.controller
The worker expects several different types of JSON messages. Below are examples
of each. The :py:class:`~LBaaSController` class expects the messages to be
one of the types defined below.
Some things in common with all messages:
* The type is determined by the **hpcs_action**
field of the JSON message, which is required to be present.
* The JSON field names are case-sensitive.
* The JSON field values are case-insensitive.
* Extraneous fields are ignored.
* Every response will return the original message with some additional fields.
* Every response will include a **hpcs_response** field with a value of either
*PASS* or *FAIL*. Additional fields will vary depending on message type.
UPDATE Message
--------------
The UPDATE message creates or updates the load balancer configuration.
Either one or two load balancers may be defined within this message. If two
are defined, one must be with the HTTP protocol and the other must be with
the TCP protocol. No other exceptions are allowed.
Required Fields
^^^^^^^^^^^^^^^
* hpcs_action
* loadBalancers
* loadBalancers.protocol
* loadBalancers.nodes
* loadBalancers.nodes.address
* loadBalancers.nodes.port
Example Request
^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "UPDATE",
"loadBalancers": [
{
"name": "a-new-loadbalancer",
"protocol": "http",
"nodes": [
{
"address": "10.0.0.1",
"port": "80",
"weight": "1"
}
]
}
]
}
Example Response
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "UPDATE",
"loadBalancers": [
{
"name": "a-new-loadbalancer",
"protocol": "http",
"nodes": [
{
"address": "10.0.0.1",
"port": "80",
"weight": "1"
}
]
}
],
"hpcs_response": "PASS"
}
SUSPEND Message
---------------
The SUSPEND message will temporarily disable a load balancer until it is
reenabled with an ENABLE message.
Required Fields
^^^^^^^^^^^^^^^
* hpcs_action
Example Request
^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "SUSPEND"
}
Example Response
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "SUSPEND",
"hpcs_response": "PASS"
}
ENABLE Message
--------------
The ENABLE message will reenable a previously suspended load balancer.
Required Fields
^^^^^^^^^^^^^^^
* hpcs_action
Example Request
^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "ENABLE"
}
Example Response
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "ENABLE",
"hpcs_response": "PASS"
}
DELETE Message
--------------
The DELETE message will permanently disable a load balancer. This process
is not expected to be reversible.
Required Fields
^^^^^^^^^^^^^^^
* hpcs_action
Example Request
^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "DELETE"
}
Example Response
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "DELETE",
"hpcs_response": "PASS"
}
DIAGNOSTICS Message
-------------------
The DIAGNOSTICS message will run some basic network connection tests to see if
the device the worker lives on is healthy. At the moment it runs a connect
test to Google and a gearman connect test.
Example Request
^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "DIAGNOSTICS"
}
Example Response
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "DIAGNOSTICS",
"network": "PASS",
"gearman": [
{
"15.185.1.2": "PASS"
},
{
"15.185.1.3": "FAIL"
}
],
"release": "1.0.alpha.3.gca84083",
"hpcs_response": "PASS"
}
DISCOVER Message
----------------
The DISCOVER message allows a sender (i.e., API server) to discover the version
of a running worker process. The version can then be used to decide which
messages are supported.
A **version** field will be returned in the JSON message. It will be in the
format of <major>.<minor>.
A **release** field will also be returned in the JSON message. It contains
more complete versioning information as returned from a 'git describe'.
Required Fields
^^^^^^^^^^^^^^^
* hpcs_action
Example Request
^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "DISCOVER"
}
Example Response
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "DISCOVER",
"version": "1.0",
"release": "1.0.alpha.3.gca84083",
"hpcs_response": "PASS"
}
ARCHIVE Message
---------------
The ARCHIVE message requests that the load balancer send any available logs
to a destination defined within the request. Currently, the only supported
destination is a Swift account.
If the request fails, **hpcs_response** will be set to *FAIL* and a field
named **hpcs_error** will be added with an error message explaining the
failure.
Required Fields
^^^^^^^^^^^^^^^
* hpcs_action
* hpcs_object_store_type
* hpcs_object_store_basepath
* hpcs_object_store_endpoint
* hpcs_object_store_token
* loadBalancers
* loadBalancers.protocol
Example Request
^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "ARCHIVE",
"hpcs_object_store_basepath": "lbaaslogs",
"hpcs_object_store_endpoint": "https://example.com/v1/100",
"hpcs_object_store_token": "MY_AUTH_TOKEN",
"hpcs_object_store_type": "swift",
"loadBalancers": [
{
"id": "15",
"name": "lb #1",
"protocol": "HTTP"
}
]
}
Example Response
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "ARCHIVE",
"hpcs_object_store_basepath": "lbaaslogs",
"hpcs_object_store_endpoint": "https://example.com/v1/100",
"hpcs_object_store_token": "MY_AUTH_TOKEN",
"hpcs_object_store_type": "swift",
"loadBalancers": [
{
"id": "15",
"name": "lb #1",
"protocol": "HTTP"
}
],
"hpcs_response": "FAIL",
"hpcs_error": "Some error string explaining the failure."
}
STATS Message
-------------
The STATS message queries the worker for general availability (i.e., a ping)
Currently, this doesn't do more than verify that the HAProxy process is
running and we can successfully query its statistics socket.
Required Fields
^^^^^^^^^^^^^^^
* hpcs_action
Example Request
^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "STATS"
}
Example Response
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "STATS",
"hpcs_response": "PASS"
}
METRICS Message
---------------
The METRICS message queries the worker for load balancer usage metrics.
The number of bytes out for each load balancer defined on the device
is returned in the response.
Required Fields
^^^^^^^^^^^^^^^
* hpcs_action
Example Request
^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "METRICS"
}
Example Response
^^^^^^^^^^^^^^^^
.. code-block:: json
{
"hpcs_action": "METRICS",
"utc_start": "2014-01-09 15:11.45.704754",
"utc_end": "2014-01-09 16:10.00.72683",
"loadBalancers": [
{
"protocol": "HTTP",
"bytes_out": "12345"
},
{
"protocol": "TCP",
"bytes_out": "5678"
}
],
"hpcs_response": "PASS"
}

View File

@ -1,210 +0,0 @@
########################################################################
# A sample configuration file read by the Libra utilities.
########################################################################
#-----------------------------------------------------------------------
# The [DEFAULT] section contains options common to the various Libra
# utilities (worker, mgm, etc).
#-----------------------------------------------------------------------
[DEFAULT]
# Options to enable more verbose output
#verbose = false
#debug = false
# Daemon process options
#daemon = true
#user = libra
#group = libra
#billing_enable = False
# Openstack
#notification_driver = openstack.common.notifier.rpc_notifier
#default_notification_level = INFO
#default_publisher_id = id
#host = localhost
# Kombu
rabbit_use_ssl = True
#kombu_ssl_version = ''
#kombu_ssl_keyfile = ''
#kombu_ssl_certfile = ''
#kombu_ssl_ca_certs = ''
#rabbit_host = localhost
#rabbit_port = 5672
#rabbit_userid = guest
#rabbit_password = guest
#rabbit_hosts =
#rabbit_host = localhost
#rabbit_port =
#rabbit_virtual_host = /
#rabbit_retry_interval = 1
#rabbit_retry_backoff = 2
#rabbit_max_retries = 0
#rabbit_ha_queues = False
#control_exchange = openstack
#amqp_durable_queues = False
#-----------------------------------------------------------------------
# Options for utilities that are Gearman workers or clients.
#-----------------------------------------------------------------------
[gearman]
#servers = localhost:4730, HOST:PORT
#keepalive = false
#keepcnt = COUNT
#keepidle = SECONDS
#keepintvl = SECONDS
#poll = 1
#reconnect_sleep = 60
#ssl_ca = /path/to/ssl_ca
#ssl_cert = /path/to/ssl_cert
#ssl_key = /path/to/ssl_key
#-----------------------------------------------------------------------
# [worker] and [worker:*] sections are specific to the Libra worker.
#-----------------------------------------------------------------------
[worker]
#driver = haproxy
#pid = /var/run/libra/libra_worker.pid
# HAProxy driver options for the worker
[worker:haproxy]
#service = ubuntu
#statsfile = /var/log/haproxy.stats
#-----------------------------------------------------------------------
# The [mgm] section is specific to the libra_mgm utility.
#-----------------------------------------------------------------------
[mgm]
# Options with defaults
#pid = /var/run/libra/libra_mgm.pid
#threads = 8
#rm_fip_ignore_500 = false
#nova_insecure = false
#build_diag_timeout = 10
# Required options
az = 1
nova_auth_url = https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
nova_keyname = default
nova_region = region
nova_secgroup = default
nova_user = username
nova_pass = password
nova_image = 12345
nova_image_size = standard.medium
# Others
node_basename = BASENAME
nova_az_name = NAME
nova_bypass_url = URL
nova_net_id = ID
nova_tenant = TENANT
nova_tenant_id = TENANTID
#-----------------------------------------------------------------------
# The [admin_api] section is specific to the libra_admin_api utility.
#-----------------------------------------------------------------------
[admin_api]
# Options with defaults
#host = 0.0.0.0
#port = 8889
pid = /var/run/libra/libra_admin_api.pid
#stats_device_error_limit = 5
#stats_offline_ping_limit = 10
#stats_poll_timeout = 5
#stats_poll_timeout_retry = 30
#exists_freq = 60
#usage_freq = 60
#stats_freq = 5
#server_id = 0
#number_of_servers = 1
#expire_days = 0
#vip_pool_size = 10
#node_pool_size = 10
#stats_driver = dummy
#stats_enable = False
#stats_purge_enable = False
# The following are the seconds of each minute
# that the timers will run. The defaults should
# not need to be changed..
#stats_purge_days = 5
#delete_timer_seconds = 5
#ping_timer_seconds = 15
#stats_timer_seconds = 20
#usage_timer_seconds = 25
#probe_timer_seconds = 30
#offline_timer_seconds = 45
#vips_timer_seconds = 50
#exists_timer_seconds = 55
# Required options
db_sections = mysql1
ssl_certfile = certfile.crt
ssl_keyfile = keyfile.key
# Datadog plugin options
#datadog_env = unknown
datadog_api_key = KEY
datadog_app_key = KEY2
datadog_message_tail = MSG
datadog_tags = service:lbaas
# Others
#-----------------------------------------------------------------------
# The [api] section is specific to the libra_api utility.
#-----------------------------------------------------------------------
[api]
# Options with defaults
#disable_keystone=False
#host = 0.0.0.0
#port = 443
#keystone_module = keystoneclient.middleware.auth_token:AuthProtocol
#pid = /var/run/libra/libra_api.pid
# Required options
db_sections = mysql1
swift_basepath = lbaaslogs
swift_endpoint = https://host.com:443/v1/
# Others
ssl_certfile = certfile.crt
ssl_keyfile = keyfile.key
ip_filters = 192.168.0.0/24
#-----------------------------------------------------------------------
# The [mysql*] sections are referenced by admin_api and api by the
# db_sections values.
#-----------------------------------------------------------------------
[mysql1]
username = root
password =
schema = lbaas
host = localhost
port = 3306
#-----------------------------------------------------------------------
# The API will reference keystone options here
#-----------------------------------------------------------------------
[keystone]

View File

@ -1,35 +0,0 @@
[loggers]
keys=root
[logger_root]
level=DEBUG
handlers=screen,rotating_file
[formatters]
keys=simple,ts,newline
[formatter_simple]
format=%(name)s - %(levelname)s - %(message)s
[formatter_ts]
format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
[formatter_newline]
format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
class=libra.common.log.NewlineFormatter
[handlers]
keys=rotating_file,screen
[handler_rotating_file]
formatter=newline
class=libra.common.log.CompressedTimedRotatingFileHandler
level=DEBUG
args=('/var/log/libra/libra.log',)
[handler_screen]
class=StreamHandler
formatter=ts
level=AUDIT
args=(sys.stdout,)

View File

@ -1,39 +0,0 @@
########################################################################
# Config for oslo notifier
########################################################################
[DEFAULT]
# Options to enable more verbose output
verbose = true
debug = true
use_stderr = true
publish_errors = true
logfile = /tmp/libra.log
# Openstack
notification_driver = drivername
default_notification_level = INFO
default_publisher_id = lbaas
host = apiTest
# Kombu
rabbit_use_ssl = True
rabbit_host = localhost
rabbit_port = 5671
rabbit_userid = user
rabbit_password = password
#rabbit_hosts =
rabbit_virtual_host = vhost
rabbit_retry_interval = 1
rabbit_retry_backoff = 2
rabbit_max_retries = 0
rabbit_ha_queues = False
fake_rabbit = False
control_exchange = exchange
amqp_durable_queues = True
[admin_api]
billing_enable = True
exists_freq = 20
logfile = /tmp/libra_admin.log
db_sections = ''

View File

@ -1,18 +0,0 @@
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo('libra').version_string()
__release__ = pbr.version.VersionInfo('libra').release_string()

View File

@ -1,144 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
adminapi_group = cfg.OptGroup('admin_api', 'Libra Admin API options')
cfg.CONF.register_group(adminapi_group)
cfg.CONF.register_opts(
[
cfg.BoolOpt('disable_keystone',
default=False,
help='Unauthenticated server, for testing only'),
cfg.StrOpt('keystone_module',
default='keystoneclient.middleware.auth_token:AuthProtocol',
help='A colon separated module and class for keystone '
' middleware'),
cfg.StrOpt('datadog_api_key',
help='API key for datadog alerting'),
cfg.StrOpt('datadog_app_key',
help='Application key for datadog alerting'),
cfg.StrOpt('datadog_env',
default='unknown',
help='Server enironment'),
cfg.StrOpt('datadog_message_tail',
help='Text to add at the end of a Datadog alert'),
cfg.StrOpt('datadog_tags',
help='A space separated list of tags for Datadog alerts'),
cfg.ListOpt('db_sections',
required=True,
help='MySQL config sections in the config file'),
cfg.IntOpt('expire_days',
default=0,
help='Number of days until deleted load balancers '
'are expired'),
cfg.StrOpt('host',
default='0.0.0.0',
help='IP address to bind to, 0.0.0.0 for all IPs'),
cfg.IntOpt('node_pool_size',
default=10,
help='Number of hot spare devices to keep in the pool'),
cfg.IntOpt('number_of_servers',
default=1,
help='number of Admin API servers, used to calculate '
'which Admin API server should stats ping next'),
cfg.StrOpt('pid',
default='/var/run/libra/libra_admin_api.pid',
help='PID file'),
cfg.IntOpt('port',
default=8889,
help='Port number for API server'),
cfg.IntOpt('server_id',
default=0,
help='server ID of this server, used to calculate which '
'Admin API server should stats ping next '
'(start at 0)'),
cfg.StrOpt('ssl_certfile',
help='Path to an SSL certificate file'),
cfg.StrOpt('ssl_keyfile',
help='Path to an SSL key file'),
cfg.IntOpt('stats_device_error_limit',
default=5,
help='Max number of simultaneous device failures to allow '
'recovery on'),
cfg.ListOpt('stats_driver',
default=['dummy'],
help='type of stats device to use'),
cfg.IntOpt('stats_offline_ping_limit',
default=10,
help='Number of failed pings to an OFFLINE device before '
'deleting it'),
cfg.IntOpt('stats_poll_timeout',
default=5,
help='gearman timeout value for initial ping request '
'(in seconds)'),
cfg.IntOpt('stats_poll_timeout_retry',
default=30,
help='gearman timeout value for retry ping request '
'(in seconds)'),
cfg.IntOpt('vip_pool_size',
default=10,
help='Number of hot spare vips to keep in the pool'),
cfg.BoolOpt('stats_enable',
default=False,
help='Enable / Disable usage statistics gathering'),
cfg.IntOpt('exists_freq',
metavar='MINUTES',
default=60,
help='Minutes between sending of billing exists messages'),
cfg.IntOpt('usage_freq',
metavar='MINUTES',
default=60,
help='Minutes between sending of billing usage messages'),
cfg.IntOpt('stats_freq',
metavar='MINUTES',
default=5,
help='Minutes between collecting usage statistics'),
cfg.BoolOpt('stats_purge_enable',
default=False,
help='Enable / Disable purging of usage statistics'),
cfg.IntOpt('stats_purge_days',
metavar='DAYS',
default=5,
help='Number of days to keep usage statistics'),
cfg.IntOpt('delete_timer_seconds',
default=5,
help='Which second of each minute delete timer should run'),
cfg.IntOpt('ping_timer_seconds',
default=15,
help='Second of each minute ping timer should run'),
cfg.IntOpt('stats_timer_seconds',
default=20,
help='Second of each minute statistics timer should run'),
cfg.IntOpt('usage_timer_seconds',
default=25,
help='Which second of each minute usage timer should run'),
cfg.IntOpt('probe_timer_seconds',
default=30,
help='Which second of each minute probe timer should run'),
cfg.IntOpt('offline_timer_seconds',
default=45,
help='Second of each minute offline timer should run'),
cfg.IntOpt('vips_timer_seconds',
default=50,
help='Which second of each minute vips timer should run'),
cfg.IntOpt('exists_timer_seconds',
default=55,
help='Second of each minute exists timer should run'),
],
group=adminapi_group
)

View File

@ -1,95 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import importlib
from oslo.config import cfg
from pecan import request
from libra.openstack.common import log
from libra.common.api.lbaas import db_session, AdminAuth
LOG = log.getLogger(__name__)
def get_limited_to_project(headers):
    """Return the tenant the request should be limited to."""
    tenant_id = headers.get('X-Tenant-Id')
    # Gather request details once for the audit log line.
    env = request.environ
    method = env.get('REQUEST_METHOD')
    path = env.get('PATH_INFO')
    query = env.get('QUERY_STRING')
    remote = env.get('REMOTE_ADDR')
    message = 'Admin API {0} request {1} ({2}) from {3} tenant {4}'.format(
        method, path, query, remote, tenant_id)
    LOG.info(message)
    return tenant_id
def tenant_is_type(headers, tenant_types):
    """ Check the tenant ID is a user of the Admin API and allowed to use the
        API command specified
    """
    tenant_id = get_limited_to_project(headers)
    if not tenant_id:
        return False
    with db_session() as session:
        # Count AdminAuth rows matching both the tenant and one of the
        # permitted authorization levels.
        query = session.query(AdminAuth)
        query = query.filter(AdminAuth.tenant_id == tenant_id)
        query = query.filter(AdminAuth.level.in_(tenant_types))
        match_count = query.count()
        session.commit()
        return match_count > 0
def tenant_is_user(headers):
    """Return True if the tenant is authorized at USER or ADMIN level."""
    return tenant_is_type(headers, ['USER', 'ADMIN'])
def tenant_is_admin(headers):
    """Return True only if the tenant is authorized at ADMIN level."""
    return tenant_is_type(headers, ['ADMIN'])
class AuthDirector(object):
    """ There are some paths we want to work unauthenticated. This class
        will direct intentionally unauthenticated requests to the relevant
        controllers. """

    def __init__(self, app):
        # Keep a reference to the raw (unauthenticated) WSGI app so the
        # whitelisted version-discovery paths can bypass keystone.
        self.unauthed_app = app
        if not cfg.CONF['admin_api']['disable_keystone']:
            # Wrap the app in the configured keystone auth middleware.
            self.app = self._install()
        else:
            # Keystone disabled (testing only): everything unauthenticated.
            self.app = app

    def __call__(self, env, start_response):
        uri = env['PATH_INFO']
        # Version-discovery endpoints are served without authentication;
        # all other paths go through the (possibly keystone-wrapped) app.
        if uri in ['/', '/v1', '/v1/', '/v2.0', '/v2.0/']:
            return self.unauthed_app(env, start_response)
        else:
            return self.app(env, start_response)

    def _install(self):
        """Install ACL check on application."""
        # Re-read the raw config file: the keystone middleware takes a
        # plain dict of options from the [keystone] section, which is not
        # registered with oslo.config.
        config = ConfigParser.SafeConfigParser()
        config.read(cfg.CONF['config_file'])
        # keystone_module is formatted as "module.path:ClassName".
        module_details = cfg.CONF['admin_api']['keystone_module'].split(':')
        keystone = importlib.import_module(module_details[0])
        auth_class = getattr(keystone, module_details[1])
        return auth_class(self.unauthed_app, config._sections['keystone'])

View File

@ -1,199 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import daemon
import daemon.pidfile
import daemon.runner
import grp
import logging as std_logging
import pwd
import pecan
import sys
import signal
from eventlet import wsgi
from libra import __version__
from libra.common.api import server
from libra.admin_api.stats.drivers.base import known_drivers
from libra.admin_api.stats.ping_sched import PingStats
from libra.admin_api.stats.offline_sched import OfflineStats
from libra.admin_api.stats.billing_sched import BillingStats
from libra.admin_api.stats.stats_sched import UsageStats
from libra.admin_api.device_pool.manage_pool import Pool
from libra.admin_api.expunge.expunge import ExpungeScheduler
from libra.admin_api import config as api_config
from libra.admin_api import model
from libra.admin_api import acl
from libra.openstack.common import importutils
from libra.openstack.common import log as logging
from libra.common.log import get_descriptors
from libra.common.options import CONF
from libra.common.options import add_common_opts
from libra.common.options import check_gearman_ssl_files
LOG = logging.getLogger(__name__)
def get_pecan_config():
    """Load the pecan configuration from this package's config module."""
    # Pecan wants the .py source path even when the module was loaded
    # from its compiled .pyc file.
    config_path = api_config.__file__
    config_path = config_path.replace('.pyc', '.py')
    return pecan.configuration.conf_from_file(config_path)
def setup_app(pecan_config):
    """Build the Admin API WSGI application.

    Merges database and gearman settings from the Libra configuration into
    the pecan configuration, creates the pecan app, and wraps it with the
    keystone AuthDirector.
    """

    model.init_model()

    if not pecan_config:
        pecan_config = get_pecan_config()
    config = dict(pecan_config)
    # Expose the configured MySQL sections and gearman connection options
    # to the pecan application via its configuration dict.
    config['database'] = CONF['admin_api']['db_sections']
    config['gearman'] = {
        'server': CONF['gearman']['servers'],
        'ssl_key': CONF['gearman']['ssl_key'],
        'ssl_cert': CONF['gearman']['ssl_cert'],
        'ssl_ca': CONF['gearman']['ssl_ca'],
        'keepalive': CONF['gearman']['keepalive'],
        'keepcnt': CONF['gearman']['keepcnt'],
        'keepidle': CONF['gearman']['keepidle'],
        'keepintvl': CONF['gearman']['keepintvl']
    }
    if CONF['debug']:
        # Propagate debug mode to wsme and the pecan app itself.
        config['wsme'] = {'debug': True}
        config['app']['debug'] = True
    pecan.configuration.set_config(config, overwrite=True)

    app = pecan.make_app(
        pecan_config.app.root,
        static_root=pecan_config.app.static_root,
        template_path=pecan_config.app.template_path,
        debug=getattr(pecan_config.app, 'debug', False),
        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
        guess_content_type_from_ext=getattr(
            pecan_config.app,
            'guess_content_type_from_ext',
            True)
    )

    # Route version-discovery paths around keystone; everything else is
    # authenticated (see acl.AuthDirector).
    final_app = acl.AuthDirector(app)
    return final_app
class MaintThreads(object):
    """Launch the periodic maintenance schedulers and shut them down
    cleanly on SIGINT/SIGTERM.
    """

    def __init__(self, drivers):
        # Scheduler instances, in start order; shut down in the same order.
        self.classes = []
        # Stats driver classes passed through to the stats schedulers.
        self.drivers = drivers
        signal.signal(signal.SIGINT, self.exit_handler)
        signal.signal(signal.SIGTERM, self.exit_handler)
        self.run_threads()

    def run_threads(self):
        """Instantiate the schedulers; each starts its own timers."""
        # Always-on schedulers.
        pool = Pool()
        self.classes.append(pool)
        expunge = ExpungeScheduler()
        self.classes.append(expunge)
        pings = PingStats(self.drivers)
        self.classes.append(pings)
        offline = OfflineStats(self.drivers)
        self.classes.append(offline)
        # Optional schedulers, enabled by configuration.
        if CONF['admin_api'].stats_enable:
            usage = UsageStats(self.drivers)
            self.classes.append(usage)
        if CONF['billing_enable']:
            billing = BillingStats(self.drivers)
            self.classes.append(billing)

    def exit_handler(self, signum, frame):
        """Signal handler: stop every scheduler, then exit the process."""
        # Ignore further signals so the handler is not re-entered while
        # shutdown is in progress.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        for function in self.classes:
            function.shutdown()
        sys.exit()
class LogStdout(object):
    """File-like object that redirects writes to the logger.

    An instance replaces sys.stderr in main() so that stray writes from
    the WSGI server end up in the application log instead of the console.
    """

    def write(self, data):
        # Skip blank lines so the log is not flooded with empty records.
        if data.strip() != '':
            LOG.info(data)

    def flush(self):
        """No-op: nothing is buffered here, but callers that treat this
        object as a stream (e.g. code flushing sys.stderr) expect flush()
        to exist."""
        pass
def main():
    """Entry point for the libra_admin_api daemon.

    Parses configuration, opens the listening socket, optionally
    daemonizes, starts the maintenance schedulers and then serves the
    WSGI application until interrupted.
    """
    add_common_opts()
    CONF(project='libra', version=__version__)

    logging.setup('libra')

    LOG.debug('Configuration:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    drivers = []

    pc = get_pecan_config()
    # Open the (possibly SSL) listening socket before daemonizing so its
    # file descriptor can be preserved across the fork.
    sock = server.make_socket(CONF['admin_api']['host'],
                              CONF['admin_api']['port'],
                              CONF['admin_api']['ssl_keyfile'],
                              CONF['admin_api']['ssl_certfile'])
    if CONF['daemon']:
        pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['admin_api']['pid'],
                                                    10)
        # Clear out a pidfile left behind by a dead process.
        if daemon.runner.is_pidfile_stale(pidfile):
            pidfile.break_lock()
        # Keep the log and socket descriptors open through daemonization.
        descriptors = get_descriptors()
        descriptors.append(sock.fileno())
        context = daemon.DaemonContext(
            working_directory='/',
            umask=0o022,
            pidfile=pidfile,
            files_preserve=descriptors
        )
        # Drop privileges if a user/group was configured.
        if CONF['user']:
            context.uid = pwd.getpwnam(CONF['user']).pw_uid
        if CONF['group']:
            context.gid = grp.getgrnam(CONF['group']).gr_gid
        context.open()
    try:
        check_gearman_ssl_files()
    except Exception as e:
        # Abort startup: the gearman SSL files are required.
        LOG.critical(str(e))
        return
    # Use the root logger due to lots of services using logger
    LOG.info('Starting on %s:%d', CONF.admin_api.host, CONF.admin_api.port)
    api = setup_app(pc)
    # Load the configured stats driver classes, then start the
    # maintenance schedulers with them.
    for driver in CONF['admin_api']['stats_driver']:
        drivers.append(importutils.import_class(known_drivers[driver]))
    MaintThreads(drivers)
    # Redirect stderr into the log so eventlet/wsgi output is captured.
    sys.stderr = LogStdout()
    wsgi.server(sock, api, keepalive=False)
    return 0

View File

@ -1,26 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Pecan Application Configurations
# Consumed by app.get_pecan_config()/setup_app() to build the WSGI app.
app = {
    # Dotted path of the root controller handling '/'.
    'root': 'libra.admin_api.controllers.root.RootController',
    'modules': ['libra.admin_api'],
    # %(confdir)s expands to the directory containing this config file.
    'static_root': '%(confdir)s/public',
    'template_path': '%(confdir)s/admin_api/templates',
    'errors': {
        # Unknown URLs are rerouted to the /notfound controller action.
        404: '/notfound',
        '__force_dict__': True
    }
}

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,49 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pecan import expose, response
from v1.v1 import V1Controller
from v2.v2_0 import V2Controller
from libra.admin_api.model.responses import Responses
class RootController(object):
    """Top-level pecan controller: version discovery and API dispatch."""

    @expose('json')
    def index(self):
        """List the available API versions."""
        response.status = 200
        return Responses.versions

    @expose('json')
    def _default(self):
        """Catch-all for unknown URLs; responds with the 404 document."""
        response.status = 404
        return Responses._default

    @expose()
    def _lookup(self, primary_key, *remainder):
        """Route /v1 and /v2.0 to their controllers; 404 anything else."""
        if primary_key == 'v1':
            return V1Controller(), remainder
        elif primary_key == 'v2.0':
            return V2Controller(), remainder
        response.status = 404
        return Responses._default

    @expose('json')
    def notfound(self):
        """Explicit route for the configured 404 error document."""
        return Responses._default

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,335 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pecan imports
from pecan import expose, response, abort
from pecan.rest import RestController
import wsmeext.pecan as wsme_pecan
from wsme.exc import ClientSideError
from libra.admin_api.model.validators import DeviceResp, DevicePost, DevicePut
from libra.common.api.lbaas import LoadBalancer, Device, db_session
from libra.common.api.lbaas import loadbalancers_devices
from libra.openstack.common import log
LOG = log.getLogger(__name__)
class DevicesController(RestController):
    def __init__(self, devid=None):
        # Required for PUT requests. See _lookup() below
        # Device id captured from the URL; None for collection-level
        # requests (e.g. GET /devices).
        self.devid = devid
@expose('json')
def get(self, device_id=None, marker=None, limit=None):
"""
Gets either a list of all devices or a single device details.
device_id is supplied if we are getting details of a single device
marker and limit are used to paginate when device_id is not
supplied. Currently this just supplies "LIMIT marker, limit" to
MySQL which is fine.
:param device_id: id of device (unless getall)
Url:
GET /devices
List all configured devices
Url:
GET /devices/{device_id}
List details of a particular device
Returns: dict
"""
with db_session() as session:
# if we don't have an id then we want a list of all devices
if not device_id:
# return all devices
device = {'devices': []}
if marker is None:
marker = 0
if limit is None:
limit = 100
devices = session.query(
Device.id, Device.az, Device.updated, Device.created,
Device.status, Device.publicIpAddr, Device.name,
Device.type, Device.floatingIpAddr).\
offset(marker).limit(limit)
for item in devices:
dev = item._asdict()
dev['loadBalancers'] = []
if dev['status'] != "OFFLINE":
# Find loadbalancers using device
lbids = session.query(
loadbalancers_devices.c.loadbalancer).\
filter(
loadbalancers_devices.c.device == dev['id']).\
all()
lblist = [i[0] for i in lbids]
if len(lblist) > 0:
lbs = session.query(
LoadBalancer.id, LoadBalancer.tenantid).\
filter(LoadBalancer.id.in_(lblist)).all()
if lbs:
for item in lbs:
lb = item._asdict()
lb['hpcs_tenantid'] = lb['tenantid']
del(lb['tenantid'])
dev['loadBalancers'].append(lb)
device['devices'].append(dev)
elif device_id == 'usage':
return self.usage()
else:
# return device detail
device = session.query(
Device.id, Device.az, Device.updated, Device.created,
Device.status, Device.publicIpAddr, Device.name,
Device.type, Device.floatingIpAddr
).filter(Device.id == device_id).first()
if not device:
response.status = 404
session.rollback()
return dict(
status=404,
message="device id " + device_id + "not found"
)
device = device._asdict()
device['loadBalancers'] = []
if device['status'] != "OFFLINE":
lbids = session.query(
loadbalancers_devices.c.loadbalancer).\
filter(
loadbalancers_devices.c.device == device['id']).\
all()
lblist = [i[0] for i in lbids]
lbs = session.query(
LoadBalancer.id, LoadBalancer.tenantid).\
filter(LoadBalancer.id.in_(lblist)).all()
if lbs:
for item in lbs:
lb = item._asdict()
device['loadBalancers'].append(lb)
session.commit()
response.status = 200
return device
@wsme_pecan.wsexpose(DeviceResp, body=DevicePost)
def post(self, body=None):
""" Creates a new device entry in devices table.
:param None
Url:
POST /devices
JSON Request Body
{
"name":"device name",
"publicIpAddr":"15.x.x.x",
"floatingIpAddr":"15.x.x.x",
"az":2,
"type":"type descr"
}
Returns: dict
{
"status": "OFFLINE",
"updated": "2013-06-06T10:17:19",
"name": "device name",
"created": "2013-06-06T10:17:19",
"loadBalancers": [],
"floatingIpAddr": "192.1678.98.99",
"publicIpAddr": "192.1678.98.99",
"az": 2,
"type": "type descr",
"id": 67
}
"""
# Get a new device object
device = Device()
device.name = body.name
device.publicIpAddr = body.publicIpAddr
device.floatingIpAddr = body.floatingIpAddr
device.az = body.az
device.type = body.type
device.pingCount = 0
device.status = 'OFFLINE'
device.created = None
with db_session() as session:
# write to database
session.add(device)
session.flush()
# refresh the device record so we get the id back
session.refresh(device)
try:
return_data = DeviceResp()
return_data.id = device.id
return_data.name = device.name
return_data.floatingIpAddr = device.floatingIpAddr
return_data.publicIpAddr = device.publicIpAddr
return_data.az = device.az
return_data.type = device.type
return_data.created = device.created
return_data.updated = device.updated
return_data.status = device.status
return_data.loadBalancers = []
session.commit()
return return_data
except:
LOG.exception('Error communicating with load balancer pool')
errstr = 'Error communicating with load balancer pool'
session.rollback()
raise ClientSideError(errstr)
@wsme_pecan.wsexpose(None, body=DevicePut)
def put(self, body=None):
""" Updates a device entry in devices table with new status.
Also, updates status of loadbalancers using this device
with ERROR or ACTIVE and the errmsg field
:param - NOTE the _lookup() hack used to get the device id
Url:
PUT /devices/<device ID>
JSON Request Body
{
"status": <ERROR | ONLINE>
"statusDescription": "Error Description"
}
Returns: None
"""
if not self.devid:
raise ClientSideError('Device ID is required')
with db_session() as session:
device = session.query(Device).\
filter(Device.id == self.devid).first()
if not device:
session.rollback()
raise ClientSideError('Device ID is not valid')
device.status = body.status
session.flush()
lb_status = 'ACTIVE' if body.status == 'ONLINE' else body.status
lb_descr = body.statusDescription
# Now find LB's associated with this Device and update their status
lbs = session.query(
loadbalancers_devices.c.loadbalancer).\
filter(loadbalancers_devices.c.device == self.devid).\
all()
for lb in lbs:
session.query(LoadBalancer).\
filter(LoadBalancer.id == lb[0]).\
update({"status": lb_status, "errmsg": lb_descr},
synchronize_session='fetch')
session.flush()
session.commit()
return
@expose('json')
def delete(self, device_id):
""" Deletes a given device
:param device_id: id of device to delete
Urls:
DELETE /devices/{device_id}
Returns: None
"""
with db_session() as session:
# check for the device
device = session.query(Device.id).\
filter(Device.id == device_id).first()
if device is None:
session.rollback()
response.status = 400
return dict(
faultcode="Client",
faultstring="Device ID is not valid"
)
# Is the device is attached to a LB
lb = session.query(
loadbalancers_devices.c.loadbalancer).\
filter(loadbalancers_devices.c.device == device_id).\
all()
if lb:
session.rollback()
response.status = 400
return dict(
faultcode="Client",
faultstring="Device belongs to a loadbalancer"
)
try:
session.query(Device).filter(Device.id == device_id).delete()
session.flush()
session.commit()
return None
except:
session.rollback()
LOG.exception('Error deleting device from pool')
response.status = 500
return dict(
faultcode="Server",
faultstring="Error deleting device from pool"
)
# Kludge to get to here because Pecan has a hard time with URL params
# and paths
def usage(self):
"""Reports the device usage statistics for total, taken, and free
:param None
Url:
GET /devices/usage
Returns: dict
"""
with db_session() as session:
total = session.query(Device).count()
free = session.query(Device).filter(Device.status == 'OFFLINE').\
count()
session.commit()
response.status = 200
return dict(
total=total,
free=free,
taken=total - free
)
@expose('json')
def _lookup(self, devid, *remainder):
"""Routes more complex url mapping for PUT
Raises: 404
"""
# Kludgy fix for PUT since WSME doesn't like IDs on the path
if devid:
return DevicesController(devid), remainder
abort(404)

View File

@ -1,36 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pecan import expose, response
from devices import DevicesController
from libra.admin_api.model.responses import Responses
class V1Controller(object):
    """Root of the v1 admin API URL tree."""

    # /v1/devices sub-tree
    devices = DevicesController()

    @expose('json')
    def index(self):
        """GET /v1 -- report the v1 version document."""
        response.status = 200
        return Responses.versions_v1

    @expose('json')
    def _default(self):
        """Catch-all for any unmatched v1 URL.

        No action is defined for bad paths, so answer 404 with the
        stock error body.
        """
        response.status = 404
        return Responses._default

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,250 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pecan imports
import ipaddress
from pecan import expose, request, response
from pecan.rest import RestController
from libra.admin_api.library.rebuild import rebuild_device
from libra.common.api.lbaas import LoadBalancer, Device, db_session
from libra.common.api.lbaas import loadbalancers_devices, Vip
from libra.openstack.common import log
from libra.admin_api.stats.stats_gearman import GearJobs
from libra.admin_api.acl import tenant_is_admin, tenant_is_user
LOG = log.getLogger(__name__)
class DevicesController(RestController):
    """v2 admin API controller for /devices.

    Supports searching/listing devices, fetching one device's detail,
    deleting (or rebuilding, when in use) a device, and discovering a
    worker's version information over Gearman.
    """

    @expose('json')
    def get(
        self, device_id=None, status=None, name=None, ip=None, vip=None
    ):
        """
        Gets either a list of all devices or a single device's details.

        :param device_id: id of device (unless getall)
        :param status: filter by status, or the magic value 'discover'
        :param name: filter by device name (should match one device)
        :param ip: filter by floating IP (should match one device)
        :param vip: filter by VIP address (should match one device)

        Url:
           GET /devices
        List all configured devices

        Url:
           GET /devices/{device_id}
        List details of a particular device

        Returns: dict
        """
        # Work around routing issue in Pecan, doesn't work as a separate class
        # due to this get accepting more than one parameter
        if status == 'discover':
            return self.discover(device_id)
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        with db_session() as session:
            # if we don't have an id then we want a list of all devices
            if not device_id:
                device = {'devices': []}
                devices = session.query(
                    Device.id, Device.az, Device.updated, Device.created,
                    Device.status, Device.name, Device.type,
                    Device.floatingIpAddr.label('ip'), Vip.id.label('vipid'),
                    Vip.ip.label('vip')).outerjoin(Device.vip)
                if vip is not None:
                    # Search devices by vip, should only return one.
                    # VIPs are stored as integers in the DB.
                    vip_num = int(ipaddress.IPv4Address(unicode(vip)))
                    devices = devices.filter(Vip.ip == vip_num)
                if status is not None:
                    # Search devices by status
                    status = status.upper()
                    if status not in ['OFFLINE', 'ONLINE', 'ERROR']:
                        # Invalid status specified
                        response.status = 400
                        return dict(
                            faultcode="Client",
                            faultstring="Invalid status: " + status
                        )
                    devices = devices.filter(Device.status == status)
                if name is not None:
                    # Search devices by name, should only return one
                    devices = devices.filter(Device.name == name)
                if ip is not None:
                    # Search devices by IP, should only return one
                    devices = devices.filter(Device.floatingIpAddr == ip)
                # Bug fix: the original discarded the result of .all() and
                # then iterated the Query object, executing the SQL twice.
                devices = devices.all()
                for item in devices:
                    dev = item._asdict()
                    if dev['vip']:
                        dev['vip'] = [{
                            "id": dev['vipid'],
                            "address": str(ipaddress.IPv4Address(dev['vip']))
                        }]
                    else:
                        dev['vip'] = []
                    # 'vipid' is folded into the vip entry above
                    del(dev['vipid'])
                    device['devices'].append(dev)
            else:
                # return device detail
                device = session.query(
                    Device.id, Device.az, Device.updated, Device.created,
                    Device.status, Device.floatingIpAddr.label('ip'),
                    Device.name, Device.type, Vip.id.label('vipid'),
                    Vip.ip.label('vip')
                ).outerjoin(Device.vip).filter(Device.id == device_id).first()
                if not device:
                    response.status = 404
                    session.rollback()
                    # Bug fix: added the missing space before "not found"
                    return dict(
                        faultcode="Client",
                        faultstring="device id " + device_id + " not found"
                    )
                device = device._asdict()
                if device['vip']:
                    device['vip'] = [{
                        "id": device['vipid'],
                        "address": str(ipaddress.IPv4Address(device['vip']))
                    }]
                else:
                    device['vip'] = []
                del(device['vipid'])
                device['loadBalancers'] = []
                if device['status'] != "OFFLINE":
                    # resolve the LBs currently deployed on this device
                    lbids = session.query(
                        loadbalancers_devices.c.loadbalancer).\
                        filter(
                            loadbalancers_devices.c.device == device['id']).\
                        all()
                    lblist = [i[0] for i in lbids]
                    lbs = session.query(
                        LoadBalancer.id, LoadBalancer.tenantid).\
                        filter(LoadBalancer.id.in_(lblist)).all()
                    if lbs:
                        for item in lbs:
                            lb = item._asdict()
                            device['loadBalancers'].append(lb)
            session.commit()
            response.status = 200
            return device

    @expose('json')
    def delete(self, device_id):
        """ Deletes a given device.

        If the device still carries load balancers it is rebuilt instead
        of deleted; otherwise it is marked DELETED for the pool manager
        to reap.

        :param device_id: id of device to delete

        Urls:
           DELETE /devices/{device_id}

        Returns: None on success, error dict otherwise
        """
        if not tenant_is_admin(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        with db_session() as session:
            # check for the device
            device = session.query(Device.id).\
                filter(Device.id == device_id).first()
            if device is None:
                session.rollback()
                response.status = 404
                return dict(
                    faultcode="Client",
                    faultstring="Device " + device_id + " not found"
                )
            # Is the device attached to a LB?
            lb = session.query(
                loadbalancers_devices.c.loadbalancer).\
                filter(loadbalancers_devices.c.device == device_id).\
                all()
            if lb:
                # Rebuild device instead of deleting it out from under
                # the load balancers
                resp = rebuild_device(device_id)
                response.status = resp[0]
                return resp[1]
            # If we get here there are no load balancers so delete device
            response.status = 204
            try:
                device = session.query(Device).\
                    filter(Device.id == device_id).first()
                # soft delete: the pool manager reaps DELETED devices
                device.status = 'DELETED'
                session.commit()
                return None
            except Exception:
                # Bug fix: was a bare 'except:'; also removed the dead
                # 'return None' that followed this try/except (both
                # branches already return).
                session.rollback()
                LOG.exception('Error deleting device from pool')
                response.status = 500
                return dict(
                    faultcode="Server",
                    faultstring="Error deleting device from pool"
                )

    def discover(self, device_id):
        """
        Discovers information about a given libra worker based on device ID.

        Asks the worker (via Gearman) for its version/release strings.
        """
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        with db_session() as session:
            device = session.query(Device.name).\
                filter(Device.id == device_id).scalar()
            session.commit()
        # Bug fix: the original did str() first, so a missing device became
        # the string 'None' and this 404 branch could never trigger.
        if device is None:
            response.status = 404
            return dict(
                faultcode="Client",
                faultstring="Device " + device_id + " not found"
            )
        device_name = str(device)
        gearman = GearJobs()
        discover = gearman.get_discover(device_name)
        if discover is None:
            response.status = 500
            return dict(
                faultcode="Server",
                faultstring="Could not discover device"
            )
        return dict(
            id=device_id, version=discover['version'],
            release=discover['release']
        )

View File

@ -1,193 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pecan imports
import ipaddress
from pecan import expose, request, response
from pecan.rest import RestController
from libra.common.api.lbaas import LoadBalancer, Device, db_session
from libra.common.api.lbaas import Vip, Node, HealthMonitor
from libra.openstack.common import log
from libra.admin_api.acl import tenant_is_user
LOG = log.getLogger(__name__)
class LoadBalancersController(RestController):
    """v2 admin API read-only controller for /loadbalancers.

    Lists/searches load balancers or returns the full detail (nodes, vip,
    health monitor) of a single one.
    """

    @expose('json')
    def get(
        self, lb_id=None, status=None, tenant=None, name=None, ip=None,
        vip=None
    ):
        """
        Gets either a list of all loadbalancers or details for a single
        loadbalancer.

        :param lb_id: id of the loadbalancer (unless getall)
        :param status: filter by LB status
        :param tenant: filter by tenant id
        :param name: filter by LB name
        :param ip: filter by device floating IP
        :param vip: filter by VIP address

        Url:
           GET /loadbalancers
        List all loadbalancers

        Url:
           GET /loadbalancers/{lb_id}
        List details of a particular loadbalancer

        Returns: dict
        """
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        with db_session() as session:
            # if there is no lb_id then we want a list of loadbalancers
            if not lb_id:
                loadbalancers = {'loadBalancers': []}
                lbs = session.query(
                    LoadBalancer.id, LoadBalancer.name, LoadBalancer.status,
                    LoadBalancer.tenantid, Vip.id.label('vipid'),
                    Vip.ip.label('vip'),
                    Device.floatingIpAddr.label('ip'),
                    LoadBalancer.protocol, LoadBalancer.algorithm,
                    LoadBalancer.port, LoadBalancer.created,
                    LoadBalancer.updated
                ).join(LoadBalancer.devices).join(Device.vip)
                if status is not None:
                    if status not in ('ACTIVE', 'BUILD', 'DEGRADED', 'ERROR'):
                        response.status = 400
                        return dict(
                            faultcode="Client",
                            faultstring="Invalid status: " + status
                        )
                    lbs = lbs.filter(LoadBalancer.status == status)
                if tenant is not None:
                    lbs = lbs.filter(LoadBalancer.tenantid == tenant)
                if name is not None:
                    lbs = lbs.filter(LoadBalancer.name == name)
                if ip is not None:
                    lbs = lbs.filter(Device.floatingIpAddr == ip)
                if vip is not None:
                    # VIPs are stored as integers in the DB
                    vip_num = int(ipaddress.IPv4Address(unicode(vip)))
                    lbs = lbs.filter(Vip.ip == vip_num)
                # NOTE(review): the .all() result is discarded and the
                # Query is iterated below, executing the SQL a second time.
                lbs.all()
                for item in lbs:
                    lb = item._asdict()
                    if lb['vip']:
                        lb['vip'] = [{
                            "id": lb['vipid'],
                            "address": str(ipaddress.IPv4Address(lb['vip']))
                        }]
                        # NOTE(review): this deletes the 'vip' entry that
                        # was just built above -- looks like a bug (the
                        # detail branch below does the same); confirm the
                        # intended response shape.
                        del(lb['vip'])
                        del(lb['vipid'])
                    else:
                        lb['vip'] = [None]
                        del(lb['vipid'])
                    loadbalancers['loadBalancers'].append(lb)
            else:
                # single LB detail, including nodes and monitor
                lbs = session.query(
                    LoadBalancer.name, LoadBalancer.id, LoadBalancer.protocol,
                    LoadBalancer.port, LoadBalancer.algorithm,
                    LoadBalancer.status, LoadBalancer.created,
                    LoadBalancer.updated, LoadBalancer.errmsg,
                    Device.id.label('device'),
                    Vip.id.label('vipid'), Vip.ip.label('vip')
                ).join(LoadBalancer.devices).\
                    outerjoin(Device.vip).\
                    filter(LoadBalancer.id == lb_id).\
                    first()
                if not lbs:
                    response.status = 404
                    return dict(
                        faultcode="Client",
                        faultstring="Loadbalancer " + lb_id + " not found"
                    )
                loadbalancers = lbs._asdict()
                nodes = session.query(
                    Node.id, Node.address, Node.port, Node.status,
                    Node.enabled, Node.weight
                ).filter(Node.lbid == lb_id).all()
                loadbalancers['nodes'] = []
                for item in nodes:
                    node = item._asdict()
                    # map the DB's 0/1 'enabled' flag to the API's
                    # ENABLED/DISABLED condition string
                    if node['enabled'] == 1:
                        node['condition'] = 'ENABLED'
                    else:
                        node['condition'] = 'DISABLED'
                    del node['enabled']
                    node['port'] = str(node['port'])
                    node['id'] = str(node['id'])
                    # weight 1 is the default and is omitted
                    if node['weight'] == 1:
                        del node['weight']
                    loadbalancers['nodes'].append(node)
                if loadbalancers['vip']:
                    loadbalancers['vip'] = [{
                        "id": loadbalancers['vipid'],
                        "address": str(
                            ipaddress.IPv4Address(loadbalancers['vip'])
                        )
                    }]
                    # NOTE(review): same suspicious delete-after-build as
                    # in the list branch above
                    del(loadbalancers['vip'])
                    del(loadbalancers['vipid'])
                else:
                    loadbalancers['vip'] = [None]
                    del(loadbalancers['vipid'])
                # rename errmsg -> statusDescription for the API response
                if not loadbalancers['errmsg']:
                    loadbalancers['statusDescription'] = None
                else:
                    loadbalancers['statusDescription'] =\
                        loadbalancers['errmsg']
                del(loadbalancers['errmsg'])
                monitor = session.query(
                    HealthMonitor.type, HealthMonitor.delay,
                    HealthMonitor.timeout, HealthMonitor.attempts,
                    HealthMonitor.path
                ).join(LoadBalancer.monitors).\
                    filter(LoadBalancer.id == lb_id).first()
                if monitor is None:
                    monitor_data = {}
                else:
                    monitor_data = {
                        'type': monitor.type,
                        'delay': monitor.delay,
                        'timeout': monitor.timeout,
                        'attemptsBeforeDeactivation': monitor.attempts
                    }
                    # path only applies to HTTP-style monitors
                    if monitor.path:
                        monitor_data['path'] = monitor.path
                loadbalancers['monitor'] = monitor_data
            session.commit()
            return loadbalancers
# TODO: we should be able to delete loadbalancers, require lb_id, name,
# tenant and a confirm flag for verification

View File

@ -1,260 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pecan imports
import ConfigParser
import socket
import json
from pecan import expose, response, request, conf
from pecan.rest import RestController
from libra.common.api.lbaas import Device, db_session
from libra.common.api.lbaas import Vip, Limits, Counters, TenantLimits
from libra.openstack.common import log
from libra.admin_api.acl import tenant_is_admin, tenant_is_user
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from oslo.config import cfg
LOG = log.getLogger(__name__)
class LimitsController(RestController):
    """ a sub-controller for StatusController

    Reads and writes global API limits and per-tenant load balancer
    limit overrides.
    """

    @expose('json')
    def get_one(self, tenant_id):
        """GET /status/limits/{tenant_id}: a tenant's LB limit override.

        'maxLoadBalancers' is None when the tenant has no override row.
        """
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        ret = {}
        with db_session() as session:
            limit = session.query(TenantLimits.loadbalancers).\
                filter(TenantLimits.tenantid == tenant_id).scalar()
            ret['maxLoadBalancers'] = limit
            session.commit()
            return ret

    @expose('json')
    def get_all(self):
        """GET /status/limits: all global limits as a name->value dict."""
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        ret = {}
        with db_session() as session:
            limits = session.query(Limits.name, Limits.value).all()
            if limits is None:
                response.status = 500
                return dict(
                    faultcode="Server",
                    faultstring="Error obtaining limits"
                )
            for limit in limits:
                ret[limit.name] = limit.value
            session.commit()
            return ret

    @expose('json')
    def put(self, tenant_id=None):
        """PUT /status/limits[/{tenant_id}]: update limits (ADMIN only).

        Without a tenant_id the body updates global limits by name; with
        one it sets (or creates) the tenant's 'maxLoadBalancers' override.
        """
        if not tenant_is_admin(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        try:
            data = json.loads(request.body)
        # NOTE(review): bare except; 'except ValueError:' would suffice
        except:
            response.status = 400
            return dict(
                faultcode="Client",
                faultstring="Invalid JSON received"
            )
        with db_session() as session:
            if tenant_id is None:
                # global limits: every key in the body must already exist
                for key, value in data.iteritems():
                    limit = session.query(Limits).filter(Limits.name == key).\
                        first()
                    if limit is None:
                        session.rollback()
                        response.status = 400
                        return dict(
                            faultcode="Client",
                            faultstring="Limit not found: {0}".format(key)
                        )
                    limit.value = value
            else:
                if 'maxLoadBalancers' in data:
                    # update existing override or create a new row
                    limit = session.query(TenantLimits).\
                        filter(TenantLimits.tenantid == tenant_id).first()
                    if limit is not None:
                        limit.loadbalancers = data['maxLoadBalancers']
                    else:
                        new_limit = TenantLimits()
                        new_limit.tenantid = tenant_id
                        new_limit.loadbalancers = data['maxLoadBalancers']
                        session.add(new_limit)
                else:
                    session.rollback()
                    response.status = 400
                    return dict(
                        faultcode="Client",
                        faultstring="No user settable limit in json"
                    )
            session.commit()
class PoolController(RestController):
    """Reports device and VIP pool utilisation (GET /status/pool)."""

    @expose('json')
    def get(self):
        """Return counts of used/available/bad devices and vips."""
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        NULL = None  # For pep8
        with db_session() as session:
            def device_count(state):
                # one COUNT(*) per device state
                return session.query(Device).\
                    filter(Device.status == state).count()

            def vip_count(criterion):
                return session.query(Vip).filter(criterion).count()

            status = {
                "devices": {
                    "used": device_count('ONLINE'),
                    "available": device_count('OFFLINE'),
                    "error": device_count('ERROR'),
                    "pendingDelete": device_count('DELETED')
                },
                "vips": {
                    # device > 0: assigned; NULL: free; 0: broken record
                    "used": vip_count(Vip.device > 0),
                    "available": vip_count(Vip.device == NULL),
                    "bad": vip_count(Vip.device == 0)
                }
            }
            session.commit()
            return status
class ServiceController(RestController):
    """Health check of backing services (GET /status/service).

    Probes every configured MySQL server with a real query and every
    Gearman server with a TCP connect, reporting ONLINE/OFFLINE each.
    """

    @expose('json')
    def get(self):
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        ret = {
            'mysql': [],
            'gearman': []
        }
        config = ConfigParser.SafeConfigParser()
        config.read(cfg.CONF['config_file'])
        # Connect to all MySQL servers and test
        for section in conf.database:
            # NOTE(review): reaches into ConfigParser's private _sections
            db_conf = config._sections[section]
            conn_string = '''mysql+mysqlconnector://%s:%s@%s:%s/%s''' % (
                db_conf['username'],
                db_conf['password'],
                db_conf['host'],
                db_conf['port'],
                db_conf['schema']
            )
            if 'ssl_key' in db_conf:
                # mirror the SSL settings used by the main DB connection
                ssl_args = {'ssl': {
                    'cert': db_conf['ssl_cert'],
                    'key': db_conf['ssl_key'],
                    'ca': db_conf['ssl_ca']
                }}
                engine = create_engine(
                    conn_string, isolation_level="READ COMMITTED",
                    pool_size=1, connect_args=ssl_args, pool_recycle=3600
                )
            else:
                engine = create_engine(
                    conn_string, isolation_level="READ COMMITTED",
                    pool_size=1, pool_recycle=3600
                )
            session = sessionmaker(bind=engine)()
            try:
                # cheapest possible round-trip to prove the server is up
                session.execute("SELECT 1")
                session.close()
                ret['mysql'].append(
                    {"ip": db_conf['host'], "status": 'ONLINE'}
                )
            # deliberately broad: any failure marks the server OFFLINE
            except:
                ret['mysql'].append(
                    {"ip": db_conf['host'], "status": 'OFFLINE'}
                )
        # Socket connect to all gearman servers, TODO: a better gearman test
        for server in conf.gearman.server:
            ghost, gport = server.split(':')
            try:
                sock = socket.socket()
                sock.settimeout(5)
                sock.connect((ghost, int(gport)))
                sock.close()
                ret['gearman'].append({"ip": ghost, "status": 'ONLINE'})
            except socket.error:
                ret['gearman'].append({"ip": ghost, "status": 'OFFLINE'})
                # best-effort cleanup; sock may be half-open (or, if
                # socket.socket() itself failed, unbound -- the bare
                # except below swallows that NameError too)
                try:
                    sock.close()
                except:
                    pass
        return ret
class CountersController(RestController):
    """Exposes the internal event counters (GET /status/counters)."""

    @expose('json')
    def get(self):
        """Return every (name, value) counter row."""
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        with db_session() as session:
            return session.query(Counters.name, Counters.value).all()
class StatusController(RestController):
    """Parent controller wiring together the /status sub-resources."""
    # /status/pool, /status/service, /status/counters, /status/limits
    pool = PoolController()
    service = ServiceController()
    counters = CountersController()
    limits = LimitsController()

View File

@ -1,191 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
# pecan imports
from pecan import expose, request, response
from pecan.rest import RestController
from libra.openstack.common import log
from libra.admin_api.acl import tenant_is_user, tenant_is_admin
from libra.common.api.lbaas import db_session, AdminAuth
LOG = log.getLogger(__name__)
class UserController(RestController):
    """Manages Admin API user accounts (tenant id + USER/ADMIN level)."""

    @expose('json')
    def get_all(self):
        """
        Get a list of users (ADMIN level required).
        """
        if not tenant_is_admin(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        with db_session() as session:
            user = session.query(
                AdminAuth.tenant_id.label('tenant'), AdminAuth.level
            ).all()
            session.commit()
            return user

    @expose('json')
    def get_one(self, tenant_id=None):
        """
        Get a single Admin API user or details about self.
        """
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        with db_session() as session:
            user = session.query(AdminAuth).\
                filter(AdminAuth.tenant_id == tenant_id).first()
            if user is None:
                response.status = 404
                # Bug fix: key was 'faultstatus', inconsistent with every
                # other error payload in this API
                return dict(
                    faultcode="Client",
                    faultstring="User not found"
                )
            ret = {
                "tenant": user.tenant_id,
                "level": user.level
            }
            session.commit()
            return ret

    @expose('json')
    def delete(self, tenant_id):
        """ Delete a given user from the Admin API (ADMIN level required) """
        if not tenant_is_admin(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        with db_session() as session:
            user_test = session.query(AdminAuth).\
                filter(AdminAuth.tenant_id == tenant_id).count()
            if user_test == 0:
                response.status = 404
                return dict(
                    faultcode="Client",
                    faultstring="Tenant not found"
                )
            session.query(AdminAuth).\
                filter(AdminAuth.tenant_id == tenant_id).delete()
            session.commit()
        response.status = 204
        return None

    @expose('json')
    def post(self):
        """ Add a new user to the Admin API (ADMIN level required).

        Body: {"tenant": <id>, "level": "USER"|"ADMIN"} -- level
        defaults to USER when omitted.
        """
        if not tenant_is_admin(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        try:
            data = json.loads(request.body)
        except Exception:
            # Bug fix: narrowed from a bare 'except:'
            response.status = 400
            return dict(
                faultcode="Client",
                faultstring="Invalid JSON received"
            )
        # Bug fix: data['tenant'] raised KeyError (an HTTP 500) when the
        # key was missing; .get() folds that case into this 400 path
        if data.get('tenant') is None:
            response.status = 400
            return dict(
                faultcode="Client",
                faultstring="Tenant ID required"
            )
        tenant_id = data['tenant']
        if 'level' not in data:
            level = 'USER'
        elif data['level'] not in ['USER', 'ADMIN']:
            response.status = 400
            return dict(
                faultcode="Client",
                faultstring="Only USER or ADMIN levels allowed"
            )
        else:
            level = data['level']
        with db_session() as session:
            user_test = session.query(AdminAuth).\
                filter(AdminAuth.tenant_id == tenant_id).count()
            if user_test > 0:
                response.status = 400
                return dict(
                    faultcode="Client",
                    faultstring="Tenant already has an account"
                )
            user = AdminAuth()
            user.tenant_id = tenant_id
            user.level = level
            session.add(user)
            session.commit()

    @expose('json')
    def put(self, tenant_id):
        """ Change the level for an Admin API user (ADMIN level required) """
        if not tenant_is_admin(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        try:
            data = json.loads(request.body)
        except Exception:
            # Bug fix: narrowed from a bare 'except:'
            response.status = 400
            return dict(
                faultcode="Client",
                faultstring="Invalid JSON received"
            )
        if tenant_id is None:
            response.status = 400
            return dict(
                faultcode="Client",
                faultstring="Tenant ID required"
            )
        # Bug fix: data['level'] raised KeyError (an HTTP 500) when the
        # key was missing; .get() makes a missing level default to USER,
        # matching post()
        if not data.get('level'):
            level = 'USER'
        elif data['level'] not in ['USER', 'ADMIN']:
            response.status = 400
            return dict(
                faultcode="Client",
                faultstring="Only USER or ADMIN levels allowed"
            )
        else:
            level = data['level']
        with db_session() as session:
            user = session.query(AdminAuth).\
                filter(AdminAuth.tenant_id == tenant_id).first()
            if not user:
                response.status = 404
                return dict(
                    faultcode="Client",
                    faultstring="Tenant does not have an account"
                )
            user.level = level
            session.commit()

View File

@ -1,42 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pecan import expose, response
from devices import DevicesController
from loadbalancers import LoadBalancersController
from status import StatusController
from user import UserController
from libra.admin_api.model.responses import Responses
class V2Controller(object):
    """Root of the v2.0 admin API URL tree."""

    # sub-resource controllers
    devices = DevicesController()
    loadbalancers = LoadBalancersController()
    status = StatusController()
    user = UserController()

    @expose('json')
    def index(self):
        """GET /v2 -- report the v2.0 version document."""
        response.status = 200
        return Responses.versions_v2_0

    @expose('json')
    def _default(self):
        """Catch-all for any unmatched v2 URL.

        No action is defined for bad paths, so answer 404 with the
        stock error body.
        """
        response.status = 404
        return Responses._default

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,397 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import threading
import uuid
from datetime import datetime
from gearman.constants import JOB_UNKNOWN
from oslo.config import cfg
from sqlalchemy import func
from libra.common.api.lbaas import Device, PoolBuilding, Vip, db_session
from libra.common.api.lbaas import Counters
from libra.common.json_gearman import JSONGearmanClient
from libra.openstack.common import log
# TODO: Lots of duplication of code here, need to cleanup
LOG = log.getLogger(__name__)
class Pool(object):
    """Background scheduler that keeps the spare device and VIP pools full.

    Three once-a-minute timer jobs, staggered across admin API servers by
    comparing the configured server id with the current minute:

    * ``delete_devices`` - submit DELETE_DEVICE jobs for DELETED devices
    * ``probe_devices``  - build new devices when the OFFLINE pool is low
    * ``probe_vips``     - build new VIPs when the unassigned pool is low
    """

    # Second-of-the-minute marks at which each timer should fire.
    DELETE_SECONDS = cfg.CONF['admin_api'].delete_timer_seconds
    PROBE_SECONDS = cfg.CONF['admin_api'].probe_timer_seconds
    VIPS_SECONDS = cfg.CONF['admin_api'].vips_timer_seconds

    def __init__(self):
        self.probe_timer = None
        self.delete_timer = None
        # Fix: this attribute was previously misspelled "vips_time",
        # leaving shutdown() to depend on start_vips_sched() having
        # already assigned self.vips_timer.
        self.vips_timer = None
        self.server_id = cfg.CONF['admin_api']['server_id']
        self.number_of_servers = cfg.CONF['admin_api']['number_of_servers']
        self.vip_pool_size = cfg.CONF['admin_api']['vip_pool_size']
        self.node_pool_size = cfg.CONF['admin_api']['node_pool_size']
        self.start_delete_sched()
        self.start_probe_sched()
        self.start_vips_sched()

    def shutdown(self):
        """Cancel all pending timers."""
        if self.probe_timer:
            self.probe_timer.cancel()
        if self.delete_timer:
            self.delete_timer.cancel()
        if self.vips_timer:
            self.vips_timer.cancel()

    def delete_devices(self):
        """ Searches for all devices in the DELETED state and removes them """
        minute = datetime.now().minute
        # Work-sharding: only one server runs the check in a given minute.
        if self.server_id != minute % self.number_of_servers:
            LOG.info('Not our turn to run delete check, sleeping')
            self.start_delete_sched()
            return
        LOG.info('Running device delete check')
        try:
            message = []
            with db_session() as session:
                devices = session.query(Device).\
                    filter(Device.status == 'DELETED').all()
                for device in devices:
                    job_data = {
                        'action': 'DELETE_DEVICE',
                        'name': device.name
                    }
                    unique_uuid = str(uuid.uuid4())
                    message.append(dict(task='libra_pool_mgm',
                                        data=job_data,
                                        unique=unique_uuid))
                counter = session.query(Counters).\
                    filter(Counters.name == 'devices_deleted').first()
                counter.value += len(devices)
                session.commit()
            if not message:
                LOG.info("No devices to delete")
            else:
                gear = GearmanWork()
                gear.send_delete_message(message)
        except Exception:
            # Was a bare "except:"; narrowed so SystemExit and
            # KeyboardInterrupt are not swallowed.
            LOG.exception("Exception when deleting devices")
        self.start_delete_sched()

    def probe_vips(self):
        """Top up the unassigned VIP pool to vip_pool_size."""
        minute = datetime.now().minute
        if self.server_id != minute % self.number_of_servers:
            LOG.info('Not our turn to run vips check, sleeping')
            self.start_vips_sched()
            return
        LOG.info('Running vips count probe check')
        try:
            with db_session() as session:
                NULL = None  # For pep8
                vip_count = session.query(Vip).\
                    filter(Vip.device == NULL).count()
                if vip_count >= self.vip_pool_size:
                    LOG.info("Enough vips exist, no work to do")
                    session.commit()
                    self.start_vips_sched()
                    return
                build_count = self.vip_pool_size - vip_count
                self._build_vips(build_count)
        except Exception:
            LOG.exception(
                "Uncaught exception during vip pool expansion"
            )
        self.start_vips_sched()

    def probe_devices(self):
        """Top up the OFFLINE device pool to node_pool_size, accounting
        for builds already in flight on other servers."""
        minute = datetime.now().minute
        if self.server_id != minute % self.number_of_servers:
            LOG.info('Not our turn to run probe check, sleeping')
            self.start_probe_sched()
            return
        LOG.info('Running device count probe check')
        try:
            with db_session() as session:
                # Double check we have no outstanding builds assigned to us
                session.query(PoolBuilding).\
                    filter(PoolBuilding.server_id == self.server_id).\
                    delete()
                session.flush()
                dev_count = session.query(Device).\
                    filter(Device.status == 'OFFLINE').count()
                if dev_count >= self.node_pool_size:
                    LOG.info("Enough devices exist, no work to do")
                    session.commit()
                    self.start_probe_sched()
                    return
                build_count = self.node_pool_size - dev_count
                built = session.query(func.sum(PoolBuilding.qty)).first()
                if not built[0]:
                    built = 0
                else:
                    built = built[0]
                if build_count - built <= 0:
                    LOG.info(
                        "Other servers are building enough nodes"
                    )
                    session.commit()
                    self.start_probe_sched()
                    return
                build_count -= built
                # Advertise our build so other servers don't over-build.
                building = PoolBuilding()
                building.server_id = self.server_id
                building.qty = build_count
                session.add(building)
                session.commit()
            # Closed the DB session because we don't want it hanging around
            # for a long time locking tables
            self._build_nodes(build_count)
            with db_session() as session:
                session.query(PoolBuilding).\
                    filter(PoolBuilding.server_id == self.server_id).\
                    delete()
                session.commit()
        except Exception:
            LOG.exception("Uncaught exception during pool expansion")
        self.start_probe_sched()

    def _build_nodes(self, count):
        """Submit `count` BUILD_DEVICE jobs to the pool manager."""
        message = []
        it = 0
        job_data = {'action': 'BUILD_DEVICE'}
        while it < count:
            unique_uuid = str(uuid.uuid4())
            message.append(dict(task='libra_pool_mgm',
                                data=job_data,
                                unique=unique_uuid))
            it += 1
        gear = GearmanWork()
        gear.send_create_message(message)

    def _build_vips(self, count):
        """Submit `count` BUILD_IP jobs to the pool manager."""
        message = []
        it = 0
        job_data = {'action': 'BUILD_IP'}
        while it < count:
            unique_uuid = str(uuid.uuid4())
            message.append(dict(task='libra_pool_mgm',
                                data=job_data,
                                unique=unique_uuid))
            it += 1
        gear = GearmanWork()
        gear.send_vips_message(message)

    def start_probe_sched(self):
        """Schedule probe_devices at the next PROBE_SECONDS mark."""
        seconds = datetime.now().second
        if seconds < self.PROBE_SECONDS:
            sleeptime = self.PROBE_SECONDS - seconds
        else:
            sleeptime = 60 - (seconds - self.PROBE_SECONDS)
        LOG.info('Pool probe check timer sleeping for %d seconds', sleeptime)
        self.probe_timer = threading.Timer(sleeptime, self.probe_devices, ())
        self.probe_timer.start()

    def start_vips_sched(self):
        """Schedule probe_vips at the next VIPS_SECONDS mark."""
        seconds = datetime.now().second
        if seconds < self.VIPS_SECONDS:
            sleeptime = self.VIPS_SECONDS - seconds
        else:
            sleeptime = 60 - (seconds - self.VIPS_SECONDS)
        LOG.info('Pool vips check timer sleeping for %d seconds', sleeptime)
        self.vips_timer = threading.Timer(sleeptime, self.probe_vips, ())
        self.vips_timer.start()

    def start_delete_sched(self):
        """Schedule delete_devices at the next DELETE_SECONDS mark."""
        seconds = datetime.now().second
        if seconds < self.DELETE_SECONDS:
            sleeptime = self.DELETE_SECONDS - seconds
        else:
            sleeptime = 60 - (seconds - self.DELETE_SECONDS)
        LOG.info('Pool delete check timer sleeping for %d seconds', sleeptime)
        self.delete_timer = threading.Timer(sleeptime, self.delete_devices, ())
        self.delete_timer.start()
class GearmanWork(object):
    """Submits pool-manager jobs over Gearman and records results in the DB.

    Each send_* method submits a batch of 'libra_pool_mgm' jobs
    synchronously (background=False, wait_until_complete=True) and then
    updates the database from the per-job results.
    """
    def __init__(self):
        # One connection entry per configured gearman server, carrying the
        # SSL and TCP keepalive settings from the [gearman] config group.
        server_list = []
        for server in cfg.CONF['gearman']['servers']:
            host, port = server.split(':')
            server_list.append({'host': host,
                                'port': int(port),
                                'keyfile': cfg.CONF['gearman']['ssl_key'],
                                'certfile': cfg.CONF['gearman']['ssl_cert'],
                                'ca_certs': cfg.CONF['gearman']['ssl_ca'],
                                'keepalive': cfg.CONF['gearman']['keepalive'],
                                'keepcnt': cfg.CONF['gearman']['keepcnt'],
                                'keepidle': cfg.CONF['gearman']['keepidle'],
                                'keepintvl': cfg.CONF['gearman']['keepintvl']
                                })
        self.gearman_client = JSONGearmanClient(server_list)
    def send_delete_message(self, message):
        """Submit DELETE_DEVICE jobs and purge the devices from the DB.

        Jobs that hit an unknown job server or time out are skipped; all
        other completed jobs have their device row deleted (a FAIL
        response is logged but the row is removed regardless).
        """
        LOG.info("Sending %d gearman messages", len(message))
        job_status = self.gearman_client.submit_multiple_jobs(
            message, background=False, wait_until_complete=True,
            max_retries=10, poll_timeout=30.0
        )
        delete_count = 0
        for status in job_status:
            if status.state == JOB_UNKNOWN:
                LOG.error('Gearman Job server fail')
                continue
            if status.timed_out:
                LOG.error('Gearman timeout whilst deleting device')
                continue
            if status.result['response'] == 'FAIL':
                LOG.error(
                    'Pool manager failed to delete a device, removing from DB'
                )
            delete_count += 1
            with db_session() as session:
                session.query(Device).\
                    filter(Device.name == status.result['name']).delete()
                session.commit()
        LOG.info('%d freed devices delete from pool', delete_count)
    def send_vips_message(self, message):
        """Submit BUILD_IP jobs and record each built VIP in the DB."""
        # TODO: make this gearman part more async, not wait for all builds
        LOG.info("Sending %d gearman messages", len(message))
        job_status = self.gearman_client.submit_multiple_jobs(
            message, background=False, wait_until_complete=True,
            max_retries=10, poll_timeout=3600.0
        )
        built_count = 0
        for status in job_status:
            if status.state == JOB_UNKNOWN:
                LOG.error('Gearman Job server fail')
                continue
            if status.timed_out:
                LOG.error('Gearman timeout whilst building vip')
                continue
            if status.result['response'] == 'FAIL':
                LOG.error('Pool manager failed to build a vip')
                continue
            built_count += 1
            try:
                self._add_vip(status.result)
            except:
                LOG.exception(
                    'Could not add vip to DB, node data: {0}'
                    .format(status.result)
                )
        LOG.info(
            '{vips} vips built and added to pool'.format(vips=built_count)
        )
    def send_create_message(self, message):
        """Submit BUILD_DEVICE jobs and record each built device in the DB.

        Failed builds that still report a device name are stored as
        DELETED so the delete scheduler can clean them up.
        """
        # TODO: make this gearman part more async, not wait for all builds
        LOG.info("Sending {0} gearman messages".format(len(message)))
        job_status = self.gearman_client.submit_multiple_jobs(
            message, background=False, wait_until_complete=True,
            max_retries=10, poll_timeout=3600.0
        )
        built_count = 0
        for status in job_status:
            if status.state == JOB_UNKNOWN:
                LOG.error('Gearman Job server fail')
                continue
            if status.timed_out:
                LOG.error('Gearman timeout whilst building device')
                continue
            if status.result['response'] == 'FAIL':
                LOG.error('Pool manager failed to build a device')
                if 'name' in status.result:
                    self._add_bad_node(status.result)
                continue
            built_count += 1
            try:
                self._add_node(status.result)
            except:
                LOG.exception(
                    'Could not add node to DB, node data: {0}'
                    .format(status.result)
                )
        LOG.info(
            '{nodes} devices built and added to pool'.format(nodes=built_count)
        )
    def _add_vip(self, data):
        """Store a newly built VIP (kept as a packed integer)."""
        LOG.info('Adding vip {0} to DB'.format(data['ip']))
        vip = Vip()
        # NOTE: Python 2 only -- `unicode` satisfies the ipaddress
        # backport's text requirement.
        vip.ip = int(ipaddress.IPv4Address(unicode(data['ip'])))
        with db_session() as session:
            session.add(vip)
            counter = session.query(Counters).\
                filter(Counters.name == 'vips_built').first()
            counter.value += 1
            session.commit()
    def _add_node(self, data):
        """Store a successfully built device as OFFLINE (spare pool)."""
        LOG.info('Adding device {0} to DB'.format(data['name']))
        device = Device()
        device.name = data['name']
        device.publicIpAddr = data['addr']
        # TODO: kill this field, make things use publicIpAddr instead
        device.floatingIpAddr = data['addr']
        device.az = data['az']
        device.type = data['type']
        device.pingCount = 0
        device.status = 'OFFLINE'
        device.created = None
        with db_session() as session:
            session.add(device)
            counter = session.query(Counters).\
                filter(Counters.name == 'devices_built').first()
            counter.value += 1
            session.commit()
    def _add_bad_node(self, data):
        """Store a failed build as DELETED so it gets cleaned up later."""
        LOG.info(
            'Adding bad device {0} to DB to be deleted'.format(data['name'])
        )
        device = Device()
        device.name = data['name']
        device.publicIpAddr = data['addr']
        # TODO: kill this field, make things use publicIpAddr instead
        device.floatingIpAddr = data['addr']
        device.az = data['az']
        device.type = data['type']
        device.pingCount = 0
        device.status = 'DELETED'
        device.created = None
        with db_session() as session:
            session.add(device)
            counter = session.query(Counters).\
                filter(Counters.name == 'devices_bad_built').first()
            counter.value += 1
            session.commit()

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,74 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from datetime import datetime, timedelta
from oslo.config import cfg
from libra.common.api.lbaas import LoadBalancer, db_session, Counters
from libra.openstack.common import log
LOG = log.getLogger(__name__)
class ExpungeScheduler(object):
    """Daily scheduler that purges old DELETED load balancer rows.

    Disabled entirely when expire_days is unset. Work is sharded across
    admin API servers by comparing the server id with the day of month.
    """

    def __init__(self):
        self.expunge_timer = None
        self.expire_days = cfg.CONF['admin_api']['expire_days']
        if not self.expire_days:
            # Feature is opt-in; without expire_days no timer is started.
            LOG.info('Expunge not configured, disabled')
            return
        self.server_id = cfg.CONF['admin_api']['server_id']
        self.number_of_servers = cfg.CONF['admin_api']['number_of_servers']
        self.run_expunge()

    def shutdown(self):
        """Cancel the pending expunge timer, if any."""
        if self.expunge_timer:
            self.expunge_timer.cancel()

    def run_expunge(self):
        """Delete DELETED load balancers older than expire_days, then
        reschedule ourselves in 24 hours."""
        day = datetime.now().day
        if self.server_id != day % self.number_of_servers:
            LOG.info('Not our turn to run expunge check, sleeping')
            self.expunge_timer = threading.Timer(
                24 * 60 * 60, self.run_expunge, ()
            )
            # Fix: the timer was created but never started, and without
            # the return execution fell through and ran the expunge on
            # every server anyway.
            self.expunge_timer.start()
            return
        with db_session() as session:
            try:
                exp = datetime.now() - timedelta(
                    days=int(self.expire_days)
                )
                exp_time = exp.strftime('%Y-%m-%d %H:%M:%S')
                LOG.info(
                    'Expunging deleted loadbalancers older than {0}'
                    .format(exp_time)
                )
                count = session.query(
                    LoadBalancer.status
                ).filter(LoadBalancer.updated < exp_time).\
                    filter(LoadBalancer.status == 'DELETED').delete()
                counter = session.query(Counters).\
                    filter(Counters.name == 'loadbalancers_expunged').first()
                counter.value += count
                session.commit()
                LOG.info(
                    '{0} deleted load balancers expunged'.format(count)
                )
            except Exception:
                LOG.exception('Exception occurred during expunge')
        LOG.info('Expunge thread sleeping for 24 hours')
        self.expunge_timer = threading.Timer(
            24 * 60 * 60, self.run_expunge, ())
        # Fix: start the rescheduled timer; previously it was never
        # started, so expunge only ever ran once per process.
        self.expunge_timer.start()

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,99 +0,0 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
from libra.common.api.lbaas import loadbalancers_devices, Vip, Counters
from libra.common.api.lbaas import Device, LoadBalancer, db_session
from libra.common.api.gearman_client import submit_job, submit_vip_job
from libra.openstack.common import log
LOG = log.getLogger(__name__)
def rebuild_device(device_id):
    """Move all load balancers from a failed device to a spare one.

    Picks a spare device (OFFLINE, unattached, pingCount 0) under a row
    lock, reassigns the failed device's load balancers and VIP to it,
    submits the Gearman jobs to configure the replacement, and marks the
    old device DELETED.

    :param device_id: id of the failed device to rebuild
    :returns: tuple of (HTTP status code, response body dict)
    """
    new_device_id = None
    new_device_name = None
    with db_session() as session:
        # Find a spare device no load balancer is attached to, locking
        # the row so concurrent rebuilds cannot grab the same spare.
        new_device = session.query(Device).\
            filter(~Device.id.in_(
                session.query(loadbalancers_devices.c.device)
            )).\
            filter(Device.status == "OFFLINE").\
            filter(Device.pingCount == 0).\
            with_lockmode('update').\
            first()
        if new_device is None:
            session.rollback()
            LOG.error(
                'No spare devices when trying to rebuild device {0}'
                .format(device_id)
            )
            return (
                500,
                dict(
                    faultcode="Server",
                    faultstring='No spare devices when trying to rebuild '
                                'device {0}'.format(device_id)
                )
            )
        new_device_id = new_device.id
        new_device_name = new_device.name
        LOG.info(
            "Moving device {0} to device {1}"
            .format(device_id, new_device_id)
        )
        lbs = session.query(LoadBalancer).\
            join(LoadBalancer.devices).\
            filter(Device.id == device_id).all()
        if not lbs:
            # Fix: previously fell through to lbs[0] and raised an
            # uncaught IndexError when the failed device had no load
            # balancers attached.
            session.rollback()
            LOG.error(
                'No load balancers attached when trying to rebuild '
                'device {0}'.format(device_id)
            )
            return (
                500,
                dict(
                    faultcode="Server",
                    faultstring='No load balancers attached when trying '
                                'to rebuild device {0}'.format(device_id)
                )
            )
        for lb in lbs:
            lb.devices = [new_device]
            lb.status = "ERROR(REBUILDING)"
        new_device.status = 'BUILDING'
        lbid = lbs[0].id
        session.commit()
    # Push the full LB configuration to the replacement device.
    submit_job(
        'UPDATE', new_device_name, new_device_id, lbid
    )
    with db_session() as session:
        new_device = session.query(Device).\
            filter(Device.id == new_device_id).first()
        vip = session.query(Vip).filter(Vip.device == device_id).first()
        if vip:
            vip.device = new_device_id
        device = session.query(Device).\
            filter(Device.id == device_id).first()
        device.status = 'DELETED'
        lbs = session.query(LoadBalancer).\
            join(LoadBalancer.devices).\
            filter(Device.id == new_device_id).all()
        for lb in lbs:
            lb.errmsg = "Load Balancer rebuild on new device"
        if vip:
            LOG.info(
                "Moving IP {0} and marking device {1} for deletion"
                .format(str(ipaddress.IPv4Address(vip.ip)), device_id)
            )
            submit_vip_job(
                'ASSIGN', new_device_name, vip.id
            )
        new_device.status = 'ONLINE'
        counter = session.query(Counters).\
            filter(Counters.name == 'loadbalancers_rebuild').first()
        counter.value += 1
        session.commit()
    return (
        200,
        dict(oldId=device_id, newId=new_device_id)
    )

View File

@ -1,27 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def init_model():
    """Application-startup hook for database initialization.

    Pecan invokes this once when the app starts. Libra performs its own
    database setup elsewhere, so the hook is intentionally a no-op; see
    http://pecan.readthedocs.org/en/latest/databases.html for the kinds
    of setup (binding configuration, table/ORM creation) that would
    normally live here.
    """
    pass

View File

@ -1,70 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the 'License'); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Class Responses
responder objects for framework.
"""
class Responses(object):
    """Canned JSON response bodies used by the admin API controllers."""

    # Catch-all body for unmatched routes (served with HTTP 404).
    _default = {'status': '404', 'message': 'Object not Found'}

    # Body for a failed object lookup.
    not_found = {'message': 'Object not Found'}

    # Body returned when a backend dependency is unavailable.
    service_unavailable = {'message': 'Service Unavailable'}

    # Version listing for the API root.
    versions = {
        'versions': [
            {'id': 'v1',
             'updated': '2014-01-13T16:55:25Z',
             'status': 'DEPRECATED'},
            {'id': 'v2.0',
             'updated': '2014-01-13T16:55:25Z',
             'status': 'CURRENT'},
        ]
    }

    # Detail body for the deprecated v1 API.
    versions_v1 = {
        'version': {
            'id': 'v1',
            'updated': '2014-01-13T16:55:25Z',
            'status': 'DEPRECATED',
            'media-types': [{'base': 'application/json'}],
        }
    }

    # Detail body for the current v2 API.
    versions_v2_0 = {
        'version': {
            'id': 'v2',
            'updated': '2014-01-13T16:55:25Z',
            'status': 'CURRENT',
            'media-types': [{'base': 'application/json'}],
        }
    }

View File

@ -1,49 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from wsme import types as wtypes
from wsme import wsattr
from wsme.types import Base, Enum
class LB(Base):
    """Abbreviated load balancer reference embedded in device responses."""
    # Load balancer id; required.
    id = wsattr(int, mandatory=True)
    # Tenant that owns the load balancer; required.
    tenantid = wsattr(wtypes.text, mandatory=True)
class DevicePost(Base):
    """Request body for registering a new device; all fields required."""
    name = wsattr(wtypes.text, mandatory=True)
    publicIpAddr = wsattr(wtypes.text, mandatory=True)
    floatingIpAddr = wsattr(wtypes.text, mandatory=True)
    az = wsattr(int, mandatory=True)
    type = wsattr(wtypes.text, mandatory=True)
class DeviceResp(Base):
    """Full device representation returned by the devices controller."""
    id = int
    name = wtypes.text
    floatingIpAddr = wtypes.text
    publicIpAddr = wtypes.text
    az = int  # availability zone number
    type = wtypes.text
    created = wtypes.text  # creation timestamp (text form)
    updated = wtypes.text  # last-update timestamp (text form)
    status = wtypes.text
    # Load balancers hosted on this device (list of LB references).
    loadBalancers = wsattr(['LB'])
class DevicePut(Base):
    """Request body for updating a device's status."""
    # New status; only ONLINE or ERROR are accepted.
    status = Enum(wtypes.text, 'ONLINE', 'ERROR')
    # Human-readable explanation for the status change; required.
    statusDescription = wsattr(wtypes.text, mandatory=True)

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,192 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import datetime
from oslo.config import cfg
from libra.common.api.lbaas import Billing, db_session
from libra.common.api.mnb import update_mnb, test_mnb_connection
from libra.openstack.common import timeutils
from libra.openstack.common import log as logging
from sqlalchemy.sql import func
LOG = logging.getLogger(__name__)
class BillingStats(object):
    """Timer-driven sender of billing notifications (exists + usage).

    Two once-a-minute jobs, sharded across admin API servers by comparing
    the server id with the current minute. The Billing table's
    last_update rows act as a cross-server lock/timestamp so each
    notification interval is emitted only once.
    """
    # Second-of-the-minute marks at which each timer should fire.
    EXISTS_SECONDS = cfg.CONF['admin_api'].exists_timer_seconds
    USAGE_SECONDS = cfg.CONF['admin_api'].usage_timer_seconds
    def __init__(self, drivers):
        self.drivers = drivers
        self.usage_timer = None
        self.exists_timer = None
        self.server_id = cfg.CONF['admin_api']['server_id']
        self.number_of_servers = cfg.CONF['admin_api']['number_of_servers']
        # Notification intervals, in minutes.
        self.exists_freq = cfg.CONF['admin_api'].exists_freq
        self.usage_freq = cfg.CONF['admin_api'].usage_freq
        self.start_usage_sched()
        self.start_exists_sched()
    def shutdown(self):
        """Cancel all pending timers."""
        if self.usage_timer:
            self.usage_timer.cancel()
        if self.exists_timer:
            self.exists_timer.cancel()
    def update_usage(self):
        """Timer target: maybe send bandwidth usage notifications."""
        # Work out if it is our turn to run
        minute = datetime.datetime.now().minute
        if self.server_id != minute % self.number_of_servers:
            self.start_usage_sched()
            return
        # Send periodic usage notifications
        try:
            self._exec_usage()
        except Exception:
            LOG.exception('Uncaught exception during billing usage update')
        # Need to restart timer after every billing cycle
        self.start_usage_sched()
    def update_exists(self):
        """Timer target: maybe send instance exists notifications."""
        # Work out if it is our turn to run
        minute = datetime.datetime.now().minute
        if self.server_id != minute % self.number_of_servers:
            self.start_exists_sched()
            return
        # Send periodic exists notifications
        try:
            self._exec_exists()
        except Exception:
            LOG.exception('Uncaught exception during billing exists update')
        # Need to restart timer after every billing cycle
        self.start_exists_sched()
    def _exec_exists(self):
        """Send exists notifications if exists_freq minutes have passed
        since the last send recorded in the Billing table."""
        with db_session() as session:
            # Check if it's time to send exists notifications
            delta = datetime.timedelta(minutes=self.exists_freq)
            exp = timeutils.utcnow() - delta
            exp_time = exp.strftime('%Y-%m-%d %H:%M:%S')
            updated = session.query(
                Billing.last_update
            ).filter(Billing.name == "exists").\
                filter(Billing.last_update > exp_time).\
                first()
            if updated is not None:
                # Not time yet
                LOG.info('Not time to send exists notifications yet {0}'.
                         format(exp_time))
                session.rollback()
                return
            # Check the connection before sending the notifications
            if not test_mnb_connection():
                # Abort the exists notifications
                LOG.info("Aborting exists notifications. Could not connect")
                session.rollback()
                return
            # Update the exists timestamp now
            session.query(Billing).\
                filter(Billing.name == "exists").\
                update({"last_update": func.now()},
                       synchronize_session='fetch')
            session.commit()
        # Send the notifications
        update_mnb('lbaas.instance.exists', None, None)
    def _exec_usage(self):
        """Send bandwidth usage notifications for the window starting at
        the last recorded send and ending at the previous usage_freq
        boundary."""
        with db_session() as session:
            # Next check if it's time to send bandwidth usage notifications
            delta = datetime.timedelta(minutes=self.usage_freq)
            exp = timeutils.utcnow() - delta
            # NOTE(review): assumes a "usage" row always exists;
            # .first() returning no row would raise here -- confirm
            # against the schema bootstrap.
            start, = session.query(
                Billing.last_update
            ).filter(Billing.name == "usage").\
                first()
            if start and start > exp:
                # Not time yet
                LOG.info('Not time to send usage statistics yet {0}'.
                         format(exp))
                session.rollback()
                return
            # Check the connection before sending the notifications
            if not test_mnb_connection():
                # Abort the exists notifications
                LOG.info("Aborting usage notifications. Could not connect")
                session.rollback()
                return
            # Calculate the stopping point by rounding backward to the nearest
            # N minutes. i.e. if N = 60, this will round us back to HH:00:00,
            # or if N = 15, it will round us back to HH:15:00, HH:30:00,
            # HH:45:00, or HH:00:00, whichever is closest.
            N = cfg.CONF['admin_api'].usage_freq
            now = timeutils.utcnow()
            stop = now - datetime.timedelta(minutes=now.minute % N,
                                            seconds=now.second,
                                            microseconds=now.microsecond)
            # Release the lock
            session.query(Billing).\
                filter(Billing.name == "usage").\
                update({"last_update": stop},
                       synchronize_session='fetch')
            session.commit()
        # Send the usage notifications. Pass the timestamps to save
        # queries.
        update_mnb('lbaas.bandwidth.usage', start, stop)
    def start_usage_sched(self):
        """Schedule update_usage at the next USAGE_SECONDS mark."""
        # Always try to hit the expected second mark for usage
        seconds = datetime.datetime.now().second
        if seconds < self.USAGE_SECONDS:
            sleeptime = self.USAGE_SECONDS - seconds
        else:
            sleeptime = 60 - (seconds - self.USAGE_SECONDS)
        LOG.info('LB usage timer sleeping for {secs} seconds'
                 .format(secs=sleeptime))
        self.usage_timer =\
            threading.Timer(sleeptime, self.update_usage, ())
        self.usage_timer.start()
    def start_exists_sched(self):
        """Schedule update_exists at the next EXISTS_SECONDS mark."""
        # Always try to hit the expected second mark for exists
        seconds = datetime.datetime.now().second
        if seconds < self.EXISTS_SECONDS:
            sleeptime = self.EXISTS_SECONDS - seconds
        else:
            sleeptime = 60 - (seconds - self.EXISTS_SECONDS)
        LOG.info('LB exists timer sleeping for {secs} seconds'
                 .format(secs=sleeptime))
        self.exists_timer =\
            threading.Timer(sleeptime, self.update_exists, ())
        self.exists_timer.start()

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,29 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# Mapping of short driver aliases (as used in configuration) to the full
# import path of the AlertDriver implementation each one selects.
known_drivers = {
    'dummy': 'libra.admin_api.stats.drivers.dummy.driver.DummyDriver',
    'datadog': 'libra.admin_api.stats.drivers.datadog.driver.DatadogDriver',
    'database': 'libra.admin_api.stats.drivers.database.driver.DbDriver'
}
class AlertDriver(object):
    """Abstract base class for statistics alert drivers.

    Concrete drivers override these hooks to act on monitoring events;
    every base implementation simply raises NotImplementedError.
    """

    def send_alert(self, message, device_id, device_ip, device_name,
                   device_tenant):
        """Handle a device failure alert."""
        raise NotImplementedError()

    def send_delete(self, message, device_id, device_ip, device_name):
        """Handle a device deletion event."""
        raise NotImplementedError()

    def send_node_change(self, message, lbid, degraded):
        """Handle a node status change on a load balancer."""
        raise NotImplementedError()

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,76 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
from libra.admin_api.stats.drivers.base import AlertDriver
from libra.common.api.lbaas import Device, LoadBalancer, db_session
from libra.common.api.lbaas import loadbalancers_devices
from libra.admin_api.library.rebuild import rebuild_device
from libra.openstack.common import log
LOG = log.getLogger(__name__)
class DbDriver(AlertDriver):
    """Alert driver that records events in the database and triggers a
    device rebuild on failure alerts."""
    def send_alert(self, message, device_id, device_ip, device_name, device_tenant):
        """Mark the device and its load balancers as ERROR, then attempt
        to rebuild onto a spare device."""
        with db_session() as session:
            device = session.query(Device).\
                filter(Device.id == device_id).first()
            device.status = "ERROR"
            errmsg = "Load Balancer has failed, attempting rebuild"
            lbs = session.query(
                loadbalancers_devices.c.loadbalancer).\
                filter(loadbalancers_devices.c.device == device_id).\
                all()
            # TODO: make it so that we don't get stuck in LB ERROR here when
            # a rebuild fails due to something like a bad device. Maybe have
            # an attempted rebuild count?
            for lb in lbs:
                session.query(LoadBalancer).\
                    filter(LoadBalancer.id == lb[0]).\
                    update({"status": "ERROR", "errmsg": errmsg},
                           synchronize_session='fetch')
            session.flush()
            session.commit()
        self._rebuild_device(device_id)
    def send_delete(self, message, device_id, device_ip, device_name):
        """Mark the device DELETED so the pool scheduler removes it."""
        with db_session() as session:
            session.query(Device).\
                filter(Device.id == device_id).\
                update({"status": "DELETED"}, synchronize_session='fetch')
            session.commit()
    def send_node_change(self, message, lbid, degraded):
        """Update the load balancer's status/errmsg after a node change.

        ACTIVE + degraded -> DEGRADED; DEGRADED + recovered -> ACTIVE;
        ERROR just gets its error message set.
        """
        with db_session() as session:
            lb = session.query(LoadBalancer).\
                filter(LoadBalancer.id == lbid).first()
            if lb.status == 'ERROR':
                lb.errmsg = "Load balancer has failed"
            elif lb.status == 'ACTIVE' and degraded:
                lb.errmsg = "A node on the load balancer has failed"
                lb.status = 'DEGRADED'
            elif lb.status == 'DEGRADED' and not degraded:
                lb.errmsg = "A node on the load balancer has recovered"
                lb.status = 'ACTIVE'
            session.commit()
    def _rebuild_device(self, device_id):
        # Delegates to the shared rebuild helper (library.rebuild).
        rebuild_device(device_id)

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,55 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dogapi import dog_http_api as api
from oslo.config import cfg
from libra.admin_api.stats.drivers.base import AlertDriver
from libra.openstack.common import log
LOG = log.getLogger(__name__)
class DatadogDriver(AlertDriver):
    """Alert driver that forwards device events to Datadog."""

    def __init__(self):
        super(DatadogDriver, self).__init__()
        conf = cfg.CONF['admin_api']
        # Credentials are set on the shared dogapi client module.
        api.api_key = conf['datadog_api_key']
        api.application_key = conf['datadog_app_key']
        self.dd_env = conf['datadog_env']
        self.dd_tags = conf['datadog_tags']
        self.dd_message_tail = conf['datadog_message_tail']

    def _post_event(self, title, text, alert_type):
        # Tags are configured as a whitespace-separated string.
        resp = api.event_with_response(
            title, text, tags=self.dd_tags.split(), alert_type=alert_type
        )
        LOG.info('Datadog alert response: {0}'.format(resp))

    def send_alert(self, message, device_id, device_ip, device_name,
                   device_tenant):
        """Emit an 'error' event for a failed device."""
        title = 'Load balancer failure in {0}: {1} {2} {3} {4}'.format(
            self.dd_env, device_id, device_ip, device_name, device_tenant)
        body = 'Load balancer failed with message {0} {1}'.format(
            message, self.dd_message_tail)
        self._post_event(title, body, 'error')

    def send_delete(self, message, device_id, device_ip, device_name):
        """Emit an event for an unreachable device marked for deletion.

        NOTE(review): alert_type 'success' is kept from the original
        implementation.
        """
        title = 'Load balancer unreachable in {0}: {1} {2}'.format(
            self.dd_env, device_ip, device_name)
        body = 'Load balancer unreachable with message {0} {1}'.format(
            message, self.dd_message_tail)
        self._post_event(title, body, 'success')

View File

@ -1,13 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,29 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from libra.admin_api.stats.drivers.base import AlertDriver
from libra.openstack.common import log
LOG = log.getLogger(__name__)
class DummyDriver(AlertDriver):
    """No-op alert driver that only logs each event.

    Useful as a default or for testing: every hook is implemented, none
    has side effects beyond a log line.
    """

    def send_alert(self, message, device_id, device_ip, device_name,
                   device_tenant):
        """Log a device failure; take no other action."""
        LOG.info('Dummy alert of: {0}'.format(message))

    def send_delete(self, message, device_id, device_ip, device_name):
        """Log a device deletion; take no other action."""
        LOG.info('Dummy delete of: {0}'.format(message))

    def send_node_change(self, message, lbid, degraded):
        """Log a node status change; take no other action."""
        LOG.info('Dummy node change of: {0}'.format(message))

View File

@ -1,164 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from datetime import datetime
from oslo.config import cfg
from libra.common.api.lbaas import Counters, Device, db_session
from libra.admin_api.stats.stats_gearman import GearJobs
from libra.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class OfflineStats(object):
    """Periodic checker for devices in the OFFLINE state.

    Once per minute (at a fixed second offset) one of the admin API
    servers runs a DIAGNOSTICS check via Gearman against every OFFLINE
    device.  Devices that keep failing accumulate a ping count; once the
    count exceeds the configured limit the alerting drivers are asked to
    delete the device.
    """

    # Second of the minute at which the offline check should fire.
    OFFLINE_SECONDS = cfg.CONF['admin_api'].offline_timer_seconds

    def __init__(self, drivers):
        """:param drivers: iterable of AlertDriver classes to notify."""
        self.drivers = drivers
        self.offline_timer = None
        # Failed checks allowed before a device is marked for deletion.
        self.ping_limit = cfg.CONF['admin_api']['stats_offline_ping_limit']
        # Abort the cycle if more devices than this fail at once.
        self.error_limit = cfg.CONF['admin_api']['stats_device_error_limit']
        self.server_id = cfg.CONF['admin_api']['server_id']
        self.number_of_servers = cfg.CONF['admin_api']['number_of_servers']
        self.start_offline_sched()

    def shutdown(self):
        """Cancel the pending timer, if any."""
        if self.offline_timer:
            self.offline_timer.cancel()

    def check_offline_lbs(self):
        """Timer callback: run one offline check cycle and reschedule."""
        # Work out if it is our turn to run: servers share the work by
        # matching their id against the current minute.
        minute = datetime.now().minute
        if self.server_id != minute % self.number_of_servers:
            LOG.info('Not our turn to run OFFLINE check, sleeping')
            self.start_offline_sched()
            return
        tested = 0
        failed = 0
        try:
            tested, failed = self._exec_offline_check()
        except Exception:
            LOG.exception('Uncaught exception during OFFLINE check')
        # Need to restart timer after every ping cycle
        LOG.info(
            '{tested} OFFLINE loadbalancers tested, {failed} failed'
            .format(tested=tested, failed=failed)
        )
        self.start_offline_sched()

    def _exec_offline_check(self):
        """Run diagnostics on every OFFLINE device.

        :returns: a ``(tested, failed)`` tuple of counts.
        """
        tested = 0
        failed = 0
        node_list = []
        LOG.info('Running OFFLINE check')
        with db_session() as session:
            # NOTE(review): an earlier comment claimed a join "to ensure
            # device is in-use", but no join is performed here - every
            # OFFLINE device is checked.
            devices = session.query(
                Device.id, Device.name
            ).filter(Device.status == 'OFFLINE').all()
            tested = len(devices)
            if tested == 0:
                LOG.info('No OFFLINE Load Balancers to check')
                return (0, 0)
            for lb in devices:
                node_list.append(lb.name)
            gearman = GearJobs()
            failed_lbs = gearman.offline_check(node_list)
            failed = len(failed_lbs)
            if failed > self.error_limit:
                # A mass failure usually indicates a systemic problem
                # (e.g. gearman outage), not dead devices.
                LOG.error(
                    'Too many simultaneous Load Balancer Failures.'
                    ' Aborting deletion attempt'
                )
                return tested, failed
            if failed > 0:
                self._send_delete(failed_lbs)
            # Clear the ping counts for all devices not in
            # the failed list
            succeeded = list(set(node_list) - set(failed_lbs))
            session.query(Device.name, Device.pingCount).\
                filter(Device.name.in_(succeeded)).\
                update({"pingCount": 0}, synchronize_session='fetch')
            session.commit()
        return tested, failed

    def _send_delete(self, failed_nodes):
        """Bump the ping count for each failed device; once a device has
        exceeded the ping limit, notify every driver to delete it."""
        with db_session() as session:
            for lb in failed_nodes:
                # Get the current ping count
                data = session.query(
                    Device.id, Device.pingCount, Device.name,
                    Device.floatingIpAddr).\
                    filter(Device.name == lb).first()
                if not data:
                    # Bug fix: the original formatted data.id here, which
                    # raises AttributeError because data is None.  Use the
                    # device name we queried by instead.
                    LOG.error(
                        'Device {0} no longer exists'.format(lb)
                    )
                    continue
                if data.pingCount < self.ping_limit:
                    # Not over the limit yet: just record the failure.
                    data.pingCount += 1
                    LOG.error(
                        'Offline Device {0} has failed {1} ping attempts'.
                        format(lb, data.pingCount)
                    )
                    session.query(Device).\
                        filter(Device.name == lb).\
                        update({"pingCount": data.pingCount},
                               synchronize_session='fetch')
                    session.flush()
                    continue
                message = (
                    'Load balancer {0} unreachable and marked for deletion'.
                    format(lb)
                )
                for driver in self.drivers:
                    instance = driver()
                    LOG.info(
                        'Sending delete request for {0} to {1}'.format(
                            lb, instance.__class__.__name__
                        )
                    )
                    instance.send_delete(message, data.id,
                                         data.floatingIpAddr, data.name)
                counter = session.query(Counters).\
                    filter(Counters.name == 'devices_offline_failed').first()
                counter.value += 1
            session.commit()

    def start_offline_sched(self):
        """Schedule the next check at the expected second of the minute."""
        # Always try to hit the expected second mark for offline checks
        seconds = datetime.now().second
        if seconds < self.OFFLINE_SECONDS:
            sleeptime = self.OFFLINE_SECONDS - seconds
        else:
            sleeptime = 60 - (seconds - self.OFFLINE_SECONDS)
        LOG.info('LB offline check timer sleeping for {secs} seconds'
                 .format(secs=sleeptime))
        self.offline_timer = threading.Timer(
            sleeptime, self.check_offline_lbs, ()
        )
        self.offline_timer.start()

View File

@ -1,246 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from datetime import datetime
from oslo.config import cfg
from libra.common.api.lbaas import LoadBalancer, Device, Node, db_session
from libra.openstack.common import log as logging
from libra.admin_api.stats.stats_gearman import GearJobs
LOG = logging.getLogger(__name__)
class PingStats(object):
    """Periodic health pinger for ONLINE load-balancer devices.

    Once per minute (at a fixed second offset) one of the admin API
    servers pings every ONLINE device via Gearman, alerts the configured
    drivers about failed devices, and reconciles per-node status changes.
    """

    # Second of the minute at which the ping cycle should fire.
    PING_SECONDS = cfg.CONF['admin_api'].ping_timer_seconds

    def __init__(self, drivers):
        # drivers: iterable of AlertDriver classes, instantiated per event.
        self.drivers = drivers
        self.ping_timer = None
        # Abort the cycle if more devices than this fail at once.
        self.error_limit = cfg.CONF['admin_api']['stats_device_error_limit']
        self.server_id = cfg.CONF['admin_api']['server_id']
        self.number_of_servers = cfg.CONF['admin_api']['number_of_servers']
        self.stats_driver = cfg.CONF['admin_api']['stats_driver']
        LOG.info("Selected stats drivers: %s", self.stats_driver)
        self.start_ping_sched()

    def shutdown(self):
        """Cancel the pending ping timer, if any."""
        if self.ping_timer:
            self.ping_timer.cancel()

    def ping_lbs(self):
        """Timer callback: run one ping cycle and reschedule."""
        # Work out if it is our turn to run: servers share the work by
        # matching their id against the current minute.
        minute = datetime.now().minute
        if self.server_id != minute % self.number_of_servers:
            LOG.info('Not our turn to run ping check, sleeping')
            self.start_ping_sched()
            return
        pings = 0
        failed = 0
        try:
            pings, failed = self._exec_ping()
        except Exception:
            LOG.exception('Uncaught exception during LB ping')
        # Need to restart timer after every ping cycle
        LOG.info('{pings} loadbalancers pinged, {failed} failed'
                 .format(pings=pings, failed=failed))
        self.start_ping_sched()

    def _exec_ping(self):
        """Ping all ONLINE devices; returns a (pings, failed) tuple."""
        pings = 0
        failed = 0
        node_list = []
        LOG.info('Running ping check')
        with db_session() as session:
            devices = session.query(
                Device.id, Device.name
            ).filter(Device.status == 'ONLINE').all()
            pings = len(devices)
            if pings == 0:
                LOG.info('No LBs to ping')
                return (0, 0)
            for lb in devices:
                node_list.append(lb.name)
            gearman = GearJobs()
            failed_lbs, node_status = gearman.send_pings(node_list)
            failed = len(failed_lbs)
            if failed > self.error_limit:
                # A mass failure usually indicates a systemic problem
                # (e.g. gearman outage), not dead devices.
                LOG.error(
                    'Too many simultaneous Load Balancer Failures.'
                    ' Aborting recovery attempt'
                )
                return pings, failed
            if failed > 0:
                self._send_fails(failed_lbs)
            # Process node status after lb status
            self._update_nodes(node_status)
            session.commit()
        return pings, failed

    def _send_fails(self, failed_lbs):
        """Notify every configured driver about each failed device."""
        with db_session() as session:
            for lb in failed_lbs:
                data = self._get_lb(lb, session)
                if not data:
                    LOG.error(
                        'Device {0} has no Loadbalancer attached'.
                        format(lb)
                    )
                    continue
                message = (
                    'Load balancer failed\n'
                    'ID: {0}\n'
                    'IP: {1}\n'
                    'name: {2}\n'
                    'tenant: {3}\n'.format(
                        data.id, data.floatingIpAddr, data.name,
                        data.tenantid
                    )
                )
                for driver in self.drivers:
                    instance = driver()
                    LOG.info(
                        'Sending failure of {0} to {1}'.format(
                            lb, instance.__class__.__name__
                        )
                    )
                    instance.send_alert(message, data.id, data.floatingIpAddr, data.name, data.tenantid)
            session.commit()

    def _get_lb(self, lb, session):
        # Resolve a device name to its (tenantid, ip, id, name) row, or
        # None when the device has no load balancer attached.
        lb = session.query(
            LoadBalancer.tenantid, Device.floatingIpAddr, Device.id, Device.name
        ).join(LoadBalancer.devices).\
            filter(Device.name == lb).first()
        return lb

    def _update_nodes(self, node_status):
        """Reconcile per-node status reported by workers with the DB.

        ``node_status`` maps a device name to the list of node dicts
        (with 'id' and 'status' keys, 'UP'/'DOWN') that the worker
        returned from GearJobs.send_pings().
        """
        lbids = []                # LBs with at least one node change
        degraded = []             # LBs with at least one node DOWN
        failed_nodes = dict()     # lbid -> node ids that went DOWN
        repaired_nodes = dict()   # lbid -> node ids that came back UP
        errormsg = dict()         # lbid -> alert message header
        with db_session() as session:
            for lb, nodes in node_status.iteritems():
                data = self._get_lb(lb, session)
                if not data:
                    LOG.error(
                        'Device {0} has no Loadbalancer attached'.
                        format(lb)
                    )
                    continue
                # Iterate the list of nodes returned from the worker
                # and track any status changes
                for node in nodes:
                    # Get the last known status from the nodes table
                    node_data = session.query(Node).\
                        filter(Node.id == int(node['id'])).first()
                    if node_data is None:
                        LOG.error(
                            'DB error getting node {0} to set status {1}'
                            .format(node['id'], node['status'])
                        )
                        continue
                    # Note all degraded LBs
                    if (node['status'] == 'DOWN' and
                            node_data.lbid not in degraded):
                        degraded.append(node_data.lbid)
                    new_status = None
                    # Compare node status to the workers status
                    if (node['status'] == 'DOWN' and
                            node_data.status == 'ONLINE'):
                        new_status = 'ERROR'
                        if node_data.lbid not in failed_nodes:
                            failed_nodes[node_data.lbid] = []
                        failed_nodes[node_data.lbid].append(node['id'])
                    elif (node['status'] == 'UP' and
                            node_data.status == 'ERROR'):
                        new_status = 'ONLINE'
                        if node_data.lbid not in repaired_nodes:
                            repaired_nodes[node_data.lbid] = []
                        repaired_nodes[node_data.lbid].append(node['id'])
                    else:
                        # No change
                        continue
                    # Note all LBs with node status changes
                    if node_data.lbid not in lbids:
                        lbids.append(node_data.lbid)
                        errormsg[node_data.lbid] =\
                            'Node status change ID:'\
                            ' {0}, IP: {1}, tenant: {2}'.\
                            format(
                                node_data.lbid,
                                data.floatingIpAddr,
                                data.tenantid)
                    # Change the node status in the node table
                    session.query(Node).\
                        filter(Node.id == int(node['id'])).\
                        update({"status": new_status},
                               synchronize_session='fetch')
                    session.flush()
            session.commit()
        # Generate a status message per LB for the alert.
        for lbid in lbids:
            message = errormsg[lbid]
            if lbid in failed_nodes:
                message += ' failed:'
                message += ','.join(str(x) for x in failed_nodes[lbid])
                message += '\n'
            if lbid in repaired_nodes:
                message += ' repaired: '
                message += ','.join(str(x) for x in repaired_nodes[lbid])
            # Send the LB node change alert
            if lbid in degraded:
                is_degraded = True
            else:
                is_degraded = False
            for driver in self.drivers:
                instance = driver()
                LOG.info(
                    'Sending change of node status on LB {0} to {1}'.format(
                        lbid, instance.__class__.__name__)
                )
                try:
                    instance.send_node_change(message, lbid, is_degraded)
                except NotImplementedError:
                    # Not every driver handles node-level changes.
                    pass

    def start_ping_sched(self):
        """Schedule the next ping cycle at the expected second mark."""
        # Always try to hit the expected second mark for pings
        seconds = datetime.now().second
        if seconds < self.PING_SECONDS:
            sleeptime = self.PING_SECONDS - seconds
        else:
            sleeptime = 60 - (seconds - self.PING_SECONDS)
        LOG.info('LB ping check timer sleeping for %d seconds', sleeptime)
        self.ping_timer = threading.Timer(sleeptime, self.ping_lbs, ())
        self.ping_timer.start()

View File

@ -1,224 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gearman.constants import JOB_UNKNOWN
from oslo.config import cfg
from libra.common.json_gearman import JSONGearmanClient
from libra.openstack.common import log
LOG = log.getLogger(__name__)
class GearJobs(object):
    """Submits health-check and statistics jobs to workers over Gearman.

    One job is submitted per device (the device name is the Gearman task
    name); results are sorted into failed / retry / success buckets.
    """

    def __init__(self):
        self.poll_timeout = cfg.CONF['admin_api']['stats_poll_timeout']
        self.poll_retry = cfg.CONF['admin_api']['stats_poll_timeout_retry']
        server_list = []
        for server in cfg.CONF['gearman']['servers']:
            host, port = server.split(':')
            server_list.append({'host': host,
                                'port': int(port),
                                'keyfile': cfg.CONF['gearman']['ssl_key'],
                                'certfile': cfg.CONF['gearman']['ssl_cert'],
                                'ca_certs': cfg.CONF['gearman']['ssl_ca'],
                                'keepalive': cfg.CONF['gearman']['keepalive'],
                                'keepcnt': cfg.CONF['gearman']['keepcnt'],
                                'keepidle': cfg.CONF['gearman']['keepidle'],
                                'keepintvl': cfg.CONF['gearman']['keepintvl']
                                })
        self.gm_client = JSONGearmanClient(server_list)

    def _submit_jobs(self, node_list, job_data, poll_timeout):
        """Submit one foreground job per node and wait for completion."""
        list_of_jobs = [dict(task=str(node), data=job_data)
                        for node in node_list]
        return self.gm_client.submit_multiple_jobs(
            list_of_jobs, background=False, wait_until_complete=True,
            poll_timeout=poll_timeout
        )

    def _collect_ping_results(self, submitted_pings, timeout_sink,
                              failed_list, node_status):
        """Sort completed STATS jobs into their outcome buckets.

        Timeouts are appended to ``timeout_sink`` (the retry list on the
        first pass, the failed list on the retry pass); hard failures go
        to ``failed_list``; per-node payloads are stored in
        ``node_status``.  This replaces the duplicated first-pass/retry
        loops the original TODO flagged.
        """
        for ping in submitted_pings:
            if ping.state == JOB_UNKNOWN:
                # TODO: Gearman server failed, ignoring for now
                LOG.error('Gearman Job server fail')
                continue
            if ping.timed_out:
                # Ping timeout
                timeout_sink.append(ping.job.task)
                continue
            if ping.result['hpcs_response'] == 'FAIL':
                if (
                    'status' in ping.result and
                    ping.result['status'] == 'DELETED'
                ):
                    # Device was deleted underneath us; not a failure.
                    continue
                # Error returned by Gearman
                failed_list.append(ping.job.task)
                continue
            if 'nodes' in ping.result:
                node_status[ping.job.task] = ping.result['nodes']

    def send_pings(self, node_list):
        """Ping every device in ``node_list`` with a STATS job.

        :returns: ``(failed_list, node_status)`` where ``failed_list``
            holds device names that failed (after one retry on timeout)
            and ``node_status`` maps device name -> per-node status list.
        """
        failed_list = []
        node_status = dict()
        retry_list = []
        # The message name is STATS for historical reasons. Real
        # data statistics are gathered with METRICS messages.
        job_data = {"hpcs_action": "STATS"}
        submitted_pings = self._submit_jobs(node_list, job_data,
                                            self.poll_timeout)
        self._collect_ping_results(submitted_pings, retry_list,
                                   failed_list, node_status)
        if len(retry_list) > 0:
            LOG.info(
                "{0} pings timed out, retrying".format(len(retry_list))
            )
            submitted_pings = self._submit_jobs(retry_list, job_data,
                                                self.poll_retry)
            # On the retry pass a timeout is a hard failure.
            self._collect_ping_results(submitted_pings, failed_list,
                                       failed_list, node_status)
        return failed_list, node_status

    def offline_check(self, node_list):
        """Run DIAGNOSTICS against OFFLINE devices.

        A device fails when its network test fails or when more than a
        third of its gearman connectivity tests fail.

        :returns: list of device names that failed the check.
        """
        failed_list = []
        job_data = {"hpcs_action": "DIAGNOSTICS"}
        submitted_pings = self._submit_jobs(node_list, job_data,
                                            self.poll_timeout)
        for ping in submitted_pings:
            if ping.state == JOB_UNKNOWN:
                LOG.error(
                    "Gearman Job server failed during OFFLINE check of {0}".
                    format(ping.job.task)
                )
            elif ping.timed_out:
                failed_list.append(ping.job.task)
            elif ping.result['network'] == 'FAIL':
                failed_list.append(ping.job.task)
            else:
                gearman_count = 0
                gearman_fail = 0
                for gearman_test in ping.result['gearman']:
                    gearman_count += 1
                    if gearman_test['status'] == 'FAIL':
                        gearman_fail += 1
                # Need 2/3rds gearman up
                max_fail_count = gearman_count / 3
                if gearman_fail > max_fail_count:
                    failed_list.append(ping.job.task)
        return failed_list

    def get_discover(self, name):
        """Ask a single device to DISCOVER itself.

        Used in the v2 devices controller.

        :returns: the job result dict, or None on any failure.
        """
        job_data = {"hpcs_action": "DISCOVER"}
        job = self.gm_client.submit_job(
            str(name), job_data, background=False, wait_until_complete=True,
            poll_timeout=10
        )
        if job.state == JOB_UNKNOWN:
            # Gearman server failed
            return None
        elif job.timed_out:
            # Time out is a fail
            return None
        elif job.result['hpcs_response'] == 'FAIL':
            # Fail response is a fail
            return None
        return job.result

    def _collect_stats_results(self, submitted_stats, timeout_sink,
                               failed_list, results, log_unknown=False):
        """Sort completed METRICS jobs into their outcome buckets.

        ``log_unknown`` matches the original behavior: only the retry
        pass logs a JOB_UNKNOWN server failure.
        """
        for stats in submitted_stats:
            if stats.state == JOB_UNKNOWN:
                if log_unknown:
                    LOG.error(
                        "Gearman Job server failed gathering statistics "
                        "on {0}".format(stats.job.task)
                    )
                timeout_sink.append(stats.job.task)
            elif stats.timed_out:
                # Timeout
                timeout_sink.append(stats.job.task)
            elif stats.result['hpcs_response'] == 'FAIL':
                # Error returned by Gearman
                failed_list.append(stats.job.task)
            else:
                # Success
                results[stats.job.task] = stats.result

    def get_stats(self, node_list):
        """Gather METRICS from every device in ``node_list``.

        :returns: ``(failed_list, results)`` where ``results`` maps
            device name -> metrics payload.
        """
        failed_list = []
        retry_list = []
        results = {}
        job_data = {"hpcs_action": "METRICS"}
        submitted_stats = self._submit_jobs(node_list, job_data,
                                            self.poll_timeout)
        self._collect_stats_results(submitted_stats, retry_list,
                                    failed_list, results)
        if len(retry_list) > 0:
            LOG.info(
                "{0} Statistics gathering timed out, retrying".
                format(len(retry_list))
            )
            submitted_stats = self._submit_jobs(retry_list, job_data,
                                                self.poll_retry)
            # Retry pass: unknown server and timeouts are hard failures.
            self._collect_stats_results(submitted_stats, failed_list,
                                        failed_list, results,
                                        log_unknown=True)
        return failed_list, results

Some files were not shown because too many files have changed in this diff Show More