Rename Quantum to Neutron

This change renames everything to Neutron while providing backwards
compatible adjustments for Grizzly configuration files.

implements blueprint: remove-use-of-quantum

Change-Id: Ie7d07ba7c89857e13d4ddc8f0e9b68de020a3d19
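
The backwards-compatible handling of Grizzly configuration files mentioned above goes through neutron.common.legacy.modernize_quantum_config(), which the new lbaas_agent.py further down calls right after parsing the config. The sketch below is only a hypothetical illustration of the idea, not the commit's actual implementation; rewrite_quantum_path and the sample option are made up for the example:

def rewrite_quantum_path(value):
    """Illustrative only: map a Grizzly 'quantum.*' dotted path to 'neutron.*'."""
    if isinstance(value, str) and value.startswith('quantum.'):
        return 'neutron.' + value[len('quantum.'):]
    return value

# An old Grizzly setting such as
#   interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
# would then be consumed as
#   interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver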
Mark McClain 2013-07-03 19:39:46 -04:00
parent 57d798231f
commit ddf221b92a
49 changed files with 5852 additions and 102 deletions

.gitignore

@@ -8,6 +8,9 @@ dist/
doc/build
*.DS_Store
*.pyc
neutron.egg-info/
neutron/vcsversion.py
neutron/versioninfo
quantum.egg-info/
quantum/vcsversion.py
quantum/versioninfo


@@ -23,7 +23,8 @@ argument-rgx=[a-z_][a-z0-9_]{1,30}$
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$
# Module names matching quantum-* are ok (files in bin/)
# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(quantum-[a-z0-9_-]+))$
# Don't require docstrings on tests.


@@ -1,4 +1,4 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ quantum/tests/unit $LISTOPT $IDOPTION
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ neutron/tests/unit $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list


@@ -1,4 +1,4 @@
Quantum Style Commandments
Neutron Style Commandments
==========================
- Step 1: Read http://www.python.org/dev/peps/pep-0008/
@@ -35,13 +35,13 @@ Example::
The following imports,
from quantum.api import networks
from quantum import wsgi
from neutron.api import networks
from neutron import wsgi
are considered equivalent for ordering purposes to
import quantum.api.networks
import quantum.wsgi
import neutron.api.networks
import neutron.wsgi
- Organize your imports according to the following template
@@ -52,7 +52,7 @@ Example::
\n
{{third-party lib imports in human alphabetical order}}
\n
{{quantum imports in human alphabetical order}}
{{neutron imports in human alphabetical order}}
\n
\n
{{begin your code}}
@@ -71,13 +71,13 @@ Example::
import testtools
import webob.exc
import quantum.api.networks
from quantum.api import ports
from quantum.db import models
from quantum.extensions import multiport
from quantum.openstack.common import log as logging
import quantum.manager
from quantum import service
import neutron.api.networks
from neutron.api import ports
from neutron.db import models
from neutron.extensions import multiport
from neutron.openstack.common import log as logging
import neutron.manager
from neutron import service
Docstrings
@@ -202,8 +202,8 @@ submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.
All unittest classes must ultimately inherit from testtools.TestCase. In the
Quantum test suite, this should be done by inheriting from
quantum.tests.base.BaseTestCase.
Neutron test suite, this should be done by inheriting from
neutron.tests.base.BaseTestCase.
All setUp and tearDown methods must upcall using the super() method.
tearDown methods should be avoided and addCleanup calls should be preferred.
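
A minimal illustration of the two rules above (not part of the diff; TestExample and its fixture are hypothetical): inherit from neutron.tests.base.BaseTestCase, upcall super() in setUp, and register cleanups with addCleanup instead of writing tearDown.

from neutron.tests import base


class TestExample(base.BaseTestCase):

    def setUp(self):
        super(TestExample, self).setUp()
        self.resource = {'name': 'demo'}        # hypothetical fixture
        self.addCleanup(self.resource.clear)    # preferred over tearDown

    def test_resource_has_name(self):
        self.assertEqual('demo', self.resource['name'])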


@@ -2,11 +2,11 @@ include AUTHORS
include README.rst
include ChangeLog
include LICENSE
include quantum/db/migration/README
include quantum/db/migration/alembic.ini
include quantum/db/migration/alembic_migrations/script.py.mako
include quantum/db/migration/alembic_migrations/versions/README
recursive-include quantum/locale *
include neutron/db/migration/README
include neutron/db/migration/alembic.ini
include neutron/db/migration/alembic_migrations/script.py.mako
include neutron/db/migration/alembic_migrations/versions/README
recursive-include neutron/locale *
exclude .gitignore
exclude .gitreview


@@ -1,25 +1,25 @@
# -- Welcome!
You have come across a cloud computing network fabric controller. It has
identified itself as "Quantum." It aims to tame your (cloud) networking!
identified itself as "Neutron." It aims to tame your (cloud) networking!
# -- External Resources:
The homepage for Quantum is: http://launchpad.net/quantum . Use this
The homepage for Neutron is: http://launchpad.net/neutron . Use this
site for asking for help, and filing bugs. Code is available on github at
<http://github.com/openstack/quantum>.
<http://github.com/openstack/neutron>.
The latest and most in-depth documentation on how to use Quantum is
The latest and most in-depth documentation on how to use Neutron is
available at: <http://docs.openstack.org>. This includes:
Quantum Administrator Guide
Neutron Administrator Guide
http://docs.openstack.org/trunk/openstack-network/admin/content/
Quantum API Reference:
Neutron API Reference:
http://docs.openstack.org/api/openstack-network/2.0/content/
The start of some developer documentation is available at:
http://wiki.openstack.org/QuantumDevelopment
http://wiki.openstack.org/NeutronDevelopment
For help using or hacking on Quantum, you can send mail to
For help using or hacking on Neutron, you can send mail to
<mailto:openstack-dev@lists.openstack.org>.


@@ -2,22 +2,22 @@
# Show debugging output in log (sets DEBUG log level output)
# debug = true
# The LBaaS agent will resync its state with Quantum to recover from any
# The LBaaS agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is the number of
# seconds between attempts.
# periodic_interval = 10
# OVS based plugins(OVS, Ryu, NEC, NVP, BigSwitch/Floodlight)
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# OVS based plugins(Ryu, NEC, NVP, BigSwitch/Floodlight) that use OVS
# as OpenFlow switch and check port status
# ovs_use_veth = True
# LinuxBridge
# interface_driver = quantum.agent.linux.interface.BridgeInterfaceDriver
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent requires a driver to manage the loadbalancer. HAProxy is the
# opensource version.
#device_driver = quantum.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
#device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).

neutron/__init__.py

@@ -0,0 +1,21 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
gettext.install('neutron', unicode=1)

neutron/db/__init__.py

@@ -0,0 +1,17 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.


@@ -0,0 +1,15 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@@ -0,0 +1,701 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import validates
from neutron.api.v2 import attributes
from neutron.common import exceptions as q_exc
from neutron.db import db_base_plugin_v2 as base_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import loadbalancer
from neutron.extensions.loadbalancer import LoadBalancerPluginBase
from neutron import manager
from neutron.openstack.common.db import exception
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
class SessionPersistence(model_base.BASEV2):
vip_id = sa.Column(sa.String(36),
sa.ForeignKey("vips.id"),
primary_key=True)
type = sa.Column(sa.Enum("SOURCE_IP",
"HTTP_COOKIE",
"APP_COOKIE",
name="sesssionpersistences_type"),
nullable=False)
cookie_name = sa.Column(sa.String(1024))
class PoolStatistics(model_base.BASEV2):
"""Represents pool statistics."""
pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"),
primary_key=True)
bytes_in = sa.Column(sa.Integer, nullable=False)
bytes_out = sa.Column(sa.Integer, nullable=False)
active_connections = sa.Column(sa.Integer, nullable=False)
total_connections = sa.Column(sa.Integer, nullable=False)
@validates('bytes_in', 'bytes_out',
'active_connections', 'total_connections')
def validate_non_negative_int(self, key, value):
if value < 0:
data = {'key': key, 'value': value}
raise ValueError(_('The %(key)s field can not have '
'negative value. '
'Current value is %(value)d.') % data)
return value
class Vip(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron loadbalancer vip."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
protocol_port = sa.Column(sa.Integer, nullable=False)
protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
nullable=False)
pool_id = sa.Column(sa.String(36), nullable=False, unique=True)
session_persistence = orm.relationship(SessionPersistence,
uselist=False,
backref="vips",
cascade="all, delete-orphan")
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
connection_limit = sa.Column(sa.Integer)
port = orm.relationship(models_v2.Port)
class Member(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron loadbalancer member."""
pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"),
nullable=False)
address = sa.Column(sa.String(64), nullable=False)
protocol_port = sa.Column(sa.Integer, nullable=False)
weight = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
class Pool(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron loadbalancer pool."""
vip_id = sa.Column(sa.String(36), sa.ForeignKey("vips.id"))
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
subnet_id = sa.Column(sa.String(36), nullable=False)
protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
nullable=False)
lb_method = sa.Column(sa.Enum("ROUND_ROBIN",
"LEAST_CONNECTIONS",
"SOURCE_IP",
name="pools_lb_method"),
nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
stats = orm.relationship(PoolStatistics,
uselist=False,
backref="pools",
cascade="all, delete-orphan")
members = orm.relationship(Member, backref="pools",
cascade="all, delete-orphan")
monitors = orm.relationship("PoolMonitorAssociation", backref="pools",
cascade="all, delete-orphan")
vip = orm.relationship(Vip, backref='pool')
class HealthMonitor(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron loadbalancer healthmonitor."""
type = sa.Column(sa.Enum("PING", "TCP", "HTTP", "HTTPS",
name="healthmontiors_type"),
nullable=False)
delay = sa.Column(sa.Integer, nullable=False)
timeout = sa.Column(sa.Integer, nullable=False)
max_retries = sa.Column(sa.Integer, nullable=False)
http_method = sa.Column(sa.String(16))
url_path = sa.Column(sa.String(255))
expected_codes = sa.Column(sa.String(64))
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
pools = orm.relationship(
"PoolMonitorAssociation",
backref="healthmonitor",
cascade="all"
)
class PoolMonitorAssociation(model_base.BASEV2):
"""Many-to-many association between pool and healthMonitor classes."""
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("pools.id"),
primary_key=True)
monitor_id = sa.Column(sa.String(36),
sa.ForeignKey("healthmonitors.id"),
primary_key=True)
class LoadBalancerPluginDb(LoadBalancerPluginBase,
base_db.CommonDbMixin):
"""Wraps loadbalancer with SQLAlchemy models.
A class that wraps the implementation of the Neutron loadbalancer
plugin database access interface using SQLAlchemy models.
"""
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
def update_status(self, context, model, id, status):
with context.session.begin(subtransactions=True):
v_db = self._get_resource(context, model, id)
v_db.update({'status': status})
def _get_resource(self, context, model, id):
try:
r = self._get_by_id(context, model, id)
except exc.NoResultFound:
if issubclass(model, Vip):
raise loadbalancer.VipNotFound(vip_id=id)
elif issubclass(model, Pool):
raise loadbalancer.PoolNotFound(pool_id=id)
elif issubclass(model, Member):
raise loadbalancer.MemberNotFound(member_id=id)
elif issubclass(model, HealthMonitor):
raise loadbalancer.HealthMonitorNotFound(monitor_id=id)
else:
raise
return r
def assert_modification_allowed(self, obj):
status = getattr(obj, 'status', None)
if status == constants.PENDING_DELETE:
raise loadbalancer.StateInvalid(id=id, state=status)
########################################################
# VIP DB access
def _make_vip_dict(self, vip, fields=None):
fixed_ip = (vip.port.fixed_ips or [{}])[0]
res = {'id': vip['id'],
'tenant_id': vip['tenant_id'],
'name': vip['name'],
'description': vip['description'],
'subnet_id': fixed_ip.get('subnet_id'),
'address': fixed_ip.get('ip_address'),
'port_id': vip['port_id'],
'protocol_port': vip['protocol_port'],
'protocol': vip['protocol'],
'pool_id': vip['pool_id'],
'connection_limit': vip['connection_limit'],
'admin_state_up': vip['admin_state_up'],
'status': vip['status']}
if vip['session_persistence']:
s_p = {
'type': vip['session_persistence']['type']
}
if vip['session_persistence']['type'] == 'APP_COOKIE':
s_p['cookie_name'] = vip['session_persistence']['cookie_name']
res['session_persistence'] = s_p
return self._fields(res, fields)
def _check_session_persistence_info(self, info):
"""Performs sanity check on session persistence info.
:param info: Session persistence info
"""
if info['type'] == 'APP_COOKIE':
if not info.get('cookie_name'):
raise ValueError(_("'cookie_name' should be specified for this"
" type of session persistence."))
else:
if 'cookie_name' in info:
raise ValueError(_("'cookie_name' is not allowed for this type"
" of session persistence"))
def _create_session_persistence_db(self, session_info, vip_id):
self._check_session_persistence_info(session_info)
sesspersist_db = SessionPersistence(
type=session_info['type'],
cookie_name=session_info.get('cookie_name'),
vip_id=vip_id)
return sesspersist_db
def _update_vip_session_persistence(self, context, vip_id, info):
self._check_session_persistence_info(info)
vip = self._get_resource(context, Vip, vip_id)
with context.session.begin(subtransactions=True):
# Update sessionPersistence table
sess_qry = context.session.query(SessionPersistence)
sesspersist_db = sess_qry.filter_by(vip_id=vip_id).first()
            # Insert a None cookie_name if it is not present, to overwrite
            # an existing value in the database.
if 'cookie_name' not in info:
info['cookie_name'] = None
if sesspersist_db:
sesspersist_db.update(info)
else:
sesspersist_db = SessionPersistence(
type=info['type'],
cookie_name=info['cookie_name'],
vip_id=vip_id)
context.session.add(sesspersist_db)
# Update vip table
vip.session_persistence = sesspersist_db
context.session.add(vip)
def _delete_session_persistence(self, context, vip_id):
with context.session.begin(subtransactions=True):
sess_qry = context.session.query(SessionPersistence)
sess_qry.filter_by(vip_id=vip_id).delete()
def _create_port_for_vip(self, context, vip_db, subnet_id, ip_address):
# resolve subnet and create port
subnet = self._core_plugin.get_subnet(context, subnet_id)
fixed_ip = {'subnet_id': subnet['id']}
if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
fixed_ip['ip_address'] = ip_address
port_data = {
'tenant_id': vip_db.tenant_id,
'name': 'vip-' + vip_db.id,
'network_id': subnet['network_id'],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': '',
'device_owner': '',
'fixed_ips': [fixed_ip]
}
port = self._core_plugin.create_port(context, {'port': port_data})
vip_db.port_id = port['id']
def create_vip(self, context, vip):
v = vip['vip']
tenant_id = self._get_tenant_id_for_create(context, v)
with context.session.begin(subtransactions=True):
if v['pool_id']:
pool = self._get_resource(context, Pool, v['pool_id'])
# validate that the pool has same tenant
if pool['tenant_id'] != tenant_id:
raise q_exc.NotAuthorized()
# validate that the pool has same protocol
if pool['protocol'] != v['protocol']:
raise loadbalancer.ProtocolMismatch(
vip_proto=v['protocol'],
pool_proto=pool['protocol'])
else:
pool = None
vip_db = Vip(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=v['name'],
description=v['description'],
port_id=None,
protocol_port=v['protocol_port'],
protocol=v['protocol'],
pool_id=v['pool_id'],
connection_limit=v['connection_limit'],
admin_state_up=v['admin_state_up'],
status=constants.PENDING_CREATE)
session_info = v['session_persistence']
if session_info:
s_p = self._create_session_persistence_db(
session_info,
vip_db['id'])
vip_db.session_persistence = s_p
try:
context.session.add(vip_db)
context.session.flush()
except exception.DBDuplicateEntry:
raise loadbalancer.VipExists(pool_id=v['pool_id'])
# create a port to reserve address for IPAM
self._create_port_for_vip(
context,
vip_db,
v['subnet_id'],
v.get('address')
)
if pool:
pool['vip_id'] = vip_db['id']
return self._make_vip_dict(vip_db)
def update_vip(self, context, id, vip):
v = vip['vip']
sess_persist = v.pop('session_persistence', None)
with context.session.begin(subtransactions=True):
vip_db = self._get_resource(context, Vip, id)
self.assert_modification_allowed(vip_db)
if sess_persist:
self._update_vip_session_persistence(context, id, sess_persist)
else:
self._delete_session_persistence(context, id)
if v:
try:
# in case new pool already has a vip
# update will raise integrity error at first query
old_pool_id = vip_db['pool_id']
vip_db.update(v)
# If the pool_id is changed, we need to update
# the associated pools
if 'pool_id' in v:
new_pool = self._get_resource(context, Pool,
v['pool_id'])
self.assert_modification_allowed(new_pool)
# check that the pool matches the tenant_id
if new_pool['tenant_id'] != vip_db['tenant_id']:
raise q_exc.NotAuthorized()
# validate that the pool has same protocol
if new_pool['protocol'] != vip_db['protocol']:
raise loadbalancer.ProtocolMismatch(
vip_proto=vip_db['protocol'],
pool_proto=new_pool['protocol'])
if old_pool_id:
old_pool = self._get_resource(
context,
Pool,
old_pool_id
)
old_pool['vip_id'] = None
new_pool['vip_id'] = vip_db['id']
except exception.DBDuplicateEntry:
raise loadbalancer.VipExists(pool_id=v['pool_id'])
return self._make_vip_dict(vip_db)
def delete_vip(self, context, id):
with context.session.begin(subtransactions=True):
vip = self._get_resource(context, Vip, id)
qry = context.session.query(Pool)
for pool in qry.filter_by(vip_id=id):
pool.update({"vip_id": None})
context.session.delete(vip)
if vip.port: # this is a Neutron port
self._core_plugin.delete_port(context, vip.port.id)
def get_vip(self, context, id, fields=None):
vip = self._get_resource(context, Vip, id)
return self._make_vip_dict(vip, fields)
def get_vips(self, context, filters=None, fields=None):
return self._get_collection(context, Vip,
self._make_vip_dict,
filters=filters, fields=fields)
########################################################
# Pool DB access
def _make_pool_dict(self, pool, fields=None):
res = {'id': pool['id'],
'tenant_id': pool['tenant_id'],
'name': pool['name'],
'description': pool['description'],
'subnet_id': pool['subnet_id'],
'protocol': pool['protocol'],
'vip_id': pool['vip_id'],
'lb_method': pool['lb_method'],
'admin_state_up': pool['admin_state_up'],
'status': pool['status']}
# Get the associated members
res['members'] = [member['id'] for member in pool['members']]
# Get the associated health_monitors
res['health_monitors'] = [
monitor['monitor_id'] for monitor in pool['monitors']]
return self._fields(res, fields)
def _update_pool_stats(self, context, pool_id, data=None):
"""Update a pool with new stats structure."""
with context.session.begin(subtransactions=True):
pool_db = self._get_resource(context, Pool, pool_id)
self.assert_modification_allowed(pool_db)
pool_db.stats = self._create_pool_stats(context, pool_id, data)
def _create_pool_stats(self, context, pool_id, data=None):
        # This is an internal method to add pool statistics. It won't
        # be exposed to the API.
if not data:
data = {}
stats_db = PoolStatistics(
pool_id=pool_id,
bytes_in=data.get("bytes_in", 0),
bytes_out=data.get("bytes_out", 0),
active_connections=data.get("active_connections", 0),
total_connections=data.get("total_connections", 0)
)
return stats_db
def _delete_pool_stats(self, context, pool_id):
        # This is an internal method to delete pool statistics. It won't
        # be exposed to the API.
with context.session.begin(subtransactions=True):
stats_qry = context.session.query(PoolStatistics)
try:
stats = stats_qry.filter_by(pool_id=pool_id).one()
except exc.NoResultFound:
raise loadbalancer.PoolStatsNotFound(pool_id=pool_id)
context.session.delete(stats)
def create_pool(self, context, pool):
v = pool['pool']
tenant_id = self._get_tenant_id_for_create(context, v)
with context.session.begin(subtransactions=True):
pool_db = Pool(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=v['name'],
description=v['description'],
subnet_id=v['subnet_id'],
protocol=v['protocol'],
lb_method=v['lb_method'],
admin_state_up=v['admin_state_up'],
status=constants.PENDING_CREATE)
pool_db.stats = self._create_pool_stats(context, pool_db['id'])
context.session.add(pool_db)
pool_db = self._get_resource(context, Pool, pool_db['id'])
return self._make_pool_dict(pool_db)
def update_pool(self, context, id, pool):
p = pool['pool']
with context.session.begin(subtransactions=True):
pool_db = self._get_resource(context, Pool, id)
self.assert_modification_allowed(pool_db)
if p:
pool_db.update(p)
return self._make_pool_dict(pool_db)
def delete_pool(self, context, id):
# Check if the pool is in use
vip = context.session.query(Vip).filter_by(pool_id=id).first()
if vip:
raise loadbalancer.PoolInUse(pool_id=id)
with context.session.begin(subtransactions=True):
self._delete_pool_stats(context, id)
pool_db = self._get_resource(context, Pool, id)
context.session.delete(pool_db)
def get_pool(self, context, id, fields=None):
pool = self._get_resource(context, Pool, id)
return self._make_pool_dict(pool, fields)
def get_pools(self, context, filters=None, fields=None):
collection = self._model_query(context, Pool)
collection = self._apply_filters_to_query(collection, Pool, filters)
return [self._make_pool_dict(c, fields)
for c in collection]
def stats(self, context, pool_id):
with context.session.begin(subtransactions=True):
pool = self._get_resource(context, Pool, pool_id)
stats = pool['stats']
res = {'bytes_in': stats['bytes_in'],
'bytes_out': stats['bytes_out'],
'active_connections': stats['active_connections'],
'total_connections': stats['total_connections']}
return {'stats': res}
def create_pool_health_monitor(self, context, health_monitor, pool_id):
monitor_id = health_monitor['health_monitor']['id']
with context.session.begin(subtransactions=True):
pool = self._get_resource(context, Pool, pool_id)
assoc = PoolMonitorAssociation(pool_id=pool_id,
monitor_id=monitor_id)
pool.monitors.append(assoc)
monitors = [monitor['monitor_id'] for monitor in pool['monitors']]
res = {"health_monitor": monitors}
return res
def delete_pool_health_monitor(self, context, id, pool_id):
with context.session.begin(subtransactions=True):
pool = self._get_resource(context, Pool, pool_id)
try:
monitor_qry = context.session.query(PoolMonitorAssociation)
monitor = monitor_qry.filter_by(monitor_id=id,
pool_id=pool_id).one()
pool.monitors.remove(monitor)
except exc.NoResultFound:
raise loadbalancer.HealthMonitorNotFound(monitor_id=id)
def get_pool_health_monitor(self, context, id, pool_id, fields=None):
# TODO(markmcclain) look into why pool_id is ignored
healthmonitor = self._get_resource(context, HealthMonitor, id)
return self._make_health_monitor_dict(healthmonitor, fields)
########################################################
# Member DB access
def _make_member_dict(self, member, fields=None):
res = {'id': member['id'],
'tenant_id': member['tenant_id'],
'pool_id': member['pool_id'],
'address': member['address'],
'protocol_port': member['protocol_port'],
'weight': member['weight'],
'admin_state_up': member['admin_state_up'],
'status': member['status']}
return self._fields(res, fields)
def create_member(self, context, member):
v = member['member']
tenant_id = self._get_tenant_id_for_create(context, v)
with context.session.begin(subtransactions=True):
# ensuring that pool exists
self._get_resource(context, Pool, v['pool_id'])
member_db = Member(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
pool_id=v['pool_id'],
address=v['address'],
protocol_port=v['protocol_port'],
weight=v['weight'],
admin_state_up=v['admin_state_up'],
status=constants.PENDING_CREATE)
context.session.add(member_db)
return self._make_member_dict(member_db)
def update_member(self, context, id, member):
v = member['member']
with context.session.begin(subtransactions=True):
member_db = self._get_resource(context, Member, id)
self.assert_modification_allowed(member_db)
if v:
member_db.update(v)
return self._make_member_dict(member_db)
def delete_member(self, context, id):
with context.session.begin(subtransactions=True):
member_db = self._get_resource(context, Member, id)
context.session.delete(member_db)
def get_member(self, context, id, fields=None):
member = self._get_resource(context, Member, id)
return self._make_member_dict(member, fields)
def get_members(self, context, filters=None, fields=None):
return self._get_collection(context, Member,
self._make_member_dict,
filters=filters, fields=fields)
########################################################
# HealthMonitor DB access
def _make_health_monitor_dict(self, health_monitor, fields=None):
res = {'id': health_monitor['id'],
'tenant_id': health_monitor['tenant_id'],
'type': health_monitor['type'],
'delay': health_monitor['delay'],
'timeout': health_monitor['timeout'],
'max_retries': health_monitor['max_retries'],
'admin_state_up': health_monitor['admin_state_up'],
'status': health_monitor['status']}
# no point to add the values below to
# the result if the 'type' is not HTTP/S
if res['type'] in ['HTTP', 'HTTPS']:
for attr in ['url_path', 'http_method', 'expected_codes']:
res[attr] = health_monitor[attr]
return self._fields(res, fields)
def create_health_monitor(self, context, health_monitor):
v = health_monitor['health_monitor']
tenant_id = self._get_tenant_id_for_create(context, v)
with context.session.begin(subtransactions=True):
monitor_db = HealthMonitor(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
type=v['type'],
delay=v['delay'],
timeout=v['timeout'],
max_retries=v['max_retries'],
http_method=v['http_method'],
url_path=v['url_path'],
expected_codes=v['expected_codes'],
admin_state_up=v['admin_state_up'],
status=constants.PENDING_CREATE)
context.session.add(monitor_db)
return self._make_health_monitor_dict(monitor_db)
def update_health_monitor(self, context, id, health_monitor):
v = health_monitor['health_monitor']
with context.session.begin(subtransactions=True):
monitor_db = self._get_resource(context, HealthMonitor, id)
self.assert_modification_allowed(monitor_db)
if v:
monitor_db.update(v)
return self._make_health_monitor_dict(monitor_db)
def delete_health_monitor(self, context, id):
with context.session.begin(subtransactions=True):
monitor_db = self._get_resource(context, HealthMonitor, id)
context.session.delete(monitor_db)
def get_health_monitor(self, context, id, fields=None):
healthmonitor = self._get_resource(context, HealthMonitor, id)
return self._make_health_monitor_dict(healthmonitor, fields)
def get_health_monitors(self, context, filters=None, fields=None):
return self._get_collection(context, HealthMonitor,
self._make_health_monitor_dict,
filters=filters, fields=fields)


@@ -0,0 +1,16 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@@ -0,0 +1,16 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@@ -0,0 +1,43 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
LB_METHOD_SOURCE_IP = 'SOURCE_IP'
PROTOCOL_TCP = 'TCP'
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_HTTPS = 'HTTPS'
HEALTH_MONITOR_PING = 'PING'
HEALTH_MONITOR_TCP = 'TCP'
HEALTH_MONITOR_HTTP = 'HTTP'
HEALTH_MONITOR_HTTPS = 'HTTPS'
SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'
STATS_CURRENT_CONNECTIONS = 'CURRENT_CONNECTIONS'
STATS_MAX_CONNECTIONS = 'MAX_CONNECTIONS'
STATS_CURRENT_SESSIONS = 'CURRENT_SESSIONS'
STATS_MAX_SESSIONS = 'MAX_SESSIONS'
STATS_TOTAL_SESSIONS = 'TOTAL_SESSIONS'
STATS_IN_BYTES = 'IN_BYTES'
STATS_OUT_BYTES = 'OUT_BYTES'
STATS_CONNECTION_ERRORS = 'CONNECTION_ERRORS'
STATS_RESPONSE_ERRORS = 'RESPONSE_ERRORS'


@@ -0,0 +1,17 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost


@@ -0,0 +1,131 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
import abc
class LoadBalancerAbstractDriver(object):
"""Abstract lbaas driver that expose ~same API as lbaas plugin.
The configuration elements (Vip,Member,etc) are the dicts that
are returned to the tenant.
Get operations are not part of the API - it will be handled
by the lbaas plugin.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def create_vip(self, context, vip):
"""A real driver would invoke a call to his backend
and set the Vip status to ACTIVE/ERROR according
to the backend call result
self.plugin.update_status(context, Vip, vip["id"],
constants.ACTIVE)
"""
pass
@abc.abstractmethod
def update_vip(self, context, old_vip, vip):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context, Vip, id, constants.ACTIVE)
"""
pass
@abc.abstractmethod
def delete_vip(self, context, vip):
"""A real driver would invoke a call to his backend
and try to delete the Vip.
if the deletion was successfull, delete the record from the database.
if the deletion has failed, set the Vip status to ERROR.
"""
pass
@abc.abstractmethod
def create_pool(self, context, pool):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context, Pool, pool["id"],
constants.ACTIVE)
"""
pass
@abc.abstractmethod
def update_pool(self, context, old_pool, pool):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context,
Pool,
pool["id"], constants.ACTIVE)
"""
pass
@abc.abstractmethod
def delete_pool(self, context, pool):
"""Driver can call the code below in order to delete the pool.
self.plugin._delete_db_pool(context, pool["id"])
or set the status to ERROR if deletion failed
"""
pass
@abc.abstractmethod
def stats(self, context, pool_id):
pass
@abc.abstractmethod
def create_member(self, context, member):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context, Member, member["id"],
constants.ACTIVE)
"""
pass
@abc.abstractmethod
def update_member(self, context, old_member, member):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context, Member,
member["id"], constants.ACTIVE)
"""
pass
@abc.abstractmethod
def delete_member(self, context, member):
pass
@abc.abstractmethod
def create_health_monitor(self, context, health_monitor):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context, HealthMonitor,
health_monitor["id"],
constants.ACTIVE)
"""
pass
@abc.abstractmethod
def update_health_monitor(self, context,
old_health_monitor,
health_monitor,
pool_id):
pass
@abc.abstractmethod
def create_pool_health_monitor(self, context,
health_monitor,
pool_id):
pass
@abc.abstractmethod
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
pass


@@ -0,0 +1,17 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost


@@ -0,0 +1,70 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import eventlet
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.common import legacy
from neutron.openstack.common.rpc import service as rpc_service
from neutron.openstack.common import service
from neutron.services.loadbalancer.drivers.haproxy import (
agent_manager as manager,
plugin_driver
)
OPTS = [
cfg.IntOpt(
'periodic_interval',
default=10,
help=_('Seconds between periodic task runs')
)
]
class LbaasAgentService(rpc_service.Service):
def start(self):
super(LbaasAgentService, self).start()
self.tg.add_timer(
cfg.CONF.periodic_interval,
self.manager.run_periodic_tasks,
None,
None
)
def main():
eventlet.monkey_patch()
cfg.CONF.register_opts(OPTS)
cfg.CONF.register_opts(manager.OPTS)
# import interface options just in case the driver uses namespaces
cfg.CONF.register_opts(interface.OPTS)
config.register_root_helper(cfg.CONF)
cfg.CONF(project='neutron')
legacy.modernize_quantum_config(cfg.CONF)
config.setup_logging(cfg.CONF)
mgr = manager.LbaasAgentManager(cfg.CONF)
svc = LbaasAgentService(
host=cfg.CONF.host,
topic=plugin_driver.TOPIC_LOADBALANCER_AGENT,
manager=mgr
)
service.launch(svc).wait()


@@ -0,0 +1,81 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
from neutron.openstack.common.rpc import proxy
class LbaasAgentApi(proxy.RpcProxy):
"""Agent side of the Agent to Plugin RPC API."""
API_VERSION = '1.0'
def __init__(self, topic, context, host):
super(LbaasAgentApi, self).__init__(topic, self.API_VERSION)
self.context = context
self.host = host
def get_ready_devices(self):
return self.call(
self.context,
self.make_msg('get_ready_devices', host=self.host),
topic=self.topic
)
def get_logical_device(self, pool_id):
return self.call(
self.context,
self.make_msg(
'get_logical_device',
pool_id=pool_id,
host=self.host
),
topic=self.topic
)
def pool_destroyed(self, pool_id):
return self.call(
self.context,
self.make_msg('pool_destroyed', pool_id=pool_id, host=self.host),
topic=self.topic
)
def plug_vip_port(self, port_id):
return self.call(
self.context,
self.make_msg('plug_vip_port', port_id=port_id, host=self.host),
topic=self.topic
)
def unplug_vip_port(self, port_id):
return self.call(
self.context,
self.make_msg('unplug_vip_port', port_id=port_id, host=self.host),
topic=self.topic
)
def update_pool_stats(self, pool_id, stats):
return self.call(
self.context,
self.make_msg(
'update_pool_stats',
pool_id=pool_id,
stats=stats,
host=self.host
),
topic=self.topic
)


@@ -0,0 +1,230 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import weakref
from oslo.config import cfg
from neutron.agent.common import config
from neutron import context
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import periodic_task
from neutron.services.loadbalancer.drivers.haproxy import (
agent_api,
plugin_driver
)
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qlbaas-'
OPTS = [
cfg.StrOpt(
'device_driver',
default=('neutron.services.loadbalancer.drivers'
'.haproxy.namespace_driver.HaproxyNSDriver'),
help=_('The driver used to manage the loadbalancing device'),
),
cfg.StrOpt(
'loadbalancer_state_path',
default='$state_path/lbaas',
help=_('Location to store config and state files'),
),
cfg.StrOpt(
'interface_driver',
help=_('The driver used to manage the virtual interface')
),
cfg.StrOpt(
'user_group',
default='nogroup',
help=_('The user group'),
),
]
class LogicalDeviceCache(object):
"""Manage a cache of known devices."""
class Device(object):
"""Inner classes used to hold values for weakref lookups."""
def __init__(self, port_id, pool_id):
self.port_id = port_id
self.pool_id = pool_id
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __hash__(self):
return hash((self.port_id, self.pool_id))
def __init__(self):
self.devices = set()
self.port_lookup = weakref.WeakValueDictionary()
self.pool_lookup = weakref.WeakValueDictionary()
def put(self, device):
port_id = device['vip']['port_id']
pool_id = device['pool']['id']
d = self.Device(device['vip']['port_id'], device['pool']['id'])
if d not in self.devices:
self.devices.add(d)
self.port_lookup[port_id] = d
self.pool_lookup[pool_id] = d
def remove(self, device):
if not isinstance(device, self.Device):
device = self.Device(
device['vip']['port_id'], device['pool']['id']
)
if device in self.devices:
self.devices.remove(device)
def remove_by_pool_id(self, pool_id):
d = self.pool_lookup.get(pool_id)
if d:
self.devices.remove(d)
def get_by_pool_id(self, pool_id):
return self.pool_lookup.get(pool_id)
def get_by_port_id(self, port_id):
return self.port_lookup.get(port_id)
def get_pool_ids(self):
return self.pool_lookup.keys()
class LbaasAgentManager(periodic_task.PeriodicTasks):
def __init__(self, conf):
self.conf = conf
try:
vif_driver = importutils.import_object(conf.interface_driver, conf)
except ImportError:
# the driver is optional
msg = _('Error importing interface driver: %s')
raise SystemExit(msg % conf.interface_driver)
vif_driver = None
try:
self.driver = importutils.import_object(
conf.device_driver,
config.get_root_helper(self.conf),
conf.loadbalancer_state_path,
vif_driver,
self._vip_plug_callback
)
except ImportError:
msg = _('Error importing loadbalancer device driver: %s')
raise SystemExit(msg % conf.device_driver)
ctx = context.get_admin_context_without_session()
self.plugin_rpc = agent_api.LbaasAgentApi(
plugin_driver.TOPIC_PROCESS_ON_HOST,
ctx,
conf.host
)
self.needs_resync = False
self.cache = LogicalDeviceCache()
def initialize_service_hook(self, started_by):
self.sync_state()
@periodic_task.periodic_task
def periodic_resync(self, context):
if self.needs_resync:
self.needs_resync = False
self.sync_state()
@periodic_task.periodic_task(spacing=6)
def collect_stats(self, context):
for pool_id in self.cache.get_pool_ids():
try:
stats = self.driver.get_stats(pool_id)
if stats:
self.plugin_rpc.update_pool_stats(pool_id, stats)
except Exception:
                LOG.exception(_('Error updating stats'))
self.needs_resync = True
def _vip_plug_callback(self, action, port):
if action == 'plug':
self.plugin_rpc.plug_vip_port(port['id'])
elif action == 'unplug':
self.plugin_rpc.unplug_vip_port(port['id'])
def sync_state(self):
known_devices = set(self.cache.get_pool_ids())
try:
ready_logical_devices = set(self.plugin_rpc.get_ready_devices())
for deleted_id in known_devices - ready_logical_devices:
self.destroy_device(deleted_id)
for pool_id in ready_logical_devices:
self.refresh_device(pool_id)
except Exception:
LOG.exception(_('Unable to retrieve ready devices'))
self.needs_resync = True
self.remove_orphans()
def refresh_device(self, pool_id):
try:
logical_config = self.plugin_rpc.get_logical_device(pool_id)
if self.driver.exists(pool_id):
self.driver.update(logical_config)
else:
self.driver.create(logical_config)
self.cache.put(logical_config)
except Exception:
LOG.exception(_('Unable to refresh device for pool: %s'), pool_id)
self.needs_resync = True
def destroy_device(self, pool_id):
device = self.cache.get_by_pool_id(pool_id)
if not device:
return
try:
self.driver.destroy(pool_id)
self.plugin_rpc.pool_destroyed(pool_id)
except Exception:
LOG.exception(_('Unable to destroy device for pool: %s'), pool_id)
self.needs_resync = True
self.cache.remove(device)
def remove_orphans(self):
try:
self.driver.remove_orphans(self.cache.get_pool_ids())
except NotImplementedError:
pass # Not all drivers will support this
def reload_pool(self, context, pool_id=None, host=None):
"""Handle RPC cast from plugin to reload a pool."""
if pool_id:
self.refresh_device(pool_id)
def modify_pool(self, context, pool_id=None, host=None):
"""Handle RPC cast from plugin to modify a pool if known to agent."""
if self.cache.get_by_pool_id(pool_id):
self.refresh_device(pool_id)
def destroy_pool(self, context, pool_id=None, host=None):
"""Handle RPC cast from plugin to destroy a pool if known to agent."""
if self.cache.get_by_pool_id(pool_id):
self.destroy_device(pool_id)


@@ -0,0 +1,228 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import itertools
from oslo.config import cfg
from neutron.agent.linux import utils
from neutron.plugins.common import constants as qconstants
from neutron.services.loadbalancer import constants
PROTOCOL_MAP = {
constants.PROTOCOL_TCP: 'tcp',
constants.PROTOCOL_HTTP: 'http',
constants.PROTOCOL_HTTPS: 'tcp',
}
BALANCE_MAP = {
constants.LB_METHOD_ROUND_ROBIN: 'roundrobin',
constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
constants.LB_METHOD_SOURCE_IP: 'source'
}
STATS_MAP = {
constants.STATS_CURRENT_CONNECTIONS: 'qcur',
constants.STATS_MAX_CONNECTIONS: 'qmax',
constants.STATS_CURRENT_SESSIONS: 'scur',
constants.STATS_MAX_SESSIONS: 'smax',
constants.STATS_TOTAL_SESSIONS: 'stot',
constants.STATS_IN_BYTES: 'bin',
constants.STATS_OUT_BYTES: 'bout',
constants.STATS_CONNECTION_ERRORS: 'econ',
constants.STATS_RESPONSE_ERRORS: 'eresp'
}
ACTIVE = qconstants.ACTIVE
def save_config(conf_path, logical_config, socket_path=None):
"""Convert a logical configuration to the HAProxy version."""
data = []
data.extend(_build_global(logical_config, socket_path=socket_path))
data.extend(_build_defaults(logical_config))
data.extend(_build_frontend(logical_config))
data.extend(_build_backend(logical_config))
utils.replace_file(conf_path, '\n'.join(data))
def _build_global(config, socket_path=None):
opts = [
'daemon',
'user nobody',
'group %s' % cfg.CONF.user_group,
'log /dev/log local0',
'log /dev/log local1 notice'
]
if socket_path:
opts.append('stats socket %s mode 0666 level user' % socket_path)
return itertools.chain(['global'], ('\t' + o for o in opts))
def _build_defaults(config):
opts = [
'log global',
'retries 3',
'option redispatch',
'timeout connect 5000',
'timeout client 50000',
'timeout server 50000',
]
return itertools.chain(['defaults'], ('\t' + o for o in opts))
def _build_frontend(config):
protocol = config['vip']['protocol']
opts = [
'option tcplog',
'bind %s:%d' % (
_get_first_ip_from_port(config['vip']['port']),
config['vip']['protocol_port']
),
'mode %s' % PROTOCOL_MAP[protocol],
'default_backend %s' % config['pool']['id'],
]
if config['vip']['connection_limit'] >= 0:
opts.append('maxconn %s' % config['vip']['connection_limit'])
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
return itertools.chain(
['frontend %s' % config['vip']['id']],
('\t' + o for o in opts)
)
def _build_backend(config):
protocol = config['pool']['protocol']
lb_method = config['pool']['lb_method']
opts = [
'mode %s' % PROTOCOL_MAP[protocol],
'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin')
]
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
# add the first health_monitor (if available)
server_addon, health_opts = _get_server_health_option(config)
opts.extend(health_opts)
# add session persistence (if available)
persist_opts = _get_session_persistence(config)
opts.extend(persist_opts)
# add the members
for member in config['members']:
if member['status'] == ACTIVE and member['admin_state_up']:
server = (('server %(id)s %(address)s:%(protocol_port)s '
'weight %(weight)s') % member) + server_addon
if _has_http_cookie_persistence(config):
server += ' cookie %d' % config['members'].index(member)
opts.append(server)
return itertools.chain(
['backend %s' % config['pool']['id']],
('\t' + o for o in opts)
)
def _get_first_ip_from_port(port):
for fixed_ip in port['fixed_ips']:
return fixed_ip['ip_address']
def _get_server_health_option(config):
"""return the first active health option."""
for monitor in config['healthmonitors']:
if monitor['status'] == ACTIVE and monitor['admin_state_up']:
break
else:
return '', []
server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor
opts = [
'timeout check %ds' % monitor['timeout']
]
if monitor['type'] in (constants.HEALTH_MONITOR_HTTP,
constants.HEALTH_MONITOR_HTTPS):
opts.append('option httpchk %(http_method)s %(url_path)s' % monitor)
opts.append(
'http-check expect rstatus %s' %
'|'.join(_expand_expected_codes(monitor['expected_codes']))
)
if monitor['type'] == constants.HEALTH_MONITOR_HTTPS:
opts.append('option ssl-hello-chk')
return server_addon, opts
def _get_session_persistence(config):
persistence = config['vip'].get('session_persistence')
if not persistence:
return []
opts = []
if persistence['type'] == constants.SESSION_PERSISTENCE_SOURCE_IP:
opts.append('stick-table type ip size 10k')
opts.append('stick on src')
elif persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE:
opts.append('cookie SRV insert indirect nocache')
elif (persistence['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and
persistence.get('cookie_name')):
opts.append('appsession %s len 56 timeout 3h' %
persistence['cookie_name'])
return opts
def _has_http_cookie_persistence(config):
return (config['vip'].get('session_persistence') and
config['vip']['session_persistence']['type'] ==
constants.SESSION_PERSISTENCE_HTTP_COOKIE)
def _expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
retval.update(str(i) for i in xrange(int(low), int(hi) + 1))
else:
retval.add(code)
return retval
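
A quick usage illustration of the helper above (not part of the diff): _expand_expected_codes() accepts both ranges and comma- or space-separated codes and returns the expanded set of code strings.

print(_expand_expected_codes('200-202'))    # set(['200', '201', '202'])  (order may vary)
print(_expand_expected_codes('200, 203'))   # set(['200', '203'])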


@@ -0,0 +1,218 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
import shutil
import socket
import netaddr
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import log as logging
from neutron.services.loadbalancer.drivers.haproxy import cfg as hacfg
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qlbaas-'
class HaproxyNSDriver(object):
def __init__(self, root_helper, state_path, vif_driver, vip_plug_callback):
self.root_helper = root_helper
self.state_path = state_path
self.vif_driver = vif_driver
self.vip_plug_callback = vip_plug_callback
self.pool_to_port_id = {}
def create(self, logical_config):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
self._plug(namespace, logical_config['vip']['port'])
self._spawn(logical_config)
def update(self, logical_config):
pool_id = logical_config['pool']['id']
pid_path = self._get_state_file_path(pool_id, 'pid')
extra_args = ['-sf']
extra_args.extend(p.strip() for p in open(pid_path, 'r'))
self._spawn(logical_config, extra_args)
def _spawn(self, logical_config, extra_cmd_args=()):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
conf_path = self._get_state_file_path(pool_id, 'conf')
pid_path = self._get_state_file_path(pool_id, 'pid')
sock_path = self._get_state_file_path(pool_id, 'sock')
hacfg.save_config(conf_path, logical_config, sock_path)
cmd = ['haproxy', '-f', conf_path, '-p', pid_path]
cmd.extend(extra_cmd_args)
ns = ip_lib.IPWrapper(self.root_helper, namespace)
ns.netns.execute(cmd)
# remember the pool<>port mapping
self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id']
def destroy(self, pool_id):
namespace = get_ns_name(pool_id)
ns = ip_lib.IPWrapper(self.root_helper, namespace)
pid_path = self._get_state_file_path(pool_id, 'pid')
# kill the process
kill_pids_in_file(self.root_helper, pid_path)
# unplug the ports
if pool_id in self.pool_to_port_id:
self._unplug(namespace, self.pool_to_port_id[pool_id])
# remove the configuration directory
conf_dir = os.path.dirname(self._get_state_file_path(pool_id, ''))
if os.path.isdir(conf_dir):
shutil.rmtree(conf_dir)
ns.garbage_collect_namespace()
def exists(self, pool_id):
namespace = get_ns_name(pool_id)
root_ns = ip_lib.IPWrapper(self.root_helper)
socket_path = self._get_state_file_path(pool_id, 'sock')
if root_ns.netns.exists(namespace) and os.path.exists(socket_path):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
return True
except socket.error:
pass
return False
def get_stats(self, pool_id):
socket_path = self._get_state_file_path(pool_id, 'sock')
if os.path.exists(socket_path):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
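# 'show stat -1 2 -1' asks the stats socket for CSV stats of all
# proxies, backend rows only (type 2), all servers (per the haproxy
# stats socket interface)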
s.send('show stat -1 2 -1\n')
raw_stats = ''
chunk_size = 1024
while True:
chunk = s.recv(chunk_size)
raw_stats += chunk
if len(chunk) < chunk_size:
break
return self._parse_stats(raw_stats)
except socket.error as e:
LOG.warn(_('Error while connecting to stats socket: %s') % e)
return {}
else:
LOG.warn(_('Stats socket not found for pool %s') % pool_id)
return {}
def _parse_stats(self, raw_stats):
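# haproxy emits CSV: a '# '-prefixed header line followed by rows of
# values; only the first data row (the backend) is parsed here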
stat_lines = raw_stats.splitlines()
if len(stat_lines) < 2:
return {}
stat_names = [line.strip('# ') for line in stat_lines[0].split(',')]
stat_values = [line.strip() for line in stat_lines[1].split(',')]
stats = dict(zip(stat_names, stat_values))
unified_stats = {}
for stat in hacfg.STATS_MAP:
unified_stats[stat] = stats.get(hacfg.STATS_MAP[stat], '')
return unified_stats
def remove_orphans(self, known_pool_ids):
raise NotImplementedError()
def _get_state_file_path(self, pool_id, kind, ensure_state_dir=True):
"""Returns the file name for a given kind of config file."""
confs_dir = os.path.abspath(os.path.normpath(self.state_path))
conf_dir = os.path.join(confs_dir, pool_id)
if ensure_state_dir:
if not os.path.isdir(conf_dir):
os.makedirs(conf_dir, 0o755)
return os.path.join(conf_dir, kind)
def _plug(self, namespace, port, reuse_existing=True):
self.vip_plug_callback('plug', port)
interface_name = self.vif_driver.get_device_name(Wrap(port))
if ip_lib.device_exists(interface_name, self.root_helper, namespace):
if not reuse_existing:
raise exceptions.PreexistingDeviceFailure(
dev_name=interface_name
)
else:
self.vif_driver.plug(
port['network_id'],
port['id'],
interface_name,
port['mac_address'],
namespace=namespace
)
cidrs = [
'%s/%s' % (ip['ip_address'],
netaddr.IPNetwork(ip['subnet']['cidr']).prefixlen)
for ip in port['fixed_ips']
]
self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace)
gw_ip = port['fixed_ips'][0]['subnet'].get('gateway_ip')
if gw_ip:
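# install a default route inside the namespace; the exit code is not
# checked, so a pre-existing default route does not raise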
cmd = ['route', 'add', 'default', 'gw', gw_ip]
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=namespace)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def _unplug(self, namespace, port_id):
port_stub = {'id': port_id}
self.vip_plug_callback('unplug', port_stub)
interface_name = self.vif_driver.get_device_name(Wrap(port_stub))
self.vif_driver.unplug(interface_name, namespace=namespace)
# NOTE(markmcclain): for compliance with interface.py, which expects objects
class Wrap(object):
"""A light attribute wrapper for compatibility with the interface lib."""
def __init__(self, d):
self.__dict__.update(d)
def __getitem__(self, key):
return self.__dict__[key]
def get_ns_name(namespace_id):
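# e.g. get_ns_name('<pool uuid>') -> 'qlbaas-<pool uuid>'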
return NS_PREFIX + namespace_id
def kill_pids_in_file(root_helper, pid_path):
if os.path.exists(pid_path):
with open(pid_path, 'r') as pids:
for pid in pids:
pid = pid.strip()
try:
utils.execute(['kill', '-9', pid], root_helper)
except RuntimeError:
LOG.exception(
_('Unable to kill haproxy process: %s'),
pid
)

View File

@ -0,0 +1,299 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import uuid
from oslo.config import cfg
from neutron.common import exceptions as q_exc
from neutron.common import rpc as q_rpc
from neutron.db.loadbalancer import loadbalancer_db
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
LOG = logging.getLogger(__name__)
ACTIVE_PENDING = (
constants.ACTIVE,
constants.PENDING_CREATE,
constants.PENDING_UPDATE
)
# topic names for this particular agent implementation
TOPIC_PROCESS_ON_HOST = 'q-lbaas-process-on-host'
TOPIC_LOADBALANCER_AGENT = 'lbaas_process_on_host_agent'
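# TOPIC_PROCESS_ON_HOST is consumed by the plugin (agent -> plugin RPC);
# TOPIC_LOADBALANCER_AGENT is the topic the plugin casts to agents on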
class LoadBalancerCallbacks(object):
RPC_API_VERSION = '1.0'
def __init__(self, plugin):
self.plugin = plugin
def create_rpc_dispatcher(self):
return q_rpc.PluginRpcDispatcher([self])
def get_ready_devices(self, context, host=None):
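# return the ids of pools whose pool and vip are both ACTIVE or
# PENDING_* and administratively up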
with context.session.begin(subtransactions=True):
qry = (context.session.query(loadbalancer_db.Pool.id).
join(loadbalancer_db.Vip))
qry = qry.filter(loadbalancer_db.Vip.status.in_(ACTIVE_PENDING))
qry = qry.filter(loadbalancer_db.Pool.status.in_(ACTIVE_PENDING))
up = True # makes pep8 and sqlalchemy happy
qry = qry.filter(loadbalancer_db.Vip.admin_state_up == up)
qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up)
return [id for id, in qry]
def get_logical_device(self, context, pool_id=None, activate=True,
**kwargs):
with context.session.begin(subtransactions=True):
qry = context.session.query(loadbalancer_db.Pool)
qry = qry.filter_by(id=pool_id)
pool = qry.one()
if activate:
# set all resources to active
if pool.status in ACTIVE_PENDING:
pool.status = constants.ACTIVE
if pool.vip.status in ACTIVE_PENDING:
pool.vip.status = constants.ACTIVE
for m in pool.members:
if m.status in ACTIVE_PENDING:
m.status = constants.ACTIVE
for hm in pool.monitors:
if hm.healthmonitor.status in ACTIVE_PENDING:
hm.healthmonitor.status = constants.ACTIVE
if (pool.status != constants.ACTIVE
or pool.vip.status != constants.ACTIVE):
raise q_exc.Invalid(_('Expected active pool and vip'))
retval = {}
retval['pool'] = self.plugin._make_pool_dict(pool)
retval['vip'] = self.plugin._make_vip_dict(pool.vip)
retval['vip']['port'] = (
self.plugin._core_plugin._make_port_dict(pool.vip.port)
)
for fixed_ip in retval['vip']['port']['fixed_ips']:
fixed_ip['subnet'] = (
self.plugin._core_plugin.get_subnet(
context,
fixed_ip['subnet_id']
)
)
retval['members'] = [
self.plugin._make_member_dict(m)
for m in pool.members if m.status == constants.ACTIVE
]
retval['healthmonitors'] = [
self.plugin._make_health_monitor_dict(hm.healthmonitor)
for hm in pool.monitors
if hm.healthmonitor.status == constants.ACTIVE
]
return retval
def pool_destroyed(self, context, pool_id=None, host=None):
"""Agent confirmation hook that a pool has been destroyed.
This method exists for subclasses to change the deletion
behavior.
"""
pass
def plug_vip_port(self, context, port_id=None, host=None):
if not port_id:
return
try:
port = self.plugin._core_plugin.get_port(
context,
port_id
)
except q_exc.PortNotFound:
msg = _('Unable to find port %s to plug.')
LOG.debug(msg, port_id)
return
port['admin_state_up'] = True
port['device_owner'] = 'neutron:' + constants.LOADBALANCER
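# uuid5 is name-based and deterministic, so a given host always
# maps to the same device_id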
port['device_id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host)))
self.plugin._core_plugin.update_port(
context,
port_id,
{'port': port}
)
def unplug_vip_port(self, context, port_id=None, host=None):
if not port_id:
return
try:
port = self.plugin._core_plugin.get_port(
context,
port_id
)
except q_exc.PortNotFound:
msg = _('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.')
LOG.debug(msg, port_id)
return
port['admin_state_up'] = False
port['device_owner'] = ''
port['device_id'] = ''
try:
self.plugin._core_plugin.update_port(
context,
port_id,
{'port': port}
)
except q_exc.PortNotFound:
msg = _('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.')
LOG.debug(msg, port_id)
def update_pool_stats(self, context, pool_id=None, stats=None, host=None):
# TODO(markmcclain): add stats collection
pass
class LoadBalancerAgentApi(proxy.RpcProxy):
"""Plugin side of plugin to agent RPC API."""
API_VERSION = '1.0'
def __init__(self, topic, host):
super(LoadBalancerAgentApi, self).__init__(topic, self.API_VERSION)
self.host = host
def reload_pool(self, context, pool_id):
return self.cast(
context,
self.make_msg('reload_pool', pool_id=pool_id, host=self.host),
topic=self.topic
)
def destroy_pool(self, context, pool_id):
return self.cast(
context,
self.make_msg('destroy_pool', pool_id=pool_id, host=self.host),
topic=self.topic
)
def modify_pool(self, context, pool_id):
return self.cast(
context,
self.make_msg('modify_pool', pool_id=pool_id, host=self.host),
topic=self.topic
)
class HaproxyOnHostPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
def __init__(self, plugin):
self.agent_rpc = LoadBalancerAgentApi(
TOPIC_LOADBALANCER_AGENT,
cfg.CONF.host
)
self.callbacks = LoadBalancerCallbacks(plugin)
self.conn = rpc.create_connection(new=True)
self.conn.create_consumer(
TOPIC_PROCESS_ON_HOST,
self.callbacks.create_rpc_dispatcher(),
fanout=False)
self.conn.consume_in_thread()
self.plugin = plugin
def create_vip(self, context, vip):
self.agent_rpc.reload_pool(context, vip['pool_id'])
def update_vip(self, context, old_vip, vip):
if vip['status'] in ACTIVE_PENDING:
self.agent_rpc.reload_pool(context, vip['pool_id'])
else:
self.agent_rpc.destroy_pool(context, vip['pool_id'])
def delete_vip(self, context, vip):
self.plugin._delete_db_vip(context, vip['id'])
self.agent_rpc.destroy_pool(context, vip['pool_id'])
def create_pool(self, context, pool):
# don't notify here because a pool needs a vip to be useful
pass
def update_pool(self, context, old_pool, pool):
if pool['status'] in ACTIVE_PENDING:
if pool['vip_id'] is not None:
self.agent_rpc.reload_pool(context, pool['id'])
else:
self.agent_rpc.destroy_pool(context, pool['id'])
def delete_pool(self, context, pool):
self.plugin._delete_db_pool(context, pool['id'])
self.agent_rpc.destroy_pool(context, pool['id'])
def create_member(self, context, member):
self.agent_rpc.modify_pool(context, member['pool_id'])
def update_member(self, context, old_member, member):
# member may change pool id
if member['pool_id'] != old_member['pool_id']:
self.agent_rpc.modify_pool(context, old_member['pool_id'])
self.agent_rpc.modify_pool(context, member['pool_id'])
def delete_member(self, context, member):
self.plugin._delete_db_member(context, member['id'])
self.agent_rpc.modify_pool(context, member['pool_id'])
def update_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
# monitors are unused here because the agent will fetch what it needs
self.agent_rpc.modify_pool(context, pool_id)
def delete_health_monitor(self, context, healthmon_id, pool_id):
# healthmon_id is not used in this driver
self.agent_rpc.modify_pool(context, pool_id)
def create_pool_health_monitor(self, context, healthmon, pool_id):
# healthmon is not used here
self.agent_rpc.modify_pool(context, pool_id)
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
self.plugin._delete_db_pool_health_monitor(
context, health_monitor['id'], pool_id
)
# health_monitor is not passed to the agent; it refetches what it needs
self.agent_rpc.modify_pool(context, pool_id)
def create_health_monitor(self, context, health_monitor):
pass
def stats(self, context, pool_id):
pass

View File

@ -0,0 +1,16 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -0,0 +1,112 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
from neutron.openstack.common import log as logging
from neutron.services.loadbalancer.drivers import (
abstract_driver
)
LOG = logging.getLogger(__name__)
def log(method):
def wrapper(*args, **kwargs):
data = {"method_name": method.__name__, "args": args, "kwargs": kwargs}
LOG.debug(_('NoopLbaaSDriver method %(method_name)s '
'called with arguments %(args)s %(kwargs)s')
% data)
return method(*args, **kwargs)
return wrapper
class NoopLbaaSDriver(abstract_driver.LoadBalancerAbstractDriver):
"""A dummy lbass driver that:
1) Logs methods input
2) Uses the plugin API in order to update
the config elements status in DB
"""
def __init__(self, plugin):
self.plugin = plugin
@log
def create_vip(self, context, vip):
pass
@log
def update_vip(self, context, old_vip, vip):
pass
@log
def delete_vip(self, context, vip):
self.plugin._delete_db_vip(context, vip["id"])
@log
def create_pool(self, context, pool):
pass
@log
def update_pool(self, context, old_pool, pool):
pass
@log
def delete_pool(self, context, pool):
pass
@log
def stats(self, context, pool_id):
return {"bytes_in": 0,
"bytes_out": 0,
"active_connections": 0,
"total_connections": 0}
@log
def create_member(self, context, member):
pass
@log
def update_member(self, context, old_member, member):
pass
@log
def delete_member(self, context, member):
self.plugin._delete_db_member(context, member["id"])
@log
def create_health_monitor(self, context, health_monitor):
pass
@log
def update_health_monitor(self, context, old_health_monitor,
health_monitor,
pool_association):
pass
@log
def create_pool_health_monitor(self, context,
health_monitor, pool_id):
pass
@log
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
self.plugin._delete_db_pool_health_monitor(
context, health_monitor["id"],
pool_id
)

View File

@ -0,0 +1,233 @@
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
from oslo.config import cfg
from neutron.db import api as qdbapi
from neutron.db.loadbalancer import loadbalancer_db
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
DEFAULT_DRIVER = ("neutron.services.loadbalancer.drivers.haproxy"
".plugin_driver.HaproxyOnHostPluginDriver")
lbaas_plugin_opts = [
cfg.StrOpt('driver_fqn',
default=DEFAULT_DRIVER,
help=_('LBaaS driver Fully Qualified Name'))
]
cfg.CONF.register_opts(lbaas_plugin_opts, "LBAAS")
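# Illustrative neutron.conf snippet selecting the default driver:
#   [LBAAS]
#   driver_fqn = neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver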
class LoadBalancerPlugin(loadbalancer_db.LoadBalancerPluginDb):
"""Implementation of the Neutron Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
supported_extension_aliases = ["lbaas"]
def __init__(self):
"""Initialization for the loadbalancer service plugin."""
qdbapi.register_models()
self._load_drivers()
def _load_drivers(self):
"""Loads plugin-driver from configuration.
That method will later leverage service type framework
"""
try:
self.driver = importutils.import_object(
cfg.CONF.LBAAS.driver_fqn, self
)
except ImportError:
LOG.exception(_("Error loading LBaaS driver %s"),
cfg.CONF.LBAAS.driver_fqn)
def get_plugin_type(self):
return constants.LOADBALANCER
def get_plugin_description(self):
return "Neutron LoadBalancer Service Plugin"
def create_vip(self, context, vip):
v = super(LoadBalancerPlugin, self).create_vip(context, vip)
self.driver.create_vip(context, v)
return v
def update_vip(self, context, id, vip):
if 'status' not in vip['vip']:
vip['vip']['status'] = constants.PENDING_UPDATE
old_vip = self.get_vip(context, id)
v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
self.driver.update_vip(context, old_vip, v)
return v
def _delete_db_vip(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_vip(context, id)
def delete_vip(self, context, id):
self.update_status(context, loadbalancer_db.Vip,
id, constants.PENDING_DELETE)
v = self.get_vip(context, id)
self.driver.delete_vip(context, v)
def create_pool(self, context, pool):
p = super(LoadBalancerPlugin, self).create_pool(context, pool)
self.driver.create_pool(context, p)
return p
def update_pool(self, context, id, pool):
if 'status' not in pool['pool']:
pool['pool']['status'] = constants.PENDING_UPDATE
old_pool = self.get_pool(context, id)
p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
self.driver.update_pool(context, old_pool, p)
return p
def _delete_db_pool(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_pool(context, id)
def delete_pool(self, context, id):
self.update_status(context, loadbalancer_db.Pool,
id, constants.PENDING_DELETE)
p = self.get_pool(context, id)
self.driver.delete_pool(context, p)
def create_member(self, context, member):
m = super(LoadBalancerPlugin, self).create_member(context, member)
self.driver.create_member(context, m)
return m
def update_member(self, context, id, member):
if 'status' not in member['member']:
member['member']['status'] = constants.PENDING_UPDATE
old_member = self.get_member(context, id)
m = super(LoadBalancerPlugin, self).update_member(context, id, member)
self.driver.update_member(context, old_member, m)
return m
def _delete_db_member(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_member(context, id)
def delete_member(self, context, id):
self.update_status(context, loadbalancer_db.Member,
id, constants.PENDING_DELETE)
m = self.get_member(context, id)
self.driver.delete_member(context, m)
def create_health_monitor(self, context, health_monitor):
# no PENDING_CREATE status since a health monitor is a shared DB object
hm = super(LoadBalancerPlugin, self).create_health_monitor(
context,
health_monitor
)
self.driver.create_health_monitor(context, hm)
return hm
def update_health_monitor(self, context, id, health_monitor):
if 'status' not in health_monitor['health_monitor']:
health_monitor['health_monitor']['status'] = (
constants.PENDING_UPDATE
)
old_hm = self.get_health_monitor(context, id)
hm = super(LoadBalancerPlugin, self).update_health_monitor(
context,
id,
health_monitor
)
with context.session.begin(subtransactions=True):
qry = context.session.query(
loadbalancer_db.PoolMonitorAssociation
).filter_by(monitor_id=hm['id'])
for assoc in qry:
self.driver.update_health_monitor(context, old_hm,
hm, assoc['pool_id'])
return hm
def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
super(LoadBalancerPlugin, self).delete_pool_health_monitor(context,
hm_id,
pool_id)
def delete_health_monitor(self, context, id):
with context.session.begin(subtransactions=True):
hm = self.get_health_monitor(context, id)
qry = context.session.query(
loadbalancer_db.PoolMonitorAssociation
).filter_by(monitor_id=id)
for assoc in qry:
self.driver.delete_pool_health_monitor(context,
hm,
assoc['pool_id'])
def create_pool_health_monitor(self, context, health_monitor, pool_id):
retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
context,
health_monitor,
pool_id
)
# open issue: PoolMonitorAssociation has no status field
# so we can't set the status to pending and let the driver
# set the real status of the association
self.driver.create_pool_health_monitor(
context, health_monitor, pool_id)
return retval
def delete_pool_health_monitor(self, context, id, pool_id):
hm = self.get_health_monitor(context, id)
self.driver.delete_pool_health_monitor(
context, hm, pool_id)
def stats(self, context, pool_id):
stats_data = self.driver.stats(context, pool_id)
# if the driver returned stats, update the DB and return the stored
# value; otherwise return what is already in the DB
if stats_data:
super(LoadBalancerPlugin, self)._update_pool_stats(
context,
pool_id,
stats_data
)
return super(LoadBalancerPlugin, self).stats(context,
pool_id)
def populate_vip_graph(self, context, vip):
"""Populate the vip with: pool, members, healthmonitors."""
pool = self.get_pool(context, vip['pool_id'])
vip['pool'] = pool
vip['members'] = [
self.get_member(context, member_id)
for member_id in pool['members']]
vip['health_monitors'] = [
self.get_health_monitor(context, hm_id)
for hm_id in pool['health_monitors']]
return vip

View File

@ -0,0 +1,35 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
import os
setattr(__builtin__, '_', lambda x: x)
from oslo.config import cfg
reldir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
absdir = os.path.abspath(reldir)
cfg.CONF.state_path = absdir
# An empty lock path forces lockutils.synchronized to use a temporary
# location for lock files that will be cleaned up automatically.
cfg.CONF.lock_path = ''
cfg.CONF.use_stderr = False

View File

@ -0,0 +1,15 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -0,0 +1,15 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

File diff suppressed because it is too large

View File

@ -0,0 +1,17 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

View File

@ -0,0 +1,17 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

View File

@ -0,0 +1,17 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

View File

@ -0,0 +1,17 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

View File

@ -0,0 +1,57 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import contextlib
import mock
from oslo.config import cfg
from neutron.services.loadbalancer.drivers.haproxy import agent
from neutron.tests import base
class TestLbaasService(base.BaseTestCase):
def setUp(self):
super(TestLbaasService, self).setUp()
self.addCleanup(cfg.CONF.reset)
cfg.CONF.register_opts(agent.OPTS)
def test_start(self):
with mock.patch.object(
agent.rpc_service.Service, 'start'
) as mock_start:
mgr = mock.Mock()
agent_service = agent.LbaasAgentService('host', 'topic', mgr)
agent_service.start()
self.assertTrue(mock_start.called)
def test_main(self):
logging_str = 'neutron.agent.common.config.setup_logging'
with contextlib.nested(
mock.patch(logging_str),
mock.patch.object(agent.service, 'launch'),
mock.patch.object(agent, 'eventlet'),
mock.patch('sys.argv'),
mock.patch.object(agent.manager, 'LbaasAgentManager')
) as (mock_logging, mock_launch, mock_eventlet, sys_argv, mgr_cls):
agent.main()
self.assertTrue(mock_eventlet.monkey_patch.called)
mock_launch.assert_called_once_with(mock.ANY)

View File

@ -0,0 +1,367 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import contextlib
import mock
from neutron.services.loadbalancer.drivers.haproxy import (
agent_manager as manager
)
from neutron.tests import base
class TestLogicalDeviceCache(base.BaseTestCase):
def setUp(self):
super(TestLogicalDeviceCache, self).setUp()
self.cache = manager.LogicalDeviceCache()
def test_put(self):
fake_device = {
'vip': {'port_id': 'port_id'},
'pool': {'id': 'pool_id'}
}
self.cache.put(fake_device)
self.assertEqual(len(self.cache.devices), 1)
self.assertEqual(len(self.cache.port_lookup), 1)
self.assertEqual(len(self.cache.pool_lookup), 1)
def test_double_put(self):
fake_device = {
'vip': {'port_id': 'port_id'},
'pool': {'id': 'pool_id'}
}
self.cache.put(fake_device)
self.cache.put(fake_device)
self.assertEqual(len(self.cache.devices), 1)
self.assertEqual(len(self.cache.port_lookup), 1)
self.assertEqual(len(self.cache.pool_lookup), 1)
def test_remove_in_cache(self):
fake_device = {
'vip': {'port_id': 'port_id'},
'pool': {'id': 'pool_id'}
}
self.cache.put(fake_device)
self.assertEqual(len(self.cache.devices), 1)
self.cache.remove(fake_device)
self.assertFalse(len(self.cache.devices))
self.assertFalse(self.cache.port_lookup)
self.assertFalse(self.cache.pool_lookup)
def test_remove_in_cache_same_object(self):
fake_device = {
'vip': {'port_id': 'port_id'},
'pool': {'id': 'pool_id'}
}
self.cache.put(fake_device)
self.assertEqual(len(self.cache.devices), 1)
self.cache.remove(set(self.cache.devices).pop())
self.assertFalse(len(self.cache.devices))
self.assertFalse(self.cache.port_lookup)
self.assertFalse(self.cache.pool_lookup)
def test_remove_by_pool_id(self):
fake_device = {
'vip': {'port_id': 'port_id'},
'pool': {'id': 'pool_id'}
}
self.cache.put(fake_device)
self.assertEqual(len(self.cache.devices), 1)
self.cache.remove_by_pool_id('pool_id')
self.assertFalse(len(self.cache.devices))
self.assertFalse(self.cache.port_lookup)
self.assertFalse(self.cache.pool_lookup)
def test_get_by_pool_id(self):
fake_device = {
'vip': {'port_id': 'port_id'},
'pool': {'id': 'pool_id'}
}
self.cache.put(fake_device)
dev = self.cache.get_by_pool_id('pool_id')
self.assertEqual(dev.pool_id, 'pool_id')
self.assertEqual(dev.port_id, 'port_id')
def test_get_by_port_id(self):
fake_device = {
'vip': {'port_id': 'port_id'},
'pool': {'id': 'pool_id'}
}
self.cache.put(fake_device)
dev = self.cache.get_by_port_id('port_id')
self.assertEqual(dev.pool_id, 'pool_id')
self.assertEqual(dev.port_id, 'port_id')
def test_get_pool_ids(self):
fake_device = {
'vip': {'port_id': 'port_id'},
'pool': {'id': 'pool_id'}
}
self.cache.put(fake_device)
self.assertEqual(self.cache.get_pool_ids(), ['pool_id'])
class TestManager(base.BaseTestCase):
def setUp(self):
super(TestManager, self).setUp()
self.addCleanup(mock.patch.stopall)
mock_conf = mock.Mock()
mock_conf.interface_driver = 'intdriver'
mock_conf.device_driver = 'devdriver'
mock_conf.AGENT.root_helper = 'sudo'
mock_conf.loadbalancer_state_path = '/the/path'
self.mock_importer = mock.patch.object(manager, 'importutils').start()
rpc_mock_cls = mock.patch(
'neutron.services.loadbalancer.drivers'
'.haproxy.agent_api.LbaasAgentApi'
).start()
self.mgr = manager.LbaasAgentManager(mock_conf)
self.rpc_mock = rpc_mock_cls.return_value
self.log = mock.patch.object(manager, 'LOG').start()
self.mgr.needs_resync = False
def test_initialize_service_hook(self):
with mock.patch.object(self.mgr, 'sync_state') as sync:
self.mgr.initialize_service_hook(mock.Mock())
sync.assert_called_once_with()
def test_periodic_resync_needs_sync(self):
with mock.patch.object(self.mgr, 'sync_state') as sync:
self.mgr.needs_resync = True
self.mgr.periodic_resync(mock.Mock())
sync.assert_called_once_with()
def test_periodic_resync_no_sync(self):
with mock.patch.object(self.mgr, 'sync_state') as sync:
self.mgr.needs_resync = False
self.mgr.periodic_resync(mock.Mock())
self.assertFalse(sync.called)
def test_collect_stats(self):
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_pool_ids.return_value = ['1', '2']
self.mgr.collect_stats(mock.Mock())
self.rpc_mock.update_pool_stats.assert_has_calls([
mock.call('1', mock.ANY),
mock.call('2', mock.ANY)
])
def test_collect_stats_exception(self):
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_pool_ids.return_value = ['1', '2']
with mock.patch.object(self.mgr, 'driver') as driver:
driver.get_stats.side_effect = Exception
self.mgr.collect_stats(mock.Mock())
self.assertFalse(self.rpc_mock.called)
self.assertTrue(self.mgr.needs_resync)
self.assertTrue(self.log.exception.called)
def test_vip_plug_callback(self):
self.mgr._vip_plug_callback('plug', {'id': 'id'})
self.rpc_mock.plug_vip_port.assert_called_once_with('id')
def test_vip_unplug_callback(self):
self.mgr._vip_plug_callback('unplug', {'id': 'id'})
self.rpc_mock.unplug_vip_port.assert_called_once_with('id')
def _sync_state_helper(self, cache, ready, refreshed, destroyed):
with contextlib.nested(
mock.patch.object(self.mgr, 'cache'),
mock.patch.object(self.mgr, 'refresh_device'),
mock.patch.object(self.mgr, 'destroy_device')
) as (mock_cache, refresh, destroy):
mock_cache.get_pool_ids.return_value = cache
self.rpc_mock.get_ready_devices.return_value = ready
self.mgr.sync_state()
self.assertEqual(len(refreshed), len(refresh.mock_calls))
self.assertEqual(len(destroyed), len(destroy.mock_calls))
refresh.assert_has_calls([mock.call(i) for i in refreshed])
destroy.assert_has_calls([mock.call(i) for i in destroyed])
self.assertFalse(self.mgr.needs_resync)
def test_sync_state_all_known(self):
self._sync_state_helper(['1', '2'], ['1', '2'], ['1', '2'], [])
def test_sync_state_all_unknown(self):
self._sync_state_helper([], ['1', '2'], ['1', '2'], [])
def test_sync_state_destroy_all(self):
self._sync_state_helper(['1', '2'], [], [], ['1', '2'])
def test_sync_state_both(self):
self._sync_state_helper(['1'], ['2'], ['2'], ['1'])
def test_sync_state_exception(self):
self.rpc_mock.get_ready_devices.side_effect = Exception
self.mgr.sync_state()
self.assertTrue(self.log.exception.called)
self.assertTrue(self.mgr.needs_resync)
def test_refresh_device_exists(self):
config = self.rpc_mock.get_logical_device.return_value
with mock.patch.object(self.mgr, 'driver') as driver:
with mock.patch.object(self.mgr, 'cache') as cache:
driver.exists.return_value = True
self.mgr.refresh_device(config)
driver.exists.assert_called_once_with(config)
driver.update.assert_called_once_with(config)
cache.put.assert_called_once_with(config)
self.assertFalse(self.mgr.needs_resync)
def test_refresh_device_new(self):
config = self.rpc_mock.get_logical_device.return_value
with mock.patch.object(self.mgr, 'driver') as driver:
with mock.patch.object(self.mgr, 'cache') as cache:
driver.exists.return_value = False
self.mgr.refresh_device(config)
driver.exists.assert_called_once_with(config)
driver.create.assert_called_once_with(config)
cache.put.assert_called_once_with(config)
self.assertFalse(self.mgr.needs_resync)
def test_refresh_device_exception(self):
config = self.rpc_mock.get_logical_device.return_value
with mock.patch.object(self.mgr, 'driver') as driver:
with mock.patch.object(self.mgr, 'cache') as cache:
driver.exists.side_effect = Exception
self.mgr.refresh_device(config)
driver.exists.assert_called_once_with(config)
self.assertTrue(self.mgr.needs_resync)
self.assertTrue(self.log.exception.called)
self.assertFalse(cache.put.called)
def test_destroy_device_known(self):
with mock.patch.object(self.mgr, 'driver') as driver:
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_by_pool_id.return_value = True
self.mgr.destroy_device('pool_id')
cache.get_by_pool_id.assert_called_once_with('pool_id')
driver.destroy.assert_called_once_with('pool_id')
self.rpc_mock.pool_destroyed.assert_called_once_with(
'pool_id'
)
cache.remove.assert_called_once_with(True)
self.assertFalse(self.mgr.needs_resync)
def test_destroy_device_unknown(self):
with mock.patch.object(self.mgr, 'driver') as driver:
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_by_pool_id.return_value = None
self.mgr.destroy_device('pool_id')
cache.get_by_pool_id.assert_called_once_with('pool_id')
self.assertFalse(driver.destroy.called)
def test_destroy_device_exception(self):
with mock.patch.object(self.mgr, 'driver') as driver:
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_by_pool_id.return_value = True
driver.destroy.side_effect = Exception
self.mgr.destroy_device('pool_id')
cache.get_by_pool_id.assert_called_once_with('pool_id')
self.assertTrue(self.log.exception.called)
self.assertTrue(self.mgr.needs_resync)
def test_remove_orphans(self):
with mock.patch.object(self.mgr, 'driver') as driver:
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_pool_ids.return_value = ['1', '2']
self.mgr.remove_orphans()
driver.remove_orphans.assert_called_once_with(['1', '2'])
def test_reload_pool(self):
with mock.patch.object(self.mgr, 'refresh_device') as refresh:
self.mgr.reload_pool(mock.Mock(), pool_id='pool_id')
refresh.assert_called_once_with('pool_id')
def test_modify_pool_known(self):
with mock.patch.object(self.mgr, 'refresh_device') as refresh:
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_by_pool_id.return_value = True
self.mgr.reload_pool(mock.Mock(), pool_id='pool_id')
refresh.assert_called_once_with('pool_id')
def test_modify_pool_unknown(self):
with mock.patch.object(self.mgr, 'refresh_device') as refresh:
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_by_pool_id.return_value = False
self.mgr.modify_pool(mock.Mock(), pool_id='pool_id')
self.assertFalse(refresh.called)
def test_destroy_pool_known(self):
with mock.patch.object(self.mgr, 'destroy_device') as destroy:
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_by_pool_id.return_value = True
self.mgr.destroy_pool(mock.Mock(), pool_id='pool_id')
destroy.assert_called_once_with('pool_id')
def test_destroy_pool_unknown(self):
with mock.patch.object(self.mgr, 'destroy_device') as destroy:
with mock.patch.object(self.mgr, 'cache') as cache:
cache.get_by_pool_id.return_value = False
self.mgr.destroy_pool(mock.Mock(), pool_id='pool_id')
self.assertFalse(destroy.called)

View File

@ -0,0 +1,137 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import mock
from neutron.services.loadbalancer.drivers.haproxy import (
agent_api as api
)
from neutron.tests import base
class TestApiCache(base.BaseTestCase):
def setUp(self):
super(TestApiCache, self).setUp()
self.addCleanup(mock.patch.stopall)
self.api = api.LbaasAgentApi('topic', mock.sentinel.context, 'host')
self.make_msg = mock.patch.object(self.api, 'make_msg').start()
self.mock_call = mock.patch.object(self.api, 'call').start()
def test_init(self):
self.assertEqual(self.api.host, 'host')
self.assertEqual(self.api.context, mock.sentinel.context)
def test_get_ready_devices(self):
self.assertEqual(
self.api.get_ready_devices(),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with('get_ready_devices', host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_get_logical_device(self):
self.assertEqual(
self.api.get_logical_device('pool_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'get_logical_device',
pool_id='pool_id',
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_pool_destroyed(self):
self.assertEqual(
self.api.pool_destroyed('pool_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'pool_destroyed',
pool_id='pool_id',
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_plug_vip_port(self):
self.assertEqual(
self.api.plug_vip_port('port_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'plug_vip_port',
port_id='port_id',
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_unplug_vip_port(self):
self.assertEqual(
self.api.unplug_vip_port('port_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'unplug_vip_port',
port_id='port_id',
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_update_pool_stats(self):
self.assertEqual(
self.api.update_pool_stats('pool_id', {'stat': 'stat'}),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'update_pool_stats',
pool_id='pool_id',
stats={'stat': 'stat'},
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)

View File

@ -0,0 +1,213 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Oleg Bondarev (obondarev@mirantis.com)
import contextlib
import mock
from oslo.config import cfg as config
from neutron.services.loadbalancer.drivers.haproxy import cfg
from neutron.tests import base
class TestHaproxyCfg(base.BaseTestCase):
def test_save_config(self):
with contextlib.nested(
mock.patch('neutron.services.loadbalancer.'
'drivers.haproxy.cfg._build_global'),
mock.patch('neutron.services.loadbalancer.'
'drivers.haproxy.cfg._build_defaults'),
mock.patch('neutron.services.loadbalancer.'
'drivers.haproxy.cfg._build_frontend'),
mock.patch('neutron.services.loadbalancer.'
'drivers.haproxy.cfg._build_backend'),
mock.patch('neutron.agent.linux.utils.replace_file')
) as (b_g, b_d, b_f, b_b, replace):
test_config = ['globals', 'defaults', 'frontend', 'backend']
b_g.return_value = [test_config[0]]
b_d.return_value = [test_config[1]]
b_f.return_value = [test_config[2]]
b_b.return_value = [test_config[3]]
cfg.save_config('test_path', mock.Mock())
replace.assert_called_once_with('test_path',
'\n'.join(test_config))
def test_build_global(self):
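# 'user_group' may not be registered when this test runs in
# isolation, so register it on the fly before overriding it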
if not hasattr(config.CONF, 'user_group'):
config.CONF.register_opt(config.StrOpt('user_group'))
config.CONF.set_override('user_group', 'test_group')
expected_opts = ['global',
'\tdaemon',
'\tuser nobody',
'\tgroup test_group',
'\tlog /dev/log local0',
'\tlog /dev/log local1 notice',
'\tstats socket test_path mode 0666 level user']
opts = cfg._build_global(mock.Mock(), 'test_path')
self.assertEqual(expected_opts, list(opts))
config.CONF.reset()
def test_build_defaults(self):
expected_opts = ['defaults',
'\tlog global',
'\tretries 3',
'\toption redispatch',
'\ttimeout connect 5000',
'\ttimeout client 50000',
'\ttimeout server 50000']
opts = cfg._build_defaults(mock.Mock())
self.assertEqual(expected_opts, list(opts))
config.CONF.reset()
def test_build_frontend(self):
test_config = {'vip': {'id': 'vip_id',
'protocol': 'HTTP',
'port': {'fixed_ips': [
{'ip_address': '10.0.0.2'}]
},
'protocol_port': 80,
'connection_limit': 2000,
},
'pool': {'id': 'pool_id'}}
expected_opts = ['frontend vip_id',
'\toption tcplog',
'\tbind 10.0.0.2:80',
'\tmode http',
'\tdefault_backend pool_id',
'\tmaxconn 2000',
'\toption forwardfor']
opts = cfg._build_frontend(test_config)
self.assertEqual(expected_opts, list(opts))
test_config['vip']['connection_limit'] = -1
expected_opts.remove('\tmaxconn 2000')
opts = cfg._build_frontend(test_config)
self.assertEqual(expected_opts, list(opts))
def test_build_backend(self):
test_config = {'pool': {'id': 'pool_id',
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN'},
'members': [{'status': 'ACTIVE',
'admin_state_up': True,
'id': 'member1_id',
'address': '10.0.0.3',
'protocol_port': 80,
'weight': 1}],
'healthmonitors': [{'status': 'ACTIVE',
'admin_state_up': True,
'delay': 3,
'max_retries': 4,
'timeout': 2,
'type': 'TCP'}],
'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
expected_opts = ['backend pool_id',
'\tmode http',
'\tbalance roundrobin',
'\toption forwardfor',
'\ttimeout check 2s',
'\tcookie SRV insert indirect nocache',
'\tserver member1_id 10.0.0.3:80 weight 1 '
'check inter 3s fall 4 cookie 0']
opts = cfg._build_backend(test_config)
self.assertEqual(expected_opts, list(opts))
def test_get_server_health_option(self):
test_config = {'healthmonitors': [{'status': 'ERROR',
'admin_state_up': False,
'delay': 3,
'max_retries': 4,
'timeout': 2,
'type': 'TCP',
'http_method': 'GET',
'url_path': '/',
'expected_codes': '200'}]}
self.assertEqual(('', []), cfg._get_server_health_option(test_config))
test_config['healthmonitors'][0]['status'] = 'ACTIVE'
self.assertEqual(('', []), cfg._get_server_health_option(test_config))
test_config['healthmonitors'][0]['admin_state_up'] = True
expected = (' check inter 3s fall 4', ['timeout check 2s'])
self.assertEqual(expected, cfg._get_server_health_option(test_config))
test_config['healthmonitors'][0]['type'] = 'HTTPS'
expected = (' check inter 3s fall 4',
['timeout check 2s',
'option httpchk GET /',
'http-check expect rstatus 200',
'option ssl-hello-chk'])
self.assertEqual(expected, cfg._get_server_health_option(test_config))
def test_has_http_cookie_persistence(self):
config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
self.assertTrue(cfg._has_http_cookie_persistence(config))
config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
self.assertFalse(cfg._has_http_cookie_persistence(config))
config = {'vip': {'session_persistence': {}}}
self.assertFalse(cfg._has_http_cookie_persistence(config))
def test_get_session_persistence(self):
config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
self.assertEqual(cfg._get_session_persistence(config),
['stick-table type ip size 10k', 'stick on src'])
config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
self.assertEqual(cfg._get_session_persistence(config),
['cookie SRV insert indirect nocache'])
config = {'vip': {'session_persistence': {'type': 'APP_COOKIE',
'cookie_name': 'test'}}}
self.assertEqual(cfg._get_session_persistence(config),
['appsession test len 56 timeout 3h'])
config = {'vip': {'session_persistence': {'type': 'APP_COOKIE'}}}
self.assertEqual(cfg._get_session_persistence(config), [])
config = {'vip': {'session_persistence': {'type': 'UNSUPPORTED'}}}
self.assertEqual(cfg._get_session_persistence(config), [])
def test_expand_expected_codes(self):
exp_codes = ''
self.assertEqual(cfg._expand_expected_codes(exp_codes), set([]))
exp_codes = '200'
self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200']))
exp_codes = '200, 201'
self.assertEqual(cfg._expand_expected_codes(exp_codes),
set(['200', '201']))
exp_codes = '200, 201,202'
self.assertEqual(cfg._expand_expected_codes(exp_codes),
set(['200', '201', '202']))
exp_codes = '200-202'
self.assertEqual(cfg._expand_expected_codes(exp_codes),
set(['200', '201', '202']))
exp_codes = '200-202, 205'
self.assertEqual(cfg._expand_expected_codes(exp_codes),
set(['200', '201', '202', '205']))
exp_codes = '200, 201-203'
self.assertEqual(cfg._expand_expected_codes(exp_codes),
set(['200', '201', '202', '203']))
exp_codes = '200, 201-203, 205'
self.assertEqual(cfg._expand_expected_codes(exp_codes),
set(['200', '201', '202', '203', '205']))
exp_codes = '201-200, 205'
self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['205']))

View File

@ -0,0 +1,289 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import contextlib
import mock
from neutron.common import exceptions
from neutron.services.loadbalancer.drivers.haproxy import (
namespace_driver
)
from neutron.tests import base
class TestHaproxyNSDriver(base.BaseTestCase):
def setUp(self):
super(TestHaproxyNSDriver, self).setUp()
self.vif_driver = mock.Mock()
self.vip_plug_callback = mock.Mock()
self.driver = namespace_driver.HaproxyNSDriver(
'sudo',
'/the/path',
self.vif_driver,
self.vip_plug_callback
)
self.fake_config = {
'pool': {'id': 'pool_id'},
'vip': {'id': 'vip_id', 'port': {'id': 'port_id'}}
}
def test_create(self):
with mock.patch.object(self.driver, '_plug') as plug:
with mock.patch.object(self.driver, '_spawn') as spawn:
self.driver.create(self.fake_config)
plug.assert_called_once_with(
'qlbaas-pool_id', {'id': 'port_id'}
)
spawn.assert_called_once_with(self.fake_config)
def test_update(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch.object(self.driver, '_spawn'),
mock.patch('__builtin__.open')
) as (gsp, spawn, mock_open):
mock_open.return_value = ['5']
self.driver.update(self.fake_config)
mock_open.assert_called_once_with(gsp.return_value, 'r')
spawn.assert_called_once_with(self.fake_config, ['-sf', '5'])
def test_spawn(self):
with contextlib.nested(
mock.patch.object(namespace_driver.hacfg, 'save_config'),
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
) as (mock_save, gsp, ip_wrap):
gsp.side_effect = lambda x, y: y
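# make _get_state_file_path return just the kind ('conf', 'pid', 'sock')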
self.driver._spawn(self.fake_config)
mock_save.assert_called_once_with('conf', self.fake_config, 'sock')
cmd = ['haproxy', '-f', 'conf', '-p', 'pid']
ip_wrap.assert_has_calls([
mock.call('sudo', 'qlbaas-pool_id'),
mock.call().netns.execute(cmd)
])
def test_destroy(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch.object(namespace_driver, 'kill_pids_in_file'),
mock.patch.object(self.driver, '_unplug'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
mock.patch('os.path.isdir'),
mock.patch('shutil.rmtree')
) as (gsp, kill, unplug, ip_wrap, isdir, rmtree):
gsp.side_effect = lambda x, y: '/pool/' + y
self.driver.pool_to_port_id['pool_id'] = 'port_id'
isdir.return_value = True
self.driver.destroy('pool_id')
kill.assert_called_once_with('sudo', '/pool/pid')
unplug.assert_called_once_with('qlbaas-pool_id', 'port_id')
isdir.assert_called_once_with('/pool')
rmtree.assert_called_once_with('/pool')
ip_wrap.assert_has_calls([
mock.call('sudo', 'qlbaas-pool_id'),
mock.call().garbage_collect_namespace()
])
def test_exists(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
mock.patch('socket.socket'),
mock.patch('os.path.exists'),
) as (gsp, ip_wrap, socket, path_exists):
gsp.side_effect = lambda x, y: '/pool/' + y
ip_wrap.return_value.netns.exists.return_value = True
path_exists.return_value = True
self.driver.exists('pool_id')
ip_wrap.assert_has_calls([
mock.call('sudo'),
mock.call().netns.exists('qlbaas-pool_id')
])
self.assertTrue(self.driver.exists('pool_id'))
def test_get_stats(self):
raw_stats = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,'
'dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,'
'act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,'
'sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,'
'check_status,check_code,check_duration,hrsp_1xx,'
'hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n'
'8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,'
'10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0'
',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n')
raw_stats_empty = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,'
'bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,'
'status,weight,act,bck,chkfail,chkdown,lastchg,'
'downtime,qlimit,pid,iid,sid,throttle,lbtot,'
'tracked,type,rate,rate_lim,rate_max,check_status,'
'check_code,check_duration,hrsp_1xx,hrsp_2xx,'
'hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,'
'\n')
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch('socket.socket'),
mock.patch('os.path.exists'),
) as (gsp, socket, path_exists):
gsp.side_effect = lambda x, y: '/pool/' + y
path_exists.return_value = True
socket.return_value = socket
socket.recv.return_value = raw_stats
exp_stats = {'CONNECTION_ERRORS': '0',
'CURRENT_CONNECTIONS': '1',
'CURRENT_SESSIONS': '3',
'IN_BYTES': '7764',
'MAX_CONNECTIONS': '2',
'MAX_SESSIONS': '4',
'OUT_BYTES': '2365',
'RESPONSE_ERRORS': '0',
'TOTAL_SESSIONS': '10'}
stats = self.driver.get_stats('pool_id')
self.assertEqual(exp_stats, stats)
socket.recv.return_value = raw_stats_empty
self.assertEqual({}, self.driver.get_stats('pool_id'))
path_exists.return_value = False
socket.reset_mock()
self.assertEqual({}, self.driver.get_stats('pool_id'))
self.assertFalse(socket.called)
def test_plug(self):
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1'}}]}
with contextlib.nested(
mock.patch('neutron.agent.linux.ip_lib.device_exists'),
mock.patch('netaddr.IPNetwork'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
) as (dev_exists, ip_net, ip_wrap):
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port)
self.vip_plug_callback.assert_called_once_with('plug', test_port)
self.assertTrue(dev_exists.called)
self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
'test_interface',
'mac_addr',
namespace='test_ns')
self.vif_driver.init_l3.assert_called_once_with('test_interface',
['10.0.0.2/24'],
namespace=
'test_ns')
cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
ip_wrap.assert_has_calls([
mock.call('sudo', namespace='test_ns'),
mock.call().netns.execute(cmd, check_exit_code=False),
])
dev_exists.return_value = True
self.assertRaises(exceptions.PreexistingDeviceFailure,
self.driver._plug, 'test_ns', test_port, False)
def test_plug_no_gw(self):
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24'}}]}
with contextlib.nested(
mock.patch('neutron.agent.linux.ip_lib.device_exists'),
mock.patch('netaddr.IPNetwork'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
) as (dev_exists, ip_net, ip_wrap):
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port)
self.vip_plug_callback.assert_called_once_with('plug', test_port)
self.assertTrue(dev_exists.called)
self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
'test_interface',
'mac_addr',
namespace='test_ns')
self.vif_driver.init_l3.assert_called_once_with('test_interface',
['10.0.0.2/24'],
namespace=
'test_ns')
self.assertFalse(ip_wrap.called)
dev_exists.return_value = True
self.assertRaises(exceptions.PreexistingDeviceFailure,
self.driver._plug, 'test_ns', test_port, False)
def test_unplug(self):
self.vif_driver.get_device_name.return_value = 'test_interface'
self.driver._unplug('test_ns', 'port_id')
self.vip_plug_callback.assert_called_once_with('unplug',
{'id': 'port_id'})
self.vif_driver.unplug('test_interface', namespace='test_ns')
def test_kill_pids_in_file(self):
with contextlib.nested(
mock.patch('os.path.exists'),
mock.patch('__builtin__.open'),
mock.patch('neutron.agent.linux.utils.execute')
) as (path_exists, mock_open, mock_execute):
file_mock = mock.MagicMock()
mock_open.return_value = file_mock
file_mock.__enter__.return_value = file_mock
file_mock.__iter__.return_value = iter(['123'])
path_exists.return_value = False
namespace_driver.kill_pids_in_file('sudo', 'test_path')
path_exists.assert_called_once_with('test_path')
self.assertFalse(mock_open.called)
self.assertFalse(mock_execute.called)
path_exists.return_value = True
mock_execute.side_effect = RuntimeError
namespace_driver.kill_pids_in_file('sudo', 'test_path')
mock_execute.assert_called_once_with(
['kill', '-9', '123'], 'sudo')
def test_get_state_file_path(self):
with mock.patch('os.makedirs') as mkdir:
path = self.driver._get_state_file_path('pool_id', 'conf')
self.assertEqual('/the/path/pool_id/conf', path)
mkdir.assert_called_once_with('/the/path/pool_id', 0o755)


@@ -0,0 +1,361 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import mock
from neutron.common import exceptions
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers.haproxy import (
plugin_driver
)
from neutron.tests import base
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
super(TestLoadBalancerPluginBase, self).setUp()
# create another API instance to make testing easier
# pass a mock to our API instance
# we need access to loaded plugins to modify models
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerCallbacks, self).setUp()
self.callbacks = plugin_driver.LoadBalancerCallbacks(
self.plugin_instance
)
def test_get_ready_devices(self):
with self.vip() as vip:
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual(ready, [vip['vip']['pool_id']])
def test_get_ready_devices_multiple_vips_and_pools(self):
ctx = context.get_admin_context()
# add 3 pools and 2 vips directly to DB
# to create 2 "ready" devices and one pool without vip
pools = []
for i in xrange(0, 3):
pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
subnet_id=self._subnet_id,
protocol="HTTP",
lb_method="ROUND_ROBIN",
status=constants.ACTIVE,
admin_state_up=True))
ctx.session.add(pools[i])
vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[0].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip0)
pools[0].vip_id = vip0.id
vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[1].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip1)
pools[1].vip_id = vip1.id
ctx.session.flush()
self.assertEqual(ctx.session.query(ldb.Pool).count(), 3)
self.assertEqual(ctx.session.query(ldb.Vip).count(), 2)
ready = self.callbacks.get_ready_devices(ctx)
self.assertEqual(len(ready), 2)
self.assertIn(pools[0].id, ready)
self.assertIn(pools[1].id, ready)
self.assertNotIn(pools[2].id, ready)
# cleanup
ctx.session.query(ldb.Pool).delete()
ctx.session.query(ldb.Vip).delete()
def test_get_ready_devices_inactive_vip(self):
with self.vip() as vip:
            # set the vip inactive; the plugin must be used directly since
            # status is not tenant mutable
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['vip']['id'],
{'vip': {'status': constants.INACTIVE}}
)
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertFalse(ready)
def test_get_ready_devices_inactive_pool(self):
with self.vip() as vip:
            # set the pool inactive; the plugin must be used directly since
            # status is not tenant mutable
self.plugin_instance.update_pool(
context.get_admin_context(),
vip['vip']['pool_id'],
{'pool': {'status': constants.INACTIVE}}
)
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertFalse(ready)
def test_get_logical_device_inactive(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
self.assertRaises(
exceptions.Invalid,
self.callbacks.get_logical_device,
context.get_admin_context(),
pool['pool']['id'],
activate=False
)
def test_get_logical_device_activate(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
# build the expected
port = self.plugin_instance._core_plugin.get_port(
ctx, vip['vip']['port_id']
)
subnet = self.plugin_instance._core_plugin.get_subnet(
ctx, vip['vip']['subnet_id']
)
port['fixed_ips'][0]['subnet'] = subnet
# reload pool to add members and vip
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id']
)
pool['status'] = constants.ACTIVE
vip['vip']['status'] = constants.ACTIVE
vip['vip']['port'] = port
member['member']['status'] = constants.ACTIVE
expected = {
'pool': pool,
'vip': vip['vip'],
'members': [member['member']],
'healthmonitors': []
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['id'], activate=True
)
self.assertEqual(logical_config, expected)
def _update_port_test_helper(self, expected, func, **kwargs):
core = self.plugin_instance._core_plugin
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
ctx = context.get_admin_context()
func(ctx, port_id=vip['vip']['port_id'], **kwargs)
db_port = core.get_port(ctx, vip['vip']['port_id'])
for k, v in expected.iteritems():
self.assertEqual(db_port[k], v)
def test_plug_vip_port(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
'admin_state_up': True
}
self._update_port_test_helper(
exp,
self.callbacks.plug_vip_port,
host='host'
)
def test_unplug_vip_port(self):
exp = {
'device_owner': '',
'device_id': '',
'admin_state_up': False
}
self._update_port_test_helper(
exp,
self.callbacks.unplug_vip_port,
host='host'
)
class TestLoadBalancerAgentApi(base.BaseTestCase):
def setUp(self):
super(TestLoadBalancerAgentApi, self).setUp()
self.addCleanup(mock.patch.stopall)
self.api = plugin_driver.LoadBalancerAgentApi('topic', 'host')
self.mock_cast = mock.patch.object(self.api, 'cast').start()
self.mock_msg = mock.patch.object(self.api, 'make_msg').start()
def test_init(self):
self.assertEqual(self.api.topic, 'topic')
self.assertEqual(self.api.host, 'host')
def _call_test_helper(self, method_name):
rv = getattr(self.api, method_name)(mock.sentinel.context, 'the_id')
self.assertEqual(rv, self.mock_cast.return_value)
self.mock_cast.assert_called_once_with(
mock.sentinel.context,
self.mock_msg.return_value,
topic='topic'
)
self.mock_msg.assert_called_once_with(
method_name,
pool_id='the_id',
host='host'
)
def test_reload_pool(self):
self._call_test_helper('reload_pool')
def test_destroy_pool(self):
self._call_test_helper('destroy_pool')
def test_modify_pool(self):
self._call_test_helper('modify_pool')
class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase):
def setUp(self):
self.log = mock.patch.object(plugin_driver, 'LOG')
api_cls = mock.patch.object(plugin_driver,
'LoadBalancerAgentApi').start()
super(TestLoadBalancerPluginNotificationWrapper, self).setUp()
self.mock_api = api_cls.return_value
self.addCleanup(mock.patch.stopall)
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
self.mock_api.reload_pool.assert_called_once_with(
mock.ANY,
vip['vip']['pool_id']
)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
self.mock_api.reset_mock()
ctx = context.get_admin_context()
vip['vip'].pop('status')
new_vip = self.plugin_instance.update_vip(
ctx,
vip['vip']['id'],
vip
)
self.mock_api.reload_pool.assert_called_once_with(
mock.ANY,
vip['vip']['pool_id']
)
self.assertEqual(
new_vip['status'],
constants.PENDING_UPDATE
)
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet, no_delete=True) as vip:
self.mock_api.reset_mock()
ctx = context.get_admin_context()
self.plugin_instance.delete_vip(ctx, vip['vip']['id'])
self.mock_api.destroy_pool.assert_called_once_with(
mock.ANY,
vip['vip']['pool_id']
)
def test_update_health_monitor_associated_with_pool(self):
with self.health_monitor(type='HTTP') as monitor:
with self.pool() as pool:
data = {
'health_monitor': {
'id': monitor['health_monitor']['id'],
'tenant_id': self._tenant_id
}
}
req = self.new_create_request(
'pools',
data,
fmt=self.fmt,
id=pool['pool']['id'],
subresource='health_monitors')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
self.mock_api.modify_pool.assert_called_once_with(
mock.ANY,
pool['pool']['id']
)
self.mock_api.reset_mock()
data = {'health_monitor': {'delay': 20,
'timeout': 20,
'max_retries': 2,
'admin_state_up': False}}
req = self.new_update_request("health_monitors",
data,
monitor['health_monitor']['id'])
req.get_response(self.ext_api)
self.mock_api.modify_pool.assert_called_once_with(
mock.ANY,
pool['pool']['id']
)
# TODO(obondarev): improve plugin_driver test coverage (bug 1191007)


@@ -0,0 +1,504 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo.config import cfg
from webob import exc
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.extensions import loadbalancer
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class LoadBalancerTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
loadbalancer.RESOURCE_ATTRIBUTE_MAP)
return loadbalancer.Loadbalancer.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class LoadBalancerExtensionTestCase(testlib_api.WebTestCase):
fmt = 'json'
def setUp(self):
super(LoadBalancerExtensionTestCase, self).setUp()
plugin = 'neutron.extensions.loadbalancer.LoadBalancerPluginBase'
# Ensure 'stale' patched copies of the plugin are never returned
manager.NeutronManager._instance = None
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Create the default configurations
args = ['--config-file', test_api_v2.etcdir('neutron.conf.test')]
config.parse(args)
        # just stubbing the core plugin with the LoadBalancer plugin
cfg.CONF.set_override('core_plugin', plugin)
cfg.CONF.set_override('service_plugins', [plugin])
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance.get_plugin_type.return_value = constants.LOADBALANCER
ext_mgr = LoadBalancerTestExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.api = webtest.TestApp(self.ext_mdw)
super(LoadBalancerExtensionTestCase, self).setUp()
def tearDown(self):
self._plugin_patcher.stop()
self.api = None
self.plugin = None
cfg.CONF.reset()
super(LoadBalancerExtensionTestCase, self).tearDown()
def test_vip_create(self):
vip_id = _uuid()
data = {'vip': {'name': 'vip1',
'description': 'descr_vip1',
'subnet_id': _uuid(),
'address': '127.0.0.1',
'protocol_port': 80,
'protocol': 'HTTP',
'pool_id': _uuid(),
'session_persistence': {'type': 'HTTP_COOKIE'},
'connection_limit': 100,
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['vip'])
return_value.update({'status': "ACTIVE", 'id': vip_id})
instance = self.plugin.return_value
instance.create_vip.return_value = return_value
res = self.api.post(_get_path('lb/vips', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_vip.assert_called_with(mock.ANY,
vip=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(res['vip'], return_value)
def test_vip_list(self):
vip_id = _uuid()
return_value = [{'name': 'vip1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': vip_id}]
instance = self.plugin.return_value
instance.get_vips.return_value = return_value
res = self.api.get(_get_path('lb/vips', fmt=self.fmt))
instance.get_vips.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_vip_update(self):
vip_id = _uuid()
update_data = {'vip': {'admin_state_up': False}}
return_value = {'name': 'vip1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vip_id}
instance = self.plugin.return_value
instance.update_vip.return_value = return_value
res = self.api.put(_get_path('lb/vips', id=vip_id, fmt=self.fmt),
self.serialize(update_data))
instance.update_vip.assert_called_with(mock.ANY, vip_id,
vip=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(res['vip'], return_value)
def test_vip_get(self):
vip_id = _uuid()
return_value = {'name': 'vip1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vip_id}
instance = self.plugin.return_value
instance.get_vip.return_value = return_value
res = self.api.get(_get_path('lb/vips', id=vip_id, fmt=self.fmt))
instance.get_vip.assert_called_with(mock.ANY, vip_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(res['vip'], return_value)
def _test_entity_delete(self, entity):
"""Does the entity deletion based on naming convention."""
entity_id = _uuid()
res = self.api.delete(_get_path('lb/' + entity + 's', id=entity_id,
fmt=self.fmt))
delete_entity = getattr(self.plugin.return_value, "delete_" + entity)
delete_entity.assert_called_with(mock.ANY, entity_id)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
def test_vip_delete(self):
self._test_entity_delete('vip')
def test_pool_create(self):
pool_id = _uuid()
hm_id = _uuid()
data = {'pool': {'name': 'pool1',
'description': 'descr_pool1',
'subnet_id': _uuid(),
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN',
'health_monitors': [hm_id],
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['pool'])
return_value.update({'status': "ACTIVE", 'id': pool_id})
instance = self.plugin.return_value
instance.create_pool.return_value = return_value
res = self.api.post(_get_path('lb/pools', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_pool.assert_called_with(mock.ANY,
pool=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_list(self):
pool_id = _uuid()
return_value = [{'name': 'pool1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': pool_id}]
instance = self.plugin.return_value
instance.get_pools.return_value = return_value
res = self.api.get(_get_path('lb/pools', fmt=self.fmt))
instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_pool_update(self):
pool_id = _uuid()
update_data = {'pool': {'admin_state_up': False}}
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': pool_id}
instance = self.plugin.return_value
instance.update_pool.return_value = return_value
res = self.api.put(_get_path('lb/pools', id=pool_id, fmt=self.fmt),
self.serialize(update_data))
instance.update_pool.assert_called_with(mock.ANY, pool_id,
pool=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_get(self):
pool_id = _uuid()
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': pool_id}
instance = self.plugin.return_value
instance.get_pool.return_value = return_value
res = self.api.get(_get_path('lb/pools', id=pool_id, fmt=self.fmt))
instance.get_pool.assert_called_with(mock.ANY, pool_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_delete(self):
self._test_entity_delete('pool')
def test_pool_stats(self):
pool_id = _uuid()
stats = {'stats': 'dummy'}
instance = self.plugin.return_value
instance.stats.return_value = stats
path = _get_path('lb/pools', id=pool_id,
action="stats", fmt=self.fmt)
res = self.api.get(path)
instance.stats.assert_called_with(mock.ANY, pool_id)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('stats', res)
self.assertEqual(res['stats'], stats['stats'])
def test_member_create(self):
member_id = _uuid()
data = {'member': {'pool_id': _uuid(),
'address': '127.0.0.1',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['member'])
return_value.update({'status': "ACTIVE", 'id': member_id})
instance = self.plugin.return_value
instance.create_member.return_value = return_value
res = self.api.post(_get_path('lb/members', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_member.assert_called_with(mock.ANY,
member=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_member_list(self):
member_id = _uuid()
return_value = [{'name': 'member1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': member_id}]
instance = self.plugin.return_value
instance.get_members.return_value = return_value
res = self.api.get(_get_path('lb/members', fmt=self.fmt))
instance.get_members.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_member_update(self):
member_id = _uuid()
update_data = {'member': {'admin_state_up': False}}
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': member_id}
instance = self.plugin.return_value
instance.update_member.return_value = return_value
res = self.api.put(_get_path('lb/members', id=member_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_member.assert_called_with(mock.ANY, member_id,
member=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_member_get(self):
member_id = _uuid()
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': member_id}
instance = self.plugin.return_value
instance.get_member.return_value = return_value
res = self.api.get(_get_path('lb/members', id=member_id,
fmt=self.fmt))
instance.get_member.assert_called_with(mock.ANY, member_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_member_delete(self):
self._test_entity_delete('member')
def test_health_monitor_create(self):
health_monitor_id = _uuid()
data = {'health_monitor': {'type': 'HTTP',
'delay': 2,
'timeout': 1,
'max_retries': 3,
'http_method': 'GET',
'url_path': '/path',
'expected_codes': '200-300',
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['health_monitor'])
return_value.update({'status': "ACTIVE", 'id': health_monitor_id})
instance = self.plugin.return_value
instance.create_health_monitor.return_value = return_value
res = self.api.post(_get_path('lb/health_monitors',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_health_monitor.assert_called_with(mock.ANY,
health_monitor=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_health_monitor_list(self):
health_monitor_id = _uuid()
return_value = [{'type': 'HTTP',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': health_monitor_id}]
instance = self.plugin.return_value
instance.get_health_monitors.return_value = return_value
res = self.api.get(_get_path('lb/health_monitors', fmt=self.fmt))
instance.get_health_monitors.assert_called_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_health_monitor_update(self):
health_monitor_id = _uuid()
update_data = {'health_monitor': {'admin_state_up': False}}
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': health_monitor_id}
instance = self.plugin.return_value
instance.update_health_monitor.return_value = return_value
res = self.api.put(_get_path('lb/health_monitors',
id=health_monitor_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, health_monitor=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_health_monitor_get(self):
health_monitor_id = _uuid()
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': health_monitor_id}
instance = self.plugin.return_value
instance.get_health_monitor.return_value = return_value
res = self.api.get(_get_path('lb/health_monitors',
id=health_monitor_id,
fmt=self.fmt))
instance.get_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_health_monitor_delete(self):
self._test_entity_delete('health_monitor')
def test_create_pool_health_monitor(self):
health_monitor_id = _uuid()
data = {'health_monitor': {'id': health_monitor_id,
'tenant_id': _uuid()}}
return_value = copy.copy(data['health_monitor'])
instance = self.plugin.return_value
instance.create_pool_health_monitor.return_value = return_value
res = self.api.post('/lb/pools/id1/health_monitors',
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_pool_health_monitor.assert_called_with(
mock.ANY, pool_id='id1', health_monitor=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_delete_pool_health_monitor(self):
health_monitor_id = _uuid()
res = self.api.delete('/lb/pools/id1/health_monitors/%s' %
health_monitor_id)
instance = self.plugin.return_value
instance.delete_pool_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, pool_id='id1')
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
class LoadBalancerExtensionTestCaseXML(LoadBalancerExtensionTestCase):
fmt = 'xml'


@@ -30,4 +30,4 @@ module=timeutils
module=uuidutils
# The base module to hold the copy of openstack.common
base=quantum
base=neutron


@@ -12,7 +12,7 @@ httplib2
iso8601>=0.1.4
kombu>=1.0.4
netaddr
python-quantumclient>=2.2.0,<3.0.0
python-neutronclient>=2.2.3,<3.0.0
pyudev
sqlalchemy>=0.7.8,<=0.7.99
WebOb>=1.2

setup.cfg

@@ -1,5 +1,5 @@
[metadata]
name = quantum
name = neutron
version = 2013.2
summary = OpenStack Networking
description-file =
@@ -20,73 +20,93 @@ classifier =
[files]
packages =
neutron
quantum
data_files =
etc/quantum =
etc/neutron =
etc/api-paste.ini
etc/dhcp_agent.ini
etc/l3_agent.ini
etc/lbaas_agent.ini
etc/metadata_agent.ini
etc/policy.json
etc/quantum.conf
etc/neutron.conf
etc/rootwrap.conf
etc/quantum/rootwrap.d =
etc/quantum/rootwrap.d/dhcp.filters
etc/quantum/rootwrap.d/iptables-firewall.filters
etc/quantum/rootwrap.d/l3.filters
etc/quantum/rootwrap.d/lbaas-haproxy.filters
etc/quantum/rootwrap.d/linuxbridge-plugin.filters
etc/quantum/rootwrap.d/nec-plugin.filters
etc/quantum/rootwrap.d/openvswitch-plugin.filters
etc/quantum/rootwrap.d/ryu-plugin.filters
etc/init.d = etc/init.d/quantum-server
etc/quantum/plugins/bigswitch = etc/quantum/plugins/bigswitch/restproxy.ini
etc/quantum/plugins/brocade = etc/quantum/plugins/brocade/brocade.ini
etc/quantum/plugins/cisco = etc/quantum/plugins/cisco/cisco_plugins.ini
etc/quantum/plugins/hyperv = etc/quantum/plugins/hyperv/hyperv_quantum_plugin.ini
etc/quantum/plugins/linuxbridge = etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini
etc/quantum/plugins/metaplugin = etc/quantum/plugins/metaplugin/metaplugin.ini
etc/quantum/plugins/midonet = etc/quantum/plugins/midonet/midonet.ini
etc/quantum/plugins/ml2 = etc/quantum/plugins/ml2/ml2_conf.ini
etc/quantum/plugins/mlnx = etc/quantum/plugins/mlnx/mlnx_conf.ini
etc/quantum/plugins/nec = etc/quantum/plugins/nec/nec.ini
etc/quantum/plugins/nicira = etc/quantum/plugins/nicira/nvp.ini
etc/quantum/plugins/openvswitch = etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
etc/quantum/plugins/plumgrid = etc/quantum/plugins/plumgrid/plumgrid.ini
etc/quantum/plugins/ryu = etc/quantum/plugins/ryu/ryu.ini
etc/neutron/rootwrap.d =
etc/neutron/rootwrap.d/dhcp.filters
etc/neutron/rootwrap.d/iptables-firewall.filters
etc/neutron/rootwrap.d/l3.filters
etc/neutron/rootwrap.d/lbaas-haproxy.filters
etc/neutron/rootwrap.d/linuxbridge-plugin.filters
etc/neutron/rootwrap.d/nec-plugin.filters
etc/neutron/rootwrap.d/openvswitch-plugin.filters
etc/neutron/rootwrap.d/ryu-plugin.filters
etc/init.d = etc/init.d/neutron-server
etc/neutron/plugins/bigswitch = etc/neutron/plugins/bigswitch/restproxy.ini
etc/neutron/plugins/brocade = etc/neutron/plugins/brocade/brocade.ini
etc/neutron/plugins/cisco = etc/neutron/plugins/cisco/cisco_plugins.ini
etc/neutron/plugins/hyperv = etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
etc/neutron/plugins/linuxbridge = etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini
etc/neutron/plugins/metaplugin = etc/neutron/plugins/metaplugin/metaplugin.ini
etc/neutron/plugins/midonet = etc/neutron/plugins/midonet/midonet.ini
etc/neutron/plugins/ml2 = etc/neutron/plugins/ml2/ml2_conf.ini
etc/neutron/plugins/mlnx = etc/neutron/plugins/mlnx/mlnx_conf.ini
etc/neutron/plugins/nec = etc/neutron/plugins/nec/nec.ini
etc/neutron/plugins/nicira = etc/neutron/plugins/nicira/nvp.ini
etc/neutron/plugins/openvswitch = etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
etc/neutron/plugins/plumgrid = etc/neutron/plugins/plumgrid/plumgrid.ini
etc/neutron/plugins/ryu = etc/neutron/plugins/ryu/ryu.ini
scripts =
bin/quantum-rootwrap
bin/neutron-rootwrap
[global]
setup-hooks =
pbr.hooks.setup_hook
quantum.hooks.setup_hook
neutron.hooks.setup_hook
[entry_points]
console_scripts =
quantum-check-nvp-config = quantum.plugins.nicira.check_nvp_config:main
quantum-db-manage = quantum.db.migration.cli:main
quantum-debug = quantum.debug.shell:main
quantum-dhcp-agent = quantum.agent.dhcp_agent:main
quantum-dhcp-agent-dnsmasq-lease-update = quantum.agent.linux.dhcp:Dnsmasq.lease_update
quantum-hyperv-agent = quantum.plugins.hyperv.agent.hyperv_quantum_agent:main
quantum-l3-agent = quantum.agent.l3_agent:main
quantum-lbaas-agent = quantum.services.loadbalancer.drivers.haproxy.agent:main
quantum-linuxbridge-agent = quantum.plugins.linuxbridge.agent.linuxbridge_quantum_agent:main
quantum-metadata-agent = quantum.agent.metadata.agent:main
quantum-mlnx-agent = quantum.plugins.mlnx.agent.eswitch_quantum_agent:main
quantum-nec-agent = quantum.plugins.nec.agent.nec_quantum_agent:main
quantum-netns-cleanup = quantum.agent.netns_cleanup_util:main
quantum-ns-metadata-proxy = quantum.agent.metadata.namespace_proxy:main
quantum-openvswitch-agent = quantum.plugins.openvswitch.agent.ovs_quantum_agent:main
quantum-ovs-cleanup = quantum.agent.ovs_cleanup_util:main
quantum-ryu-agent = quantum.plugins.ryu.agent.ryu_quantum_agent:main
quantum-server = quantum.server:main
quantum.ml2.type_drivers =
flat = quantum.plugins.ml2.drivers.type_flat:FlatTypeDriver
local = quantum.plugins.ml2.drivers.type_local:LocalTypeDriver
vlan = quantum.plugins.ml2.drivers.type_vlan:VlanTypeDriver
neutron-check-nvp-config = neutron.plugins.nicira.check_nvp_config:main
neutron-db-manage = neutron.db.migration.cli:main
neutron-debug = neutron.debug.shell:main
neutron-dhcp-agent = neutron.agent.dhcp_agent:main
neutron-dhcp-agent-dnsmasq-lease-update = neutron.agent.linux.dhcp:Dnsmasq.lease_update
neutron-hyperv-agent = neutron.plugins.hyperv.agent.hyperv_neutron_agent:main
neutron-l3-agent = neutron.agent.l3_agent:main
neutron-lbaas-agent = neutron.services.loadbalancer.drivers.haproxy.agent:main
neutron-linuxbridge-agent = neutron.plugins.linuxbridge.agent.linuxbridge_neutron_agent:main
neutron-metadata-agent = neutron.agent.metadata.agent:main
neutron-mlnx-agent = neutron.plugins.mlnx.agent.eswitch_neutron_agent:main
neutron-nec-agent = neutron.plugins.nec.agent.nec_neutron_agent:main
neutron-netns-cleanup = neutron.agent.netns_cleanup_util:main
neutron-ns-metadata-proxy = neutron.agent.metadata.namespace_proxy:main
neutron-openvswitch-agent = neutron.plugins.openvswitch.agent.ovs_neutron_agent:main
neutron-ovs-cleanup = neutron.agent.ovs_cleanup_util:main
neutron-ryu-agent = neutron.plugins.ryu.agent.ryu_neutron_agent:main
neutron-server = neutron.server:main
quantum-check-nvp-config = neutron.plugins.nicira.check_nvp_config:main
quantum-db-manage = neutron.db.migration.cli:main
quantum-debug = neutron.debug.shell:main
quantum-dhcp-agent = neutron.agent.dhcp_agent:main
quantum-dhcp-agent-dnsmasq-lease-update = neutron.agent.linux.dhcp:Dnsmasq.lease_update
quantum-hyperv-agent = neutron.plugins.hyperv.agent.hyperv_neutron_agent:main
quantum-l3-agent = neutron.agent.l3_agent:main
quantum-lbaas-agent = neutron.services.loadbalancer.drivers.haproxy.agent:main
quantum-linuxbridge-agent = neutron.plugins.linuxbridge.agent.linuxbridge_neutron_agent:main
quantum-metadata-agent = neutron.agent.metadata.agent:main
quantum-mlnx-agent = neutron.plugins.mlnx.agent.eswitch_neutron_agent:main
quantum-nec-agent = neutron.plugins.nec.agent.nec_neutron_agent:main
quantum-netns-cleanup = neutron.agent.netns_cleanup_util:main
quantum-ns-metadata-proxy = neutron.agent.metadata.namespace_proxy:main
quantum-openvswitch-agent = neutron.plugins.openvswitch.agent.ovs_neutron_agent:main
quantum-ovs-cleanup = neutron.agent.ovs_cleanup_util:main
quantum-ryu-agent = neutron.plugins.ryu.agent.ryu_neutron_agent:main
quantum-server = neutron.server:main
neutron.ml2.type_drivers =
flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver
local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver
vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver
[build_sphinx]
all_files = 1
@@ -96,13 +116,13 @@ source-dir = doc/source
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = quantum/locale/quantum.pot
output_file = neutron/locale/neutron.pot
[compile_catalog]
directory = quantum/locale
domain = quantum
directory = neutron/locale
domain = neutron
[update_catalog]
domain = quantum
output_dir = quantum/locale
input_file = quantum/locale/quantum.pot
domain = neutron
output_dir = neutron/locale
input_file = neutron/locale/neutron.pot


@@ -2,4 +2,4 @@
rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes
rm -rf */*.deb
rm -rf ./plugins/**/build/ ./plugins/**/dist
rm -rf ./plugins/**/lib/quantum_*_plugin.egg-info ./plugins/quantum-*
rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-*


@@ -92,6 +92,6 @@ msg_format_checkers = [
]
file_black_list = ["./quantum/tests/unit",
"./quantum/openstack",
file_black_list = ["./neutron/tests/unit",
"./neutron/openstack",
"./quantum/plugins/bigswitch/tests"]


@@ -21,7 +21,7 @@
# under the License.
"""
Installation script for Quantum's development virtualenv
Installation script for Neutron's development virtualenv
"""
import os
@@ -33,12 +33,12 @@ import install_venv_common as install_venv
def print_help():
help = """
Quantum development environment setup is complete.
Neutron development environment setup is complete.
Quantum development uses virtualenv to track and manage Python dependencies
Neutron development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Quantum virtualenv for the extent of your current shell
To activate the Neutron virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
@@ -59,7 +59,7 @@ def main(argv):
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Quantum'
project = 'Neutron'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)


@@ -40,7 +40,7 @@ def main(argv):
os.path.join(root, 'tools', 'test-requires'),
])
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'quantum'
project = 'neutron'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
#NOTE(dprince): For Tox we only run post_process (which patches files, etc)


@@ -19,7 +19,7 @@ commands =
flake8
[testenv:i18n]
commands = python ./tools/check_i18n.py ./quantum ./tools/i18n_cfg.py
commands = python ./tools/check_i18n.py ./neutron ./tools/i18n_cfg.py
[testenv:cover]
commands =