Nuke lbaas v1

v1 is not going to ship in Newton, the deprecation notice has
been around for two cycles, and there is now a migration
script.

Also includes: Fix alembic migration env

The include_object() check was missing, as was the inclusion
of all lbaas DB models.

Co-Authored-By: Henry Gessau <gessau@gmail.com>
Co-Authored-By: Adam Harwell <flux.adam@gmail.com>
Change-Id: I506949e75bc62681412358ba689cb07b16311b68
Author: Doug Wiegley
Date: 2016-02-29 21:04:37 -08:00
Parent: b0741bf319
Commit: 7aa3d9ff9c
90 changed files with 115 additions and 15830 deletions
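
For context on applying the new migration (command assumed from the standard
neutron-db-manage subproject workflow, not part of this commit):

    neutron-db-manage --subproject neutron-lbaas upgrade head

This walks the separate lbaas alembic branch, tracked in the
alembic_version_lbaas table, up to the e6417a8b114d head added below.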


@@ -24,27 +24,13 @@ function neutron_agent_lbaas_install_agent_packages {
 }
 
 function neutron_lbaas_configure_common {
-    if is_service_enabled $LBAAS_V1 && is_service_enabled $LBAAS_V2; then
-        die $LINENO "Do not enable both Version 1 and Version 2 of LBaaS."
-    fi
-
     cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample $NEUTRON_LBAAS_CONF
 
-    if is_service_enabled $LBAAS_V1; then
-        inicomment $NEUTRON_LBAAS_CONF service_providers service_provider
-        iniadd $NEUTRON_LBAAS_CONF service_providers service_provider $NEUTRON_LBAAS_SERVICE_PROVIDERV1
-    elif is_service_enabled $LBAAS_V2; then
-        inicomment $NEUTRON_LBAAS_CONF service_providers service_provider
-        iniadd $NEUTRON_LBAAS_CONF service_providers service_provider $NEUTRON_LBAAS_SERVICE_PROVIDERV2
-    fi
+    inicomment $NEUTRON_LBAAS_CONF service_providers service_provider
+    iniadd $NEUTRON_LBAAS_CONF service_providers service_provider $NEUTRON_LBAAS_SERVICE_PROVIDERV2
 
-    if is_service_enabled $LBAAS_V1; then
-        _neutron_service_plugin_class_add $LBAASV1_PLUGIN
-        iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
-    elif is_service_enabled $LBAAS_V2; then
-        _neutron_service_plugin_class_add $LBAASV2_PLUGIN
-        iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
-    fi
+    _neutron_service_plugin_class_add $LBAASV2_PLUGIN
+    iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
 
     # Ensure config is set up properly for authentication neutron-lbaas
     iniset $NEUTRON_LBAAS_CONF service_auth auth_url $AUTH_URL
@@ -92,17 +78,12 @@ function neutron_lbaas_generate_config_files {
 
 function neutron_lbaas_start {
     local is_run_process=True
 
-    if is_service_enabled $LBAAS_V1; then
-        LBAAS_VERSION="q-lbaas"
-        AGENT_LBAAS_BINARY=${AGENT_LBAASV1_BINARY}
-    elif is_service_enabled $LBAAS_V2; then
-        LBAAS_VERSION="q-lbaasv2"
-        AGENT_LBAAS_BINARY=${AGENT_LBAASV2_BINARY}
-        # Octavia doesn't need the LBaaS V2 service running. If Octavia is the
-        # only provider then don't run the process.
-        if [[ "$NEUTRON_LBAAS_SERVICE_PROVIDERV2" == "$NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA" ]]; then
-            is_run_process=False
-        fi
+    LBAAS_VERSION="q-lbaasv2"
+    AGENT_LBAAS_BINARY=${AGENT_LBAASV2_BINARY}
+    # Octavia doesn't need the LBaaS V2 service running. If Octavia is the
+    # only provider then don't run the process.
+    if [[ "$NEUTRON_LBAAS_SERVICE_PROVIDERV2" == "$NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA" ]]; then
+        is_run_process=False
     fi
 
     if [[ "$is_run_process" == "True" ]] ; then


@@ -1,14 +1,11 @@
 # settings for LBaaS devstack plugin
 
-# For backward compatibility, treat q-lbaas as lbaas v1.
-# In the future, the q-lbaas may default to q-lbaasv2
-AGENT_LBAASV1_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent"
 AGENT_LBAASV2_BINARY="$NEUTRON_BIN_DIR/neutron-lbaasv2-agent"
-LBAAS_V1="q-lbaas"
 LBAAS_V2="q-lbaasv2"
-LBAAS_ANY="$LBAAS_V1 $LBAAS_V2"
+LBAAS_ANY="$LBAAS_V2"
 BARBICAN="barbican-svc"
@@ -21,7 +18,6 @@ AUTH_VERSION=${AUTH_VERSION:-"2"}
 LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy
 LBAAS_AGENT_CONF_FILENAME=$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini
-LBAASV1_PLUGIN=${LBAASV1_PLUGIN:-"neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin"}
 LBAASV2_PLUGIN=${LBAASV2_PLUGIN:-"neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2"}
 NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas
@@ -29,5 +25,4 @@ NEUTRON_LBAAS_CONF=$NEUTRON_CONF_DIR/neutron_lbaas.conf
 NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA=${NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA:-"LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default"}
-NEUTRON_LBAAS_SERVICE_PROVIDERV1=${NEUTRON_LBAAS_SERVICE_PROVIDERV1:-"LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"}
 NEUTRON_LBAAS_SERVICE_PROVIDERV2=${NEUTRON_LBAAS_SERVICE_PROVIDERV2:-${NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA}}


@@ -1,17 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lbaas.services.loadbalancer.agent import agent
def main():
agent.main()


@@ -1,873 +0,0 @@
# Copyright 2013 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.db import api as db_api
from neutron.db import common_db_mixin as base_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import servicetype_db as st_db
from neutron import manager
from neutron.plugins.common import constants
from neutron_lib import constants as n_constants
from neutron_lib import exceptions as n_exc
from oslo_db import exception
from oslo_utils import excutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import validates
from neutron_lbaas._i18n import _, _LE
from neutron_lbaas.extensions import loadbalancer
from neutron_lbaas.services.loadbalancer import constants as lb_const
class SessionPersistence(model_base.BASEV2):
vip_id = sa.Column(sa.String(36),
sa.ForeignKey("vips.id"),
primary_key=True)
type = sa.Column(sa.Enum("SOURCE_IP",
"HTTP_COOKIE",
"APP_COOKIE",
name="sesssionpersistences_type"),
nullable=False)
cookie_name = sa.Column(sa.String(1024))
class PoolStatistics(model_base.BASEV2):
"""Represents pool statistics."""
pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"),
primary_key=True)
bytes_in = sa.Column(sa.BigInteger, nullable=False)
bytes_out = sa.Column(sa.BigInteger, nullable=False)
active_connections = sa.Column(sa.BigInteger, nullable=False)
total_connections = sa.Column(sa.BigInteger, nullable=False)
@validates('bytes_in', 'bytes_out',
'active_connections', 'total_connections')
def validate_non_negative_int(self, key, value):
if value < 0:
data = {'key': key, 'value': value}
raise ValueError(_('The %(key)s field can not have '
'negative value. '
'Current value is %(value)d.') % data)
return value
class Vip(model_base.BASEV2, model_base.HasId, model_base.HasProject,
model_base.HasStatusDescription):
"""Represents a v2 neutron loadbalancer vip."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
protocol_port = sa.Column(sa.Integer, nullable=False)
protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
nullable=False)
pool_id = sa.Column(sa.String(36), nullable=False, unique=True)
session_persistence = orm.relationship(SessionPersistence,
uselist=False,
backref="vips",
cascade="all, delete-orphan")
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
connection_limit = sa.Column(sa.Integer)
port = orm.relationship(models_v2.Port)
class Member(model_base.BASEV2, model_base.HasId, model_base.HasProject,
model_base.HasStatusDescription):
"""Represents a v2 neutron loadbalancer member."""
__table_args__ = (
sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
name='uniq_member0pool_id0address0port'),
)
pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"),
nullable=False)
address = sa.Column(sa.String(64), nullable=False)
protocol_port = sa.Column(sa.Integer, nullable=False)
weight = sa.Column(sa.Integer, nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
class Pool(model_base.BASEV2, model_base.HasId, model_base.HasProject,
model_base.HasStatusDescription):
"""Represents a v2 neutron loadbalancer pool."""
vip_id = sa.Column(sa.String(36), sa.ForeignKey("vips.id"))
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
subnet_id = sa.Column(sa.String(36), nullable=False)
protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
nullable=False)
lb_method = sa.Column(sa.Enum("ROUND_ROBIN",
"LEAST_CONNECTIONS",
"SOURCE_IP",
name="pools_lb_method"),
nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
stats = orm.relationship(PoolStatistics,
uselist=False,
backref="pools",
cascade="all, delete-orphan")
members = orm.relationship(Member, backref="pools",
cascade="all, delete-orphan")
monitors = orm.relationship("PoolMonitorAssociation", backref="pools",
cascade="all, delete-orphan")
vip = orm.relationship(Vip, backref='pool')
provider = orm.relationship(
st_db.ProviderResourceAssociation,
uselist=False,
lazy="joined",
primaryjoin="Pool.id==ProviderResourceAssociation.resource_id",
foreign_keys=[st_db.ProviderResourceAssociation.resource_id]
)
class HealthMonitor(model_base.BASEV2, model_base.HasId,
model_base.HasProject):
"""Represents a v2 neutron loadbalancer healthmonitor."""
type = sa.Column(sa.Enum("PING", "TCP", "HTTP", "HTTPS",
name="healthmontiors_type"),
nullable=False)
delay = sa.Column(sa.Integer, nullable=False)
timeout = sa.Column(sa.Integer, nullable=False)
max_retries = sa.Column(sa.Integer, nullable=False)
http_method = sa.Column(sa.String(16))
url_path = sa.Column(sa.String(255))
expected_codes = sa.Column(sa.String(64))
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
pools = orm.relationship(
"PoolMonitorAssociation", backref="healthmonitor",
cascade="all", lazy="joined"
)
class PoolMonitorAssociation(model_base.BASEV2,
model_base.HasStatusDescription):
"""Many-to-many association between pool and healthMonitor classes."""
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("pools.id"),
primary_key=True)
monitor_id = sa.Column(sa.String(36),
sa.ForeignKey("healthmonitors.id"),
primary_key=True)
class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase,
base_db.CommonDbMixin):
"""Wraps loadbalancer with SQLAlchemy models.
A class that wraps the implementation of the Neutron loadbalancer
plugin database access interface using SQLAlchemy models.
"""
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
def update_status(self, context, model, id, status,
status_description=None):
with context.session.begin(subtransactions=True):
if issubclass(model, Vip):
try:
v_db = (self._model_query(context, model).
filter(model.id == id).
options(orm.noload('port')).
one())
except exc.NoResultFound:
raise loadbalancer.VipNotFound(vip_id=id)
else:
v_db = self._get_resource(context, model, id)
if v_db.status != status:
v_db.status = status
# update status_description in two cases:
# - new value is passed
# - old value is not None (needs to be updated anyway)
if status_description or v_db['status_description']:
v_db.status_description = status_description
def _get_resource(self, context, model, id):
try:
r = self._get_by_id(context, model, id)
except exc.NoResultFound:
with excutils.save_and_reraise_exception(reraise=False) as ctx:
if issubclass(model, Vip):
raise loadbalancer.VipNotFound(vip_id=id)
elif issubclass(model, Pool):
raise loadbalancer.PoolNotFound(pool_id=id)
elif issubclass(model, Member):
raise loadbalancer.MemberNotFound(member_id=id)
elif issubclass(model, HealthMonitor):
raise loadbalancer.HealthMonitorNotFound(monitor_id=id)
ctx.reraise = True
return r
def assert_modification_allowed(self, obj):
status = getattr(obj, 'status', None)
if status == constants.PENDING_DELETE:
id = getattr(obj, 'id', None)
raise loadbalancer.StateInvalid(id=id, state=status)
########################################################
# VIP DB access
def _make_vip_dict(self, vip, fields=None):
fixed_ip = {}
# it's possible that vip doesn't have created port yet
if vip.port:
fixed_ip = (vip.port.fixed_ips or [{}])[0]
res = {'id': vip['id'],
'tenant_id': vip['tenant_id'],
'name': vip['name'],
'description': vip['description'],
'subnet_id': fixed_ip.get('subnet_id'),
'address': fixed_ip.get('ip_address'),
'port_id': vip['port_id'],
'protocol_port': vip['protocol_port'],
'protocol': vip['protocol'],
'pool_id': vip['pool_id'],
'session_persistence': None,
'connection_limit': vip['connection_limit'],
'admin_state_up': vip['admin_state_up'],
'status': vip['status'],
'status_description': vip['status_description']}
if vip['session_persistence']:
s_p = {
'type': vip['session_persistence']['type']
}
if vip['session_persistence']['type'] == 'APP_COOKIE':
s_p['cookie_name'] = vip['session_persistence']['cookie_name']
res['session_persistence'] = s_p
return self._fields(res, fields)
def _check_session_persistence_info(self, info):
"""Performs sanity check on session persistence info.
:param info: Session persistence info
"""
if info['type'] == 'APP_COOKIE':
if not info.get('cookie_name'):
raise ValueError(_("'cookie_name' should be specified for this"
" type of session persistence."))
else:
if 'cookie_name' in info:
raise ValueError(_("'cookie_name' is not allowed for this type"
" of session persistence"))
def _create_session_persistence_db(self, session_info, vip_id):
self._check_session_persistence_info(session_info)
sesspersist_db = SessionPersistence(
type=session_info['type'],
cookie_name=session_info.get('cookie_name'),
vip_id=vip_id)
return sesspersist_db
def _update_vip_session_persistence(self, context, vip_id, info):
self._check_session_persistence_info(info)
vip = self._get_resource(context, Vip, vip_id)
with context.session.begin(subtransactions=True):
# Update sessionPersistence table
sess_qry = context.session.query(SessionPersistence)
sesspersist_db = sess_qry.filter_by(vip_id=vip_id).first()
# Insert a None cookie_info if it is not present to overwrite an
# an existing value in the database.
if 'cookie_name' not in info:
info['cookie_name'] = None
if sesspersist_db:
sesspersist_db.update(info)
else:
sesspersist_db = SessionPersistence(
type=info['type'],
cookie_name=info['cookie_name'],
vip_id=vip_id)
context.session.add(sesspersist_db)
# Update vip table
vip.session_persistence = sesspersist_db
context.session.add(vip)
def _delete_session_persistence(self, context, vip_id):
with context.session.begin(subtransactions=True):
sess_qry = context.session.query(SessionPersistence)
sess_qry.filter_by(vip_id=vip_id).delete()
def _create_port_for_vip(self, context, vip_db, subnet_id, ip_address):
# resolve subnet and create port
subnet = self._core_plugin.get_subnet(context, subnet_id)
fixed_ip = {'subnet_id': subnet['id']}
if ip_address and ip_address != n_constants.ATTR_NOT_SPECIFIED:
fixed_ip['ip_address'] = ip_address
if subnet.get('gateway_ip') == ip_address:
raise n_exc.IpAddressInUse(net_id=subnet['network_id'],
ip_address=ip_address)
port_data = {
'tenant_id': vip_db.tenant_id,
'name': 'vip-' + vip_db.id,
'network_id': subnet['network_id'],
'mac_address': n_constants.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': '',
'device_owner': n_constants.DEVICE_OWNER_LOADBALANCER,
'fixed_ips': [fixed_ip]
}
port = self._core_plugin.create_port(context, {'port': port_data})
vip_db.port_id = port['id']
# explicitly sync session with db
context.session.flush()
def create_vip(self, context, vip):
v = vip['vip']
tenant_id = v['tenant_id']
with context.session.begin(subtransactions=True):
if v['pool_id']:
pool = self._get_resource(context, Pool, v['pool_id'])
# validate that the pool has same tenant
if pool['tenant_id'] != tenant_id:
raise n_exc.NotAuthorized()
# validate that the pool has same protocol
if pool['protocol'] != v['protocol']:
raise loadbalancer.ProtocolMismatch(
vip_proto=v['protocol'],
pool_proto=pool['protocol'])
if pool['status'] == constants.PENDING_DELETE:
raise loadbalancer.StateInvalid(state=pool['status'],
id=pool['id'])
vip_db = Vip(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=v['name'],
description=v['description'],
port_id=None,
protocol_port=v['protocol_port'],
protocol=v['protocol'],
pool_id=v['pool_id'],
connection_limit=v['connection_limit'],
admin_state_up=v['admin_state_up'],
status=constants.PENDING_CREATE)
session_info = v['session_persistence']
if session_info:
s_p = self._create_session_persistence_db(
session_info,
vip_db['id'])
vip_db.session_persistence = s_p
try:
context.session.add(vip_db)
context.session.flush()
except exception.DBDuplicateEntry:
raise loadbalancer.VipExists(pool_id=v['pool_id'])
try:
# create a port to reserve address for IPAM
# do it outside the transaction to avoid rpc calls
self._create_port_for_vip(
context, vip_db, v['subnet_id'], v.get('address'))
except Exception:
# catch any kind of exceptions
with excutils.save_and_reraise_exception():
context.session.delete(vip_db)
context.session.flush()
if v['pool_id']:
# fetching pool again
pool = self._get_resource(context, Pool, v['pool_id'])
# (NOTE): we rely on the fact that pool didn't change between
# above block and here
vip_db['pool_id'] = v['pool_id']
pool['vip_id'] = vip_db['id']
# explicitly flush changes as we're outside any transaction
context.session.flush()
return self._make_vip_dict(vip_db)
def update_vip(self, context, id, vip):
v = vip['vip']
sess_persist = v.pop('session_persistence', None)
with context.session.begin(subtransactions=True):
vip_db = self._get_resource(context, Vip, id)
self.assert_modification_allowed(vip_db)
if sess_persist:
self._update_vip_session_persistence(context, id, sess_persist)
else:
self._delete_session_persistence(context, id)
if v:
try:
# in case new pool already has a vip
# update will raise integrity error at first query
old_pool_id = vip_db['pool_id']
vip_db.update(v)
# If the pool_id is changed, we need to update
# the associated pools
if 'pool_id' in v:
new_pool = self._get_resource(context, Pool,
v['pool_id'])
self.assert_modification_allowed(new_pool)
# check that the pool matches the tenant_id
if new_pool['tenant_id'] != vip_db['tenant_id']:
raise n_exc.NotAuthorized()
# validate that the pool has same protocol
if new_pool['protocol'] != vip_db['protocol']:
raise loadbalancer.ProtocolMismatch(
vip_proto=vip_db['protocol'],
pool_proto=new_pool['protocol'])
if new_pool['status'] == constants.PENDING_DELETE:
raise loadbalancer.StateInvalid(
state=new_pool['status'],
id=new_pool['id'])
if old_pool_id:
old_pool = self._get_resource(
context,
Pool,
old_pool_id
)
old_pool['vip_id'] = None
new_pool['vip_id'] = vip_db['id']
except exception.DBDuplicateEntry:
raise loadbalancer.VipExists(pool_id=v['pool_id'])
return self._make_vip_dict(vip_db)
def delete_vip(self, context, id):
with context.session.begin(subtransactions=True):
vip = self._get_resource(context, Vip, id)
qry = context.session.query(Pool)
for pool in qry.filter_by(vip_id=id):
pool.update({"vip_id": None})
context.session.delete(vip)
if vip.port: # this is a Neutron port
self._delete_vip_port(context, vip.port.id)
@db_api.retry_db_errors
def _delete_vip_port(self, context, vip_port_id):
self._core_plugin.delete_port(context, vip_port_id)
def prevent_lbaas_port_deletion(self, context, port_id):
try:
port_db = self._core_plugin._get_port(context, port_id)
except n_exc.PortNotFound:
return
# Check only if the owner is loadbalancer.
if port_db['device_owner'] == n_constants.DEVICE_OWNER_LOADBALANCER:
filters = {'port_id': [port_id]}
if len(self.get_vips(context, filters=filters)) > 0:
reason = _('has device owner %s') % port_db['device_owner']
raise n_exc.ServicePortInUse(port_id=port_db['id'],
reason=reason)
def subscribe(self):
registry.subscribe(
_prevent_lbaas_port_delete_callback, resources.PORT,
events.BEFORE_DELETE)
def get_vip(self, context, id, fields=None):
vip = self._get_resource(context, Vip, id)
return self._make_vip_dict(vip, fields)
def get_vips(self, context, filters=None, fields=None):
return self._get_collection(context, Vip,
self._make_vip_dict,
filters=filters, fields=fields)
########################################################
# Pool DB access
def _make_pool_dict(self, pool, fields=None):
res = {'id': pool['id'],
'tenant_id': pool['tenant_id'],
'name': pool['name'],
'description': pool['description'],
'subnet_id': pool['subnet_id'],
'protocol': pool['protocol'],
'vip_id': pool['vip_id'],
'lb_method': pool['lb_method'],
'admin_state_up': pool['admin_state_up'],
'status': pool['status'],
'status_description': pool['status_description'],
'provider': ''
}
if pool.provider:
res['provider'] = pool.provider.provider_name
# Get the associated members
res['members'] = [member['id'] for member in pool['members']]
# Get the associated health_monitors
res['health_monitors'] = [
monitor['monitor_id'] for monitor in pool['monitors']]
res['health_monitors_status'] = [
{'monitor_id': monitor['monitor_id'],
'status': monitor['status'],
'status_description': monitor['status_description']}
for monitor in pool['monitors']]
return self._fields(res, fields)
def update_pool_stats(self, context, pool_id, data=None):
"""Update a pool with new stats structure."""
data = data or {}
with context.session.begin(subtransactions=True):
pool_db = self._get_resource(context, Pool, pool_id)
self.assert_modification_allowed(pool_db)
pool_db.stats = self._create_pool_stats(context, pool_id, data)
for member, stats in data.get('members', {}).items():
stats_status = stats.get(lb_const.STATS_STATUS)
if stats_status:
self.update_status(context, Member, member, stats_status)
def _create_pool_stats(self, context, pool_id, data=None):
# This is internal method to add pool statistics. It won't
# be exposed to API
if not data:
data = {}
stats_db = PoolStatistics(
pool_id=pool_id,
bytes_in=data.get(lb_const.STATS_IN_BYTES, 0),
bytes_out=data.get(lb_const.STATS_OUT_BYTES, 0),
active_connections=data.get(lb_const.STATS_ACTIVE_CONNECTIONS, 0),
total_connections=data.get(lb_const.STATS_TOTAL_CONNECTIONS, 0)
)
return stats_db
def _delete_pool_stats(self, context, pool_id):
# This is internal method to delete pool statistics. It won't
# be exposed to API
with context.session.begin(subtransactions=True):
stats_qry = context.session.query(PoolStatistics)
try:
stats = stats_qry.filter_by(pool_id=pool_id).one()
except exc.NoResultFound:
raise loadbalancer.PoolStatsNotFound(pool_id=pool_id)
context.session.delete(stats)
def create_pool(self, context, pool):
v = pool['pool']
with context.session.begin(subtransactions=True):
pool_db = Pool(id=uuidutils.generate_uuid(),
tenant_id=v['tenant_id'],
name=v['name'],
description=v['description'],
subnet_id=v['subnet_id'],
protocol=v['protocol'],
lb_method=v['lb_method'],
admin_state_up=v['admin_state_up'],
status=constants.PENDING_CREATE)
pool_db.stats = self._create_pool_stats(context, pool_db['id'])
context.session.add(pool_db)
return self._make_pool_dict(pool_db)
def update_pool(self, context, id, pool):
p = pool['pool']
with context.session.begin(subtransactions=True):
pool_db = self._get_resource(context, Pool, id)
self.assert_modification_allowed(pool_db)
if p:
pool_db.update(p)
return self._make_pool_dict(pool_db)
def _ensure_pool_delete_conditions(self, context, pool_id):
if context.session.query(Vip).filter_by(pool_id=pool_id).first():
raise loadbalancer.PoolInUse(pool_id=pool_id)
def delete_pool(self, context, pool_id):
# Check if the pool is in use
self._ensure_pool_delete_conditions(context, pool_id)
with context.session.begin(subtransactions=True):
self._delete_pool_stats(context, pool_id)
pool_db = self._get_resource(context, Pool, pool_id)
context.session.delete(pool_db)
def get_pool(self, context, id, fields=None):
pool = self._get_resource(context, Pool, id)
return self._make_pool_dict(pool, fields)
def get_pools(self, context, filters=None, fields=None):
collection = self._model_query(context, Pool)
collection = self._apply_filters_to_query(collection, Pool, filters)
return [self._make_pool_dict(c, fields)
for c in collection]
def stats(self, context, pool_id):
with context.session.begin(subtransactions=True):
pool = self._get_resource(context, Pool, pool_id)
stats = pool['stats']
res = {lb_const.STATS_IN_BYTES: stats['bytes_in'],
lb_const.STATS_OUT_BYTES: stats['bytes_out'],
lb_const.STATS_ACTIVE_CONNECTIONS: stats['active_connections'],
lb_const.STATS_TOTAL_CONNECTIONS: stats['total_connections']}
return {'stats': res}
def create_pool_health_monitor(self, context, health_monitor, pool_id):
monitor_id = health_monitor['health_monitor']['id']
with context.session.begin(subtransactions=True):
# To make sure health_monitor exist.
self._get_resource(context, HealthMonitor, monitor_id)
assoc_qry = context.session.query(PoolMonitorAssociation)
assoc = assoc_qry.filter_by(pool_id=pool_id,
monitor_id=monitor_id).first()
if assoc:
raise loadbalancer.PoolMonitorAssociationExists(
monitor_id=monitor_id, pool_id=pool_id)
pool = self._get_resource(context, Pool, pool_id)
assoc = PoolMonitorAssociation(pool_id=pool_id,
monitor_id=monitor_id,
status=constants.PENDING_CREATE)
pool.monitors.append(assoc)
monitors = [monitor['monitor_id'] for monitor in pool['monitors']]
res = {"health_monitor": monitors}
return res
def delete_pool_health_monitor(self, context, id, pool_id):
with context.session.begin(subtransactions=True):
assoc = self._get_pool_health_monitor(context, id, pool_id)
pool = self._get_resource(context, Pool, pool_id)
pool.monitors.remove(assoc)
def _get_pool_health_monitor(self, context, id, pool_id):
try:
assoc_qry = context.session.query(PoolMonitorAssociation)
return assoc_qry.filter_by(monitor_id=id, pool_id=pool_id).one()
except exc.NoResultFound:
raise loadbalancer.PoolMonitorAssociationNotFound(
monitor_id=id, pool_id=pool_id)
def get_pool_health_monitor(self, context, id, pool_id, fields=None):
pool_hm = self._get_pool_health_monitor(context, id, pool_id)
# need to add tenant_id for admin_or_owner policy check to pass
hm = self.get_health_monitor(context, id)
res = {'pool_id': pool_id,
'monitor_id': id,
'status': pool_hm['status'],
'status_description': pool_hm['status_description'],
'tenant_id': hm['tenant_id']}
return self._fields(res, fields)
def update_pool_health_monitor(self, context, id, pool_id,
status, status_description=None):
with context.session.begin(subtransactions=True):
assoc = self._get_pool_health_monitor(context, id, pool_id)
self.assert_modification_allowed(assoc)
assoc.status = status
assoc.status_description = status_description
########################################################
# Member DB access
def _make_member_dict(self, member, fields=None):
res = {'id': member['id'],
'tenant_id': member['tenant_id'],
'pool_id': member['pool_id'],
'address': member['address'],
'protocol_port': member['protocol_port'],
'weight': member['weight'],
'admin_state_up': member['admin_state_up'],
'status': member['status'],
'status_description': member['status_description']}
return self._fields(res, fields)
def create_member(self, context, member):
v = member['member']
try:
with context.session.begin(subtransactions=True):
# ensuring that pool exists
self._get_resource(context, Pool, v['pool_id'])
member_db = Member(id=uuidutils.generate_uuid(),
tenant_id=v['tenant_id'],
pool_id=v['pool_id'],
address=v['address'],
protocol_port=v['protocol_port'],
weight=v['weight'],
admin_state_up=v['admin_state_up'],
status=constants.PENDING_CREATE)
context.session.add(member_db)
return self._make_member_dict(member_db)
except exception.DBDuplicateEntry:
raise loadbalancer.MemberExists(
address=v['address'],
port=v['protocol_port'],
pool=v['pool_id'])
def update_member(self, context, id, member):
v = member['member']
try:
with context.session.begin(subtransactions=True):
member_db = self._get_resource(context, Member, id)
self.assert_modification_allowed(member_db)
if v:
member_db.update(v)
return self._make_member_dict(member_db)
except exception.DBDuplicateEntry:
raise loadbalancer.MemberExists(
address=member_db['address'],
port=member_db['protocol_port'],
pool=member_db['pool_id'])
def delete_member(self, context, id):
with context.session.begin(subtransactions=True):
member_db = self._get_resource(context, Member, id)
context.session.delete(member_db)
def get_member(self, context, id, fields=None):
member = self._get_resource(context, Member, id)
return self._make_member_dict(member, fields)
def get_members(self, context, filters=None, fields=None):
return self._get_collection(context, Member,
self._make_member_dict,
filters=filters, fields=fields)
########################################################
# HealthMonitor DB access
def _make_health_monitor_dict(self, health_monitor, fields=None):
res = {'id': health_monitor['id'],
'tenant_id': health_monitor['tenant_id'],
'type': health_monitor['type'],
'delay': health_monitor['delay'],
'timeout': health_monitor['timeout'],
'max_retries': health_monitor['max_retries'],
'admin_state_up': health_monitor['admin_state_up']}
# no point to add the values below to
# the result if the 'type' is not HTTP/S
if res['type'] in ['HTTP', 'HTTPS']:
for attr in ['url_path', 'http_method', 'expected_codes']:
res[attr] = health_monitor[attr]
res['pools'] = [{'pool_id': p['pool_id'],
'status': p['status'],
'status_description': p['status_description']}
for p in health_monitor.pools]
return self._fields(res, fields)
def create_health_monitor(self, context, health_monitor):
v = health_monitor['health_monitor']
with context.session.begin(subtransactions=True):
# setting ACTIVE status since healthmon is shared DB object
monitor_db = HealthMonitor(id=uuidutils.generate_uuid(),
tenant_id=v['tenant_id'],
type=v['type'],
delay=v['delay'],
timeout=v['timeout'],
max_retries=v['max_retries'],
http_method=v['http_method'],
url_path=v['url_path'],
expected_codes=v['expected_codes'],
admin_state_up=v['admin_state_up'])
context.session.add(monitor_db)
return self._make_health_monitor_dict(monitor_db)
def update_health_monitor(self, context, id, health_monitor):
v = health_monitor['health_monitor']
with context.session.begin(subtransactions=True):
monitor_db = self._get_resource(context, HealthMonitor, id)
self.assert_modification_allowed(monitor_db)
if v:
monitor_db.update(v)
return self._make_health_monitor_dict(monitor_db)
def delete_health_monitor(self, context, id):
"""Delete health monitor object from DB
Raises an error if the monitor has associations with pools
"""
query = self._model_query(context, PoolMonitorAssociation)
has_associations = query.filter_by(monitor_id=id).first()
if has_associations:
raise loadbalancer.HealthMonitorInUse(monitor_id=id)
with context.session.begin(subtransactions=True):
monitor_db = self._get_resource(context, HealthMonitor, id)
context.session.delete(monitor_db)
def get_health_monitor(self, context, id, fields=None):
healthmonitor = self._get_resource(context, HealthMonitor, id)
return self._make_health_monitor_dict(healthmonitor, fields)
def get_health_monitors(self, context, filters=None, fields=None):
return self._get_collection(context, HealthMonitor,
self._make_health_monitor_dict,
filters=filters, fields=fields)
def check_subnet_in_use(self, context, subnet_id):
query = context.session.query(Pool).filter_by(subnet_id=subnet_id)
if query.count():
pool_id = query.one().id
raise n_exc.SubnetInUse(
reason=_LE("Subnet is used by loadbalancer pool %s") % pool_id)
def _prevent_lbaas_port_delete_callback(resource, event, trigger, **kwargs):
context = kwargs['context']
port_id = kwargs['port_id']
port_check = kwargs['port_check']
lbaasplugin = manager.NeutronManager.get_service_plugins().get(
constants.LOADBALANCER)
if lbaasplugin and port_check:
lbaasplugin.prevent_lbaas_port_deletion(context, port_id)
def is_subnet_in_use_callback(resource, event, trigger, **kwargs):
service = manager.NeutronManager.get_service_plugins().get(
constants.LOADBALANCER)
if service:
context = kwargs.get('context')
subnet_id = kwargs.get('subnet_id')
service.check_subnet_in_use(context, subnet_id)
def subscribe():
registry.subscribe(is_subnet_in_use_callback,
resources.SUBNET, events.BEFORE_DELETE)
subscribe()


@@ -13,12 +13,15 @@
 from logging import config as logging_config
 
 from alembic import context
-from neutron.db import model_base
 from oslo_config import cfg
 from oslo_db.sqlalchemy import session
 import sqlalchemy as sa
 from sqlalchemy import event
 
+from neutron.db.migration.alembic_migrations import external
 from neutron.db.migration.models import head  # noqa
+from neutron.db import model_base
+from neutron_lbaas.db.models import head  # noqa
 
 MYSQL_ENGINE = None
 LBAAS_VERSION_TABLE = 'alembic_version_lbaas'
@@ -39,6 +42,15 @@ def set_mysql_engine():
         model_base.BASEV2.__table_args__['mysql_engine'])
 
 
+def include_object(object, name, type_, reflected, compare_to):
+    # external.LBAAS_TABLES is the list of LBaaS v1 tables, now defunct
+    external_tables = set(external.TABLES) - set(external.LBAAS_TABLES)
+
+    if type_ == 'table' and name in external_tables:
+        return False
+    else:
+        return True
+
+
 def run_migrations_offline():
     set_mysql_engine()
@@ -47,6 +59,7 @@ def run_migrations_offline():
         kwargs['url'] = neutron_config.database.connection
     else:
         kwargs['dialect_name'] = neutron_config.database.engine
+    kwargs['include_object'] = include_object
     kwargs['version_table'] = LBAAS_VERSION_TABLE
     context.configure(**kwargs)
@@ -68,6 +81,7 @@ def run_migrations_online():
     context.configure(
         connection=connection,
         target_metadata=target_metadata,
+        include_object=include_object,
         version_table=LBAAS_VERSION_TABLE
     )
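
To make the filter's effect concrete, a minimal standalone sketch (the two
sets are hypothetical stand-ins for external.TABLES and external.LBAAS_TABLES,
which live in neutron):

    # Illustration only: subtracting the v1 tables from the external set
    # lets alembic compare and manage them again, which is what allows the
    # drop-v1 migration below to take ownership of those tables.
    TABLES = {"routers", "vips", "pools"}   # hypothetical external tables
    LBAAS_TABLES = {"vips", "pools"}        # hypothetical v1 subset

    def include_object(obj, name, type_, reflected, compare_to):
        external_tables = set(TABLES) - set(LBAAS_TABLES)
        return not (type_ == "table" and name in external_tables)

    assert include_object(None, "routers", "table", True, None) is False
    assert include_object(None, "vips", "table", True, None) is True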


@@ -1 +1 @@
-4b4dc6d5d843
+e6417a8b114d


@@ -0,0 +1,40 @@
# Copyright 2016 <PUT YOUR NAME/COMPANY HERE>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Drop v1 tables
Revision ID: e6417a8b114d
Create Date: 2016-08-23 12:48:46.985939
"""
from alembic import op
revision = 'e6417a8b114d'
down_revision = '4b4dc6d5d843'
def upgrade():
op.drop_table('nsxv_edge_pool_mappings')
op.drop_table('nsxv_edge_vip_mappings')
op.drop_table('nsxv_edge_monitor_mappings')
op.drop_table('members')
op.drop_table('poolstatisticss')
op.drop_table('poolloadbalanceragentbindings')
op.drop_table('poolmonitorassociations')
op.drop_table('pools')
op.drop_table('sessionpersistences')
op.drop_table('vips')
op.drop_table('healthmonitors')
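
The drop order matters: tables holding foreign keys are dropped before the
tables they reference. A quick standalone check (dependency sets read off the
v1 models deleted earlier in this diff; the FK for
poolloadbalanceragentbindings is assumed, and the nsxv mapping tables are
omitted):

    # Each table maps to the tables its foreign keys point at.
    deps = {
        "members": {"pools"},
        "poolstatisticss": {"pools"},
        "poolloadbalanceragentbindings": {"pools"},  # assumed FK
        "poolmonitorassociations": {"pools", "healthmonitors"},
        "pools": {"vips"},  # Pool.vip_id -> vips.id
        "sessionpersistences": {"vips"},
        "vips": set(),
        "healthmonitors": set(),
    }
    order = ["members", "poolstatisticss", "poolloadbalanceragentbindings",
             "poolmonitorassociations", "pools", "sessionpersistences",
             "vips", "healthmonitors"]
    dropped = set()
    for table in order:
        # A table must be dropped while its FK targets still exist.
        assert not (deps[table] & dropped), table
        dropped.add(table)
    print("drop order is FK-safe")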


@@ -15,10 +15,7 @@
 from neutron.db.migration.models import head
 
 import neutron_lbaas.agent_scheduler  # noqa
-import neutron_lbaas.db.loadbalancer.loadbalancer_db  # noqa
 import neutron_lbaas.db.loadbalancer.models  # noqa
-import neutron_lbaas.services.loadbalancer.agent_scheduler  # noqa
-import neutron_lbaas.services.loadbalancer.drivers.vmware.models  # noqa
 
 
 def get_metadata():


@@ -29,11 +29,9 @@ from oslo_utils import excutils
 
 from neutron_lbaas._i18n import _, _LI, _LE, _LW
 from neutron_lbaas.agent import agent_device_driver
+from neutron_lbaas.drivers.haproxy import jinja_cfg
 from neutron_lbaas.services.loadbalancer import constants as lb_const
 from neutron_lbaas.services.loadbalancer import data_models
-from neutron_lbaas.services.loadbalancer.drivers.haproxy import jinja_cfg
-from neutron_lbaas.services.loadbalancer.drivers.haproxy \
-    import namespace_driver
 
 LOG = logging.getLogger(__name__)
 NS_PREFIX = 'qlbaas-'
@@ -45,7 +43,31 @@ DRIVER_NAME = 'haproxy_ns'
 
 STATE_PATH_V2_APPEND = 'v2'
 
-cfg.CONF.register_opts(namespace_driver.OPTS, 'haproxy')
+STATE_PATH_DEFAULT = '$state_path/lbaas'
+USER_GROUP_DEFAULT = 'nogroup'
+
+OPTS = [
+    cfg.StrOpt(
+        'loadbalancer_state_path',
+        default=STATE_PATH_DEFAULT,
+        help=_('Location to store config and state files'),
+        deprecated_opts=[cfg.DeprecatedOpt('loadbalancer_state_path',
+                                           group='DEFAULT')],
+    ),
+    cfg.StrOpt(
+        'user_group',
+        default=USER_GROUP_DEFAULT,
+        help=_('The user group'),
+        deprecated_opts=[cfg.DeprecatedOpt('user_group', group='DEFAULT')],
+    ),
+    cfg.IntOpt(
+        'send_gratuitous_arp',
+        default=3,
+        help=_('When delete and re-add the same vip, send this many '
+               'gratuitous ARPs to flush the ARP cache in the Router. '
+               'Set it below or equal to 0 to disable this feature.'),
+    )
+]
+
+cfg.CONF.register_opts(OPTS, 'haproxy')
 
 
 def get_ns_name(namespace_id):
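
Since the options now register under the 'haproxy' group (the deprecated_opts
entries keep the old DEFAULT-group spellings readable), the v2 driver fetches
them via the group attribute. A minimal usage sketch, assuming the
registration above has already run:

    from oslo_config import cfg

    # Values fall back to the defaults declared in OPTS.
    state_path = cfg.CONF.haproxy.loadbalancer_state_path  # '$state_path/lbaas'
    user_group = cfg.CONF.haproxy.user_group               # 'nogroup'
    arp_count = cfg.CONF.haproxy.send_gratuitous_arp       # 3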

View File

@@ -1,138 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.extensions import agent
from neutron import manager
from neutron.plugins.common import constants as plugin_const
from neutron import policy
from neutron import wsgi
from neutron_lib import constants
from neutron_lbaas._i18n import _
from neutron_lbaas.extensions import loadbalancer
LOADBALANCER_POOL = 'loadbalancer-pool'
LOADBALANCER_POOLS = LOADBALANCER_POOL + 's'
LOADBALANCER_AGENT = 'loadbalancer-agent'
class PoolSchedulerController(wsgi.Controller):
def index(self, request, **kwargs):
lbaas_plugin = manager.NeutronManager.get_service_plugins().get(
plugin_const.LOADBALANCER)
if not lbaas_plugin:
return {'pools': []}
policy.enforce(request.context,
"get_%s" % LOADBALANCER_POOLS,
{},
plugin=lbaas_plugin)
return lbaas_plugin.list_pools_on_lbaas_agent(
request.context, kwargs['agent_id'])
class LbaasAgentHostingPoolController(wsgi.Controller):
def index(self, request, **kwargs):
lbaas_plugin = manager.NeutronManager.get_service_plugins().get(
plugin_const.LOADBALANCER)
if not lbaas_plugin:
return
policy.enforce(request.context,
"get_%s" % LOADBALANCER_AGENT,
{},
plugin=lbaas_plugin)
return lbaas_plugin.get_lbaas_agent_hosting_pool(
request.context, kwargs['pool_id'])
class Lbaas_agentscheduler(extensions.ExtensionDescriptor):
"""Extension class supporting LBaaS agent scheduler.
"""
@classmethod
def get_name(cls):
return "Loadbalancer Agent Scheduler"
@classmethod
def get_alias(cls):
return constants.LBAAS_AGENT_SCHEDULER_EXT_ALIAS
@classmethod
def get_description(cls):
return "Schedule pools among lbaas agents"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/lbaas_agent_scheduler/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-02-07T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
parent = dict(member_name="agent",
collection_name="agents")
controller = resource.Resource(PoolSchedulerController(),
base.FAULT_MAP)
exts.append(extensions.ResourceExtension(
LOADBALANCER_POOLS, controller, parent))
parent = dict(member_name="pool",
collection_name="pools")
controller = resource.Resource(LbaasAgentHostingPoolController(),
base.FAULT_MAP)
exts.append(extensions.ResourceExtension(
LOADBALANCER_AGENT, controller, parent,
path_prefix=loadbalancer.LOADBALANCER_PREFIX))
return exts
def get_extended_resources(self, version):
return {}
class NoEligibleLbaasAgent(loadbalancer.NoEligibleBackend):
message = _("No eligible loadbalancer agent found "
"for pool %(pool_id)s.")
class NoActiveLbaasAgent(agent.AgentNotFound):
message = _("No active loadbalancer agent found "
"for pool %(pool_id)s.")
class LbaasAgentSchedulerPluginBase(object):
"""REST API to operate the lbaas agent scheduler.
All of method must be in an admin context.
"""
@abc.abstractmethod
def list_pools_on_lbaas_agent(self, context, id):
pass
@abc.abstractmethod
def get_lbaas_agent_hosting_pool(self, context, pool_id, active=None):
pass


@@ -23,9 +23,9 @@ from neutron import manager
 from neutron.plugins.common import constants as plugin_const
 from neutron import policy
 from neutron import wsgi
+from neutron_lib import exceptions as nexception
 
 from neutron_lbaas._i18n import _
-from neutron_lbaas.extensions import loadbalancer
 from neutron_lbaas.extensions import loadbalancerv2
 from neutron_lbaas.services.loadbalancer import constants as lb_const
@@ -115,7 +115,11 @@ class Lbaas_agentschedulerv2(extensions.ExtensionDescriptor):
         return {}
 
 
-class NoEligibleLbaasAgent(loadbalancer.NoEligibleBackend):
+class NoEligibleBackend(nexception.NotFound):
+    message = _("No eligible backend for pool %(pool_id)s")
+
+
+class NoEligibleLbaasAgent(NoEligibleBackend):
     message = _("No eligible agent found "
                 "for loadbalancer %(loadbalancer_id)s.")


@@ -1,525 +0,0 @@
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.api.v2 import resource_helper
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import service_base
from neutron_lib.api import converters
from neutron_lib.api import validators
from neutron_lib import constants as n_constants
from neutron_lib import exceptions as nexception
from neutron_lbaas._i18n import _
from neutron_lbaas.extensions import loadbalancerv2
from neutron_lbaas.services.loadbalancer import constants as lb_const
LOADBALANCER_PREFIX = "/lb"
# Loadbalancer Exceptions
class DelayOrTimeoutInvalid(nexception.BadRequest):
message = _("Delay must be greater than or equal to timeout")
class NoEligibleBackend(nexception.NotFound):
message = _("No eligible backend for pool %(pool_id)s")
class VipNotFound(nexception.NotFound):
message = _("Vip %(vip_id)s could not be found")
class VipExists(nexception.NeutronException):
message = _("Another Vip already exists for pool %(pool_id)s")
class PoolNotFound(nexception.NotFound):
message = _("Pool %(pool_id)s could not be found")
class MemberNotFound(nexception.NotFound):
message = _("Member %(member_id)s could not be found")
class HealthMonitorNotFound(nexception.NotFound):
message = _("Health_monitor %(monitor_id)s could not be found")
class PoolMonitorAssociationNotFound(nexception.NotFound):
message = _("Monitor %(monitor_id)s is not associated "
"with Pool %(pool_id)s")
class PoolMonitorAssociationExists(nexception.Conflict):
message = _('health_monitor %(monitor_id)s is already associated '
'with pool %(pool_id)s')
class StateInvalid(nexception.NeutronException):
message = _("Invalid state %(state)s of Loadbalancer resource %(id)s")
class PoolInUse(nexception.InUse):
message = _("Pool %(pool_id)s is still in use")
class HealthMonitorInUse(nexception.InUse):
message = _("Health monitor %(monitor_id)s still has associations with "
"pools")
class PoolStatsNotFound(nexception.NotFound):
message = _("Statistics of Pool %(pool_id)s could not be found")
class ProtocolMismatch(nexception.BadRequest):
message = _("Protocol %(vip_proto)s does not match "
"pool protocol %(pool_proto)s")
class MemberExists(nexception.NeutronException):
message = _("Member with address %(address)s and port %(port)s "
"already present in pool %(pool)s")
validators.validators['type:connection_limit'] = (
loadbalancerv2._validate_connection_limit)
RESOURCE_ATTRIBUTE_MAP = {
'vips': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'address': {'allow_post': True, 'allow_put': False,
'default': n_constants.ATTR_NOT_SPECIFIED,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'port_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'protocol_port': {'allow_post': True, 'allow_put': False,
'validate': {'type:range': [1, 65535]},
'convert_to': converters.convert_to_int,
'is_visible': True},
'protocol': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
'is_visible': True},
'pool_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid': None},
'is_visible': True},
'session_persistence': {'allow_post': True, 'allow_put': True,
'convert_to':
converters.convert_none_to_empty_dict,
'default': {},
'validate': {
'type:dict_or_empty': {
'type': {'type:values': ['APP_COOKIE',
'HTTP_COOKIE',
'SOURCE_IP'],
'required': True},
'cookie_name': {'type:string': None,
'required': False}}},
'is_visible': True},
'connection_limit': {'allow_post': True, 'allow_put': True,
'validate': {'type:connection_limit':
lb_const.MIN_CONNECT_VALUE},
'default': lb_const.MIN_CONNECT_VALUE,
'convert_to': converters.convert_to_int,
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': converters.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_description': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'pools': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'vip_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'protocol': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
'is_visible': True},
'provider': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None}, 'is_visible': True,
'default': n_constants.ATTR_NOT_SPECIFIED},
'lb_method': {'allow_post': True, 'allow_put': True,
'validate': {'type:values': ['ROUND_ROBIN',
'LEAST_CONNECTIONS',
'SOURCE_IP']},
'is_visible': True},
'members': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'health_monitors': {'allow_post': True, 'allow_put': True,
'default': None,
'validate': {'type:uuid_list': None},
'convert_to': converters.convert_to_list,
'is_visible': True},
'health_monitors_status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': converters.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_description': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'members': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'pool_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid': None},
'is_visible': True},
'address': {'allow_post': True, 'allow_put': False,
'validate': {'type:ip_address': None},
'is_visible': True},
'protocol_port': {'allow_post': True, 'allow_put': False,
'validate': {'type:range': [1, 65535]},
'convert_to': converters.convert_to_int,
'is_visible': True},
'weight': {'allow_post': True, 'allow_put': True,
'default': 1,
'validate': {'type:range': [0, 256]},
'convert_to': converters.convert_to_int,
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': converters.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_description': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'health_monitors': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'type': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['PING', 'TCP', 'HTTP', 'HTTPS']},
'is_visible': True},
'delay': {'allow_post': True, 'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': converters.convert_to_int,
'is_visible': True},
'timeout': {'allow_post': True, 'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': converters.convert_to_int,
'is_visible': True},
'max_retries': {'allow_post': True, 'allow_put': True,
'validate': {'type:range': [1, 10]},
'convert_to': converters.convert_to_int,
'is_visible': True},
'http_method': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': 'GET',
'is_visible': True},
'url_path': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '/',
'is_visible': True},
'expected_codes': {'allow_post': True, 'allow_put': True,
'validate': {
'type:regex':
r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'},
'default': '200',
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': converters.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_description': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'pools': {'allow_post': False, 'allow_put': False,
'is_visible': True}
}
}
SUB_RESOURCE_ATTRIBUTE_MAP = {
'health_monitors': {
'parent': {'collection_name': 'pools',
'member_name': 'pool'},
'parameters': {'id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
}
}
}
lbaas_quota_opts = [
cfg.IntOpt('quota_vip',
default=10,
help=_('Number of vips allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_pool',
default=10,
help=_('Number of pools allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_member',
default=-1,
help=_('Number of pool members allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_health_monitor',
default=-1,
help=_('Number of health monitors allowed per tenant. '
'A negative value means unlimited.'))
]
cfg.CONF.register_opts(lbaas_quota_opts, 'QUOTAS')
class Loadbalancer(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "LoadBalancing service"
@classmethod
def get_alias(cls):
return "lbaas"
@classmethod
def get_description(cls):
return "Extension for LoadBalancing service"
@classmethod
def get_namespace(cls):
return "http://wiki.openstack.org/neutron/LBaaS/API_1.0"
@classmethod
def get_updated(cls):
return "2012-10-07T10:00:00-00:00"
@classmethod
def get_resources(cls):
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
plural_mappings['health_monitors_status'] = 'health_monitor_status'
attr.PLURALS.update(plural_mappings)
action_map = {'pool': {'stats': 'GET'}}
resources = resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.LOADBALANCER,
action_map=action_map,
register_quota=True)
plugin = manager.NeutronManager.get_service_plugins()[
constants.LOADBALANCER]
for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
# Special handling needed for sub-resources with 'y' ending
# (e.g. proxies -> proxy)
resource_name = collection_name[:-1]
parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent')
params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get(
'parameters')
controller = base.create_resource(collection_name, resource_name,
plugin, params,
allow_bulk=True,
parent=parent)
resource = extensions.ResourceExtension(
collection_name,
controller, parent,
path_prefix=LOADBALANCER_PREFIX,
attr_map=params)
resources.append(resource)
return resources
@classmethod
def get_plugin_interface(cls):
return LoadBalancerPluginBase
def update_attributes_map(self, attributes, extension_attrs_map=None):
super(Loadbalancer, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class LoadBalancerPluginBase(service_base.ServicePluginBase):
def get_plugin_name(self):
return constants.LOADBALANCER
def get_plugin_type(self):
return constants.LOADBALANCER
def get_plugin_description(self):
return 'LoadBalancer service plugin'
@abc.abstractmethod
def get_vips(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_vip(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_vip(self, context, vip):
pass
@abc.abstractmethod
def update_vip(self, context, id, vip):
pass
@abc.abstractmethod
def delete_vip(self, context, id):
pass
@abc.abstractmethod
def get_pools(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_pool(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_pool(self, context, pool):
pass
@abc.abstractmethod
def update_pool(self, context, id, pool):
pass
@abc.abstractmethod
def delete_pool(self, context, id):
pass
@abc.abstractmethod
def stats(self, context, pool_id):
pass
@abc.abstractmethod
def create_pool_health_monitor(self, context, health_monitor, pool_id):
pass
@abc.abstractmethod
def get_pool_health_monitor(self, context, id, pool_id, fields=None):
pass
@abc.abstractmethod
def delete_pool_health_monitor(self, context, id, pool_id):
pass
@abc.abstractmethod
def get_members(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_member(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_member(self, context, member):
pass
@abc.abstractmethod
def update_member(self, context, id, member):
pass
@abc.abstractmethod
def delete_member(self, context, id):
pass
@abc.abstractmethod
def get_health_monitors(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_health_monitor(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_health_monitor(self, context, health_monitor):
pass
@abc.abstractmethod
def update_health_monitor(self, context, id, health_monitor):
pass
@abc.abstractmethod
def delete_health_monitor(self, context, id):
pass


@@ -24,11 +24,6 @@ import neutron_lbaas.drivers.common.agent_driver_base
 import neutron_lbaas.drivers.octavia.driver
 import neutron_lbaas.drivers.radware.base_v2_driver
 import neutron_lbaas.extensions.loadbalancerv2
-import neutron_lbaas.services.loadbalancer.agent.agent_manager
-import neutron_lbaas.services.loadbalancer.drivers.haproxy.jinja_cfg
-import neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver
-import neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver
-import neutron_lbaas.services.loadbalancer.drivers.radware.driver
 
 
 def list_agent_opts():
@@ -36,13 +31,9 @@ def list_agent_opts():
('DEFAULT',
itertools.chain(
neutron_lbaas.agent.agent.OPTS,
neutron_lbaas.services.loadbalancer.agent.agent_manager.OPTS,
neutron.agent.linux.interface.OPTS,
neutron.agent.common.config.INTERFACE_DRIVER_OPTS)
),
('haproxy',
neutron_lbaas.services.loadbalancer.drivers.haproxy.
namespace_driver.OPTS)
)
]
@@ -67,25 +58,15 @@ def list_opts():
def list_service_opts():
return [
('radware',
neutron_lbaas.services.loadbalancer.drivers.radware.driver.
driver_opts),
('radwarev2',
neutron_lbaas.drivers.radware.base_v2_driver.driver_opts),
('radwarev2_debug',
neutron_lbaas.drivers.radware.base_v2_driver.driver_debug_opts),
('netscaler_driver',
neutron_lbaas.services.loadbalancer.drivers.netscaler.
netscaler_driver.NETSCALER_CC_OPTS),
('haproxy',
itertools.chain(
neutron.agent.common.config.INTERFACE_DRIVER_OPTS,
neutron_lbaas.agent.agent.OPTS,
neutron_lbaas.services.loadbalancer.drivers.haproxy.
namespace_driver.OPTS,
neutron_lbaas.services.loadbalancer.drivers.haproxy.jinja_cfg.
jinja_opts)
),
)),
('octavia',
neutron_lbaas.drivers.octavia.driver.OPTS)
]


@@ -1,65 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.common import config as common_config
from neutron.common import rpc as n_rpc
from oslo_config import cfg
from oslo_service import service
from neutron_lbaas._i18n import _
from neutron_lbaas.services.loadbalancer.agent import agent_manager as manager
from neutron_lbaas.services.loadbalancer import constants as l_const
OPTS = [
cfg.IntOpt(
'periodic_interval',
default=10,
help=_('Seconds between periodic task runs')
)
]
class LbaasAgentService(n_rpc.Service):
def start(self):
super(LbaasAgentService, self).start()
self.tg.add_timer(
cfg.CONF.periodic_interval,
self.manager.run_periodic_tasks,
None,
None
)
def main():
cfg.CONF.register_opts(OPTS)
cfg.CONF.register_opts(manager.OPTS)
# import interface options just in case the driver uses namespaces
cfg.CONF.register_opts(interface.OPTS)
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_agent_state_opts_helper(cfg.CONF)
common_config.init(sys.argv[1:])
config.setup_logging()
mgr = manager.LbaasAgentManager(cfg.CONF)
svc = LbaasAgentService(
host=cfg.CONF.host,
topic=l_const.LOADBALANCER_AGENT,
manager=mgr
)
service.launch(cfg.CONF, svc).wait()


@@ -1,68 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import rpc as n_rpc
import oslo_messaging
class LbaasAgentApi(object):
"""Agent side of the Agent to Plugin RPC API."""
# history
# 1.0 Initial version
# 2.0 Generic API for agent based drivers
# - get_logical_device() handling changed on plugin side;
# - pool_deployed() and update_status() methods added;
def __init__(self, topic, context, host):
self.context = context
self.host = host
target = oslo_messaging.Target(topic=topic, version='2.0')
self.client = n_rpc.get_client(target)
def get_ready_devices(self):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'get_ready_devices', host=self.host)
def pool_destroyed(self, pool_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'pool_destroyed', pool_id=pool_id)
def pool_deployed(self, pool_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'pool_deployed', pool_id=pool_id)
def get_logical_device(self, pool_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'get_logical_device', pool_id=pool_id)
def update_status(self, obj_type, obj_id, status):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'update_status', obj_type=obj_type,
obj_id=obj_id, status=status)
def plug_vip_port(self, port_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'plug_vip_port', port_id=port_id,
host=self.host)
def unplug_vip_port(self, port_id):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'unplug_vip_port', port_id=port_id,
host=self.host)
def update_pool_stats(self, pool_id, stats):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'update_pool_stats', pool_id=pool_id,
stats=stats, host=self.host)
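
For reference, the agent built this proxy once at startup and then issued blocking calls over it; a minimal usage sketch, assuming oslo.messaging and the Neutron config are already initialized, and using the v1 topic constant 'n-lbaas-plugin':

# Hypothetical usage sketch of the agent-side RPC proxy.
from neutron import context as ncontext

ctx = ncontext.get_admin_context_without_session()
api = LbaasAgentApi(topic='n-lbaas-plugin', context=ctx, host='lb-agent-1')

for pool_id in api.get_ready_devices():        # RPC call to the plugin
    config = api.get_logical_device(pool_id)   # pool/vip/members/driver dict
    print(config['driver'], config['pool']['id'])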


@@ -1,96 +0,0 @@
# Copyright 2013 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AgentDeviceDriver(object):
"""Abstract device driver that defines the API required by LBaaS agent."""
@abc.abstractmethod
def get_name(self):
"""Returns unique name across all LBaaS device drivers."""
pass
@abc.abstractmethod
def deploy_instance(self, logical_config):
"""Fully deploys a loadbalancer instance from a given config."""
pass
@abc.abstractmethod
def undeploy_instance(self, pool_id, **kwargs):
"""Fully undeploys the loadbalancer instance."""
pass
@abc.abstractmethod
def get_stats(self, pool_id):
pass
def remove_orphans(self, known_pool_ids):
# Not all drivers will support this
raise NotImplementedError()
@abc.abstractmethod
def create_vip(self, vip):
pass
@abc.abstractmethod
def update_vip(self, old_vip, vip):
pass
@abc.abstractmethod
def delete_vip(self, vip):
pass
@abc.abstractmethod
def create_pool(self, pool):
pass
@abc.abstractmethod
def update_pool(self, old_pool, pool):
pass
@abc.abstractmethod
def delete_pool(self, pool):
pass
@abc.abstractmethod
def create_member(self, member):
pass
@abc.abstractmethod
def update_member(self, old_member, member):
pass
@abc.abstractmethod
def delete_member(self, member):
pass
@abc.abstractmethod
def create_pool_health_monitor(self, health_monitor, pool_id):
pass
@abc.abstractmethod
def update_pool_health_monitor(self,
old_health_monitor,
health_monitor,
pool_id):
pass
@abc.abstractmethod
def delete_pool_health_monitor(self, health_monitor, pool_id):
pass
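
A concrete driver only had to fill in these hooks; a hedged skeleton (illustrative name, remaining methods elided) of what satisfying the interface looked like:

# Hypothetical skeleton: logs calls instead of touching a real device.
# The elided vip/pool/member/health-monitor hooks must all be implemented
# before Python allows instantiation of an AgentDeviceDriver subclass.
class LoggingNoopDriver(AgentDeviceDriver):
    def get_name(self):
        return 'logging_noop'

    def deploy_instance(self, logical_config):
        print('deploy pool %s' % logical_config['pool']['id'])

    def undeploy_instance(self, pool_id, **kwargs):
        print('undeploy pool %s' % pool_id)

    def get_stats(self, pool_id):
        return {}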


@@ -1,348 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent import rpc as agent_rpc
from neutron import context as ncontext
from neutron.plugins.common import constants as np_const
from neutron.services import provider_configuration as provconfig
from neutron_lib import constants as n_const
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import importutils
from neutron_lbaas._i18n import _, _LE, _LI
from neutron_lbaas.services.loadbalancer.agent import agent_api
from neutron_lbaas.services.loadbalancer import constants as l_const
LOG = logging.getLogger(__name__)
DEVICE_DRIVERS = 'device_drivers'
OPTS = [
cfg.MultiStrOpt(
'device_driver',
default=['neutron_lbaas.services.loadbalancer.drivers'
'.haproxy.namespace_driver.HaproxyNSDriver'],
help=_('Drivers used to manage loadbalancing devices'),
),
]
class DeviceNotFoundOnAgent(n_exc.NotFound):
message = _('Unknown device with pool_id %(pool_id)s')
class LbaasAgentManager(periodic_task.PeriodicTasks):
# history
# 1.0 Initial version
# 1.1 Support agent_updated call
# 2.0 Generic API for agent based drivers
# - modify/reload/destroy_pool methods were removed;
# - added methods to handle create/update/delete for every lbaas
# object individually;
target = oslo_messaging.Target(version='2.0')
def __init__(self, conf):
super(LbaasAgentManager, self).__init__(conf)
self.conf = conf
self.context = ncontext.get_admin_context_without_session()
self.plugin_rpc = agent_api.LbaasAgentApi(
l_const.LOADBALANCER_PLUGIN,
self.context,
self.conf.host
)
self._load_drivers()
self.agent_state = {
'binary': 'neutron-lbaas-agent',
'host': conf.host,
'topic': l_const.LOADBALANCER_AGENT,
'configurations': {'device_drivers': self.device_drivers.keys()},
'agent_type': n_const.AGENT_TYPE_LOADBALANCER,
'start_flag': True}
self.admin_state_up = True
self._setup_state_rpc()
self.needs_resync = False
# pool_id->device_driver_name mapping used to store known instances
self.instance_mapping = {}
def _load_drivers(self):
self.device_drivers = {}
for driver in self.conf.device_driver:
driver = provconfig.get_provider_driver_class(driver,
DEVICE_DRIVERS)
try:
driver_inst = importutils.import_object(
driver,
self.conf,
self.plugin_rpc
)
except ImportError:
msg = _('Error importing loadbalancer device driver: %s')
raise SystemExit(msg % driver)
driver_name = driver_inst.get_name()
if driver_name not in self.device_drivers:
self.device_drivers[driver_name] = driver_inst
else:
msg = _('Multiple device drivers with the same name found: %s')
raise SystemExit(msg % driver_name)
def _setup_state_rpc(self):
self.state_rpc = agent_rpc.PluginReportStateAPI(
l_const.LOADBALANCER_PLUGIN)
report_interval = self.conf.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
instance_count = len(self.instance_mapping)
self.agent_state['configurations']['instances'] = instance_count
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def initialize_service_hook(self, started_by):
self.sync_state()
@periodic_task.periodic_task
def periodic_resync(self, context):
if self.needs_resync:
self.needs_resync = False
self.sync_state()
@periodic_task.periodic_task(spacing=6)
def collect_stats(self, context):
for pool_id, driver_name in self.instance_mapping.items():
driver = self.device_drivers[driver_name]
try:
stats = driver.get_stats(pool_id)
if stats:
self.plugin_rpc.update_pool_stats(pool_id, stats)
except Exception:
LOG.exception(_LE('Error updating statistics on pool %s'),
pool_id)
self.needs_resync = True
def sync_state(self):
known_instances = set(self.instance_mapping.keys())
try:
ready_instances = set(self.plugin_rpc.get_ready_devices())
for deleted_id in known_instances - ready_instances:
self._destroy_pool(deleted_id)
for pool_id in ready_instances:
self._reload_pool(pool_id)
except Exception:
LOG.exception(_LE('Unable to retrieve ready devices'))
self.needs_resync = True
self.remove_orphans()
def _get_driver(self, pool_id):
if pool_id not in self.instance_mapping:
raise DeviceNotFoundOnAgent(pool_id=pool_id)
driver_name = self.instance_mapping[pool_id]
return self.device_drivers[driver_name]
def _reload_pool(self, pool_id):
try:
logical_config = self.plugin_rpc.get_logical_device(pool_id)
driver_name = logical_config['driver']
if driver_name not in self.device_drivers:
LOG.error(_LE('No device driver on agent: %s.'), driver_name)
self.plugin_rpc.update_status(
'pool', pool_id, np_const.ERROR)
return
self.device_drivers[driver_name].deploy_instance(logical_config)
self.instance_mapping[pool_id] = driver_name
self.plugin_rpc.pool_deployed(pool_id)
except Exception:
LOG.exception(_LE('Unable to deploy instance for pool: %s'),
pool_id)
self.needs_resync = True
def _destroy_pool(self, pool_id):
driver = self._get_driver(pool_id)
try:
driver.undeploy_instance(pool_id, delete_namespace=True)
del self.instance_mapping[pool_id]
self.plugin_rpc.pool_destroyed(pool_id)
except Exception:
LOG.exception(_LE('Unable to destroy device for pool: %s'),
pool_id)
self.needs_resync = True
def remove_orphans(self):
for driver_name in self.device_drivers:
pool_ids = [pool_id for pool_id in self.instance_mapping
if self.instance_mapping[pool_id] == driver_name]
try:
self.device_drivers[driver_name].remove_orphans(pool_ids)
except NotImplementedError:
pass # Not all drivers will support this
def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver):
LOG.exception(_LE('%(operation)s %(obj)s %(id)s failed on device '
'driver %(driver)s'),
{'operation': operation.capitalize(), 'obj': obj_type,
'id': obj_id, 'driver': driver})
self.plugin_rpc.update_status(obj_type, obj_id, np_const.ERROR)
def _update_status(self, obj_type, obj_id, admin_state_up):
if admin_state_up:
self.plugin_rpc.update_status(obj_type, obj_id, np_const.ACTIVE)
else:
self.plugin_rpc.update_status(obj_type, obj_id, l_const.DISABLED)
def create_vip(self, context, vip):
driver = self._get_driver(vip['pool_id'])
try:
driver.create_vip(vip)
except Exception:
self._handle_failed_driver_call('create', 'vip', vip['id'],
driver.get_name())
else:
self._update_status('vip', vip['id'], vip['admin_state_up'])
def update_vip(self, context, old_vip, vip):
driver = self._get_driver(vip['pool_id'])
try:
driver.update_vip(old_vip, vip)
except Exception:
self._handle_failed_driver_call('update', 'vip', vip['id'],
driver.get_name())
else:
self._update_status('vip', vip['id'], vip['admin_state_up'])
def delete_vip(self, context, vip):
driver = self._get_driver(vip['pool_id'])
driver.delete_vip(vip)
def create_pool(self, context, pool, driver_name):
if driver_name not in self.device_drivers:
LOG.error(_LE('No device driver on agent: %s.'), driver_name)
self.plugin_rpc.update_status('pool', pool['id'], np_const.ERROR)
return
driver = self.device_drivers[driver_name]
try:
driver.create_pool(pool)
except Exception:
self._handle_failed_driver_call('create', 'pool', pool['id'],
driver.get_name())
else:
self.instance_mapping[pool['id']] = driver_name
self._update_status('pool', pool['id'], pool['admin_state_up'])
def update_pool(self, context, old_pool, pool):
driver = self._get_driver(pool['id'])
try:
driver.update_pool(old_pool, pool)
except Exception:
self._handle_failed_driver_call('update', 'pool', pool['id'],
driver.get_name())
else:
self._update_status('pool', pool['id'], pool['admin_state_up'])
def delete_pool(self, context, pool):
driver = self._get_driver(pool['id'])
driver.delete_pool(pool)
del self.instance_mapping[pool['id']]
def create_member(self, context, member):
driver = self._get_driver(member['pool_id'])
try:
driver.create_member(member)
except Exception:
self._handle_failed_driver_call('create', 'member', member['id'],
driver.get_name())
else:
self._update_status('member', member['id'],
member['admin_state_up'])
def update_member(self, context, old_member, member):
driver = self._get_driver(member['pool_id'])
try:
driver.update_member(old_member, member)
except Exception:
self._handle_failed_driver_call('update', 'member', member['id'],
driver.get_name())
else:
self._update_status('member', member['id'],
member['admin_state_up'])
def delete_member(self, context, member):
driver = self._get_driver(member['pool_id'])
driver.delete_member(member)
def create_pool_health_monitor(self, context, health_monitor, pool_id):
driver = self._get_driver(pool_id)
assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
try:
driver.create_pool_health_monitor(health_monitor, pool_id)
except Exception:
self._handle_failed_driver_call(
'create', 'health_monitor', assoc_id, driver.get_name())
else:
self._update_status('health_monitor', assoc_id,
health_monitor['admin_state_up'])
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
driver = self._get_driver(pool_id)
assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
try:
driver.update_pool_health_monitor(old_health_monitor,
health_monitor,
pool_id)
except Exception:
self._handle_failed_driver_call(
'update', 'health_monitor', assoc_id, driver.get_name())
else:
self._update_status('health_monitor', assoc_id,
health_monitor['admin_state_up'])
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
driver = self._get_driver(pool_id)
driver.delete_pool_health_monitor(health_monitor, pool_id)
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
if payload['admin_state_up'] != self.admin_state_up:
self.admin_state_up = payload['admin_state_up']
if self.admin_state_up:
self.needs_resync = True
else:
# Copy keys because the dict is modified in the loop body
for pool_id in list(self.instance_mapping.keys()):
LOG.info(_LI("Destroying pool %s due to agent disabling"),
pool_id)
self._destroy_pool(pool_id)
LOG.info(_LI("Agent_updated by server side %s!"), payload)


@@ -1,164 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import sys
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import model_base
from neutron_lib import constants
from oslo_log import log as logging
import six
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from abc import abstractmethod
from neutron_lbaas._i18n import _LW
from neutron_lbaas.extensions import lbaas_agentscheduler
LOG = logging.getLogger(__name__)
class PoolLoadbalancerAgentBinding(model_base.BASEV2):
"""Represents binding between neutron loadbalancer pools and agents."""
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("pools.id", ondelete='CASCADE'),
primary_key=True)
agent = orm.relation(agents_db.Agent)
agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id",
ondelete='CASCADE'),
nullable=False)
class LbaasAgentSchedulerDbMixin(agentschedulers_db.AgentSchedulerDbMixin,
lbaas_agentscheduler
.LbaasAgentSchedulerPluginBase):
def get_lbaas_agent_hosting_pool(self, context, pool_id, active=None):
query = context.session.query(PoolLoadbalancerAgentBinding)
query = query.options(joinedload('agent'))
binding = query.get(pool_id)
if (binding and self.is_eligible_agent(
active, binding.agent)):
return {'agent': self._make_agent_dict(binding.agent)}
def get_lbaas_agents(self, context, active=None, filters=None):
query = context.session.query(agents_db.Agent)
query = query.filter_by(agent_type=constants.AGENT_TYPE_LOADBALANCER)
if active is not None:
query = query.filter_by(admin_state_up=active)
if filters:
for key, value in six.iteritems(filters):
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
return [agent
for agent in query
if self.is_eligible_agent(active, agent)]
def list_pools_on_lbaas_agent(self, context, id):
query = context.session.query(PoolLoadbalancerAgentBinding.pool_id)
query = query.filter_by(agent_id=id)
pool_ids = [item[0] for item in query]
if pool_ids:
return {'pools': self.get_pools(context, filters={'id': pool_ids})}
else:
return {'pools': []}
def num_of_pools_on_lbaas_agent(self, context, id):
query = context.session.query(PoolLoadbalancerAgentBinding.pool_id)
query = query.filter_by(agent_id=id)
return query.count()
def get_lbaas_agent_candidates(self, device_driver, active_agents):
candidates = []
for agent in active_agents:
agent_conf = self.get_configuration_dict(agent)
if device_driver in agent_conf['device_drivers']:
candidates.append(agent)
return candidates
class SchedulerBase(object):
def schedule(self, plugin, context, pool, device_driver):
"""Schedule the pool to an active loadbalancer agent if there
is no enabled agent hosting it.
"""
with context.session.begin(subtransactions=True):
lbaas_agent = plugin.get_lbaas_agent_hosting_pool(
context, pool['id'])
if lbaas_agent:
LOG.debug('Pool %(pool_id)s has already been hosted'
' by lbaas agent %(agent_id)s',
{'pool_id': pool['id'],
'agent_id': lbaas_agent['id']})
return
active_agents = plugin.get_lbaas_agents(context, active=True)
if not active_agents:
LOG.warning(_LW('No active lbaas agents for pool %s'),
pool['id'])
return
candidates = plugin.get_lbaas_agent_candidates(device_driver,
active_agents)
if not candidates:
LOG.warning(_LW('No lbaas agent supporting device driver %s'),
device_driver)
return
chosen_agent = self._schedule(candidates, plugin, context)
binding = PoolLoadbalancerAgentBinding()
binding.agent = chosen_agent
binding.pool_id = pool['id']
context.session.add(binding)
LOG.debug('Pool %(pool_id)s is scheduled to lbaas agent '
'%(agent_id)s',
{'pool_id': pool['id'],
'agent_id': chosen_agent['id']})
return chosen_agent
@abstractmethod
def _schedule(self, candidates, plugin, context):
pass
class ChanceScheduler(SchedulerBase):
def _schedule(self, candidates, plugin, context):
"""Allocate a loadbalancer agent for a vip in a random way."""
return random.choice(candidates)
class LeastPoolAgentScheduler(SchedulerBase):
def _schedule(self, candidates, plugin, context):
"""Pick an agent with least number of pools from candidates"""
current_min_pool_num = sys.maxsize
# SchedulerBase.schedule() already checks for empty candidates
for tmp_agent in candidates:
tmp_pool_num = plugin.num_of_pools_on_lbaas_agent(
context, tmp_agent['id'])
if current_min_pool_num > tmp_pool_num:
current_min_pool_num = tmp_pool_num
chosen_agent = tmp_agent
return chosen_agent
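
Since SchedulerBase.schedule() owns the binding logic, a new policy only needs a _schedule() body; a hedged sketch of a custom scheduler:

# Hypothetical scheduler: deterministically picks the first eligible agent.
class FirstAgentScheduler(SchedulerBase):
    def _schedule(self, candidates, plugin, context):
        # SchedulerBase.schedule() already guarantees candidates is non-empty.
        return candidates[0]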


@@ -155,10 +155,6 @@ AGENT_TYPE_LOADBALANCERV2 = 'Loadbalancerv2 agent'
LOADBALANCER_PLUGINV2 = 'n-lbaasv2-plugin'
LOADBALANCER_AGENTV2 = 'n-lbaasv2_agent'
# LBaaS V1 Agent Constants
LOADBALANCER_PLUGIN = 'n-lbaas-plugin'
LOADBALANCER_AGENT = 'n-lbaas_agent'
LOADBALANCER = "LOADBALANCER"
LOADBALANCERV2 = "LOADBALANCERV2"


@@ -1,48 +0,0 @@
A10 Networks LBaaS Driver
Installation info:
To use this driver, you must:
- Install the a10-neutron-lbaas module. (E.g.: 'pip install a10-neutron-lbaas')
- Create a driver config file, a sample of which is given below.
- Enable it in neutron.conf
- Restart neutron-server
Third-party CI info:
Contact for any problems: a10-openstack-ci at a10networks dot com,
or contact Doug Wiegley directly (IRC: dougwig).
Configuration file:
Create a configuration file with a list of A10 appliances, similar to the
file below, located at:
/etc/neutron/services/loadbalancer/a10networks/config.py
Or you can override that directory by setting the environment
variable A10_CONFIG_DIR.
Example config file:
devices = {
"ax1": {
"name": "ax1",
"host": "10.10.100.20",
"port": 443,
"protocol": "https",
"username": "admin",
"password": "a10",
"status": True,
"autosnat": False,
"api_version": "2.1",
"v_method": "LSI",
"max_instance": 5000,
"use_float": False,
"method": "hash"
},
"ax4": {
"host": "10.10.100.23",
"username": "admin",
"password": "a10",
},
}
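
For the "Enable it in neutron.conf" step above, the v1 driver was selected via a service_provider entry in the [service_providers] section; the exact class path depends on the installed package, but it took roughly this shape:

service_provider = LOADBALANCER:A10Networks:neutron_lbaas.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default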


@@ -1,176 +0,0 @@
# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import a10_neutron_lbaas
from neutron.db import l3_db
from neutron.plugins.common import constants
from oslo_log import log as logging
from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db
from neutron_lbaas.services.loadbalancer.drivers import abstract_driver
VERSION = "1.0.0"
LOG = logging.getLogger(__name__)
# Most driver calls below are straight passthroughs to the A10 package
# 'a10_neutron_lbaas'. Any function that has not been fully abstracted
# into the openstack driver/plugin interface is NOT passed through, to
# make it obvious which hidden interfaces/db calls we rely on.
class ThunderDriver(abstract_driver.LoadBalancerAbstractDriver):
def __init__(self, plugin):
LOG.debug("A10Driver: init version=%s", VERSION)
self.plugin = plugin
# Map the string types to neutron classes/functions, in order to keep
# from reaching into the bowels of Neutron from anywhere but this file.
self.neutron_map = {
'member': {
'model': lb_db.Member,
'delete_func': self.plugin._delete_db_member,
},
'pool': {
'model': lb_db.Pool,
'delete_func': self.plugin._delete_db_pool,
},
'vip': {
'model': lb_db.Vip,
'delete_func': self.plugin._delete_db_vip,
},
}
LOG.debug("A10Driver: initializing, version=%s, lbaas_manager=%s",
VERSION, a10_neutron_lbaas.VERSION)
self.a10 = a10_neutron_lbaas.A10OpenstackLBV1(self)
# The following private helper methods are used by a10_neutron_lbaas,
# and reflect the neutron interfaces required by that package.
def _hm_binding_count(self, context, hm_id):
return context.session.query(lb_db.PoolMonitorAssociation).filter_by(
monitor_id=hm_id).join(lb_db.Pool).count()
def _member_count(self, context, member):
return context.session.query(lb_db.Member).filter_by(
tenant_id=member['tenant_id'],
address=member['address']).count()
def _member_get(self, context, member_id):
return self.plugin.get_member(context, member_id)
def _member_get_ip(self, context, member, use_float=False):
ip_address = member['address']
if use_float:
fip_qry = context.session.query(l3_db.FloatingIP)
if fip_qry.filter_by(fixed_ip_address=ip_address).count() > 0:
float_address = fip_qry.filter_by(
fixed_ip_address=ip_address).first()
ip_address = str(float_address.floating_ip_address)
return ip_address
def _pool_get_hm(self, context, hm_id):
return self.plugin.get_health_monitor(context, hm_id)
def _pool_get_tenant_id(self, context, pool_id):
pool_qry = context.session.query(lb_db.Pool).filter_by(id=pool_id)
z = pool_qry.first()
if z:
return z.tenant_id
else:
return ''
def _pool_get_vip_id(self, context, pool_id):
pool_qry = context.session.query(lb_db.Pool).filter_by(id=pool_id)
z = pool_qry.first()
if z:
return z.vip_id
else:
return ''
def _pool_total(self, context, tenant_id):
return context.session.query(lb_db.Pool).filter_by(
tenant_id=tenant_id).count()
def _vip_get(self, context, vip_id):
return self.plugin.get_vip(context, vip_id)
def _active(self, context, model_type, model_id):
self.plugin.update_status(context,
self.neutron_map[model_type]['model'],
model_id,
constants.ACTIVE)
def _failed(self, context, model_type, model_id):
self.plugin.update_status(context,
self.neutron_map[model_type]['model'],
model_id,
constants.ERROR)
def _db_delete(self, context, model_type, model_id):
self.neutron_map[model_type]['delete_func'](context, model_id)
def _hm_active(self, context, hm_id, pool_id):
self.plugin.update_pool_health_monitor(context, hm_id, pool_id,
constants.ACTIVE)
def _hm_failed(self, context, hm_id, pool_id):
self.plugin.update_pool_health_monitor(context, hm_id, pool_id,
constants.ERROR)
def _hm_db_delete(self, context, hm_id, pool_id):
self.plugin._delete_db_pool_health_monitor(context, hm_id, pool_id)
# Pass-through driver
def create_vip(self, context, vip):
self.a10.vip.create(context, vip)
def update_vip(self, context, old_vip, vip):
self.a10.vip.update(context, old_vip, vip)
def delete_vip(self, context, vip):
self.a10.vip.delete(context, vip)
def create_pool(self, context, pool):
self.a10.pool.create(context, pool)
def update_pool(self, context, old_pool, pool):
self.a10.pool.update(context, old_pool, pool)
def delete_pool(self, context, pool):
self.a10.pool.delete(context, pool)
def stats(self, context, pool_id):
return self.a10.pool.stats(context, pool_id)
def create_member(self, context, member):
self.a10.member.create(context, member)
def update_member(self, context, old_member, member):
self.a10.member.update(context, old_member, member)
def delete_member(self, context, member):
self.a10.member.delete(context, member)
def update_pool_health_monitor(self, context, old_hm, hm, pool_id):
self.a10.hm.update(context, old_hm, hm, pool_id)
def create_pool_health_monitor(self, context, hm, pool_id):
self.a10.hm.create(context, hm, pool_id)
def delete_pool_health_monitor(self, context, hm, pool_id):
self.a10.hm.delete(context, hm, pool_id)
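
The neutron_map table above is the entire "hidden interface" surface; a distilled, standalone sketch of the dispatch pattern it implements:

# Illustrative only: resolve the handler for a type string once, so the
# rest of the driver never imports Neutron internals directly.
def make_db_delete(type_map):
    def _db_delete(context, model_type, model_id):
        type_map[model_type]['delete_func'](context, model_id)
    return _db_delete

demo_map = {'vip': {'delete_func': lambda ctx, mid: print('deleted vip', mid)}}
db_delete = make_db_delete(demo_map)
db_delete(None, 'vip', 'abc-123')  # -> deleted vip abc-123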


@@ -1,134 +0,0 @@
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
#
# DEPRECATION WARNING. THIS ABSTRACT DRIVER IS FOR THE LBAAS V1 OBJECT
# MODEL AND SHOULD NO LONGER BE USED TO CREATE DRIVERS.
#
# PLEASE REFER TO driver_base.py and driver_mixins.py for the newest
# lbaas driver base classes.
#
@six.add_metaclass(abc.ABCMeta)
class LoadBalancerAbstractDriver(object):
"""Abstract lbaas driver that expose ~same API as lbaas plugin.
The configuration elements (Vip,Member,etc) are the dicts that
are returned to the tenant.
Get operations are not part of the API - it will be handled
by the lbaas plugin.
"""
@abc.abstractmethod
def create_vip(self, context, vip):
"""A real driver would invoke a call to his backend
and set the Vip status to ACTIVE/ERROR according
to the backend call result
self.plugin.update_status(context, Vip, vip["id"],
constants.ACTIVE)
"""
pass
@abc.abstractmethod
def update_vip(self, context, old_vip, vip):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context, Vip, id, constants.ACTIVE)
"""
pass
@abc.abstractmethod
def delete_vip(self, context, vip):
"""A real driver would invoke a call to his backend
and try to delete the Vip.
if the deletion was successful, delete the record from the database.
if the deletion has failed, set the Vip status to ERROR.
"""
pass
@abc.abstractmethod
def create_pool(self, context, pool):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context, Pool, pool["id"],
constants.ACTIVE)
"""
pass
@abc.abstractmethod
def update_pool(self, context, old_pool, pool):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context,
Pool,
pool["id"], constants.ACTIVE)
"""
pass
@abc.abstractmethod
def delete_pool(self, context, pool):
"""Driver can call the code below in order to delete the pool.
self.plugin._delete_db_pool(context, pool["id"])
or set the status to ERROR if deletion failed
"""
pass
@abc.abstractmethod
def stats(self, context, pool_id):
pass
@abc.abstractmethod
def create_member(self, context, member):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context, Member, member["id"],
constants.ACTIVE)
"""
pass
@abc.abstractmethod
def update_member(self, context, old_member, member):
"""Driver may call the code below in order to update the status.
self.plugin.update_status(context, Member,
member["id"], constants.ACTIVE)
"""
pass
@abc.abstractmethod
def delete_member(self, context, member):
pass
@abc.abstractmethod
def update_pool_health_monitor(self, context,
old_health_monitor,
health_monitor,
pool_id):
pass
@abc.abstractmethod
def create_pool_health_monitor(self, context,
health_monitor,
pool_id):
"""Driver may call the code below in order to update the status.
self.plugin.update_pool_health_monitor(context,
health_monitor["id"],
pool_id,
constants.ACTIVE)
"""
pass
@abc.abstractmethod
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
pass


@@ -1,444 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from neutron.common import rpc as n_rpc
from neutron.db import agents_db
from neutron.extensions import portbindings
from neutron.plugins.common import constants as np_const
from neutron.services import provider_configuration as provconf
from neutron_lib import constants as q_const
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import importutils
from neutron_lbaas._i18n import _, _LW
from neutron_lbaas.db.loadbalancer import loadbalancer_db
from neutron_lbaas.extensions import lbaas_agentscheduler
from neutron_lbaas.services.loadbalancer import constants as l_const
from neutron_lbaas.services.loadbalancer.drivers import abstract_driver
LOG = logging.getLogger(__name__)
POOL_SCHEDULERS = 'pool_schedulers'
AGENT_SCHEDULER_OPTS = [
cfg.StrOpt('loadbalancer_pool_scheduler_driver',
default='neutron_lbaas.services.loadbalancer.agent_scheduler'
'.ChanceScheduler',
help=_('Driver to use for scheduling '
'pool to a default loadbalancer agent')),
]
cfg.CONF.register_opts(AGENT_SCHEDULER_OPTS)
class DriverNotSpecified(n_exc.NeutronException):
message = _("Device driver for agent should be specified "
"in plugin driver.")
class LoadBalancerCallbacks(object):
# history
# 1.0 Initial version
# 2.0 Generic API for agent based drivers
# - get_logical_device() handling changed;
# - pool_deployed() and update_status() methods added;
target = oslo_messaging.Target(version='2.0')
def __init__(self, plugin):
super(LoadBalancerCallbacks, self).__init__()
self.plugin = plugin
def get_ready_devices(self, context, host=None):
with context.session.begin(subtransactions=True):
agents = self.plugin.get_lbaas_agents(context,
filters={'host': [host]})
if not agents:
return []
elif len(agents) > 1:
LOG.warning(_LW('Multiple lbaas agents found on host %s'),
host)
pools = self.plugin.list_pools_on_lbaas_agent(context,
agents[0].id)
pool_ids = [pool['id'] for pool in pools['pools']]
qry = context.session.query(loadbalancer_db.Pool.id)
qry = qry.filter(loadbalancer_db.Pool.id.in_(pool_ids))
qry = qry.filter(
loadbalancer_db.Pool.status.in_(
np_const.ACTIVE_PENDING_STATUSES))
up = True # makes pep8 and sqlalchemy happy
qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up)
return [id for id, in qry]
def get_logical_device(self, context, pool_id=None):
with context.session.begin(subtransactions=True):
qry = context.session.query(loadbalancer_db.Pool)
qry = qry.filter_by(id=pool_id)
pool = qry.one()
retval = {}
retval['pool'] = self.plugin._make_pool_dict(pool)
if pool.vip:
retval['vip'] = self.plugin._make_vip_dict(pool.vip)
retval['vip']['port'] = (
self.plugin._core_plugin._make_port_dict(pool.vip.port)
)
for fixed_ip in retval['vip']['port']['fixed_ips']:
fixed_ip['subnet'] = (
self.plugin._core_plugin.get_subnet(
context,
fixed_ip['subnet_id']
)
)
retval['members'] = [
self.plugin._make_member_dict(m)
for m in pool.members if (
m.status in np_const.ACTIVE_PENDING_STATUSES or
m.status == np_const.INACTIVE)
]
retval['healthmonitors'] = [
self.plugin._make_health_monitor_dict(hm.healthmonitor)
for hm in pool.monitors
if hm.status in np_const.ACTIVE_PENDING_STATUSES
]
retval['driver'] = (
self.plugin.drivers[pool.provider.provider_name].device_driver)
return retval
def pool_deployed(self, context, pool_id):
with context.session.begin(subtransactions=True):
qry = context.session.query(loadbalancer_db.Pool)
qry = qry.filter_by(id=pool_id)
pool = qry.one()
# set all resources to active
if pool.status in np_const.ACTIVE_PENDING_STATUSES:
pool.status = np_const.ACTIVE
if (pool.vip and pool.vip.status in
np_const.ACTIVE_PENDING_STATUSES):
pool.vip.status = np_const.ACTIVE
for m in pool.members:
if m.status in np_const.ACTIVE_PENDING_STATUSES:
m.status = np_const.ACTIVE
for hm in pool.monitors:
if hm.status in np_const.ACTIVE_PENDING_STATUSES:
hm.status = np_const.ACTIVE
def update_status(self, context, obj_type, obj_id, status):
model_mapping = {
'pool': loadbalancer_db.Pool,
'vip': loadbalancer_db.Vip,
'member': loadbalancer_db.Member,
'health_monitor': loadbalancer_db.PoolMonitorAssociation
}
if obj_type not in model_mapping:
raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
try:
if obj_type == 'health_monitor':
self.plugin.update_pool_health_monitor(
context, obj_id['monitor_id'], obj_id['pool_id'], status)
else:
self.plugin.update_status(
context, model_mapping[obj_type], obj_id, status)
except n_exc.NotFound:
# update_status may come from an agent for an object which was
# already deleted from the db by another request
LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s '
'not found in the DB, it was probably deleted '
'concurrently'),
{'obj_type': obj_type, 'obj_id': obj_id})
def pool_destroyed(self, context, pool_id=None):
"""Agent confirmation hook that a pool has been destroyed.
This method exists for subclasses to change the deletion
behavior.
"""
pass
def plug_vip_port(self, context, port_id=None, host=None):
if not port_id:
return
try:
port = self.plugin._core_plugin.get_port(
context,
port_id
)
except n_exc.PortNotFound:
LOG.debug('Unable to find port %s to plug.', port_id)
return
port['admin_state_up'] = True
port['device_owner'] = 'neutron:' + np_const.LOADBALANCER
port['device_id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host)))
port[portbindings.HOST_ID] = host
self.plugin._core_plugin.update_port(
context,
port_id,
{'port': port}
)
def unplug_vip_port(self, context, port_id=None, host=None):
if not port_id:
return
try:
port = self.plugin._core_plugin.get_port(
context,
port_id
)
except n_exc.PortNotFound:
LOG.debug('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.',
port_id)
return
port['admin_state_up'] = False
port['device_owner'] = ''
port['device_id'] = ''
try:
self.plugin._core_plugin.update_port(
context,
port_id,
{'port': port}
)
except n_exc.PortNotFound:
LOG.debug('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.',
port_id)
def update_pool_stats(self, context, pool_id=None, stats=None, host=None):
self.plugin.update_pool_stats(context, pool_id, data=stats)
class LoadBalancerAgentApi(object):
"""Plugin side of plugin to agent RPC API."""
# history
# 1.0 Initial version
# 1.1 Support agent_updated call
# 2.0 Generic API for agent based drivers
# - modify/reload/destroy_pool methods were removed;
# - added methods to handle create/update/delete for every lbaas
# object individually;
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='2.0')
self.client = n_rpc.get_client(target)
def create_vip(self, context, vip, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'create_vip', vip=vip)
def update_vip(self, context, old_vip, vip, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'update_vip', old_vip=old_vip, vip=vip)
def delete_vip(self, context, vip, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'delete_vip', vip=vip)
def create_pool(self, context, pool, host, driver_name):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'create_pool', pool=pool, driver_name=driver_name)
def update_pool(self, context, old_pool, pool, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'update_pool', old_pool=old_pool, pool=pool)
def delete_pool(self, context, pool, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'delete_pool', pool=pool)
def create_member(self, context, member, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'create_member', member=member)
def update_member(self, context, old_member, member, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'update_member', old_member=old_member,
member=member)
def delete_member(self, context, member, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'delete_member', member=member)
def create_pool_health_monitor(self, context, health_monitor, pool_id,
host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'create_pool_health_monitor',
health_monitor=health_monitor, pool_id=pool_id)
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'update_pool_health_monitor',
old_health_monitor=old_health_monitor,
health_monitor=health_monitor, pool_id=pool_id)
def delete_pool_health_monitor(self, context, health_monitor, pool_id,
host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'delete_pool_health_monitor',
health_monitor=health_monitor, pool_id=pool_id)
def agent_updated(self, context, admin_state_up, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(context, 'agent_updated',
payload={'admin_state_up': admin_state_up})
class AgentDriverBase(abstract_driver.LoadBalancerAbstractDriver):
# name of device driver that should be used by the agent;
# vendor specific plugin drivers must override it;
device_driver = None
def __init__(self, plugin):
if not self.device_driver:
raise DriverNotSpecified()
self.agent_rpc = LoadBalancerAgentApi(l_const.LOADBALANCER_AGENT)
self.plugin = plugin
self._set_callbacks_on_plugin()
self.plugin.agent_notifiers.update(
{q_const.AGENT_TYPE_LOADBALANCER: self.agent_rpc})
pool_sched_driver = provconf.get_provider_driver_class(
cfg.CONF.loadbalancer_pool_scheduler_driver, POOL_SCHEDULERS)
self.pool_scheduler = importutils.import_object(pool_sched_driver)
def _set_callbacks_on_plugin(self):
# another agent-based plugin driver might already have set callbacks on the plugin
if hasattr(self.plugin, 'agent_callbacks'):
return
self.plugin.agent_endpoints = [
LoadBalancerCallbacks(self.plugin),
agents_db.AgentExtRpcCallback(self.plugin)
]
self.plugin.conn = n_rpc.create_connection()
self.plugin.conn.create_consumer(
l_const.LOADBALANCER_PLUGIN,
self.plugin.agent_endpoints,
fanout=False)
self.plugin.conn.consume_in_threads()
def get_pool_agent(self, context, pool_id):
agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool_id)
if not agent:
raise lbaas_agentscheduler.NoActiveLbaasAgent(pool_id=pool_id)
return agent['agent']
def create_vip(self, context, vip):
agent = self.get_pool_agent(context, vip['pool_id'])
self.agent_rpc.create_vip(context, vip, agent['host'])
def update_vip(self, context, old_vip, vip):
agent = self.get_pool_agent(context, vip['pool_id'])
if vip['status'] in np_const.ACTIVE_PENDING_STATUSES:
self.agent_rpc.update_vip(context, old_vip, vip, agent['host'])
else:
self.agent_rpc.delete_vip(context, vip, agent['host'])
def delete_vip(self, context, vip):
self.plugin._delete_db_vip(context, vip['id'])
agent = self.get_pool_agent(context, vip['pool_id'])
self.agent_rpc.delete_vip(context, vip, agent['host'])
def create_pool(self, context, pool):
agent = self.pool_scheduler.schedule(self.plugin, context, pool,
self.device_driver)
if not agent:
raise lbaas_agentscheduler.NoEligibleLbaasAgent(pool_id=pool['id'])
self.agent_rpc.create_pool(context, pool, agent['host'],
self.device_driver)
def update_pool(self, context, old_pool, pool):
agent = self.get_pool_agent(context, pool['id'])
if pool['status'] in np_const.ACTIVE_PENDING_STATUSES:
self.agent_rpc.update_pool(context, old_pool, pool,
agent['host'])
else:
self.agent_rpc.delete_pool(context, pool, agent['host'])
def delete_pool(self, context, pool):
# get agent first to know host as binding will be deleted
# after pool is deleted from db
agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool['id'])
self.plugin._delete_db_pool(context, pool['id'])
if agent:
self.agent_rpc.delete_pool(context, pool, agent['agent']['host'])
def create_member(self, context, member):
agent = self.get_pool_agent(context, member['pool_id'])
self.agent_rpc.create_member(context, member, agent['host'])
def update_member(self, context, old_member, member):
agent = self.get_pool_agent(context, member['pool_id'])
# member may change pool id
if member['pool_id'] != old_member['pool_id']:
old_pool_agent = self.plugin.get_lbaas_agent_hosting_pool(
context, old_member['pool_id'])
if old_pool_agent:
self.agent_rpc.delete_member(context, old_member,
old_pool_agent['agent']['host'])
self.agent_rpc.create_member(context, member, agent['host'])
else:
self.agent_rpc.update_member(context, old_member, member,
agent['host'])
def delete_member(self, context, member):
self.plugin._delete_db_member(context, member['id'])
agent = self.get_pool_agent(context, member['pool_id'])
self.agent_rpc.delete_member(context, member, agent['host'])
def create_pool_health_monitor(self, context, healthmon, pool_id):
# healthmon is not used here
agent = self.get_pool_agent(context, pool_id)
self.agent_rpc.create_pool_health_monitor(context, healthmon,
pool_id, agent['host'])
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
agent = self.get_pool_agent(context, pool_id)
self.agent_rpc.update_pool_health_monitor(context, old_health_monitor,
health_monitor, pool_id,
agent['host'])
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
self.plugin._delete_db_pool_health_monitor(
context, health_monitor['id'], pool_id
)
agent = self.get_pool_agent(context, pool_id)
self.agent_rpc.delete_pool_health_monitor(context, health_monitor,
pool_id, agent['host'])
def stats(self, context, pool_id):
pass
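
As the comment at the top of AgentDriverBase notes, a vendor plugin driver only had to name its device driver; a hedged sketch (illustrative name, which must match the agent driver's get_name()):

class SampleVendorPluginDriver(AgentDriverBase):
    # All RPC plumbing and scheduling is inherited from AgentDriverBase.
    device_driver = 'sample_vendor'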


@@ -1,240 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as qconstants
from six import moves
from neutron_lbaas.services.loadbalancer import constants
PROTOCOL_MAP = {
constants.PROTOCOL_TCP: 'tcp',
constants.PROTOCOL_HTTP: 'http',
constants.PROTOCOL_HTTPS: 'tcp',
}
BALANCE_MAP = {
constants.LB_METHOD_ROUND_ROBIN: 'roundrobin',
constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
constants.LB_METHOD_SOURCE_IP: 'source'
}
STATS_MAP = {
constants.STATS_ACTIVE_CONNECTIONS: 'scur',
constants.STATS_MAX_CONNECTIONS: 'smax',
constants.STATS_CURRENT_SESSIONS: 'scur',
constants.STATS_MAX_SESSIONS: 'smax',
constants.STATS_TOTAL_CONNECTIONS: 'stot',
constants.STATS_TOTAL_SESSIONS: 'stot',
constants.STATS_IN_BYTES: 'bin',
constants.STATS_OUT_BYTES: 'bout',
constants.STATS_CONNECTION_ERRORS: 'econ',
constants.STATS_RESPONSE_ERRORS: 'eresp'
}
ACTIVE_PENDING_STATUSES = qconstants.ACTIVE_PENDING_STATUSES
INACTIVE = qconstants.INACTIVE
def save_config(conf_path, logical_config, socket_path=None,
user_group='nogroup'):
"""Convert a logical configuration to the HAProxy version."""
data = []
data.extend(_build_global(logical_config, socket_path=socket_path,
user_group=user_group))
data.extend(_build_defaults(logical_config))
data.extend(_build_frontend(logical_config))
data.extend(_build_backend(logical_config))
n_utils.replace_file(conf_path, '\n'.join(data))
def _build_global(config, socket_path=None, user_group='nogroup'):
opts = [
'daemon',
'user nobody',
'group %s' % user_group,
'log /dev/log local0',
'log /dev/log local1 notice'
]
if socket_path:
opts.append('stats socket %s mode 0666 level user' % socket_path)
return itertools.chain(['global'], ('\t' + o for o in opts))
def _build_defaults(config):
opts = [
'log global',
'retries 3',
'option redispatch',
'timeout connect 5000',
'timeout client 50000',
'timeout server 50000',
]
return itertools.chain(['defaults'], ('\t' + o for o in opts))
def _build_frontend(config):
protocol = config['vip']['protocol']
opts = [
'option tcplog',
'bind %s:%d' % (
_get_first_ip_from_port(config['vip']['port']),
config['vip']['protocol_port']
),
'mode %s' % PROTOCOL_MAP[protocol],
'default_backend %s' % config['pool']['id'],
]
if config['vip']['connection_limit'] >= 0:
opts.append('maxconn %s' % config['vip']['connection_limit'])
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
if not config['vip']['admin_state_up']:
opts.append('disabled')
return itertools.chain(
['frontend %s' % config['vip']['id']],
('\t' + o for o in opts)
)
def _build_backend(config):
protocol = config['pool']['protocol']
lb_method = config['pool']['lb_method']
opts = [
'mode %s' % PROTOCOL_MAP[protocol],
'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin')
]
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
# add the first health_monitor (if available)
server_addon, health_opts = _get_server_health_option(config)
opts.extend(health_opts)
# add session persistence (if available)
persist_opts = _get_session_persistence(config)
opts.extend(persist_opts)
# add the members
for member in config['members']:
if ((member['status'] in ACTIVE_PENDING_STATUSES or
member['status'] == INACTIVE)
and member['admin_state_up']):
server = (('server %(id)s %(address)s:%(protocol_port)s '
'weight %(weight)s') % member) + server_addon
if _has_http_cookie_persistence(config):
server += ' cookie %s' % member['id']
opts.append(server)
if not config['pool']['admin_state_up']:
opts.append('disabled')
return itertools.chain(
['backend %s' % config['pool']['id']],
('\t' + o for o in opts)
)
def _get_first_ip_from_port(port):
for fixed_ip in port['fixed_ips']:
return fixed_ip['ip_address']
def _get_server_health_option(config):
"""return the first active health option."""
for m in config['healthmonitors']:
# not checking the status of healthmonitor for two reasons:
# 1) status field is absent in HealthMonitor model
# 2) only active HealthMonitors are fetched with
# LoadBalancerCallbacks.get_logical_device
if m['admin_state_up']:
monitor = m
break
else:
return '', []
server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor
opts = [
'timeout check %ds' % monitor['timeout']
]
if monitor['type'] in (constants.HEALTH_MONITOR_HTTP,
constants.HEALTH_MONITOR_HTTPS):
opts.append('option httpchk %(http_method)s %(url_path)s' % monitor)
opts.append(
'http-check expect rstatus %s' %
'|'.join(_expand_expected_codes(monitor['expected_codes']))
)
if monitor['type'] == constants.HEALTH_MONITOR_HTTPS:
opts.append('option ssl-hello-chk')
return server_addon, opts
def _get_session_persistence(config):
persistence = config['vip'].get('session_persistence')
if not persistence:
return []
opts = []
if persistence['type'] == constants.SESSION_PERSISTENCE_SOURCE_IP:
opts.append('stick-table type ip size 10k')
opts.append('stick on src')
elif (persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE and
config.get('members')):
opts.append('cookie SRV insert indirect nocache')
elif (persistence['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and
persistence.get('cookie_name')):
opts.append('appsession %s len 56 timeout 3h' %
persistence['cookie_name'])
return opts
def _has_http_cookie_persistence(config):
return (config['vip'].get('session_persistence') and
config['vip']['session_persistence']['type'] ==
constants.SESSION_PERSISTENCE_HTTP_COOKIE)
def _expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
retval.update(str(i) for i in moves.range(int(low), int(hi) + 1))
else:
retval.add(code)
return retval
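
To make the expansion rules concrete, a small usage sketch (the function is pure, so no Neutron setup is needed; sorted() is used because it returns a set):

print(sorted(_expand_expected_codes('200-204')))
# ['200', '201', '202', '203', '204']
print(sorted(_expand_expected_codes('200, 203')))
# ['200', '203']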


@@ -1,422 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import socket
import netaddr
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import utils as n_utils
from neutron.plugins.common import constants
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from neutron_lbaas._i18n import _, _LE, _LW
from neutron_lbaas.services.loadbalancer.agent import agent_device_driver
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer.drivers.haproxy import cfg as hacfg
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qlbaas-'
DRIVER_NAME = 'haproxy_ns'
STATE_PATH_DEFAULT = '$state_path/lbaas'
USER_GROUP_DEFAULT = 'nogroup'
OPTS = [
cfg.StrOpt(
'loadbalancer_state_path',
default=STATE_PATH_DEFAULT,
help=_('Location to store config and state files'),
deprecated_opts=[cfg.DeprecatedOpt('loadbalancer_state_path',
group='DEFAULT')],
),
cfg.StrOpt(
'user_group',
default=USER_GROUP_DEFAULT,
help=_('The user group'),
deprecated_opts=[cfg.DeprecatedOpt('user_group', group='DEFAULT')],
),
cfg.IntOpt(
'send_gratuitous_arp',
default=3,
help=_('When deleting and re-adding the same VIP, send this many '
'gratuitous ARPs to flush the ARP cache in the router. '
'Set it to 0 or less to disable this feature.'),
)
]
cfg.CONF.register_opts(OPTS, 'haproxy')
class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver):
def __init__(self, conf, plugin_rpc):
self.conf = conf
self.state_path = conf.haproxy.loadbalancer_state_path
try:
vif_driver_class = n_utils.load_class_by_alias_or_classname(
'neutron.interface_drivers',
conf.interface_driver)
except ImportError:
with excutils.save_and_reraise_exception():
msg = (_('Error importing interface driver: %s')
% conf.interface_driver)
LOG.error(msg)
self.vif_driver = vif_driver_class(conf)
self.plugin_rpc = plugin_rpc
self.pool_to_port_id = {}
@classmethod
def get_name(cls):
return DRIVER_NAME
def create(self, logical_config):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
self._plug(namespace, logical_config['vip']['port'],
logical_config['vip']['address'])
self._spawn(logical_config)
def update(self, logical_config):
pool_id = logical_config['pool']['id']
pid_path = self._get_state_file_path(pool_id, 'pid')
extra_args = ['-sf']
        with open(pid_path, 'r') as pid_file:
            extra_args.extend(p.strip() for p in pid_file)
self._spawn(logical_config, extra_args)
def _spawn(self, logical_config, extra_cmd_args=()):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
conf_path = self._get_state_file_path(pool_id, 'conf')
pid_path = self._get_state_file_path(pool_id, 'pid')
sock_path = self._get_state_file_path(pool_id, 'sock')
user_group = self.conf.haproxy.user_group
hacfg.save_config(conf_path, logical_config, sock_path, user_group)
cmd = ['haproxy', '-f', conf_path, '-p', pid_path]
cmd.extend(extra_cmd_args)
ns = ip_lib.IPWrapper(namespace=namespace)
ns.netns.execute(cmd)
# remember the pool<>port mapping
self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id']
@n_utils.synchronized('haproxy-driver')
def undeploy_instance(self, pool_id, **kwargs):
cleanup_namespace = kwargs.get('cleanup_namespace', False)
delete_namespace = kwargs.get('delete_namespace', False)
namespace = get_ns_name(pool_id)
pid_path = self._get_state_file_path(pool_id, 'pid')
# kill the process
kill_pids_in_file(pid_path)
# unplug the ports
if pool_id in self.pool_to_port_id:
self._unplug(namespace, self.pool_to_port_id[pool_id])
# delete all devices from namespace;
# used when deleting orphans and port_id is not known for pool_id
if cleanup_namespace:
ns = ip_lib.IPWrapper(namespace=namespace)
for device in ns.get_devices(exclude_loopback=True):
self.vif_driver.unplug(device.name, namespace=namespace)
# remove the configuration directory
conf_dir = os.path.dirname(self._get_state_file_path(pool_id, ''))
if os.path.isdir(conf_dir):
shutil.rmtree(conf_dir)
if delete_namespace:
ns = ip_lib.IPWrapper(namespace=namespace)
ns.garbage_collect_namespace()
def exists(self, pool_id):
namespace = get_ns_name(pool_id)
root_ns = ip_lib.IPWrapper()
socket_path = self._get_state_file_path(pool_id, 'sock', False)
if root_ns.netns.exists(namespace) and os.path.exists(socket_path):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
return True
except socket.error:
pass
return False
def get_stats(self, pool_id):
socket_path = self._get_state_file_path(pool_id, 'sock', False)
TYPE_BACKEND_REQUEST = 2
TYPE_SERVER_REQUEST = 4
if os.path.exists(socket_path):
parsed_stats = self._get_stats_from_socket(
socket_path,
entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST)
pool_stats = self._get_backend_stats(parsed_stats)
pool_stats['members'] = self._get_servers_stats(parsed_stats)
return pool_stats
else:
LOG.warning(_LW('Stats socket not found for pool %s'), pool_id)
return {}
def _get_backend_stats(self, parsed_stats):
TYPE_BACKEND_RESPONSE = '1'
for stats in parsed_stats:
if stats.get('type') == TYPE_BACKEND_RESPONSE:
unified_stats = dict((k, stats.get(v, ''))
for k, v in hacfg.STATS_MAP.items())
return unified_stats
return {}
def _get_servers_stats(self, parsed_stats):
TYPE_SERVER_RESPONSE = '2'
res = {}
for stats in parsed_stats:
if stats.get('type') == TYPE_SERVER_RESPONSE:
res[stats['svname']] = {
lb_const.STATS_STATUS: (constants.INACTIVE
if stats['status'] == 'DOWN'
else constants.ACTIVE),
lb_const.STATS_HEALTH: stats['check_status'],
lb_const.STATS_FAILED_CHECKS: stats['chkfail']
}
return res
def _get_stats_from_socket(self, socket_path, entity_type):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
s.send('show stat -1 %s -1\n' % entity_type)
raw_stats = ''
chunk_size = 1024
while True:
chunk = s.recv(chunk_size)
raw_stats += chunk
if len(chunk) < chunk_size:
break
return self._parse_stats(raw_stats)
except socket.error as e:
LOG.warning(_LW('Error while connecting to stats socket: %s'), e)
return {}
def _parse_stats(self, raw_stats):
stat_lines = raw_stats.splitlines()
if len(stat_lines) < 2:
return []
stat_names = [name.strip('# ') for name in stat_lines[0].split(',')]
res_stats = []
for raw_values in stat_lines[1:]:
if not raw_values:
continue
stat_values = [value.strip() for value in raw_values.split(',')]
res_stats.append(dict(zip(stat_names, stat_values)))
return res_stats
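    # For reference, haproxy's "show stat" output is CSV whose first line is
    # a '# '-prefixed header, roughly (sample values below are assumptions):
    #   # pxname,svname,...,status,...,check_status,chkfail,...,type,...
    #   <pool_id>,BACKEND,...,UP,...,L4OK,0,...,1,...
    # so _parse_stats() yields one dict per non-empty data row, keyed by the
    # header names.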
def _get_state_file_path(self, pool_id, kind, ensure_state_dir=True):
"""Returns the file name for a given kind of config file."""
confs_dir = os.path.abspath(os.path.normpath(self.state_path))
conf_dir = os.path.join(confs_dir, pool_id)
if ensure_state_dir:
if not os.path.isdir(conf_dir):
os.makedirs(conf_dir, 0o755)
return os.path.join(conf_dir, kind)
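    # The resulting on-disk layout is, for example:
    #   <loadbalancer_state_path>/<pool_id>/conf
    #   <loadbalancer_state_path>/<pool_id>/pid
    #   <loadbalancer_state_path>/<pool_id>/sock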
def _plug(self, namespace, port, vip_address, reuse_existing=True):
self.plugin_rpc.plug_vip_port(port['id'])
interface_name = self.vif_driver.get_device_name(Wrap(port))
if ip_lib.device_exists(interface_name, namespace=namespace):
if not reuse_existing:
raise exceptions.PreexistingDeviceFailure(
dev_name=interface_name
)
else:
self.vif_driver.plug(
port['network_id'],
port['id'],
interface_name,
port['mac_address'],
namespace=namespace
)
cidrs = [
'%s/%s' % (ip['ip_address'],
netaddr.IPNetwork(ip['subnet']['cidr']).prefixlen)
for ip in port['fixed_ips']
]
self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace)
        # Haproxy socket binding to an IPv6 VIP address will fail if this
        # address is not yet ready (i.e. still a tentative address).
if netaddr.IPAddress(vip_address).version == 6:
device = ip_lib.IPDevice(interface_name, namespace=namespace)
device.addr.wait_until_address_ready(vip_address)
gw_ip = port['fixed_ips'][0]['subnet'].get('gateway_ip')
if not gw_ip:
host_routes = port['fixed_ips'][0]['subnet'].get('host_routes', [])
for host_route in host_routes:
if host_route['destination'] == "0.0.0.0/0":
gw_ip = host_route['nexthop']
break
if gw_ip:
cmd = ['route', 'add', 'default', 'gw', gw_ip]
ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
        # When the same VIP is deleted and re-added, we need to send
        # gratuitous ARPs to flush the ARP cache in the router.
gratuitous_arp = self.conf.haproxy.send_gratuitous_arp
if gratuitous_arp > 0:
for ip in port['fixed_ips']:
cmd_arping = ['arping', '-U',
'-I', interface_name,
'-c', gratuitous_arp,
ip['ip_address']]
ip_wrapper.netns.execute(cmd_arping, check_exit_code=False)
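        # With the default send_gratuitous_arp of 3, this runs e.g.:
        #   arping -U -I <interface_name> -c 3 <ip_address>
        # inside the load balancer's network namespace.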
def _unplug(self, namespace, port_id):
port_stub = {'id': port_id}
self.plugin_rpc.unplug_vip_port(port_id)
interface_name = self.vif_driver.get_device_name(Wrap(port_stub))
self.vif_driver.unplug(interface_name, namespace=namespace)
def _is_active(self, logical_config):
        # haproxy will be unable to start without any active vip
if ('vip' not in logical_config or
(logical_config['vip']['status'] not in
constants.ACTIVE_PENDING_STATUSES) or
not logical_config['vip']['admin_state_up']):
return False
        # Not checking the pool's admin_state_up utilizes haproxy's ability
        # to turn a backend off instead of undeploying; in that case
        # "503 Service Unavailable" is returned to clients.
if (logical_config['pool']['status'] not in
constants.ACTIVE_PENDING_STATUSES):
return False
return True
@n_utils.synchronized('haproxy-driver')
def deploy_instance(self, logical_config):
"""Deploys loadbalancer if necessary
:returns: True if loadbalancer was deployed, False otherwise
"""
# do actual deploy only if vip and pool are configured and active
if not logical_config or not self._is_active(logical_config):
return False
if self.exists(logical_config['pool']['id']):
self.update(logical_config)
else:
self.create(logical_config)
return True
def _refresh_device(self, pool_id):
logical_config = self.plugin_rpc.get_logical_device(pool_id)
        # Clean up if the loadbalancer wasn't deployed (nothing to deploy,
        # or an error occurred).
if not self.deploy_instance(logical_config) and self.exists(pool_id):
self.undeploy_instance(pool_id)
def create_vip(self, vip):
self._refresh_device(vip['pool_id'])
def update_vip(self, old_vip, vip):
self._refresh_device(vip['pool_id'])
def delete_vip(self, vip):
self.undeploy_instance(vip['pool_id'])
def create_pool(self, pool):
# nothing to do here because a pool needs a vip to be useful
pass
def update_pool(self, old_pool, pool):
self._refresh_device(pool['id'])
def delete_pool(self, pool):
if self.exists(pool['id']):
self.undeploy_instance(pool['id'], delete_namespace=True)
def create_member(self, member):
self._refresh_device(member['pool_id'])
def update_member(self, old_member, member):
self._refresh_device(member['pool_id'])
def delete_member(self, member):
self._refresh_device(member['pool_id'])
def create_pool_health_monitor(self, health_monitor, pool_id):
self._refresh_device(pool_id)
def update_pool_health_monitor(self, old_health_monitor, health_monitor,
pool_id):
self._refresh_device(pool_id)
def delete_pool_health_monitor(self, health_monitor, pool_id):
self._refresh_device(pool_id)
def remove_orphans(self, known_pool_ids):
if not os.path.exists(self.state_path):
return
orphans = (pool_id for pool_id in os.listdir(self.state_path)
if pool_id not in known_pool_ids)
for pool_id in orphans:
if self.exists(pool_id):
self.undeploy_instance(pool_id, cleanup_namespace=True)
# NOTE(markmcclain): for compliance with interface.py, which expects objects
class Wrap(object):
"""A light attribute wrapper for compatibility with the interface lib."""
def __init__(self, d):
self.__dict__.update(d)
def __getitem__(self, key):
return self.__dict__[key]
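# A hypothetical usage of Wrap: both attribute and item access work, e.g.
#   Wrap({'id': port_id}).id == Wrap({'id': port_id})['id'] == port_id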
def get_ns_name(namespace_id):
return NS_PREFIX + namespace_id
def kill_pids_in_file(pid_path):
if os.path.exists(pid_path):
with open(pid_path, 'r') as pids:
for pid in pids:
pid = pid.strip()
try:
utils.execute(['kill', '-9', pid], run_as_root=True)
except RuntimeError:
LOG.exception(
_LE('Unable to kill haproxy process: %s'),
pid
)

View File

@@ -1,23 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lbaas.services.loadbalancer.drivers.common \
import agent_driver_base
from neutron_lbaas.services.loadbalancer.drivers.haproxy \
import namespace_driver
class HaproxyOnHostPluginDriver(agent_driver_base.AgentDriverBase):
device_driver = namespace_driver.DRIVER_NAME

View File

@@ -1,27 +0,0 @@
# Copyright 2014-2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron_lbaas._i18n import _LW
from neutron_lbaas.drivers.haproxy import synchronous_namespace_driver
LOG = logging.getLogger(__name__)
LOG.warning(_LW("This path has been deprecated. "
"Use neutron_lbaas.drivers.haproxy."
"synchronous_namespace_driver instead."))
class HaproxyNSDriver(synchronous_namespace_driver.HaproxyNSDriver):
pass

View File

@@ -1,23 +0,0 @@
# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron_lbaas._i18n import _LW
from neutron_lbaas.drivers import logging_noop
LOG = logging.getLogger(__name__)
LOG.warning(_LW("This path has been deprecated. "
"Use neutron_lbaas.drivers.logging_noop instead."))
__path__ = logging_noop.__path__

View File

@@ -1,255 +0,0 @@
# Copyright 2014 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from neutron_lib import exceptions as n_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from neutron_lbaas._i18n import _, _LE, _LI
LOG = logging.getLogger(__name__)
CONTENT_TYPE_HEADER = 'Content-type'
ACCEPT_HEADER = 'Accept'
AUTH_HEADER = 'Cookie'
DRIVER_HEADER = 'X-OpenStack-LBaaS'
TENANT_HEADER = 'X-Tenant-ID'
JSON_CONTENT_TYPE = 'application/json'
DRIVER_HEADER_VALUE = 'netscaler-openstack-lbaas'
NITRO_LOGIN_URI = 'nitro/v2/config/login'
class NCCException(n_exc.NeutronException):
"""Represents exceptions thrown by NSClient."""
CONNECTION_ERROR = 1
REQUEST_ERROR = 2
RESPONSE_ERROR = 3
UNKNOWN_ERROR = 4
def __init__(self, error, status=requests.codes.SERVICE_UNAVAILABLE):
self.message = _("NCC Error %d") % error
super(NCCException, self).__init__()
self.error = error
self.status = status
    def is_not_found_exception(self):
        return int(self.status) == requests.codes.NOT_FOUND
class NSClient(object):
"""Client to operate on REST resources of NetScaler Control Center."""
def __init__(self, service_uri, username, password,
ncc_cleanup_mode="False"):
if not service_uri:
LOG.exception(_LE("No NetScaler Control Center URI specified. "
"Cannot connect."))
raise NCCException(NCCException.CONNECTION_ERROR)
self.service_uri = service_uri.strip('/')
self.auth = None
self.cleanup_mode = False
if username and password:
self.username = username
self.password = password
if ncc_cleanup_mode.lower() == "true":
self.cleanup_mode = True
def create_resource(self, tenant_id, resource_path, object_name,
object_data):
"""Create a resource of NetScaler Control Center."""
return self._resource_operation('POST', tenant_id,
resource_path,
object_name=object_name,
object_data=object_data)
    def is_login(self, resource_uri):
        return 'login' in resource_uri.lower()
def login(self):
"""Get session based login"""
login_obj = {"username": self.username, "password": self.password}
msg = "NetScaler driver login:" + repr(login_obj)
LOG.info(msg)
resp_status, result = self.create_resource("login", NITRO_LOGIN_URI,
"login", login_obj)
LOG.info(_LI("Response: status : %(status)s %result(result)s"), {
"status": resp_status, "result": result['body']})
result_body = jsonutils.loads(result['body'])
session_id = None
if result_body and "login" in result_body:
logins = result_body["login"]
if isinstance(logins, list):
login = logins[0]
else:
login = logins
if login and "sessionid" in login:
session_id = login["sessionid"]
if session_id:
LOG.info(_LI("Response: %(result)s"), {"result": result['body']})
            LOG.info(
                _LI("Session_id = %(session_id)s"),
                {"session_id": session_id})
            # Update session_id in auth
self.auth = "SessId=%s" % session_id
else:
raise NCCException(NCCException.RESPONSE_ERROR)
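    # Subsequent requests carry the session back in the Cookie header,
    # e.g. "Cookie: SessId=<sessionid>" (see _setup_req_headers() below).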
def retrieve_resource(self, tenant_id, resource_path, parse_response=True):
"""Retrieve a resource of NetScaler Control Center."""
return self._resource_operation('GET', tenant_id, resource_path)
def update_resource(self, tenant_id, resource_path, object_name,
object_data):
"""Update a resource of the NetScaler Control Center."""
return self._resource_operation('PUT', tenant_id,
resource_path,
object_name=object_name,
object_data=object_data)
def remove_resource(self, tenant_id, resource_path, parse_response=True):
"""Remove a resource of NetScaler Control Center."""
if self.cleanup_mode:
return True
else:
return self._resource_operation('DELETE', tenant_id, resource_path)
def _resource_operation(self, method, tenant_id, resource_path,
object_name=None, object_data=None):
resource_uri = "%s/%s" % (self.service_uri, resource_path)
if not self.auth and not self.is_login(resource_uri):
# Creating a session for the first time
self.login()
headers = self._setup_req_headers(tenant_id)
request_body = None
if object_data:
if isinstance(object_data, str):
request_body = object_data
else:
obj_dict = {object_name: object_data}
request_body = jsonutils.dumps(obj_dict)
try:
response_status, resp_dict = (self.
_execute_request(method,
resource_uri,
headers,
body=request_body))
except NCCException as e:
if e.status == requests.codes.NOT_FOUND and method == 'DELETE':
return 200, {}
else:
raise
return response_status, resp_dict
def _is_valid_response(self, response_status):
# when status is less than 400, the response is fine
return response_status < requests.codes.bad_request
def _setup_req_headers(self, tenant_id):
headers = {ACCEPT_HEADER: JSON_CONTENT_TYPE,
CONTENT_TYPE_HEADER: JSON_CONTENT_TYPE,
DRIVER_HEADER: DRIVER_HEADER_VALUE,
TENANT_HEADER: tenant_id,
AUTH_HEADER: self.auth}
return headers
def _get_response_dict(self, response):
response_dict = {'status': int(response.status_code),
'body': response.text,
'headers': response.headers}
if self._is_valid_response(int(response.status_code)):
if response.text:
response_dict['dict'] = response.json()
return response_dict
def _execute_request(self, method, resource_uri, headers, body=None):
service_uri_dict = {"service_uri": self.service_uri}
try:
response = requests.request(method, url=resource_uri,
headers=headers, data=body)
except requests.exceptions.SSLError:
LOG.exception(_LE("SSL error occurred while connecting "
"to %(service_uri)s"),
service_uri_dict)
raise NCCException(NCCException.CONNECTION_ERROR)
except requests.exceptions.ConnectionError:
LOG.exception(_LE("Connection error occurred while connecting"
"to %(service_uri)s"), service_uri_dict)
raise NCCException(NCCException.CONNECTION_ERROR)
except requests.exceptions.Timeout:
LOG.exception(
_LE("Request to %(service_uri)s timed out"), service_uri_dict)
raise NCCException(NCCException.CONNECTION_ERROR)
except (requests.exceptions.URLRequired,
requests.exceptions.InvalidURL,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema):
LOG.exception(_LE("Request did not specify a valid URL"))
raise NCCException(NCCException.REQUEST_ERROR)
except requests.exceptions.TooManyRedirects:
LOG.exception(_LE("Too many redirects occurred for request "))
raise NCCException(NCCException.REQUEST_ERROR)
except requests.exceptions.RequestException:
            LOG.exception(
                _LE("A request error occurred while connecting to "
                    "%(service_uri)s"), service_uri_dict)
raise NCCException(NCCException.REQUEST_ERROR)
except Exception:
            LOG.exception(
                _LE("An unknown error occurred during the request to "
                    "%(service_uri)s"), service_uri_dict)
raise NCCException(NCCException.UNKNOWN_ERROR)
resp_dict = self._get_response_dict(response)
resp_body = resp_dict['body']
LOG.info(_LI("Response: %(resp_body)s"), {"resp_body": resp_body})
response_status = resp_dict['status']
if response_status == requests.codes.unauthorized:
LOG.exception(_LE("Unable to login. Invalid credentials passed."
"for: %s"), self.service_uri)
if not self.is_login(resource_uri):
                # Session expired; re-login and retry.
self.login()
# Retry the operation
headers.update({AUTH_HEADER: self.auth})
                # Return the retried response rather than falling through
                # with the stale 401 result.
                return self._execute_request(method,
                                             resource_uri,
                                             headers,
                                             body)
else:
raise NCCException(NCCException.RESPONSE_ERROR)
if not self._is_valid_response(response_status):
response_msg = resp_body
response_dict = {"method": method,
"url": resource_uri,
"response_status": response_status,
"response_msg": response_msg}
LOG.exception(_LE("Failed %(method)s operation on %(url)s "
"status code: %(response_status)s "
"message: %(response_msg)s"), response_dict)
raise NCCException(NCCException.RESPONSE_ERROR, response_status)
return response_status, resp_dict
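# A minimal, hypothetical usage sketch (URI and credentials invented):
#   client = NSClient('https://ncc.example.com/admin', 'user', 'secret')
#   status, resp = client.create_resource(tenant_id, 'vips', 'vip', vip_dict)
# On a 401 response the client logs in again via login() and retries the
# request with the refreshed session cookie.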

View File

@@ -1,469 +0,0 @@
# Copyright 2014 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.common import constants
from neutron_lib import constants as n_constants
from oslo_config import cfg
from oslo_log import log as logging
from neutron_lbaas._i18n import _, _LI
from neutron_lbaas.db.loadbalancer import loadbalancer_db
from neutron_lbaas.services.loadbalancer.drivers import abstract_driver
from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client
LOG = logging.getLogger(__name__)
NETSCALER_CC_OPTS = [
cfg.StrOpt(
'netscaler_ncc_uri',
help=_('The URL to reach the NetScaler Control Center Server.'),
),
cfg.StrOpt(
'netscaler_ncc_username',
help=_('Username to login to the NetScaler Control Center Server.'),
),
cfg.StrOpt(
'netscaler_ncc_password',
help=_('Password to login to the NetScaler Control Center Server.'),
)
]
cfg.CONF.register_opts(NETSCALER_CC_OPTS, 'netscaler_driver')
VIPS_RESOURCE = 'vips'
VIP_RESOURCE = 'vip'
POOLS_RESOURCE = 'pools'
POOL_RESOURCE = 'pool'
POOLMEMBERS_RESOURCE = 'members'
POOLMEMBER_RESOURCE = 'member'
MONITORS_RESOURCE = 'healthmonitors'
MONITOR_RESOURCE = 'healthmonitor'
POOLSTATS_RESOURCE = 'statistics'
PROV_SEGMT_ID = 'provider:segmentation_id'
PROV_NET_TYPE = 'provider:network_type'
DRIVER_NAME = 'netscaler_driver'
class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
"""NetScaler LBaaS Plugin driver class."""
def __init__(self, plugin):
self.plugin = plugin
ncc_uri = cfg.CONF.netscaler_driver.netscaler_ncc_uri
ncc_username = cfg.CONF.netscaler_driver.netscaler_ncc_username
ncc_password = cfg.CONF.netscaler_driver.netscaler_ncc_password
self.client = ncc_client.NSClient(ncc_uri,
ncc_username,
ncc_password)
def create_vip(self, context, vip):
"""Create a vip on a NetScaler device."""
network_info = self._get_vip_network_info(context, vip)
ncc_vip = self._prepare_vip_for_creation(vip)
ncc_vip.update(network_info)
LOG.debug("NetScaler driver vip creation: %r", ncc_vip)
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id, VIPS_RESOURCE,
VIP_RESOURCE, ncc_vip)
except ncc_client.NCCException:
status = constants.ERROR
self.plugin.update_status(context, loadbalancer_db.Vip, vip["id"],
status)
def update_vip(self, context, old_vip, vip):
"""Update a vip on a NetScaler device."""
update_vip = self._prepare_vip_for_update(vip)
resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
LOG.debug("NetScaler driver vip %(vip_id)s update: %(vip_obj)r",
{"vip_id": vip["id"], "vip_obj": vip})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
VIP_RESOURCE, update_vip)
except ncc_client.NCCException:
status = constants.ERROR
self.plugin.update_status(context, loadbalancer_db.Vip, old_vip["id"],
status)
def delete_vip(self, context, vip):
"""Delete a vip on a NetScaler device."""
resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
LOG.debug("NetScaler driver vip removal: %s", vip["id"])
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
self.plugin.update_status(context, loadbalancer_db.Vip,
vip["id"],
constants.ERROR)
else:
self.plugin._delete_db_vip(context, vip['id'])
def create_pool(self, context, pool):
"""Create a pool on a NetScaler device."""
network_info = self._get_pool_network_info(context, pool)
        # Allocate a SNAT port/IP address on the subnet if one doesn't exist.
self._create_snatport_for_subnet_if_not_exists(context,
pool['tenant_id'],
pool['subnet_id'],
network_info)
ncc_pool = self._prepare_pool_for_creation(pool)
ncc_pool.update(network_info)
LOG.debug("NetScaler driver pool creation: %r", ncc_pool)
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id, POOLS_RESOURCE,
POOL_RESOURCE, ncc_pool)
except ncc_client.NCCException:
status = constants.ERROR
self.plugin.update_status(context, loadbalancer_db.Pool,
ncc_pool["id"], status)
def update_pool(self, context, old_pool, pool):
"""Update a pool on a NetScaler device."""
ncc_pool = self._prepare_pool_for_update(pool)
resource_path = "%s/%s" % (POOLS_RESOURCE, old_pool["id"])
LOG.debug("NetScaler driver pool %(pool_id)s update: %(pool_obj)r",
{"pool_id": old_pool["id"], "pool_obj": ncc_pool})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
POOL_RESOURCE, ncc_pool)
except ncc_client.NCCException:
status = constants.ERROR
self.plugin.update_status(context, loadbalancer_db.Pool,
old_pool["id"], status)
def delete_pool(self, context, pool):
"""Delete a pool on a NetScaler device."""
resource_path = "%s/%s" % (POOLS_RESOURCE, pool['id'])
LOG.debug("NetScaler driver pool removal: %s", pool["id"])
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
self.plugin.update_status(context, loadbalancer_db.Pool,
pool["id"],
constants.ERROR)
else:
self.plugin._delete_db_pool(context, pool['id'])
self._remove_snatport_for_subnet_if_not_used(context,
pool['tenant_id'],
pool['subnet_id'])
def create_member(self, context, member):
"""Create a pool member on a NetScaler device."""
ncc_member = self._prepare_member_for_creation(member)
LOG.info(_LI("NetScaler driver poolmember creation: %r"),
ncc_member)
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id,
POOLMEMBERS_RESOURCE,
POOLMEMBER_RESOURCE,
ncc_member)
except ncc_client.NCCException:
status = constants.ERROR
self.plugin.update_status(context, loadbalancer_db.Member,
member["id"], status)
def update_member(self, context, old_member, member):
"""Update a pool member on a NetScaler device."""
ncc_member = self._prepare_member_for_update(member)
resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, old_member["id"])
LOG.debug("NetScaler driver poolmember %(member_id)s update: "
"%(member_obj)r",
{"member_id": old_member["id"],
"member_obj": ncc_member})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
POOLMEMBER_RESOURCE, ncc_member)
except ncc_client.NCCException:
status = constants.ERROR
self.plugin.update_status(context, loadbalancer_db.Member,
old_member["id"], status)
def delete_member(self, context, member):
"""Delete a pool member on a NetScaler device."""
resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, member['id'])
LOG.debug("NetScaler driver poolmember removal: %s", member["id"])
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
self.plugin.update_status(context, loadbalancer_db.Member,
member["id"],
constants.ERROR)
else:
self.plugin._delete_db_member(context, member['id'])
def create_pool_health_monitor(self, context, health_monitor, pool_id):
"""Create a pool health monitor on a NetScaler device."""
ncc_hm = self._prepare_healthmonitor_for_creation(health_monitor,
pool_id)
resource_path = "%s/%s/%s" % (POOLS_RESOURCE, pool_id,
MONITORS_RESOURCE)
LOG.debug("NetScaler driver healthmonitor creation for pool "
"%(pool_id)s: %(monitor_obj)r",
{"pool_id": pool_id, "monitor_obj": ncc_hm})
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id, resource_path,
MONITOR_RESOURCE,
ncc_hm)
except ncc_client.NCCException:
status = constants.ERROR
self.plugin.update_pool_health_monitor(context,
health_monitor['id'],
pool_id,
status, "")
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
"""Update a pool health monitor on a NetScaler device."""
ncc_hm = self._prepare_healthmonitor_for_update(health_monitor)
resource_path = "%s/%s" % (MONITORS_RESOURCE,
old_health_monitor["id"])
LOG.debug("NetScaler driver healthmonitor %(monitor_id)s update: "
"%(monitor_obj)r",
{"monitor_id": old_health_monitor["id"],
"monitor_obj": ncc_hm})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
MONITOR_RESOURCE, ncc_hm)
except ncc_client.NCCException:
status = constants.ERROR
self.plugin.update_pool_health_monitor(context,
old_health_monitor['id'],
pool_id,
status, "")
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
"""Delete a pool health monitor on a NetScaler device."""
resource_path = "%s/%s/%s/%s" % (POOLS_RESOURCE, pool_id,
MONITORS_RESOURCE,
health_monitor["id"])
LOG.debug("NetScaler driver healthmonitor %(monitor_id)s"
"removal for pool %(pool_id)s",
{"monitor_id": health_monitor["id"],
"pool_id": pool_id})
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
self.plugin.update_pool_health_monitor(context,
health_monitor['id'],
pool_id,
constants.ERROR, "")
else:
self.plugin._delete_db_pool_health_monitor(context,
health_monitor['id'],
pool_id)
def stats(self, context, pool_id):
"""Retrieve pool statistics from the NetScaler device."""
resource_path = "%s/%s" % (POOLSTATS_RESOURCE, pool_id)
LOG.debug("NetScaler driver pool stats retrieval: %s", pool_id)
try:
stats = self.client.retrieve_resource(context.tenant_id,
resource_path)[1]
except ncc_client.NCCException:
self.plugin.update_status(context, loadbalancer_db.Pool,
pool_id, constants.ERROR)
else:
return stats
def _prepare_vip_for_creation(self, vip):
creation_attrs = {
'id': vip['id'],
'tenant_id': vip['tenant_id'],
'protocol': vip['protocol'],
'address': vip['address'],
'protocol_port': vip['protocol_port'],
}
if 'session_persistence' in vip:
creation_attrs['session_persistence'] = vip['session_persistence']
update_attrs = self._prepare_vip_for_update(vip)
creation_attrs.update(update_attrs)
return creation_attrs
def _prepare_vip_for_update(self, vip):
return {
'name': vip['name'],
'description': vip['description'],
'pool_id': vip['pool_id'],
'connection_limit': vip['connection_limit'],
'admin_state_up': vip['admin_state_up']
}
def _prepare_pool_for_creation(self, pool):
creation_attrs = {
'id': pool['id'],
'tenant_id': pool['tenant_id'],
'vip_id': pool['vip_id'],
'protocol': pool['protocol'],
'subnet_id': pool['subnet_id'],
}
update_attrs = self._prepare_pool_for_update(pool)
creation_attrs.update(update_attrs)
return creation_attrs
def _prepare_pool_for_update(self, pool):
return {
'name': pool['name'],
'description': pool['description'],
'lb_method': pool['lb_method'],
'admin_state_up': pool['admin_state_up']
}
def _prepare_member_for_creation(self, member):
creation_attrs = {
'id': member['id'],
'tenant_id': member['tenant_id'],
'address': member['address'],
'protocol_port': member['protocol_port'],
}
update_attrs = self._prepare_member_for_update(member)
creation_attrs.update(update_attrs)
return creation_attrs
def _prepare_member_for_update(self, member):
return {
'pool_id': member['pool_id'],
'weight': member['weight'],
'admin_state_up': member['admin_state_up']
}
def _prepare_healthmonitor_for_creation(self, health_monitor, pool_id):
creation_attrs = {
'id': health_monitor['id'],
'tenant_id': health_monitor['tenant_id'],
'type': health_monitor['type'],
}
update_attrs = self._prepare_healthmonitor_for_update(health_monitor)
creation_attrs.update(update_attrs)
return creation_attrs
def _prepare_healthmonitor_for_update(self, health_monitor):
ncc_hm = {
'delay': health_monitor['delay'],
'timeout': health_monitor['timeout'],
'max_retries': health_monitor['max_retries'],
'admin_state_up': health_monitor['admin_state_up']
}
if health_monitor['type'] in ['HTTP', 'HTTPS']:
ncc_hm['http_method'] = health_monitor['http_method']
ncc_hm['url_path'] = health_monitor['url_path']
ncc_hm['expected_codes'] = health_monitor['expected_codes']
return ncc_hm
def _get_network_info(self, context, entity):
network_info = {}
subnet_id = entity['subnet_id']
subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
network_id = subnet['network_id']
network = self.plugin._core_plugin.get_network(context, network_id)
network_info['network_id'] = network_id
network_info['subnet_id'] = subnet_id
if PROV_NET_TYPE in network:
network_info['network_type'] = network[PROV_NET_TYPE]
if PROV_SEGMT_ID in network:
network_info['segmentation_id'] = network[PROV_SEGMT_ID]
return network_info
def _get_vip_network_info(self, context, vip):
network_info = self._get_network_info(context, vip)
network_info['port_id'] = vip['port_id']
return network_info
def _get_pool_network_info(self, context, pool):
return self._get_network_info(context, pool)
def _get_pools_on_subnet(self, context, tenant_id, subnet_id):
filter_dict = {'subnet_id': [subnet_id], 'tenant_id': [tenant_id]}
return self.plugin.get_pools(context, filters=filter_dict)
def _get_snatport_for_subnet(self, context, tenant_id, subnet_id):
device_id = '_lb-snatport-' + subnet_id
subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
network_id = subnet['network_id']
LOG.debug("Filtering ports based on network_id=%(network_id)s, "
"tenant_id=%(tenant_id)s, device_id=%(device_id)s",
{'network_id': network_id,
'tenant_id': tenant_id,
'device_id': device_id})
filter_dict = {
'network_id': [network_id],
'tenant_id': [tenant_id],
'device_id': [device_id],
            'device_owner': [DRIVER_NAME]
}
ports = self.plugin._core_plugin.get_ports(context,
filters=filter_dict)
if ports:
LOG.info(_LI("Found an existing SNAT port for subnet %s"),
subnet_id)
return ports[0]
LOG.info(_LI("Found no SNAT ports for subnet %s"), subnet_id)
def _create_snatport_for_subnet(self, context, tenant_id, subnet_id,
ip_address):
subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
fixed_ip = {'subnet_id': subnet['id']}
if ip_address and ip_address != n_constants.ATTR_NOT_SPECIFIED:
fixed_ip['ip_address'] = ip_address
port_data = {
'tenant_id': tenant_id,
'name': '_lb-snatport-' + subnet_id,
'network_id': subnet['network_id'],
'mac_address': n_constants.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': '_lb-snatport-' + subnet_id,
'device_owner': DRIVER_NAME,
'fixed_ips': [fixed_ip],
}
port = self.plugin._core_plugin.create_port(context,
{'port': port_data})
LOG.info(_LI("Created SNAT port: %r"), port)
return port
def _remove_snatport_for_subnet(self, context, tenant_id, subnet_id):
port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
if port:
self.plugin._core_plugin.delete_port(context, port['id'])
LOG.info(_LI("Removed SNAT port: %r"), port)
def _create_snatport_for_subnet_if_not_exists(self, context, tenant_id,
subnet_id, network_info):
port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
if not port:
LOG.info(_LI("No SNAT port found for subnet %s. Creating one..."),
subnet_id)
port = self._create_snatport_for_subnet(context, tenant_id,
subnet_id,
ip_address=None)
network_info['port_id'] = port['id']
network_info['snat_ip'] = port['fixed_ips'][0]['ip_address']
LOG.info(_LI("SNAT port: %r"), port)
def _remove_snatport_for_subnet_if_not_used(self, context, tenant_id,
subnet_id):
pools = self._get_pools_on_subnet(context, tenant_id, subnet_id)
if not pools:
            # No pools are left on the old subnet, so the SNAT
            # port/IP address can be removed.
self._remove_snatport_for_subnet(context, tenant_id, subnet_id)
LOG.info(_LI("Removing SNAT port for subnet %s "
"as this is the last pool using it..."),
subnet_id)

File diff suppressed because it is too large

View File

@@ -1,42 +0,0 @@
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import exceptions
from neutron_lbaas._i18n import _
class RadwareLBaasException(exceptions.NeutronException):
message = _('An unknown exception occurred in Radware LBaaS provider.')
class AuthenticationMissing(RadwareLBaasException):
message = _('vDirect user/password missing. '
'Specify in configuration file, under [radware] section')
class WorkflowMissing(RadwareLBaasException):
message = _('Workflow %(workflow)s is missing on vDirect server. '
'Upload missing workflow')
class RESTRequestFailure(RadwareLBaasException):
message = _('REST request failed with status %(status)s. '
'Reason: %(reason)s, Description: %(description)s. '
'Success status codes are %(success_codes)s')
class UnsupportedEntityOperation(RadwareLBaasException):
message = _('%(operation)s operation is not supported for %(entity)s.')

View File

@@ -1,99 +0,0 @@
# Copyright 2015 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lbaas.services.loadbalancer.drivers.vmware import models
def add_nsxv_edge_pool_mapping(context, pool_id, edge_id, edge_pool_id):
session = context.session
with session.begin(subtransactions=True):
mapping = models.NsxvEdgePoolMapping()
mapping.pool_id = pool_id
mapping.edge_id = edge_id
mapping.edge_pool_id = edge_pool_id
session.add(mapping)
def get_nsxv_edge_pool_mapping(context, pool_id):
return(context.session.query(models.NsxvEdgePoolMapping).
filter_by(pool_id=pool_id).first())
def get_nsxv_edge_pool_mapping_by_edge(context, edge_id):
return(context.session.query(models.NsxvEdgePoolMapping).
filter_by(edge_id=edge_id).all())
def delete_nsxv_edge_pool_mapping(context, pool_id):
session = context.session
mapping = (session.query(models.NsxvEdgePoolMapping).filter_by(
pool_id=pool_id))
for m in mapping:
session.delete(m)
def add_nsxv_edge_vip_mapping(context, pool_id, edge_id, edge_app_profile_id,
edge_vse_id, edge_fw_rule_id):
session = context.session
with session.begin(subtransactions=True):
mapping = models.NsxvEdgeVipMapping()
mapping.pool_id = pool_id
mapping.edge_id = edge_id
mapping.edge_app_profile_id = edge_app_profile_id
mapping.edge_vse_id = edge_vse_id
mapping.edge_fw_rule_id = edge_fw_rule_id
session.add(mapping)
def get_nsxv_edge_vip_mapping(context, pool_id):
return(context.session.query(models.NsxvEdgeVipMapping).
filter_by(pool_id=pool_id).first())
def delete_nsxv_edge_vip_mapping(context, pool_id):
session = context.session
mapping = (session.query(models.NsxvEdgeVipMapping).filter_by(
pool_id=pool_id))
for m in mapping:
session.delete(m)
def add_nsxv_edge_monitor_mapping(context, monitor_id, edge_id,
edge_monitor_id):
session = context.session
with session.begin(subtransactions=True):
mapping = models.NsxvEdgeMonitorMapping()
mapping.monitor_id = monitor_id
mapping.edge_id = edge_id
mapping.edge_monitor_id = edge_monitor_id
session.add(mapping)
def get_nsxv_edge_monitor_mapping(context, monitor_id, edge_id):
return(context.session.query(models.NsxvEdgeMonitorMapping).
filter_by(monitor_id=monitor_id, edge_id=edge_id).first())
def get_nsxv_edge_monitor_mapping_all(context, monitor_id):
return(context.session.query(models.NsxvEdgeMonitorMapping).
filter_by(monitor_id=monitor_id).all())
def delete_nsxv_edge_monitor_mapping(context, monitor_id, edge_id):
session = context.session
mapping = (session.query(models.NsxvEdgeMonitorMapping).filter_by(
monitor_id=monitor_id, edge_id=edge_id))
for m in mapping:
session.delete(m)

View File

@@ -1,207 +0,0 @@
# Copyright 2015 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.common import constants
from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db
from neutron_lbaas.extensions import loadbalancer as lb_ext
from neutron_lbaas.services.loadbalancer.drivers import abstract_driver
from neutron_lbaas.services.loadbalancer.drivers.vmware import db
class EdgeLoadbalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
def __init__(self, plugin):
self._plugin = plugin
@property
def _nsxv_driver(self):
return self._plugin._core_plugin.nsx_v
def create_pool_successful(self, context, pool, edge_id, edge_pool_id):
db.add_nsxv_edge_pool_mapping(
context, pool['id'], edge_id, edge_pool_id)
self.pool_successful(context, pool)
def delete_pool_successful(self, context, pool):
self._plugin._delete_db_pool(context, pool['id'])
db.delete_nsxv_edge_pool_mapping(context, pool['id'])
def pool_successful(self, context, pool):
self._plugin.update_status(
context, lb_db.Pool, pool['id'], constants.ACTIVE)
def pool_failed(self, context, pool):
self._plugin.update_status(
context, lb_db.Pool, pool['id'], constants.ERROR)
def create_pool(self, context, pool):
super(EdgeLoadbalancerDriver, self).create_pool(context, pool)
self._nsxv_driver.create_pool(context, pool)
def update_pool(self, context, old_pool, pool):
super(EdgeLoadbalancerDriver, self).update_pool(
context, old_pool, pool)
pool_mapping = db.get_nsxv_edge_pool_mapping(context, old_pool['id'])
self._nsxv_driver.update_pool(
context, old_pool, pool, pool_mapping)
def delete_pool(self, context, pool):
vip_id = self._plugin.get_pool(context, pool['id']).get('vip_id', None)
if vip_id:
raise lb_ext.PoolInUse(pool_id=pool['id'])
else:
super(EdgeLoadbalancerDriver, self).delete_pool(context, pool)
pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool['id'])
self._nsxv_driver.delete_pool(context, pool, pool_mapping)
def create_vip_successful(self, context, vip, edge_id, app_profile_id,
edge_vip_id, edge_fw_rule_id):
db.add_nsxv_edge_vip_mapping(context, vip['pool_id'], edge_id,
app_profile_id, edge_vip_id,
edge_fw_rule_id)
self.vip_successful(context, vip)
def delete_vip_successful(self, context, vip):
db.delete_nsxv_edge_vip_mapping(context, vip['pool_id'])
self._plugin._delete_db_vip(context, vip['id'])
def vip_successful(self, context, vip):
self._plugin.update_status(
context, lb_db.Vip, vip['id'], constants.ACTIVE)
def vip_failed(self, context, vip):
self._plugin.update_status(
context, lb_db.Vip, vip['id'], constants.ERROR)
def create_vip(self, context, vip):
super(EdgeLoadbalancerDriver, self).create_vip(context, vip)
pool_mapping = db.get_nsxv_edge_pool_mapping(context, vip['pool_id'])
self._nsxv_driver.create_vip(context, vip, pool_mapping)
def update_vip(self, context, old_vip, vip):
super(EdgeLoadbalancerDriver, self).update_vip(context, old_vip, vip)
pool_mapping = db.get_nsxv_edge_pool_mapping(context, vip['pool_id'])
vip_mapping = db.get_nsxv_edge_vip_mapping(context, vip['pool_id'])
self._nsxv_driver.update_vip(context, old_vip, vip, pool_mapping,
vip_mapping)
def delete_vip(self, context, vip):
super(EdgeLoadbalancerDriver, self).delete_vip(context, vip)
vip_mapping = db.get_nsxv_edge_vip_mapping(context, vip['pool_id'])
self._nsxv_driver.delete_vip(context, vip, vip_mapping)
def member_successful(self, context, member):
self._plugin.update_status(
context, lb_db.Member, member['id'], constants.ACTIVE)
def member_failed(self, context, member):
self._plugin.update_status(
context, lb_db.Member, member['id'], constants.ERROR)
def create_member(self, context, member):
super(EdgeLoadbalancerDriver, self).create_member(context, member)
pool_mapping = db.get_nsxv_edge_pool_mapping(
context, member['pool_id'])
self._nsxv_driver.create_member(
context, member, pool_mapping)
def update_member(self, context, old_member, member):
super(EdgeLoadbalancerDriver, self).update_member(
context, old_member, member)
pool_mapping = db.get_nsxv_edge_pool_mapping(
context, member['pool_id'])
self._nsxv_driver.update_member(
context, old_member, member, pool_mapping)
def delete_member(self, context, member):
super(EdgeLoadbalancerDriver, self).delete_member(context, member)
pool_mapping = db.get_nsxv_edge_pool_mapping(
context, member['pool_id'])
self._nsxv_driver.delete_member(context, member, pool_mapping)
def create_pool_health_monitor_successful(self, context, health_monitor,
pool_id, edge_id, edge_mon_id):
db.add_nsxv_edge_monitor_mapping(
context, health_monitor['id'], edge_id, edge_mon_id)
self.pool_health_monitor_successful(context, health_monitor, pool_id)
def delete_pool_health_monitor_successful(self, context, health_monitor,
pool_id, mon_mapping):
db.delete_nsxv_edge_monitor_mapping(
context, health_monitor['id'], mon_mapping['edge_id'])
self._plugin._delete_db_pool_health_monitor(
context, health_monitor['id'], pool_id)
def pool_health_monitor_successful(self, context, health_monitor, pool_id):
self._plugin.update_pool_health_monitor(
context, health_monitor['id'], pool_id, constants.ACTIVE, '')
def pool_health_monitor_failed(self, context, health_monitor, pool_id):
self._plugin.update_pool_health_monitor(
context, health_monitor['id'], pool_id, constants.ERROR, '')
def create_pool_health_monitor(self, context, health_monitor, pool_id):
super(EdgeLoadbalancerDriver, self).create_pool_health_monitor(
context, health_monitor, pool_id)
pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool_id)
mon_mapping = db.get_nsxv_edge_monitor_mapping(
context, health_monitor['id'], pool_mapping['edge_id'])
self._nsxv_driver.create_pool_health_monitor(
context, health_monitor, pool_id, pool_mapping, mon_mapping)
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
super(EdgeLoadbalancerDriver, self).update_pool_health_monitor(
context, old_health_monitor, health_monitor, pool_id)
pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool_id)
mon_mapping = db.get_nsxv_edge_monitor_mapping(
context, health_monitor['id'], pool_mapping['edge_id'])
self._nsxv_driver.update_pool_health_monitor(
context, old_health_monitor, health_monitor, pool_id, mon_mapping)
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
super(EdgeLoadbalancerDriver, self).delete_pool_health_monitor(
context, health_monitor, pool_id)
pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool_id)
edge_id = pool_mapping['edge_id']
mon_mapping = db.get_nsxv_edge_monitor_mapping(
context, health_monitor['id'], edge_id)
self._nsxv_driver.delete_pool_health_monitor(
context, health_monitor, pool_id, pool_mapping, mon_mapping)
def stats(self, context, pool_id):
super(EdgeLoadbalancerDriver, self).stats(context, pool_id)
pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool_id)
return self._nsxv_driver.stats(context, pool_id, pool_mapping)
def is_edge_in_use(self, context, edge_id):
pool_mappings = db.get_nsxv_edge_pool_mapping_by_edge(context, edge_id)
if pool_mappings:
return True
return False

View File

@@ -1,57 +0,0 @@
# Copyright 2015 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import model_base
import sqlalchemy as sql
class NsxvEdgePoolMapping(model_base.BASEV2):
"""Represents the connection between Edges and pools."""
__tablename__ = 'nsxv_edge_pool_mappings'
pool_id = sql.Column(sql.String(36),
sql.ForeignKey('pools.id', ondelete='CASCADE'),
primary_key=True)
edge_id = sql.Column(sql.String(36), nullable=False)
edge_pool_id = sql.Column(sql.String(36), nullable=False)
class NsxvEdgeVipMapping(model_base.BASEV2):
"""Represents the connection between Edges and VIPs."""
__tablename__ = 'nsxv_edge_vip_mappings'
pool_id = sql.Column(sql.String(36),
sql.ForeignKey('pools.id', ondelete='CASCADE'),
primary_key=True)
edge_id = sql.Column(sql.String(36), nullable=False)
edge_app_profile_id = sql.Column(sql.String(36), nullable=False)
edge_vse_id = sql.Column(sql.String(36), nullable=False)
edge_fw_rule_id = sql.Column(sql.String(36), nullable=False)
class NsxvEdgeMonitorMapping(model_base.BASEV2):
"""Represents the connection between Edges and pool monitors."""
__tablename__ = 'nsxv_edge_monitor_mappings'
__table_args__ = (sql.schema.UniqueConstraint(
'monitor_id', 'edge_id',
name='uniq_nsxv_edge_monitor_mappings'),)
monitor_id = sql.Column(sql.String(36),
sql.ForeignKey('healthmonitors.id',
ondelete='CASCADE'),
primary_key=True)
edge_id = sql.Column(sql.String(36), nullable=False, primary_key=True)
edge_monitor_id = sql.Column(sql.String(36), nullable=False)

View File

@@ -12,7 +12,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import six
from neutron.api.v2 import attributes as attrs
from neutron.api.v2 import base as napi_base
@@ -26,359 +28,32 @@ from neutron.services import provider_configuration as pconf
from neutron.services import service_base
from neutron_lib import constants as n_constants
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
from neutron_lbaas._i18n import _LI, _LE
from neutron_lbaas import agent_scheduler as agent_scheduler_v2
import neutron_lbaas.common.cert_manager
from neutron_lbaas.common.tls_utils import cert_parser
from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb
from neutron_lbaas.db.loadbalancer import loadbalancer_dbv2 as ldbv2
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.extensions import l7
from neutron_lbaas.extensions import lb_graph as lb_graph_ext
from neutron_lbaas.extensions import lbaas_agentschedulerv2
from neutron_lbaas.extensions import loadbalancer as lb_ext
from neutron_lbaas.extensions import loadbalancerv2
from neutron_lbaas.extensions import sharedpools
from neutron_lbaas.services.loadbalancer import agent_scheduler
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
CERT_MANAGER_PLUGIN = neutron_lbaas.common.cert_manager.get_backend()
def verify_lbaas_mutual_exclusion():
"""Verifies lbaas v1 and lbaas v2 cannot be active concurrently."""
plugins = set([LoadBalancerPlugin.__name__, LoadBalancerPluginv2.__name__])
cfg_sps = set([sp.split('.')[-1] for sp in cfg.CONF.service_plugins])
if len(plugins.intersection(cfg_sps)) >= 2:
msg = _LE("Cannot have service plugins %(v1)s and %(v2)s active at "
"the same time!") % {'v1': LoadBalancerPlugin.__name__,
'v2': LoadBalancerPluginv2.__name__}
LOG.error(msg)
raise SystemExit(1)
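# For example, a (hypothetical) neutron.conf enabling both plugins would
# trip this check; only the class names after the last '.' are compared:
#   service_plugins = neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin,
#                     neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2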
def add_provider_configuration(type_manager, service_type):
type_manager.add_provider_configuration(
service_type,
pconf.ProviderConfiguration('neutron_lbaas'))
class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
agent_scheduler.LbaasAgentSchedulerDbMixin):
"""Implementation of the Neutron Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
supported_extension_aliases = ["lbaas",
"lbaas_agent_scheduler",
"service-type"]
path_prefix = lb_ext.LOADBALANCER_PREFIX
# lbaas agent notifiers to handle agent update operations;
# can be updated by plugin drivers while loading;
# will be extracted by neutron manager when loading service plugins;
agent_notifiers = {}
def __init__(self):
"""Initialization for the loadbalancer service plugin."""
self.service_type_manager = st_db.ServiceTypeManager.get_instance()
add_provider_configuration(
self.service_type_manager, constants.LOADBALANCER)
self._load_drivers()
super(LoadBalancerPlugin, self).subscribe()
def _load_drivers(self):
"""Loads plugin-drivers specified in configuration."""
self.drivers, self.default_provider = service_base.load_drivers(
constants.LOADBALANCER, self)
        # NOTE(blogan): this method MUST be called after
        # service_base.load_drivers to correctly verify that the v1 and v2
        # plugins are not enabled at the same time.
        verify_lbaas_mutual_exclusion()
ctx = ncontext.get_admin_context()
# stop service in case provider was removed, but resources were not
self._check_orphan_pool_associations(ctx, self.drivers.keys())
def _check_orphan_pool_associations(self, context, provider_names):
"""Checks remaining associations between pools and providers.
If admin has not undeployed resources with provider that was deleted
from configuration, neutron service is stopped. Admin must delete
resources prior to removing providers from configuration.
"""
pools = self.get_pools(context)
lost_providers = set([pool['provider'] for pool in pools
if pool['provider'] not in provider_names])
# resources are left without provider - stop the service
if lost_providers:
LOG.error(_LE("Delete associated loadbalancer pools before "
"removing providers %s"), list(lost_providers))
raise SystemExit(1)
def _get_driver_for_provider(self, provider):
if provider in self.drivers:
return self.drivers[provider]
# raise if not associated (should never be reached)
raise n_exc.Invalid(_LE("Error retrieving driver for provider %s") %
provider)
def _get_driver_for_pool(self, context, pool_id):
pool = self.get_pool(context, pool_id)
try:
return self.drivers[pool['provider']]
except KeyError:
raise n_exc.Invalid(_LE("Error retrieving provider for pool %s") %
pool_id)
def get_plugin_type(self):
return constants.LOADBALANCER
def get_plugin_description(self):
return "Neutron LoadBalancer Service Plugin"
def create_vip(self, context, vip):
v = super(LoadBalancerPlugin, self).create_vip(context, vip)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.create_vip(context, v)
return v
def update_vip(self, context, id, vip):
if 'status' not in vip['vip']:
vip['vip']['status'] = constants.PENDING_UPDATE
old_vip = self.get_vip(context, id)
v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.update_vip(context, old_vip, v)
return v
def _delete_db_vip(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_vip(context, id)
def delete_vip(self, context, id):
self.update_status(context, ldb.Vip,
id, constants.PENDING_DELETE)
v = self.get_vip(context, id)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.delete_vip(context, v)
def _get_provider_name(self, context, pool):
if ('provider' in pool and
pool['provider'] != n_constants.ATTR_NOT_SPECIFIED):
provider_name = pconf.normalize_provider_name(pool['provider'])
self.validate_provider(provider_name)
return provider_name
else:
if not self.default_provider:
raise pconf.DefaultServiceProviderNotFound(
service_type=constants.LOADBALANCER)
return self.default_provider
def create_pool(self, context, pool):
# This validation exists because the new API version also has a resource
# called pool, and these attributes had to be made optional in the old
# API so that they are not required attributes of the new. It's
# complicated.
if pool['pool']['lb_method'] == n_constants.ATTR_NOT_SPECIFIED:
raise loadbalancerv2.RequiredAttributeNotSpecified(
attr_name='lb_method')
if pool['pool']['subnet_id'] == n_constants.ATTR_NOT_SPECIFIED:
raise loadbalancerv2.RequiredAttributeNotSpecified(
attr_name='subnet_id')
provider_name = self._get_provider_name(context, pool['pool'])
p = super(LoadBalancerPlugin, self).create_pool(context, pool)
self.service_type_manager.add_resource_association(
context,
constants.LOADBALANCER,
provider_name, p['id'])
# need to add provider name to pool dict,
# because provider was not known to db plugin at pool creation
p['provider'] = provider_name
driver = self.drivers[provider_name]
try:
driver.create_pool(context, p)
except lb_ext.NoEligibleBackend:
# This catches the case where no backend of any kind
# is available (agent, appliance, etc.)
self.update_status(context, ldb.Pool,
p['id'], constants.ERROR,
"No eligible backend")
raise lb_ext.NoEligibleBackend(pool_id=p['id'])
return p
def update_pool(self, context, id, pool):
if 'status' not in pool['pool']:
pool['pool']['status'] = constants.PENDING_UPDATE
old_pool = self.get_pool(context, id)
p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
driver = self._get_driver_for_provider(p['provider'])
driver.update_pool(context, old_pool, p)
return p
def _delete_db_pool(self, context, id):
# proxy the call until plugin inherits from DBPlugin
# rely on uuid uniqueness:
try:
with context.session.begin(subtransactions=True):
self.service_type_manager.del_resource_associations(
context, [id])
super(LoadBalancerPlugin, self).delete_pool(context, id)
except Exception:
# This should not happen. If it does, something has gone wrong,
# so log the error and mark the pool as ERROR.
LOG.error(_LE('Failed to delete pool %s, putting it in ERROR '
'state'),
id)
with excutils.save_and_reraise_exception():
self.update_status(context, ldb.Pool,
id, constants.ERROR)
def delete_pool(self, context, id):
# check for delete conditions and update the status
# within a transaction to avoid a race
with context.session.begin(subtransactions=True):
self.update_status(context, ldb.Pool,
id, constants.PENDING_DELETE)
self._ensure_pool_delete_conditions(context, id)
p = self.get_pool(context, id)
driver = self._get_driver_for_provider(p['provider'])
driver.delete_pool(context, p)
def create_member(self, context, member):
m = super(LoadBalancerPlugin, self).create_member(context, member)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.create_member(context, m)
return m
def update_member(self, context, id, member):
if 'status' not in member['member']:
member['member']['status'] = constants.PENDING_UPDATE
old_member = self.get_member(context, id)
m = super(LoadBalancerPlugin, self).update_member(context, id, member)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.update_member(context, old_member, m)
return m
def _delete_db_member(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_member(context, id)
def delete_member(self, context, id):
self.update_status(context, ldb.Member,
id, constants.PENDING_DELETE)
m = self.get_member(context, id)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.delete_member(context, m)
def _validate_hm_parameters(self, delay, timeout):
if delay < timeout:
raise lb_ext.DelayOrTimeoutInvalid()
def create_health_monitor(self, context, health_monitor):
new_hm = health_monitor['health_monitor']
self._validate_hm_parameters(new_hm['delay'], new_hm['timeout'])
hm = super(LoadBalancerPlugin, self).create_health_monitor(
context,
health_monitor
)
return hm
def update_health_monitor(self, context, id, health_monitor):
new_hm = health_monitor['health_monitor']
old_hm = self.get_health_monitor(context, id)
delay = new_hm.get('delay', old_hm.get('delay'))
timeout = new_hm.get('timeout', old_hm.get('timeout'))
self._validate_hm_parameters(delay, timeout)
hm = super(LoadBalancerPlugin, self).update_health_monitor(
context,
id,
health_monitor
)
with context.session.begin(subtransactions=True):
qry = context.session.query(
ldb.PoolMonitorAssociation
).filter_by(monitor_id=hm['id']).join(ldb.Pool)
for assoc in qry:
driver = self._get_driver_for_pool(context, assoc['pool_id'])
driver.update_pool_health_monitor(context, old_hm,
hm, assoc['pool_id'])
return hm
def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
super(LoadBalancerPlugin, self).delete_pool_health_monitor(context,
hm_id,
pool_id)
def _delete_db_health_monitor(self, context, id):
super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
def create_pool_health_monitor(self, context, health_monitor, pool_id):
retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
context,
health_monitor,
pool_id
)
monitor_id = health_monitor['health_monitor']['id']
hm = self.get_health_monitor(context, monitor_id)
driver = self._get_driver_for_pool(context, pool_id)
driver.create_pool_health_monitor(context, hm, pool_id)
return retval
def delete_pool_health_monitor(self, context, id, pool_id):
self.update_pool_health_monitor(context, id, pool_id,
constants.PENDING_DELETE)
hm = self.get_health_monitor(context, id)
driver = self._get_driver_for_pool(context, pool_id)
driver.delete_pool_health_monitor(context, hm, pool_id)
def stats(self, context, pool_id):
driver = self._get_driver_for_pool(context, pool_id)
stats_data = driver.stats(context, pool_id)
# If the driver returned stats, update the DB and return the
# refreshed DB value; otherwise return what is already in the DB.
if stats_data:
super(LoadBalancerPlugin, self).update_pool_stats(
context,
pool_id,
stats_data
)
return super(LoadBalancerPlugin, self).stats(context,
pool_id)
def populate_vip_graph(self, context, vip):
"""Populate the vip with: pool, members, healthmonitors."""
pool = self.get_pool(context, vip['pool_id'])
vip['pool'] = pool
vip['members'] = [self.get_member(context, member_id)
for member_id in pool['members']]
vip['health_monitors'] = [self.get_health_monitor(context, hm_id)
for hm_id in pool['health_monitors']]
return vip
def validate_provider(self, provider):
if provider not in self.drivers:
raise pconf.ServiceProviderNotFound(
provider=provider, service_type=constants.LOADBALANCER)
class LoadBalancerPluginv2(loadbalancerv2.LoadBalancerPluginBaseV2):
"""Implementation of the Neutron Loadbalancer Service Plugin.
@ -421,10 +96,6 @@ class LoadBalancerPluginv2(loadbalancerv2.LoadBalancerPluginBaseV2):
self.drivers, self.default_provider = service_base.load_drivers(
constants.LOADBALANCERV2, self)
# NOTE(blogan): this method MUST be called after
# service_base.load_drivers to verify mutual exclusion correctly
verify_lbaas_mutual_exclusion()
ctx = ncontext.get_admin_context()
# stop service in case provider was removed, but resources were not
self._check_orphan_loadbalancer_associations(ctx, self.drivers.keys())
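
The v1 plugin above uses one dispatch pattern throughout: resolve the provider for a resource, look up its driver, call the driver, and move the resource's status on failure. A minimal standalone sketch of that pattern; the driver class and its stats payload here are hypothetical stand-ins, not code from the tree:

# Sketch of the provider -> driver dispatch used by the removed v1 plugin.
# FakeDriver and its stats values are illustrative only.
class FakeDriver(object):
    def stats(self, context, pool_id):
        # A driver may also return None, in which case the plugin falls
        # back to the stats already stored in the DB (see stats() above).
        return {'bytes_in': 0, 'bytes_out': 0,
                'active_connections': 0, 'total_connections': 0}

drivers = {'haproxy': FakeDriver()}

def get_driver_for_provider(provider):
    try:
        return drivers[provider]
    except KeyError:
        # mirrors _get_driver_for_provider: unknown providers are an error
        raise ValueError('Error retrieving driver for provider %s' % provider)

print(get_driver_for_provider('haproxy').stats(None, 'pool-id'))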


@ -43,6 +43,11 @@ if [ "$testenv" = "apiv2" ]; then
esac
fi
if [ "$testenv" = "apiv1" ]; then
# Temporary until job is removed
exit 0
fi
function generate_testr_results {
# Give job user rights to access tox logs
sudo -H -u "$owner" chmod o+rw .


@ -1,75 +0,0 @@
# Copyright 2013 IBM Corp.
# Copyright 2016 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test
from tempest.lib.common.utils import data_utils
from neutron_lbaas.tests.tempest.v1.api import base
class LBaaSAgentSchedulerTestJSON(base.BaseAdminNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List pools the given LBaaS agent is hosting.
Show an LBaaS agent hosting the given pool.
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network-feature-enabled] section of
etc/tempest.conf:
api_extensions
"""
@classmethod
def resource_setup(cls):
super(LBaaSAgentSchedulerTestJSON, cls).resource_setup()
if not test.is_extension_enabled('lbaas_agent_scheduler', 'network'):
msg = "LBaaS Agent Scheduler Extension not enabled."
raise cls.skipException(msg)
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
pool_name = data_utils.rand_name('pool-')
cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN",
"HTTP", cls.subnet)
@test.attr(type='smoke')
@test.idempotent_id('e5ea8b15-4f44-4350-963c-e0fcb533ee79')
def test_list_pools_on_lbaas_agent(self):
found = False
body = self.admin_client.list_agents(
agent_type="Loadbalancer agent")
agents = body['agents']
for a in agents:
msg = 'Load Balancer agent expected'
self.assertEqual(a['agent_type'], 'Loadbalancer agent', msg)
body = (
self.admin_client.list_pools_hosted_by_one_lbaas_agent(
a['id']))
pools = body['pools']
if self.pool['id'] in [p['id'] for p in pools]:
found = True
msg = 'Unable to find Load Balancer agent hosting pool'
self.assertTrue(found, msg)
@test.attr(type='smoke')
@test.idempotent_id('e2745593-fd79-4b98-a262-575fd7865796')
def test_show_lbaas_agent_hosting_pool(self):
body = self.admin_client.show_lbaas_agent_hosting_pool(
self.pool['id'])
self.assertEqual('Loadbalancer agent', body['agent']['agent_type'])
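
The first test above boils down to one lookup: walk the LBaaS agents and find the one hosting a given pool. A hypothetical helper distilled from it, using the client methods exactly as the test does:

# Hypothetical helper distilled from test_list_pools_on_lbaas_agent.
def find_hosting_agent(admin_client, pool_id):
    agents = admin_client.list_agents(
        agent_type="Loadbalancer agent")['agents']
    for agent in agents:
        pools = admin_client.list_pools_hosted_by_one_lbaas_agent(
            agent['id'])['pools']
        if pool_id in [p['id'] for p in pools]:
            return agent
    return None  # no agent hosts this pool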


@ -1,117 +0,0 @@
# Copyright 2014 Mirantis.inc
# Copyright 2016 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from neutron_lbaas.tests.tempest.v1.api import base
class LoadBalancerAdminTestJSON(base.BaseAdminNetworkTest):
"""
Test admin actions for load balancer.
Create VIP for another tenant
Create health monitor for another tenant
"""
@classmethod
def resource_setup(cls):
super(LoadBalancerAdminTestJSON, cls).resource_setup()
if not test.is_extension_enabled('lbaas', 'network'):
msg = "lbaas extension not enabled."
raise cls.skipException(msg)
cls.force_tenant_isolation = True
manager = cls.get_client_manager()
cls.client = manager.network_client
cls.tenant_id = manager.credentials.tenant_id
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.pool = cls.create_pool(data_utils.rand_name('pool-'),
"ROUND_ROBIN", "HTTP", cls.subnet)
@test.attr(type='smoke')
@decorators.idempotent_id('6b0a20d8-4fcd-455e-b54f-ec4db5199518')
def test_create_vip_as_admin_for_another_tenant(self):
name = data_utils.rand_name('vip-')
body = self.admin_client.create_pool(
name=data_utils.rand_name('pool-'),
lb_method="ROUND_ROBIN",
protocol="HTTP",
subnet_id=self.subnet['id'],
tenant_id=self.tenant_id)
pool = body['pool']
self.addCleanup(self.admin_client.delete_pool, pool['id'])
body = self.admin_client.create_vip(name=name,
protocol="HTTP",
protocol_port=80,
subnet_id=self.subnet['id'],
pool_id=pool['id'],
tenant_id=self.tenant_id)
vip = body['vip']
self.addCleanup(self.admin_client.delete_vip, vip['id'])
self.assertIsNotNone(vip['id'])
self.assertEqual(self.tenant_id, vip['tenant_id'])
body = self.client.show_vip(vip['id'])
show_vip = body['vip']
self.assertEqual(vip['id'], show_vip['id'])
self.assertEqual(vip['name'], show_vip['name'])
@test.attr(type='smoke')
@decorators.idempotent_id('74552cfc-ab78-4fb6-825b-f67bca379921')
def test_create_health_monitor_as_admin_for_another_tenant(self):
body = (
self.admin_client.create_health_monitor(delay=4,
max_retries=3,
type="TCP",
timeout=1,
tenant_id=self.tenant_id))
health_monitor = body['health_monitor']
self.addCleanup(self.admin_client.delete_health_monitor,
health_monitor['id'])
self.assertIsNotNone(health_monitor['id'])
self.assertEqual(self.tenant_id, health_monitor['tenant_id'])
body = self.client.show_health_monitor(health_monitor['id'])
show_health_monitor = body['health_monitor']
self.assertEqual(health_monitor['id'], show_health_monitor['id'])
@test.attr(type='smoke')
@decorators.idempotent_id('266a192d-3c22-46c4-a8fb-802450301e82')
def test_create_pool_from_admin_user_other_tenant(self):
body = self.admin_client.create_pool(
name=data_utils.rand_name('pool-'),
lb_method="ROUND_ROBIN",
protocol="HTTP",
subnet_id=self.subnet['id'],
tenant_id=self.tenant_id)
pool = body['pool']
self.addCleanup(self.admin_client.delete_pool, pool['id'])
self.assertIsNotNone(pool['id'])
self.assertEqual(self.tenant_id, pool['tenant_id'])
@test.attr(type='smoke')
@decorators.idempotent_id('158bb272-b9ed-4cfc-803c-661dac46f783')
def test_create_member_from_admin_user_other_tenant(self):
body = self.admin_client.create_member(address="10.0.9.47",
protocol_port=80,
pool_id=self.pool['id'],
tenant_id=self.tenant_id)
member = body['member']
self.addCleanup(self.admin_client.delete_member, member['id'])
self.assertIsNotNone(member['id'])
self.assertEqual(self.tenant_id, member['tenant_id'])


@ -1,104 +0,0 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2016 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from tempest import test
from neutron_lbaas.tests.tempest.v1.api import base
class QuotasTest(base.BaseAdminNetworkTest):
_interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
list quotas for tenants who have non-default quota values
show quotas for a specified tenant
update quotas for a specified tenant
reset quotas to default values for a specified tenant
v2.0 of the API is assumed.
It is also assumed that the per-tenant quota extension API is configured
in /etc/neutron/neutron.conf as follows:
quota_driver = neutron.db.quota_db.DbQuotaDriver
"""
@classmethod
def skip_checks(cls):
super(QuotasTest, cls).skip_checks()
if not test.is_extension_enabled('quotas', 'network'):
msg = "quotas extension not enabled."
raise cls.skipException(msg)
def _check_quotas(self, new_quotas):
# Add a tenant to conduct the test
test_tenant = data_utils.rand_name('test_tenant_')
test_description = data_utils.rand_name('desc_')
tenant = self.identity_admin_client.create_tenant(
name=test_tenant,
description=test_description)
tenant_id = tenant['tenant']['id']
self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
# Change quotas for tenant
quota_set = self.admin_client.update_quotas(tenant_id,
**new_quotas)
def safe_reset_quotas(tenant_id):
try:
self.admin_client.reset_quotas(tenant_id)
except exceptions.NotFound:
pass
self.addCleanup(safe_reset_quotas, tenant_id)
for key, value in new_quotas.items():
self.assertEqual(value, quota_set[key])
# Confirm our tenant is listed among tenants with non-default quotas
non_default_quotas = self.admin_client.list_quotas()
found = False
for qs in non_default_quotas['quotas']:
if qs['tenant_id'] == tenant_id:
found = True
self.assertTrue(found)
# Confirm via the API that quotas were changed as requested for the tenant
quota_set = self.admin_client.show_quotas(tenant_id)
quota_set = quota_set['quota']
for key, value in new_quotas.items():
self.assertEqual(value, quota_set[key])
# Reset quotas to default and confirm
self.admin_client.reset_quotas(tenant_id)
non_default_quotas = self.admin_client.list_quotas()
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
@test.attr(type='gate')
def test_quotas(self):
new_quotas = {'network': 0, 'security_group': 0}
self._check_quotas(new_quotas)
@test.requires_ext(extension='lbaas', service='network')
@test.attr(type='gate')
def test_lbaas_quotas(self):
new_quotas = {'vip': 1, 'pool': 2,
'member': 3, 'health_monitor': 4}
self._check_quotas(new_quotas)
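
The lifecycle exercised by _check_quotas condenses to update, verify, reset. A short sketch against the same client API, assuming tenant_id already exists and using the lbaas quota keys from the test above:

# Condensed sketch of the quota lifecycle from _check_quotas.
def set_and_reset_lbaas_quotas(admin_client, tenant_id):
    new_quotas = {'vip': 1, 'pool': 2}
    quota_set = admin_client.update_quotas(tenant_id, **new_quotas)
    for key, value in new_quotas.items():
        assert quota_set[key] == value
    # show_quotas reflects the same values under the 'quota' key
    assert admin_client.show_quotas(tenant_id)['quota']['pool'] == 2
    admin_client.reset_quotas(tenant_id)  # back to defaults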


@ -1,475 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2016 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
from neutron_lbaas.tests.tempest.v1.api import clients
CONF = config.CONF
class BaseNetworkTest(test.BaseTestCase):
"""
Base class for the Neutron tests that use the Tempest Neutron REST client
Per the Neutron API Guide, API v1.x was removed from the source code tree
(docs.openstack.org/api/openstack-network/2.0/content/Overview-d1e71.html)
Therefore, v2.x of the Neutron API is assumed. It is also assumed that the
following options are defined in the [network] section of etc/tempest.conf:
tenant_network_cidr with a block of CIDRs from which smaller blocks
can be allocated for tenant networks
tenant_network_mask_bits with the mask bits to be used to partition the
block defined by tenant_network_cidr
Finally, it is assumed that the following option is defined in the
[service_available] section of etc/tempest.conf
neutron as True
"""
force_tenant_isolation = False
credentials = ['primary']
# Default to ipv4.
_ip_version = 4
@classmethod
def get_client_manager(cls, credential_type=None, roles=None,
force_new=None):
manager = test.BaseTestCase.get_client_manager(
credential_type=credential_type,
roles=roles,
force_new=force_new)
# Neutron uses a different clients manager than the one in Tempest
return clients.Manager(manager.credentials)
@classmethod
def skip_checks(cls):
# Create no network resources for these tests.
cls.set_network_resources()
super(BaseNetworkTest, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException("Neutron support is required")
if cls._ip_version == 6 and not CONF.network_feature_enabled.ipv6:
raise cls.skipException("IPv6 Tests are disabled.")
@classmethod
def setup_credentials(cls):
# Create no network resources for these tests.
cls.set_network_resources()
super(BaseNetworkTest, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(BaseNetworkTest, cls).setup_clients()
cls.client = cls.os.network_client
@classmethod
def resource_setup(cls):
cls.networks = []
cls.shared_networks = []
cls.subnets = []
cls.ports = []
cls.routers = []
cls.pools = []
cls.vips = []
cls.members = []
cls.health_monitors = []
cls.vpnservices = []
cls.ikepolicies = []
cls.floating_ips = []
cls.metering_labels = []
cls.metering_label_rules = []
cls.fw_rules = []
cls.fw_policies = []
cls.ipsecpolicies = []
cls.ethertype = "IPv" + str(cls._ip_version)
@classmethod
def resource_cleanup(cls):
if CONF.service_available.neutron:
# Clean up ipsec policies
for ipsecpolicy in cls.ipsecpolicies:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_ipsecpolicy,
ipsecpolicy['id'])
# Clean up firewall policies
for fw_policy in cls.fw_policies:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_firewall_policy,
fw_policy['id'])
# Clean up firewall rules
for fw_rule in cls.fw_rules:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_firewall_rule,
fw_rule['id'])
# Clean up ike policies
for ikepolicy in cls.ikepolicies:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_ikepolicy,
ikepolicy['id'])
# Clean up vpn services
for vpnservice in cls.vpnservices:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_vpnservice,
vpnservice['id'])
# Clean up floating IPs
for floating_ip in cls.floating_ips:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_floatingip,
floating_ip['id'])
# Clean up routers
for router in cls.routers:
test_utils.call_and_ignore_notfound_exc(
cls.delete_router,
router)
# Clean up health monitors
for health_monitor in cls.health_monitors:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_health_monitor,
health_monitor['id'])
# Clean up members
for member in cls.members:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_member,
member['id'])
# Clean up vips
for vip in cls.vips:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_vip,
vip['id'])
# Clean up pools
for pool in cls.pools:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_pool,
pool['id'])
# Clean up metering label rules
for metering_label_rule in cls.metering_label_rules:
test_utils.call_and_ignore_notfound_exc(
cls.admin_client.delete_metering_label_rule,
metering_label_rule['id'])
# Clean up metering labels
for metering_label in cls.metering_labels:
test_utils.call_and_ignore_notfound_exc(
cls.admin_client.delete_metering_label,
metering_label['id'])
# Clean up ports
for port in cls.ports:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_port,
port['id'])
# Clean up subnets
for subnet in cls.subnets:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_subnet,
subnet['id'])
# Clean up networks
for network in cls.networks:
test_utils.call_and_ignore_notfound_exc(
cls.client.delete_network,
network['id'])
# Clean up shared networks
for network in cls.shared_networks:
test_utils.call_and_ignore_notfound_exc(
cls.admin_client.delete_network,
network['id'])
super(BaseNetworkTest, cls).resource_cleanup()
@classmethod
def create_network(cls, network_name=None):
"""Wrapper utility that returns a test network."""
network_name = network_name or data_utils.rand_name('test-network-')
body = cls.client.create_network(name=network_name)
network = body['network']
cls.networks.append(network)
return network
@classmethod
def create_shared_network(cls, network_name=None):
network_name = network_name or data_utils.rand_name('sharednetwork-')
post_body = {'name': network_name, 'shared': True}
body = cls.admin_client.create_network(**post_body)
network = body['network']
cls.shared_networks.append(network)
return network
@classmethod
def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
ip_version=None, client=None, **kwargs):
"""Wrapper utility that returns a test subnet."""
# allow tests to use admin client
if not client:
client = cls.client
# The cidr and mask_bits depend on the ip version.
ip_version = ip_version if ip_version is not None else cls._ip_version
gateway_not_set = gateway == ''
if ip_version == 4:
cidr = cidr or netaddr.IPNetwork(CONF.network.project_network_cidr)
mask_bits = mask_bits or CONF.network.project_network_mask_bits
elif ip_version == 6:
cidr = (
cidr or netaddr.IPNetwork(
CONF.network.project_network_v6_cidr))
mask_bits = mask_bits or CONF.network.project_network_v6_mask_bits
# Find a cidr that is not in use yet and create a subnet with it
for subnet_cidr in cidr.subnet(mask_bits):
if gateway_not_set:
gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
else:
gateway_ip = gateway
try:
body = client.create_subnet(
network_id=network['id'],
cidr=str(subnet_cidr),
ip_version=ip_version,
gateway_ip=gateway_ip,
**kwargs)
break
except lib_exc.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
else:
message = 'Available CIDR for subnet creation could not be found'
raise exceptions.BuildErrorException(message)
subnet = body['subnet']
cls.subnets.append(subnet)
return subnet
@classmethod
def create_port(cls, network, **kwargs):
"""Wrapper utility that returns a test port."""
body = cls.client.create_port(network_id=network['id'],
**kwargs)
port = body['port']
cls.ports.append(port)
return port
@classmethod
def update_port(cls, port, **kwargs):
"""Wrapper utility that updates a test port."""
body = cls.client.update_port(port['id'],
**kwargs)
return body['port']
@classmethod
def create_router(cls, router_name=None, admin_state_up=False,
external_network_id=None, enable_snat=None,
**kwargs):
ext_gw_info = {}
if external_network_id:
ext_gw_info['network_id'] = external_network_id
if enable_snat:
ext_gw_info['enable_snat'] = enable_snat
body = cls.client.create_router(
router_name, external_gateway_info=ext_gw_info,
admin_state_up=admin_state_up, **kwargs)
router = body['router']
cls.routers.append(router)
return router
@classmethod
def create_floatingip(cls, external_network_id):
"""Wrapper utility that returns a test floating IP."""
body = cls.client.create_floatingip(
floating_network_id=external_network_id)
fip = body['floatingip']
cls.floating_ips.append(fip)
return fip
@classmethod
def create_pool(cls, name, lb_method, protocol, subnet):
"""Wrapper utility that returns a test pool."""
body = cls.client.create_pool(
name=name,
lb_method=lb_method,
protocol=protocol,
subnet_id=subnet['id'])
pool = body['pool']
cls.pools.append(pool)
return pool
@classmethod
def update_pool(cls, name):
"""Wrapper utility that returns a test pool."""
body = cls.client.update_pool(name=name)
pool = body['pool']
return pool
@classmethod
def create_vip(cls, name, protocol, protocol_port, subnet, pool):
"""Wrapper utility that returns a test vip."""
body = cls.client.create_vip(name=name,
protocol=protocol,
protocol_port=protocol_port,
subnet_id=subnet['id'],
pool_id=pool['id'])
vip = body['vip']
cls.vips.append(vip)
return vip
@classmethod
def update_vip(cls, name):
body = cls.client.update_vip(name=name)
vip = body['vip']
return vip
@classmethod
def create_member(cls, protocol_port, pool, ip_version=None):
"""Wrapper utility that returns a test member."""
ip_version = ip_version if ip_version is not None else cls._ip_version
member_address = "fd00::abcd" if ip_version == 6 else "10.0.9.46"
body = cls.client.create_member(address=member_address,
protocol_port=protocol_port,
pool_id=pool['id'])
member = body['member']
cls.members.append(member)
return member
@classmethod
def update_member(cls, admin_state_up):
body = cls.client.update_member(admin_state_up=admin_state_up)
member = body['member']
return member
@classmethod
def create_health_monitor(cls, delay, max_retries, Type, timeout):
"""Wrapper utility that returns a test health monitor."""
body = cls.client.create_health_monitor(delay=delay,
max_retries=max_retries,
type=Type,
timeout=timeout)
health_monitor = body['health_monitor']
cls.health_monitors.append(health_monitor)
return health_monitor
@classmethod
def update_health_monitor(cls, admin_state_up):
body = cls.client.update_health_monitor(admin_state_up=admin_state_up)
health_monitor = body['health_monitor']
return health_monitor
@classmethod
def create_router_interface(cls, router_id, subnet_id):
"""Wrapper utility that returns a router interface."""
interface = cls.client.add_router_interface_with_subnet_id(
router_id, subnet_id)
return interface
@classmethod
def create_vpnservice(cls, subnet_id, router_id):
"""Wrapper utility that returns a test vpn service."""
body = cls.client.create_vpnservice(
subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
name=data_utils.rand_name("vpnservice-"))
vpnservice = body['vpnservice']
cls.vpnservices.append(vpnservice)
return vpnservice
@classmethod
def create_ikepolicy(cls, name):
"""Wrapper utility that returns a test ike policy."""
body = cls.client.create_ikepolicy(name=name)
ikepolicy = body['ikepolicy']
cls.ikepolicies.append(ikepolicy)
return ikepolicy
@classmethod
def create_firewall_rule(cls, action, protocol):
"""Wrapper utility that returns a test firewall rule."""
body = cls.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action=action,
protocol=protocol)
fw_rule = body['firewall_rule']
cls.fw_rules.append(fw_rule)
return fw_rule
@classmethod
def create_firewall_policy(cls):
"""Wrapper utility that returns a test firewall policy."""
body = cls.client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
fw_policy = body['firewall_policy']
cls.fw_policies.append(fw_policy)
return fw_policy
@classmethod
def delete_router(cls, router):
body = cls.client.list_router_interfaces(router['id'])
interfaces = body['ports']
for i in interfaces:
try:
cls.client.remove_router_interface_with_subnet_id(
router['id'], i['fixed_ips'][0]['subnet_id'])
except lib_exc.NotFound:
pass
cls.client.delete_router(router['id'])
@classmethod
def create_ipsecpolicy(cls, name):
"""Wrapper utility that returns a test ipsec policy."""
body = cls.client.create_ipsecpolicy(name=name)
ipsecpolicy = body['ipsecpolicy']
cls.ipsecpolicies.append(ipsecpolicy)
return ipsecpolicy
class BaseAdminNetworkTest(BaseNetworkTest):
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(BaseAdminNetworkTest, cls).setup_clients()
cls.admin_client = cls.os_adm.network_client
cls.identity_admin_client = cls.os_adm.tenants_client
@classmethod
def create_metering_label(cls, name, description):
"""Wrapper utility that returns a test metering label."""
body = cls.admin_client.create_metering_label(
description=description,
name=data_utils.rand_name("metering-label"))
metering_label = body['metering_label']
cls.metering_labels.append(metering_label)
return metering_label
@classmethod
def create_metering_label_rule(cls, remote_ip_prefix, direction,
metering_label_id):
"""Wrapper utility that returns a test metering label rule."""
body = cls.admin_client.create_metering_label_rule(
remote_ip_prefix=remote_ip_prefix, direction=direction,
metering_label_id=metering_label_id)
metering_label_rule = body['metering_label_rule']
cls.metering_label_rules.append(metering_label_rule)
return metering_label_rule
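
create_subnet above searches the configured CIDR block for a sub-CIDR that does not collide with an existing subnet, then derives a gateway from the chosen block. A self-contained sketch of that search; the base block, mask bits, and in-use set are example values, not tempest config:

# Standalone sketch of the CIDR search in create_subnet.
import netaddr

base = netaddr.IPNetwork('10.0.0.0/16')
mask_bits = 24
in_use = [netaddr.IPNetwork('10.0.0.0/24')]  # pretend this one collides

chosen = None
for candidate in base.subnet(mask_bits):
    # two CIDR networks overlap only if one contains the other
    if any(candidate in used or used in candidate for used in in_use):
        continue  # stand-in for the "overlaps with another subnet" retry
    chosen = candidate
    break
if chosen is None:
    raise RuntimeError('Available CIDR for subnet creation could not be found')
gateway_ip = str(netaddr.IPAddress(chosen.first + 1))  # first usable address
print(chosen, gateway_ip)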


@ -1,87 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2016 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import cred_provider
from tempest import config
from tempest.lib.services.identity.v2.tenants_client import TenantsClient
from tempest import manager
from neutron_lbaas.tests.tempest.lib.services.network.json.network_client import \
NetworkClientJSON
CONF = config.CONF
class Manager(manager.Manager):
"""
Top level manager for OpenStack tempest clients
"""
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
# NOTE: Tempest falls back to the compute API timeout values if
# project-specific timeout values don't exist.
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
default_params_with_timeout_values.update(default_params)
def __init__(self, credentials=None, service=None):
super(Manager, self).__init__(credentials=credentials)
self._set_identity_clients()
self.network_client = NetworkClientJSON(
self.auth_provider,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**self.default_params)
def _set_identity_clients(self):
params = {
'service': CONF.identity.catalog_type,
'region': CONF.identity.region,
}
params.update(self.default_params_with_timeout_values)
params_v2_admin = params.copy()
params_v2_admin['endpoint_type'] = CONF.identity.v2_admin_endpoint_type
self.tenants_client = TenantsClient(
self.auth_provider, **params_v2_admin)
class AdminManager(Manager):
"""
Manager object that uses the admin credentials for its
managed client objects
"""
def __init__(self, service=None):
super(AdminManager, self).__init__(
credentials=cred_provider.get_configured_credentials(
'identity_admin'),
service=service)
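
A brief usage sketch of the Manager above, assuming credentials already obtained from tempest's credential machinery; the helper name is hypothetical:

# Hypothetical usage of the v1 clients.Manager defined above.
from neutron_lbaas.tests.tempest.v1.api import clients

def list_v1_pools(credentials):
    # network_client is the NetworkClientJSON wired up in Manager.__init__
    mgr = clients.Manager(credentials)
    return mgr.network_client.list_pools()['pools']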


@ -1,454 +0,0 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2016 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from neutron_lbaas.tests.tempest.v1.api import base
class LoadBalancerTestJSON(base.BaseNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
create VIP and pool
show VIP
list VIPs
update VIP
delete VIP
update pool
delete pool
show pool
list pools
health monitoring operations
"""
@classmethod
def resource_setup(cls):
super(LoadBalancerTestJSON, cls).resource_setup()
if not test.is_extension_enabled('lbaas', 'network'):
msg = "lbaas extension not enabled."
raise cls.skipException(msg)
cls.network = cls.create_network()
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
pool_name = data_utils.rand_name('pool-')
vip_name = data_utils.rand_name('vip-')
cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN",
"HTTP", cls.subnet)
cls.vip = cls.create_vip(name=vip_name,
protocol="HTTP",
protocol_port=80,
subnet=cls.subnet,
pool=cls.pool)
cls.member = cls.create_member(80, cls.pool, cls._ip_version)
cls.member_address = ("10.0.9.47" if cls._ip_version == 4
else "2015::beef")
cls.health_monitor = cls.create_health_monitor(delay=4,
max_retries=3,
Type="TCP",
timeout=1)
def _check_list_with_filter(self, obj_name, attr_exceptions, **kwargs):
create_obj = getattr(self.client, 'create_' + obj_name)
delete_obj = getattr(self.client, 'delete_' + obj_name)
list_objs = getattr(self.client, 'list_' + obj_name + 's')
body = create_obj(**kwargs)
obj = body[obj_name]
self.addCleanup(delete_obj, obj['id'])
for key, value in obj.items():
# It is not relevant to filter by all arguments, which is why
# there is a list of attributes to skip
if key not in attr_exceptions:
body = list_objs(**{key: value})
objs = [v[key] for v in body[obj_name + 's']]
self.assertIn(value, objs)
@test.attr(type='smoke')
@decorators.idempotent_id('c96dbfab-4a80-4e74-a535-e950b5bedd47')
def test_list_vips(self):
# Verify the vIP exists in the list of all vIPs
body = self.client.list_vips()
vips = body['vips']
self.assertIn(self.vip['id'], [v['id'] for v in vips])
@test.attr(type='smoke')
@decorators.idempotent_id('b8853f65-5089-4e69-befd-041a143427ff')
def test_list_vips_with_filter(self):
name = data_utils.rand_name('vip-')
body = self.client.create_pool(name=data_utils.rand_name("pool-"),
lb_method="ROUND_ROBIN",
protocol="HTTPS",
subnet_id=self.subnet['id'])
pool = body['pool']
self.addCleanup(self.client.delete_pool, pool['id'])
attr_exceptions = ['status', 'session_persistence',
'status_description']
self._check_list_with_filter(
'vip', attr_exceptions, name=name, protocol="HTTPS",
protocol_port=81, subnet_id=self.subnet['id'], pool_id=pool['id'],
description=data_utils.rand_name('description-'),
admin_state_up=False)
@test.attr(type='smoke')
@decorators.idempotent_id('27f56083-9af9-4a48-abe9-ca1bcc6c9035')
def test_create_update_delete_pool_vip(self):
# Creates a vip
name = data_utils.rand_name('vip-')
address = self.subnet['allocation_pools'][0]['end']
body = self.client.create_pool(
name=data_utils.rand_name("pool-"),
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet['id'])
pool = body['pool']
body = self.client.create_vip(name=name,
protocol="HTTP",
protocol_port=80,
subnet_id=self.subnet['id'],
pool_id=pool['id'],
address=address)
vip = body['vip']
vip_id = vip['id']
# Confirm VIP's address correctness with a show
body = self.client.show_vip(vip_id)
vip = body['vip']
self.assertEqual(address, vip['address'])
# Verification of vip update
new_name = "New_vip"
new_description = "New description"
persistence_type = "HTTP_COOKIE"
update_data = {"session_persistence": {
"type": persistence_type}}
body = self.client.update_vip(vip_id,
name=new_name,
description=new_description,
connection_limit=10,
admin_state_up=False,
**update_data)
updated_vip = body['vip']
self.assertEqual(new_name, updated_vip['name'])
self.assertEqual(new_description, updated_vip['description'])
self.assertEqual(10, updated_vip['connection_limit'])
self.assertFalse(updated_vip['admin_state_up'])
self.assertEqual(persistence_type,
updated_vip['session_persistence']['type'])
self.client.delete_vip(vip['id'])
self.client.wait_for_resource_deletion('vip', vip['id'])
# Verification of pool update
new_name = "New_pool"
body = self.client.update_pool(pool['id'],
name=new_name,
description="new_description",
lb_method='LEAST_CONNECTIONS')
updated_pool = body['pool']
self.assertEqual(new_name, updated_pool['name'])
self.assertEqual('new_description', updated_pool['description'])
self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
self.client.delete_pool(pool['id'])
@test.attr(type='smoke')
@decorators.idempotent_id('0435a95e-1d19-4d90-9e9f-3b979e9ad089')
def test_show_vip(self):
# Verifies the details of a vip
body = self.client.show_vip(self.vip['id'])
vip = body['vip']
for key, value in vip.items():
# 'status' should not be confirmed in api tests
if key != 'status':
self.assertEqual(self.vip[key], value)
@test.attr(type='smoke')
@decorators.idempotent_id('6e7a7d31-8451-456d-b24a-e50479ce42a7')
def test_show_pool(self):
# Here we need a new pool without any dependency on VIPs
body = self.client.create_pool(name=data_utils.rand_name("pool-"),
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet['id'])
pool = body['pool']
self.addCleanup(self.client.delete_pool, pool['id'])
# Verifies the details of a pool
body = self.client.show_pool(pool['id'])
shown_pool = body['pool']
for key, value in pool.items():
# 'status' should not be confirmed in api tests
if key != 'status':
self.assertEqual(value, shown_pool[key])
@test.attr(type='smoke')
@decorators.idempotent_id('d1ab1ffa-e06a-487f-911f-56418cb27727')
def test_list_pools(self):
# Verify the pool exists in the list of all pools
body = self.client.list_pools()
pools = body['pools']
self.assertIn(self.pool['id'], [p['id'] for p in pools])
@test.attr(type='smoke')
@decorators.idempotent_id('27cc4c1a-caac-4273-b983-2acb4afaad4f')
def test_list_pools_with_filters(self):
attr_exceptions = ['status', 'vip_id', 'members', 'provider',
'status_description']
self._check_list_with_filter(
'pool', attr_exceptions, name=data_utils.rand_name("pool-"),
lb_method="ROUND_ROBIN", protocol="HTTPS",
subnet_id=self.subnet['id'],
description=data_utils.rand_name('description-'),
admin_state_up=False)
@test.attr(type='smoke')
@decorators.idempotent_id('282d0dfd-5c3a-4c9b-b39c-c99782f39193')
def test_list_members(self):
# Verify the member exists in the list of all members
body = self.client.list_members()
members = body['members']
self.assertIn(self.member['id'], [m['id'] for m in members])
@test.attr(type='smoke')
@decorators.idempotent_id('243b5126-24c6-4879-953e-7c7e32d8a57f')
def test_list_members_with_filters(self):
attr_exceptions = ['status', 'status_description']
self._check_list_with_filter('member', attr_exceptions,
address=self.member_address,
protocol_port=80,
pool_id=self.pool['id'])
@test.attr(type='smoke')
@decorators.idempotent_id('fb833ee8-9e69-489f-b540-a409762b78b2')
def test_create_update_delete_member(self):
# Creates a member
body = self.client.create_member(address=self.member_address,
protocol_port=80,
pool_id=self.pool['id'])
member = body['member']
# Verification of member update
body = self.client.update_member(member['id'],
admin_state_up=False)
updated_member = body['member']
self.assertFalse(updated_member['admin_state_up'])
# Verification of member delete
self.client.delete_member(member['id'])
@test.attr(type='smoke')
@decorators.idempotent_id('893cd71f-a7dd-4485-b162-f6ab9a534914')
def test_show_member(self):
# Verifies the details of a member
body = self.client.show_member(self.member['id'])
member = body['member']
for key, value in member.items():
# 'status' should not be confirmed in api tests
if key != 'status':
self.assertEqual(self.member[key], value)
@test.attr(type='smoke')
@decorators.idempotent_id('8e5822c5-68a4-4224-8d6c-a617741ebc2d')
def test_list_health_monitors(self):
# Verify the health monitor exists in the list of all health monitors
body = self.client.list_health_monitors()
health_monitors = body['health_monitors']
self.assertIn(self.health_monitor['id'],
[h['id'] for h in health_monitors])
@test.attr(type='smoke')
@decorators.idempotent_id('49bac58a-511c-4875-b794-366698211d25')
def test_list_health_monitors_with_filters(self):
attr_exceptions = ['status', 'status_description', 'pools']
self._check_list_with_filter('health_monitor', attr_exceptions,
delay=5, max_retries=4, type="TCP",
timeout=2)
@test.attr(type='smoke')
@decorators.idempotent_id('e8ce05c4-d554-4d1e-a257-ad32ce134bb5')
def test_create_update_delete_health_monitor(self):
# Creates a health_monitor
body = self.client.create_health_monitor(delay=4,
max_retries=3,
type="TCP",
timeout=1)
health_monitor = body['health_monitor']
# Verification of health_monitor update
body = (self.client.update_health_monitor
(health_monitor['id'],
admin_state_up=False))
updated_health_monitor = body['health_monitor']
self.assertFalse(updated_health_monitor['admin_state_up'])
# Verification of health_monitor delete
body = self.client.delete_health_monitor(health_monitor['id'])
@test.attr(type='smoke')
@decorators.idempotent_id('d3e1aebc-06c2-49b3-9816-942af54012eb')
def test_create_health_monitor_http_type(self):
hm_type = "HTTP"
body = self.client.create_health_monitor(delay=4,
max_retries=3,
type=hm_type,
timeout=1)
health_monitor = body['health_monitor']
self.addCleanup(self.client.delete_health_monitor,
health_monitor['id'])
self.assertEqual(hm_type, health_monitor['type'])
@test.attr(type='smoke')
@decorators.idempotent_id('0eff9f67-90fb-4bb1-b4ed-c5fda99fff0c')
def test_update_health_monitor_http_method(self):
body = self.client.create_health_monitor(delay=4,
max_retries=3,
type="HTTP",
timeout=1)
health_monitor = body['health_monitor']
self.addCleanup(self.client.delete_health_monitor,
health_monitor['id'])
body = (self.client.update_health_monitor
(health_monitor['id'],
http_method="POST",
url_path="/home/user",
expected_codes="290"))
updated_health_monitor = body['health_monitor']
self.assertEqual("POST", updated_health_monitor['http_method'])
self.assertEqual("/home/user", updated_health_monitor['url_path'])
self.assertEqual("290", updated_health_monitor['expected_codes'])
@test.attr(type='smoke')
@decorators.idempotent_id('08e126ab-1407-483f-a22e-b11cc032ca7c')
def test_show_health_monitor(self):
# Verifies the details of a health_monitor
body = self.client.show_health_monitor(self.health_monitor['id'])
health_monitor = body['health_monitor']
for key, value in health_monitor.items():
# 'status' should not be confirmed in api tests
if key != 'status':
self.assertEqual(self.health_monitor[key], value)
@test.attr(type='smoke')
@decorators.idempotent_id('87f7628e-8918-493d-af50-0602845dbb5b')
def test_associate_disassociate_health_monitor_with_pool(self):
# Verify that a health monitor can be associated with a pool
self.client.associate_health_monitor_with_pool(
self.health_monitor['id'], self.pool['id'])
body = self.client.show_health_monitor(
self.health_monitor['id'])
health_monitor = body['health_monitor']
body = self.client.show_pool(self.pool['id'])
pool = body['pool']
self.assertIn(pool['id'],
[p['pool_id'] for p in health_monitor['pools']])
self.assertIn(health_monitor['id'], pool['health_monitors'])
# Verify that a health monitor can be disassociated from a pool
(self.client.disassociate_health_monitor_with_pool
(self.health_monitor['id'], self.pool['id']))
body = self.client.show_pool(self.pool['id'])
pool = body['pool']
body = self.client.show_health_monitor(
self.health_monitor['id'])
health_monitor = body['health_monitor']
self.assertNotIn(health_monitor['id'], pool['health_monitors'])
self.assertNotIn(pool['id'],
[p['pool_id'] for p in health_monitor['pools']])
@test.attr(type='smoke')
@decorators.idempotent_id('525fc7dc-be24-408d-938d-822e9783e027')
def test_get_lb_pool_stats(self):
# Verify the details of pool stats
body = self.client.list_lb_pool_stats(self.pool['id'])
stats = body['stats']
self.assertIn("bytes_in", stats)
self.assertIn("total_connections", stats)
self.assertIn("active_connections", stats)
self.assertIn("bytes_out", stats)
@test.attr(type='smoke')
@decorators.idempotent_id('66236be2-5121-4047-8cde-db4b83b110a5')
def test_update_list_of_health_monitors_associated_with_pool(self):
(self.client.associate_health_monitor_with_pool
(self.health_monitor['id'], self.pool['id']))
self.client.update_health_monitor(
self.health_monitor['id'], admin_state_up=False)
body = self.client.show_pool(self.pool['id'])
health_monitors = body['pool']['health_monitors']
for health_monitor_id in health_monitors:
body = self.client.show_health_monitor(health_monitor_id)
self.assertFalse(body['health_monitor']['admin_state_up'])
(self.client.disassociate_health_monitor_with_pool
(self.health_monitor['id'], self.pool['id']))
@test.attr(type='smoke')
@decorators.idempotent_id('44ec9b40-b501-41e2-951f-4fc673b15ac0')
def test_update_admin_state_up_of_pool(self):
self.client.update_pool(self.pool['id'],
admin_state_up=False)
body = self.client.show_pool(self.pool['id'])
pool = body['pool']
self.assertFalse(pool['admin_state_up'])
@test.attr(type='smoke')
@decorators.idempotent_id('466a9d4c-37c6-4ea2-b807-133437beb48c')
def test_show_vip_associated_with_pool(self):
body = self.client.show_pool(self.pool['id'])
pool = body['pool']
body = self.client.show_vip(pool['vip_id'])
vip = body['vip']
self.assertEqual(self.vip['name'], vip['name'])
self.assertEqual(self.vip['id'], vip['id'])
@test.attr(type='smoke')
@decorators.idempotent_id('7b97694e-69d0-4151-b265-e1052a465aa8')
def test_show_members_associated_with_pool(self):
body = self.client.show_pool(self.pool['id'])
members = body['pool']['members']
for member_id in members:
body = self.client.show_member(member_id)
self.assertIsNotNone(body['member']['status'])
self.assertEqual(member_id, body['member']['id'])
self.assertIsNotNone(body['member']['admin_state_up'])
@test.attr(type='smoke')
@decorators.idempotent_id('73ed6f27-595b-4b2c-969c-dbdda6b8ab34')
def test_update_pool_related_to_member(self):
# Create new pool
body = self.client.create_pool(name=data_utils.rand_name("pool-"),
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet['id'])
new_pool = body['pool']
self.addCleanup(self.client.delete_pool, new_pool['id'])
# Update member with new pool's id
body = self.client.update_member(self.member['id'],
pool_id=new_pool['id'])
# Confirm with show that pool_id changed
body = self.client.show_member(self.member['id'])
member = body['member']
self.assertEqual(member['pool_id'], new_pool['id'])
# Update member with the old pool id; this is needed for cleanup
body = self.client.update_member(self.member['id'],
pool_id=self.pool['id'])
@test.attr(type='smoke')
@decorators.idempotent_id('cf63f071-bbe3-40ba-97a0-a33e11923162')
def test_update_member_weight(self):
self.client.update_member(self.member['id'],
weight=2)
body = self.client.show_member(self.member['id'])
member = body['member']
self.assertEqual(2, member['weight'])
@decorators.skip_because(bug="1402007")
class LoadBalancerIpV6TestJSON(LoadBalancerTestJSON):
_ip_version = 6
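
These tests create monitors with delay=4 and timeout=1, which satisfies the v1 plugin's _validate_hm_parameters rule shown earlier (delay must not be smaller than timeout). A tiny sketch of that rule with example values:

# Sketch of the delay/timeout rule from _validate_hm_parameters.
def validate_hm_parameters(delay, timeout):
    # the probe interval (delay) must be at least the per-probe timeout,
    # presumably so that health checks do not overlap
    if delay < timeout:
        raise ValueError('delay must be >= timeout')

validate_hm_parameters(4, 1)  # fine: matches the monitors created above
# validate_hm_parameters(1, 4) would raise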


@ -23,7 +23,7 @@ from neutron import context
with mock.patch.dict(sys.modules, {'brocade_neutron_lbaas': mock.Mock()}):
from neutron_lbaas.drivers.brocade import driver_v2 as driver
from neutron_lbaas.services.loadbalancer import data_models
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2
class FakeModel(object):
@ -72,7 +72,7 @@ class LoadBalancerManagerTest(ManagerTest):
class TestBrocadeLoadBalancerDriver(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
test_db_loadbalancerv2.LbaasPluginDbTestCase):
def _create_fake_models(self):
id = 'name-001'


@ -407,8 +407,7 @@ class TestHaproxyNSDriver(base.BaseTestCase):
namespace='ns1')
@mock.patch('neutron.common.utils.ensure_dir')
@mock.patch('neutron_lbaas.services.loadbalancer.drivers.haproxy.'
'jinja_cfg.save_config')
@mock.patch('neutron_lbaas.drivers.haproxy.jinja_cfg.save_config')
@mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
def test_spawn(self, ip_wrap, jinja_save, ensure_dir):
mock_ns = ip_wrap.return_value


@ -17,7 +17,7 @@ from neutron import context
from neutron_lbaas.drivers.logging_noop import driver
from neutron_lbaas.services.loadbalancer import data_models
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2
log_path = ('neutron_lbaas.drivers.logging_noop.driver.LOG')
@ -127,7 +127,7 @@ class LoadBalancerManagerTest(ManagerTestWithUpdates):
class TestLoggingNoopLoadBalancerDriver(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
test_db_loadbalancerv2.LbaasPluginDbTestCase):
def _create_fake_models(self):
id = 'name-001'


@ -18,7 +18,7 @@ import mock
from neutron import context as ncontext
from neutron_lbaas.drivers.vmware import edge_driver_v2
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2
DUMMY_CERT = {'id': 'fake_id'}
@ -71,7 +71,7 @@ class ManagerTest(object):
class TestVMWareEdgeLoadBalancerDriverV2(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
test_db_loadbalancerv2.LbaasPluginDbTestCase):
def setUp(self):
super(TestVMWareEdgeLoadBalancerDriverV2, self).setUp()


@ -1,44 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron_lbaas.services.loadbalancer.agent import agent
from neutron_lbaas.tests import base
class TestLbaasService(base.BaseTestCase):
def test_start(self):
with mock.patch.object(
agent.n_rpc.Service, 'start'
) as mock_start:
mgr = mock.Mock()
cfg.CONF.periodic_interval = mock.Mock(return_value=10)
agent_service = agent.LbaasAgentService('host', 'topic', mgr)
agent_service.start()
self.assertTrue(mock_start.called)
def test_main(self):
logging_str = 'neutron.agent.common.config.setup_logging'
with mock.patch(logging_str), \
mock.patch.object(agent.service, 'launch') as mock_launch, \
mock.patch('sys.argv'), \
mock.patch.object(agent.manager, 'LbaasAgentManager'), \
mock.patch.object(cfg.CONF, 'register_opts'):
agent.main()
mock_launch.assert_called_once_with(mock.ANY, mock.ANY)


@ -1,443 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from neutron.plugins.common import constants
from neutron_lbaas.services.loadbalancer.agent import agent_manager as manager
from neutron_lbaas.services.loadbalancer import constants as l_const
from neutron_lbaas.tests import base
class TestManager(base.BaseTestCase):
def setUp(self):
super(TestManager, self).setUp()
mock_conf = mock.Mock()
mock_conf.device_driver = ['devdriver']
self.mock_importer = mock.patch.object(manager, 'importutils').start()
rpc_mock_cls = mock.patch(
'neutron_lbaas.services.loadbalancer.agent.agent_api.LbaasAgentApi'
).start()
# disable setting up periodic state reporting
mock_conf.AGENT.report_interval = 0
self.mgr = manager.LbaasAgentManager(mock_conf)
self.rpc_mock = rpc_mock_cls.return_value
self.log = mock.patch.object(manager, 'LOG').start()
self.driver_mock = mock.Mock()
self.mgr.device_drivers = {'devdriver': self.driver_mock}
instance_mapping = collections.OrderedDict([('1', 'devdriver'),
('2', 'devdriver')])
self.mgr.instance_mapping = instance_mapping
self.mgr.needs_resync = False
def test_initialize_service_hook(self):
with mock.patch.object(self.mgr, 'sync_state') as sync:
self.mgr.initialize_service_hook(mock.Mock())
sync.assert_called_once_with()
def test_periodic_resync_needs_sync(self):
with mock.patch.object(self.mgr, 'sync_state') as sync:
self.mgr.needs_resync = True
self.mgr.periodic_resync(mock.Mock())
sync.assert_called_once_with()
def test_periodic_resync_no_sync(self):
with mock.patch.object(self.mgr, 'sync_state') as sync:
self.mgr.needs_resync = False
self.mgr.periodic_resync(mock.Mock())
self.assertFalse(sync.called)
def test_collect_stats(self):
self.mgr.collect_stats(mock.Mock())
self.rpc_mock.update_pool_stats.assert_has_calls([
mock.call('1', mock.ANY),
mock.call('2', mock.ANY)
], any_order=True)
def test_collect_stats_exception(self):
self.driver_mock.get_stats.side_effect = Exception
self.mgr.collect_stats(mock.Mock())
self.assertFalse(self.rpc_mock.called)
self.assertTrue(self.mgr.needs_resync)
self.assertTrue(self.log.exception.called)
def _sync_state_helper(self, ready, reloaded, destroyed):
with mock.patch.object(self.mgr, '_reload_pool') as reload, \
mock.patch.object(self.mgr, '_destroy_pool') as destroy:
self.rpc_mock.get_ready_devices.return_value = ready
self.mgr.sync_state()
self.assertEqual(len(reloaded), len(reload.mock_calls))
self.assertEqual(len(destroyed), len(destroy.mock_calls))
reload.assert_has_calls([mock.call(i) for i in reloaded],
any_order=True)
destroy.assert_has_calls([mock.call(i) for i in destroyed],
any_order=True)
self.assertFalse(self.mgr.needs_resync)
def test_sync_state_all_known(self):
self._sync_state_helper(['1', '2'], ['1', '2'], [])
def test_sync_state_all_unknown(self):
self.mgr.instance_mapping = {}
self._sync_state_helper(['1', '2'], ['1', '2'], [])
def test_sync_state_destroy_all(self):
self._sync_state_helper([], [], ['1', '2'])
def test_sync_state_both(self):
self.mgr.instance_mapping = {'1': 'devdriver'}
self._sync_state_helper(['2'], ['2'], ['1'])
def test_sync_state_exception(self):
self.rpc_mock.get_ready_devices.side_effect = Exception
self.mgr.sync_state()
self.assertTrue(self.log.exception.called)
self.assertTrue(self.mgr.needs_resync)
def test_reload_pool(self):
config = {'driver': 'devdriver'}
self.rpc_mock.get_logical_device.return_value = config
pool_id = 'new_id'
self.assertNotIn(pool_id, self.mgr.instance_mapping)
self.mgr._reload_pool(pool_id)
self.driver_mock.deploy_instance.assert_called_once_with(config)
self.assertIn(pool_id, self.mgr.instance_mapping)
self.rpc_mock.pool_deployed.assert_called_once_with(pool_id)
def test_reload_pool_driver_not_found(self):
config = {'driver': 'unknown_driver'}
self.rpc_mock.get_logical_device.return_value = config
pool_id = 'new_id'
self.assertNotIn(pool_id, self.mgr.instance_mapping)
self.mgr._reload_pool(pool_id)
self.assertTrue(self.log.error.called)
self.assertFalse(self.driver_mock.deploy_instance.called)
self.assertNotIn(pool_id, self.mgr.instance_mapping)
self.assertFalse(self.rpc_mock.pool_deployed.called)
def test_reload_pool_exception_on_driver(self):
config = {'driver': 'devdriver'}
self.rpc_mock.get_logical_device.return_value = config
self.driver_mock.deploy_instance.side_effect = Exception
pool_id = 'new_id'
self.assertNotIn(pool_id, self.mgr.instance_mapping)
self.mgr._reload_pool(pool_id)
self.driver_mock.deploy_instance.assert_called_once_with(config)
self.assertNotIn(pool_id, self.mgr.instance_mapping)
self.assertFalse(self.rpc_mock.pool_deployed.called)
self.assertTrue(self.log.exception.called)
self.assertTrue(self.mgr.needs_resync)
def test_destroy_pool(self):
pool_id = '1'
self.assertIn(pool_id, self.mgr.instance_mapping)
self.mgr._destroy_pool(pool_id)
self.driver_mock.undeploy_instance.assert_called_once_with(
pool_id, delete_namespace=True)
self.assertNotIn(pool_id, self.mgr.instance_mapping)
self.rpc_mock.pool_destroyed.assert_called_once_with(pool_id)
self.assertFalse(self.mgr.needs_resync)
def test_destroy_pool_exception_on_driver(self):
pool_id = '1'
self.assertIn(pool_id, self.mgr.instance_mapping)
self.driver_mock.undeploy_instance.side_effect = Exception
self.mgr._destroy_pool(pool_id)
self.driver_mock.undeploy_instance.assert_called_once_with(
pool_id, delete_namespace=True)
self.assertIn(pool_id, self.mgr.instance_mapping)
self.assertFalse(self.rpc_mock.pool_destroyed.called)
self.assertTrue(self.log.exception.called)
self.assertTrue(self.mgr.needs_resync)
def test_get_driver_unknown_device(self):
self.assertRaises(manager.DeviceNotFoundOnAgent,
self.mgr._get_driver, 'unknown')
def test_remove_orphans(self):
self.mgr.remove_orphans()
self.driver_mock.remove_orphans.assert_called_once_with(['1', '2'])
def test_create_vip(self):
vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True}
self.mgr.create_vip(mock.Mock(), vip)
self.driver_mock.create_vip.assert_called_once_with(vip)
self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
constants.ACTIVE)
def test_create_vip_with_admin_down(self):
vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': False}
self.mgr.create_vip(mock.Mock(), vip)
self.driver_mock.create_vip.assert_called_once_with(vip)
self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
l_const.DISABLED)
def test_create_vip_failed(self):
vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True}
self.driver_mock.create_vip.side_effect = Exception
self.mgr.create_vip(mock.Mock(), vip)
self.driver_mock.create_vip.assert_called_once_with(vip)
self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
constants.ERROR)
def test_update_vip(self):
old_vip = {'id': 'id1', 'admin_state_up': True}
vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True}
self.mgr.update_vip(mock.Mock(), old_vip, vip)
self.driver_mock.update_vip.assert_called_once_with(old_vip, vip)
self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
constants.ACTIVE)
def test_update_vip_with_admin_down(self):
old_vip = {'id': 'id1', 'admin_state_up': True}
vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': False}
self.mgr.update_vip(mock.Mock(), old_vip, vip)
self.driver_mock.update_vip.assert_called_once_with(old_vip, vip)
self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
l_const.DISABLED)
def test_update_vip_failed(self):
old_vip = {'id': 'id1', 'admin_state_up': True}
vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True}
self.driver_mock.update_vip.side_effect = Exception
self.mgr.update_vip(mock.Mock(), old_vip, vip)
self.driver_mock.update_vip.assert_called_once_with(old_vip, vip)
self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
constants.ERROR)
def test_delete_vip(self):
vip = {'id': 'id1', 'pool_id': '1'}
self.mgr.delete_vip(mock.Mock(), vip)
self.driver_mock.delete_vip.assert_called_once_with(vip)
def test_create_pool(self):
pool = {'id': 'id1', 'admin_state_up': True}
self.assertNotIn(pool['id'], self.mgr.instance_mapping)
self.mgr.create_pool(mock.Mock(), pool, 'devdriver')
self.driver_mock.create_pool.assert_called_once_with(pool)
self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
constants.ACTIVE)
self.assertIn(pool['id'], self.mgr.instance_mapping)
def test_create_pool_with_admin_down(self):
pool = {'id': 'id1', 'admin_state_up': False}
self.assertNotIn(pool['id'], self.mgr.instance_mapping)
self.mgr.create_pool(mock.Mock(), pool, 'devdriver')
self.driver_mock.create_pool.assert_called_once_with(pool)
self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
l_const.DISABLED)
self.assertIn(pool['id'], self.mgr.instance_mapping)
def test_create_pool_failed(self):
pool = {'id': 'id1', 'admin_state_up': True}
self.assertNotIn(pool['id'], self.mgr.instance_mapping)
self.driver_mock.create_pool.side_effect = Exception
self.mgr.create_pool(mock.Mock(), pool, 'devdriver')
self.driver_mock.create_pool.assert_called_once_with(pool)
self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
constants.ERROR)
self.assertNotIn(pool['id'], self.mgr.instance_mapping)
def test_update_pool(self):
old_pool = {'id': '1', 'admin_state_up': True}
pool = {'id': '1', 'admin_state_up': True}
self.mgr.update_pool(mock.Mock(), old_pool, pool)
self.driver_mock.update_pool.assert_called_once_with(old_pool, pool)
self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
constants.ACTIVE)
def test_update_pool_with_admin_down(self):
old_pool = {'id': '1', 'admin_state_up': True}
pool = {'id': '1', 'admin_state_up': False}
self.mgr.update_pool(mock.Mock(), old_pool, pool)
self.driver_mock.update_pool.assert_called_once_with(old_pool, pool)
self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
l_const.DISABLED)
def test_update_pool_failed(self):
old_pool = {'id': '1', 'admin_state_up': True}
pool = {'id': '1', 'admin_state_up': True}
self.driver_mock.update_pool.side_effect = Exception
self.mgr.update_pool(mock.Mock(), old_pool, pool)
self.driver_mock.update_pool.assert_called_once_with(old_pool, pool)
self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
constants.ERROR)
def test_delete_pool(self):
pool = {'id': '1'}
self.assertIn(pool['id'], self.mgr.instance_mapping)
self.mgr.delete_pool(mock.Mock(), pool)
self.driver_mock.delete_pool.assert_called_once_with(pool)
self.assertNotIn(pool['id'], self.mgr.instance_mapping)
def test_create_member(self):
member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True}
self.mgr.create_member(mock.Mock(), member)
self.driver_mock.create_member.assert_called_once_with(member)
self.rpc_mock.update_status.assert_called_once_with('member',
member['id'],
constants.ACTIVE)
def test_create_member_with_admin_down(self):
member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': False}
self.mgr.create_member(mock.Mock(), member)
self.driver_mock.create_member.assert_called_once_with(member)
self.rpc_mock.update_status.assert_called_once_with('member',
member['id'],
l_const.DISABLED)
def test_create_member_failed(self):
member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True}
self.driver_mock.create_member.side_effect = Exception
self.mgr.create_member(mock.Mock(), member)
self.driver_mock.create_member.assert_called_once_with(member)
self.rpc_mock.update_status.assert_called_once_with('member',
member['id'],
constants.ERROR)
def test_update_member(self):
old_member = {'id': 'id1', 'admin_state_up': True}
member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True}
self.mgr.update_member(mock.Mock(), old_member, member)
self.driver_mock.update_member.assert_called_once_with(old_member,
member)
self.rpc_mock.update_status.assert_called_once_with('member',
member['id'],
constants.ACTIVE)
def test_update_member_with_admin_down(self):
old_member = {'id': 'id1', 'admin_state_up': True}
member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': False}
self.mgr.update_member(mock.Mock(), old_member, member)
self.driver_mock.update_member.assert_called_once_with(old_member,
member)
self.rpc_mock.update_status.assert_called_once_with('member',
member['id'],
l_const.DISABLED)
def test_update_member_failed(self):
old_member = {'id': 'id1', 'admin_state_up': True}
member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True}
self.driver_mock.update_member.side_effect = Exception
self.mgr.update_member(mock.Mock(), old_member, member)
self.driver_mock.update_member.assert_called_once_with(old_member,
member)
self.rpc_mock.update_status.assert_called_once_with('member',
member['id'],
constants.ERROR)
def test_delete_member(self):
member = {'id': 'id1', 'pool_id': '1'}
self.mgr.delete_member(mock.Mock(), member)
self.driver_mock.delete_member.assert_called_once_with(member)
def test_create_monitor(self):
monitor = {'id': 'id1', 'admin_state_up': True}
assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1')
self.driver_mock.create_pool_health_monitor.assert_called_once_with(
monitor, '1')
self.rpc_mock.update_status.assert_called_once_with('health_monitor',
assoc_id,
constants.ACTIVE)
def test_create_monitor_with_admin_down(self):
monitor = {'id': 'id1', 'admin_state_up': False}
assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1')
self.driver_mock.create_pool_health_monitor.assert_called_once_with(
monitor, '1')
self.rpc_mock.update_status.assert_called_once_with('health_monitor',
assoc_id,
l_const.DISABLED)
def test_create_monitor_failed(self):
monitor = {'id': 'id1', 'admin_state_up': True}
assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
self.driver_mock.create_pool_health_monitor.side_effect = Exception
self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1')
self.driver_mock.create_pool_health_monitor.assert_called_once_with(
monitor, '1')
self.rpc_mock.update_status.assert_called_once_with('health_monitor',
assoc_id,
constants.ERROR)
def test_update_monitor(self):
monitor = {'id': 'id1', 'admin_state_up': True}
assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1')
self.driver_mock.update_pool_health_monitor.assert_called_once_with(
monitor, monitor, '1')
self.rpc_mock.update_status.assert_called_once_with('health_monitor',
assoc_id,
constants.ACTIVE)
def test_update_monitor_with_admin_down(self):
monitor = {'id': 'id1', 'admin_state_up': False}
assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1')
self.driver_mock.update_pool_health_monitor.assert_called_once_with(
monitor, monitor, '1')
self.rpc_mock.update_status.assert_called_once_with('health_monitor',
assoc_id,
l_const.DISABLED)
def test_update_monitor_failed(self):
monitor = {'id': 'id1', 'admin_state_up': True}
assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
self.driver_mock.update_pool_health_monitor.side_effect = Exception
self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1')
self.driver_mock.update_pool_health_monitor.assert_called_once_with(
monitor, monitor, '1')
self.rpc_mock.update_status.assert_called_once_with('health_monitor',
assoc_id,
constants.ERROR)
def test_delete_monitor(self):
monitor = {'id': 'id1'}
self.mgr.delete_pool_health_monitor(mock.Mock(), monitor, '1')
self.driver_mock.delete_pool_health_monitor.assert_called_once_with(
monitor, '1')
def test_agent_disabled(self):
payload = {'admin_state_up': False}
self.mgr.agent_updated(mock.Mock(), payload)
self.driver_mock.undeploy_instance.assert_has_calls(
[mock.call('1', delete_namespace=True),
mock.call('2', delete_namespace=True)],
any_order=True
)


@ -1,76 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron_lbaas.services.loadbalancer.agent import agent_api as api
from neutron_lbaas.tests import base
class TestApiCache(base.BaseTestCase):
def setUp(self):
super(TestApiCache, self).setUp()
self.api = api.LbaasAgentApi('topic', mock.sentinel.context, 'host')
def test_init(self):
self.assertEqual('host', self.api.host)
self.assertEqual(mock.sentinel.context, self.api.context)
def _test_method(self, method, **kwargs):
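        # These RPC methods also send the agent's host, so mirror that in
        # the expected kwargs.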
add_host = ('get_ready_devices', 'plug_vip_port', 'unplug_vip_port',
'update_pool_stats')
expected_kwargs = copy.copy(kwargs)
if method in add_host:
expected_kwargs['host'] = self.api.host
with mock.patch.object(self.api.client, 'call') as rpc_mock, \
mock.patch.object(self.api.client, 'prepare') as prepare_mock:
prepare_mock.return_value = self.api.client
rpc_mock.return_value = 'foo'
rv = getattr(self.api, method)(**kwargs)
self.assertEqual('foo', rv)
prepare_args = {}
prepare_mock.assert_called_once_with(**prepare_args)
rpc_mock.assert_called_once_with(mock.sentinel.context, method,
**expected_kwargs)
def test_get_ready_devices(self):
self._test_method('get_ready_devices')
def test_get_logical_device(self):
self._test_method('get_logical_device', pool_id='pool_id')
def test_pool_destroyed(self):
self._test_method('pool_destroyed', pool_id='pool_id')
def test_pool_deployed(self):
self._test_method('pool_deployed', pool_id='pool_id')
def test_update_status(self):
self._test_method('update_status', obj_type='type', obj_id='id',
status='status')
def test_plug_vip_port(self):
self._test_method('plug_vip_port', port_id='port_id')
def test_unplug_vip_port(self):
self._test_method('unplug_vip_port', port_id='port_id')
def test_update_pool_stats(self):
self._test_method('update_pool_stats', pool_id='id', stats='stats')


@ -1,180 +0,0 @@
# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from neutron import context
from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
with mock.patch.dict(sys.modules, {'a10_neutron_lbaas': mock.Mock()}):
from neutron_lbaas.services.loadbalancer.drivers.a10networks \
import driver_v1
def fake_model(id):
return {
'id': id,
'tenant_id': "tennant-was-a-great-doctor"
}
def fake_member(id):
return {
'id': id,
'tenant_id': "vippyvip",
'address': '1.1.1.1'
}
class TestA10ThunderDriver(test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
super(TestA10ThunderDriver, self).setUp()
self.context = context.get_admin_context()
self.plugin = mock.Mock()
self.driver = driver_v1.ThunderDriver(self.plugin)
self.driver.a10 = mock.Mock()
self.m = fake_model('p1')
def test__hm_binding_count(self):
n = self.driver._hm_binding_count(self.context, 'hm01')
self.assertEqual(0, n)
def test__member_count(self):
self.m = fake_member('mem1')
n = self.driver._member_count(self.context, self.m)
self.assertEqual(0, n)
def test__member_get_ip(self):
self.m = fake_member('mem1')
z = self.driver._member_get_ip(self.context, self.m, False)
self.assertEqual('1.1.1.1', z)
z = self.driver._member_get_ip(self.context, self.m, True)
self.assertEqual('1.1.1.1', z)
def test__pool_get_hm(self):
self.driver._pool_get_hm(self.context, 'hm01')
self.plugin.get_health_monitor.assert_called_once_with(
self.context, 'hm01')
def test__pool_get_tenant_id(self):
z = self.driver._pool_get_tenant_id(self.context, 'pool1')
self.assertEqual('', z)
def test__pool_get_vip_id(self):
z = self.driver._pool_get_vip_id(self.context, 'pool1')
self.assertEqual('', z)
def test__pool_total(self):
n = self.driver._pool_total(self.context,
tenant_id='whatareyoudoingdave')
self.assertEqual(0, n)
def test__active(self):
self.driver._active(self.context, 'vip', 'vip1')
self.plugin.update_status.assert_called_once_with(
self.context, lb_db.Vip, 'vip1', 'ACTIVE')
def test__failed(self):
self.driver._failed(self.context, 'vip', 'vip2-1-2')
self.plugin.update_status.assert_called_once_with(
self.context, lb_db.Vip, 'vip2-1-2', 'ERROR')
def test__db_delete(self):
self.driver._db_delete(self.context, 'pool', 'myid0101')
self.plugin._delete_db_pool.assert_called_once_with(
self.context, 'myid0101')
def test__hm_active(self):
self.driver._hm_active(self.context, 'hm01', 'pool1')
self.plugin.update_pool_health_monitor.assert_called_once_with(
self.context, 'hm01', 'pool1', 'ACTIVE')
def test__hm_failed(self):
self.driver._hm_failed(self.context, 'hm01', 'pool1')
self.plugin.update_pool_health_monitor.assert_called_once_with(
self.context, 'hm01', 'pool1', 'ERROR')
def test__hm_db_delete(self):
self.driver._hm_db_delete(self.context, 'hm01', 'pool2')
self.plugin._delete_db_pool_health_monitor.assert_called_once_with(
self.context, 'hm01', 'pool2')
def test_create_vip(self):
self.driver.create_vip(self.context, self.m)
self.driver.a10.vip.create.assert_called_once_with(
self.context, self.m)
def test_update_vip(self):
self.driver.update_vip(self.context, self.m, self.m)
self.driver.a10.vip.update.assert_called_once_with(
self.context, self.m, self.m)
def test_delete_vip(self):
self.driver.delete_vip(self.context, self.m)
self.driver.a10.vip.delete.assert_called_once_with(
self.context, self.m)
def test_create_pool(self):
self.driver.create_pool(self.context, self.m)
self.driver.a10.pool.create.assert_called_once_with(
self.context, self.m)
def test_update_pool(self):
self.driver.update_pool(self.context, self.m, self.m)
self.driver.a10.pool.update.assert_called_once_with(
self.context, self.m, self.m)
def test_delete_pool(self):
self.driver.delete_pool(self.context, self.m)
self.driver.a10.pool.delete.assert_called_once_with(
self.context, self.m)
def test_stats(self):
self.driver.stats(self.context, self.m['id'])
self.driver.a10.pool.stats.assert_called_once_with(
self.context, self.m['id'])
def test_create_member(self):
self.driver.create_member(self.context, self.m)
self.driver.a10.member.create.assert_called_once_with(
self.context, self.m)
def test_update_member(self):
self.driver.update_member(self.context, self.m, self.m)
self.driver.a10.member.update.assert_called_once_with(
self.context, self.m, self.m)
def test_delete_member(self):
self.driver.delete_member(self.context, self.m)
self.driver.a10.member.delete.assert_called_once_with(
self.context, self.m)
def test_update_pool_health_monitor(self):
self.driver.update_pool_health_monitor(self.context, self.m, self.m,
'pool1')
self.driver.a10.hm.update.assert_called_once_with(
self.context, self.m, self.m, 'pool1')
def test_create_pool_health_monitor(self):
self.driver.create_pool_health_monitor(self.context, self.m, 'pool1')
self.driver.a10.hm.create.assert_called_once_with(
self.context, self.m, 'pool1')
def test_delete_pool_health_monitor(self):
self.driver.delete_pool_health_monitor(self.context, self.m, 'pool1')
self.driver.a10.hm.delete.assert_called_once_with(
self.context, self.m, 'pool1')


@ -1,294 +0,0 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
RET_PERSISTENCE = {
'type': 'HTTP_COOKIE',
'cookie_name': 'HTTP_COOKIE'}
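# Set iteration order depends on PYTHONHASHSEED, so capture it once and
# reuse the same ordering wherever the rendered config is compared.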
HASHSEED_ORDERED_CODES = list({'404', '405', '500'})
PIPED_CODES = '|'.join(HASHSEED_ORDERED_CODES)
RET_MONITOR = {
'id': 'sample_monitor_id_1',
'type': 'HTTP',
'delay': 30,
'timeout': 31,
'max_retries': 3,
'http_method': 'GET',
'url_path': '/index.html',
'expected_codes': PIPED_CODES,
'admin_state_up': True}
RET_MEMBER_1 = {
'id': 'sample_member_id_1',
'address': '10.0.0.99',
'protocol_port': 82,
'weight': 13,
'subnet_id': '10.0.0.1/24',
'admin_state_up': True,
'provisioning_status': 'ACTIVE'}
RET_MEMBER_2 = {
'id': 'sample_member_id_2',
'address': '10.0.0.98',
'protocol_port': 82,
'weight': 13,
'subnet_id': '10.0.0.1/24',
'admin_state_up': True,
'provisioning_status': 'ACTIVE'}
RET_POOL = {
'id': 'sample_pool_id_1',
'protocol': 'http',
'lb_algorithm': 'roundrobin',
'members': [RET_MEMBER_1, RET_MEMBER_2],
'health_monitor': RET_MONITOR,
'session_persistence': RET_PERSISTENCE,
'admin_state_up': True,
'provisioning_status': 'ACTIVE'}
RET_DEF_TLS_CONT = {'id': 'cont_id_1', 'allencompassingpem': 'imapem'}
RET_SNI_CONT_1 = {'id': 'cont_id_2', 'allencompassingpem': 'imapem2'}
RET_SNI_CONT_2 = {'id': 'cont_id_3', 'allencompassingpem': 'imapem3'}
RET_LISTENER = {
'id': 'sample_listener_id_1',
'protocol_port': '80',
'protocol': 'HTTP',
'protocol_mode': 'http',
'default_pool': RET_POOL,
'connection_limit': 98}
RET_LISTENER_TLS = {
'id': 'sample_listener_id_1',
'protocol_port': '443',
'protocol_mode': 'HTTP',
'protocol': 'TERMINATED_HTTPS',
'default_pool': RET_POOL,
'connection_limit': 98,
'default_tls_container_id': 'cont_id_1',
'default_tls_path': '/v2/sample_loadbalancer_id_1/cont_id_1.pem',
'default_tls_container': RET_DEF_TLS_CONT}
RET_LISTENER_TLS_SNI = {
'id': 'sample_listener_id_1',
'protocol_port': '443',
'protocol_mode': 'http',
'protocol': 'TERMINATED_HTTPS',
'default_pool': RET_POOL,
'connection_limit': 98,
'default_tls_container_id': 'cont_id_1',
'default_tls_path': '/v2/sample_loadbalancer_id_1/cont_id_1.pem',
'default_tls_container': RET_DEF_TLS_CONT,
'crt_dir': '/v2/sample_loadbalancer_id_1',
'sni_container_ids': ['cont_id_2', 'cont_id_3'],
'sni_containers': [RET_SNI_CONT_1, RET_SNI_CONT_2]}
RET_LB = {
'name': 'test-lb',
'vip_address': '10.0.0.2',
'listeners': [RET_LISTENER],
'pools': [RET_POOL]}
RET_LB_TLS = {
'name': 'test-lb',
'vip_address': '10.0.0.2',
'listeners': [RET_LISTENER_TLS],
'pools': [RET_POOL]}
RET_LB_TLS_SNI = {
'name': 'test-lb',
'vip_address': '10.0.0.2',
'listeners': [RET_LISTENER_TLS_SNI],
'pools': [RET_POOL]}
def sample_loadbalancer_tuple(proto=None, monitor=True, persistence=True,
persistence_type=None, tls=False, sni=False):
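    # Build a minimal loadbalancer namedtuple graph:
    # loadbalancer -> listener -> pool -> members/monitor.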
proto = 'HTTP' if proto is None else proto
in_lb = collections.namedtuple(
'loadbalancer', 'id, name, vip_address, protocol, vip_port, '
'listeners, pools')
return in_lb(
id='sample_loadbalancer_id_1',
name='test-lb',
vip_address='10.0.0.2',
protocol=proto,
vip_port=sample_vip_port_tuple(),
listeners=[sample_listener_tuple(proto=proto, monitor=monitor,
persistence=persistence,
persistence_type=persistence_type,
tls=tls,
sni=sni)],
pools=[sample_pool_tuple(proto=proto, monitor=monitor,
persistence=persistence,
persistence_type=persistence_type)]
)
def sample_vip_port_tuple():
vip_port = collections.namedtuple('vip_port', 'fixed_ips')
ip_address = collections.namedtuple('ip_address', 'ip_address')
in_address = ip_address(ip_address='10.0.0.2')
return vip_port(fixed_ips=[in_address])
def sample_listener_tuple(proto=None, monitor=True, persistence=True,
persistence_type=None, tls=False, sni=False):
proto = 'HTTP' if proto is None else proto
    port = '443' if proto in ('HTTPS', 'TERMINATED_HTTPS') else '80'
in_listener = collections.namedtuple(
'listener', 'id, tenant_id, protocol_port, protocol, default_pool, '
'connection_limit, admin_state_up, default_tls_container_id, '
'sni_container_ids, default_tls_container, '
'sni_containers, loadbalancer_id')
return in_listener(
id='sample_listener_id_1',
tenant_id='sample_tenant_id',
protocol_port=port,
protocol=proto,
default_pool=sample_pool_tuple(
proto=proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type),
connection_limit=98,
admin_state_up=True,
default_tls_container_id='cont_id_1' if tls else '',
sni_container_ids=['cont_id_2', 'cont_id_3'] if sni else [],
default_tls_container=sample_tls_container_tuple(
id='cont_id_1', certificate='--imapem1--\n',
private_key='--imakey1--\n', intermediates=[
'--imainter1--\n', '--imainter1too--\n'],
primary_cn='fakeCNM'
) if tls else '',
sni_containers=[
sample_tls_sni_container_tuple(
tls_container_id='cont_id_2',
tls_container=sample_tls_container_tuple(
id='cont_id_2', certificate='--imapem2--\n',
private_key='--imakey2--\n', intermediates=[
'--imainter2--\n', '--imainter2too--\n'],
primary_cn='fakeCN')),
sample_tls_sni_container_tuple(
tls_container_id='cont_id_3',
tls_container=sample_tls_container_tuple(
id='cont_id_3', certificate='--imapem3--\n',
private_key='--imakey3--\n', intermediates=[
'--imainter3--\n', '--imainter3too--\n'],
primary_cn='fakeCN2'))]
if sni else [],
loadbalancer_id='sample_loadbalancer_id_1'
)
def sample_tls_sni_container_tuple(tls_container=None, tls_container_id=None):
sc = collections.namedtuple('sni_container', 'tls_container,'
'tls_container_id')
return sc(tls_container=tls_container, tls_container_id=tls_container_id)
def sample_tls_container_tuple(id='cont_id_1', certificate=None,
private_key=None, intermediates=None,
primary_cn=None):
intermediates = intermediates or []
sc = collections.namedtuple(
'tls_cert',
'id, certificate, private_key, intermediates, primary_cn')
    return sc(id=id, certificate=certificate, private_key=private_key,
              intermediates=intermediates, primary_cn=primary_cn)
def sample_pool_tuple(proto=None, monitor=True, persistence=True,
persistence_type=None, hm_admin_state=True):
proto = 'HTTP' if proto is None else proto
in_pool = collections.namedtuple(
'pool', 'id, protocol, lb_algorithm, members, healthmonitor,'
'session_persistence, admin_state_up, provisioning_status')
mon = (sample_health_monitor_tuple(proto=proto, admin_state=hm_admin_state)
if monitor is True else None)
persis = sample_session_persistence_tuple(
persistence_type=persistence_type) if persistence is True else None
return in_pool(
id='sample_pool_id_1',
protocol=proto,
lb_algorithm='ROUND_ROBIN',
members=[sample_member_tuple('sample_member_id_1', '10.0.0.99'),
sample_member_tuple('sample_member_id_2', '10.0.0.98')],
healthmonitor=mon,
session_persistence=persis,
admin_state_up=True,
provisioning_status='ACTIVE')
def sample_member_tuple(id, ip, admin_state_up=True, status='ACTIVE'):
in_member = collections.namedtuple('member',
'id, address, protocol_port, '
'weight, subnet_id, '
'admin_state_up, provisioning_status')
return in_member(
id=id,
address=ip,
protocol_port=82,
weight=13,
subnet_id='10.0.0.1/24',
admin_state_up=admin_state_up,
provisioning_status=status)
def sample_session_persistence_tuple(persistence_type=None):
spersistence = collections.namedtuple('SessionPersistence',
'type, cookie_name')
pt = 'HTTP_COOKIE' if persistence_type is None else persistence_type
return spersistence(type=pt,
cookie_name=pt)
def sample_health_monitor_tuple(proto='HTTP', admin_state=True):
    proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto
monitor = collections.namedtuple(
'monitor', 'id, type, delay, timeout, max_retries, http_method, '
'url_path, expected_codes, admin_state_up')
return monitor(id='sample_monitor_id_1', type=proto, delay=30,
timeout=31, max_retries=3, http_method='GET',
url_path='/index.html', expected_codes='500, 405, 404',
admin_state_up=admin_state)
def sample_base_expected_config(backend, frontend=None):
if frontend is None:
frontend = ("frontend sample_listener_id_1\n"
" option tcplog\n"
" maxconn 98\n"
" option forwardfor\n"
" bind 10.0.0.2:80\n"
" mode http\n"
" default_backend sample_pool_id_1\n\n")
return ("# Configuration for test-lb\n"
"global\n"
" daemon\n"
" user nobody\n"
" group nogroup\n"
" log /dev/log local0\n"
" log /dev/log local1 notice\n"
" stats socket /sock_path mode 0666 level user\n\n"
"defaults\n"
" log global\n"
" retries 3\n"
" option redispatch\n"
" timeout connect 5000\n"
" timeout client 50000\n"
" timeout server 50000\n\n" + frontend + backend)


@ -1,232 +0,0 @@
# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lbaas.services.loadbalancer.drivers.haproxy import cfg
from neutron_lbaas.tests import base
class TestHaproxyCfg(base.BaseTestCase):
def test_save_config(self):
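        # Stub each config section builder and verify save_config writes the
        # sections joined by newlines.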
with mock.patch('neutron_lbaas.services.loadbalancer.'
'drivers.haproxy.cfg._build_global') as b_g, \
mock.patch('neutron_lbaas.services.loadbalancer.'
'drivers.haproxy.cfg._build_defaults') as b_d, \
mock.patch('neutron_lbaas.services.loadbalancer.'
'drivers.haproxy.cfg._build_frontend') as b_f, \
mock.patch('neutron_lbaas.services.loadbalancer.'
'drivers.haproxy.cfg._build_backend') as b_b, \
mock.patch('neutron.common.utils.replace_file') as replace:
test_config = ['globals', 'defaults', 'frontend', 'backend']
b_g.return_value = [test_config[0]]
b_d.return_value = [test_config[1]]
b_f.return_value = [test_config[2]]
b_b.return_value = [test_config[3]]
cfg.save_config('test_path', mock.Mock())
replace.assert_called_once_with('test_path',
'\n'.join(test_config))
def test_build_global(self):
expected_opts = ['global',
'\tdaemon',
'\tuser nobody',
'\tgroup test_group',
'\tlog /dev/log local0',
'\tlog /dev/log local1 notice',
'\tstats socket test_path mode 0666 level user']
opts = cfg._build_global(mock.Mock(), 'test_path', 'test_group')
self.assertEqual(expected_opts, list(opts))
def test_build_defaults(self):
expected_opts = ['defaults',
'\tlog global',
'\tretries 3',
'\toption redispatch',
'\ttimeout connect 5000',
'\ttimeout client 50000',
'\ttimeout server 50000']
opts = cfg._build_defaults(mock.Mock())
self.assertEqual(expected_opts, list(opts))
def test_build_frontend(self):
test_config = {'vip': {'id': 'vip_id',
'protocol': 'HTTP',
'port': {'fixed_ips': [
{'ip_address': '10.0.0.2'}]
},
'protocol_port': 80,
'connection_limit': 2000,
'admin_state_up': True,
},
'pool': {'id': 'pool_id'}}
expected_opts = ['frontend vip_id',
'\toption tcplog',
'\tbind 10.0.0.2:80',
'\tmode http',
'\tdefault_backend pool_id',
'\tmaxconn 2000',
'\toption forwardfor']
opts = cfg._build_frontend(test_config)
self.assertEqual(expected_opts, list(opts))
test_config['vip']['connection_limit'] = -1
expected_opts.remove('\tmaxconn 2000')
opts = cfg._build_frontend(test_config)
self.assertEqual(expected_opts, list(opts))
test_config['vip']['admin_state_up'] = False
expected_opts.append('\tdisabled')
opts = cfg._build_frontend(test_config)
self.assertEqual(expected_opts, list(opts))
def test_build_backend(self):
test_config = {'pool': {'id': 'pool_id',
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN',
'admin_state_up': True},
'members': [{'status': 'ACTIVE',
'admin_state_up': True,
'id': 'member1_id',
'address': '10.0.0.3',
'protocol_port': 80,
'weight': 1},
{'status': 'INACTIVE',
'admin_state_up': True,
'id': 'member2_id',
'address': '10.0.0.4',
'protocol_port': 80,
'weight': 1},
{'status': 'PENDING_CREATE',
'admin_state_up': True,
'id': 'member3_id',
'address': '10.0.0.5',
'protocol_port': 80,
'weight': 1}],
'healthmonitors': [{'admin_state_up': True,
'delay': 3,
'max_retries': 4,
'timeout': 2,
'type': 'TCP'}],
'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
expected_opts = ['backend pool_id',
'\tmode http',
'\tbalance roundrobin',
'\toption forwardfor',
'\ttimeout check 2s',
'\tcookie SRV insert indirect nocache',
'\tserver member1_id 10.0.0.3:80 weight 1 '
'check inter 3s fall 4 cookie member1_id',
'\tserver member2_id 10.0.0.4:80 weight 1 '
'check inter 3s fall 4 cookie member2_id',
'\tserver member3_id 10.0.0.5:80 weight 1 '
'check inter 3s fall 4 cookie member3_id']
opts = cfg._build_backend(test_config)
self.assertEqual(expected_opts, list(opts))
test_config['pool']['admin_state_up'] = False
expected_opts.append('\tdisabled')
opts = cfg._build_backend(test_config)
self.assertEqual(expected_opts, list(opts))
def test_get_server_health_option(self):
test_config = {'healthmonitors': [{'admin_state_up': False,
'delay': 3,
'max_retries': 4,
'timeout': 2,
'type': 'TCP',
'http_method': 'GET',
'url_path': '/',
'expected_codes': '200'}]}
self.assertEqual(('', []), cfg._get_server_health_option(test_config))
test_config['healthmonitors'][0]['admin_state_up'] = True
expected = (' check inter 3s fall 4', ['timeout check 2s'])
self.assertEqual(expected, cfg._get_server_health_option(test_config))
test_config['healthmonitors'][0]['type'] = 'HTTPS'
expected = (' check inter 3s fall 4',
['timeout check 2s',
'option httpchk GET /',
'http-check expect rstatus 200',
'option ssl-hello-chk'])
self.assertEqual(expected, cfg._get_server_health_option(test_config))
def test_has_http_cookie_persistence(self):
config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
self.assertTrue(cfg._has_http_cookie_persistence(config))
config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
self.assertFalse(cfg._has_http_cookie_persistence(config))
config = {'vip': {'session_persistence': {}}}
self.assertFalse(cfg._has_http_cookie_persistence(config))
def test_get_session_persistence(self):
config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
self.assertEqual(['stick-table type ip size 10k', 'stick on src'],
cfg._get_session_persistence(config))
config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}},
'members': []}
self.assertEqual([], cfg._get_session_persistence(config))
config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
self.assertEqual([], cfg._get_session_persistence(config))
config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}},
'members': [{'id': 'member1_id'}]}
self.assertEqual(['cookie SRV insert indirect nocache'],
cfg._get_session_persistence(config))
config = {'vip': {'session_persistence': {'type': 'APP_COOKIE',
'cookie_name': 'test'}}}
self.assertEqual(['appsession test len 56 timeout 3h'],
cfg._get_session_persistence(config))
config = {'vip': {'session_persistence': {'type': 'APP_COOKIE'}}}
self.assertEqual([], cfg._get_session_persistence(config))
config = {'vip': {'session_persistence': {'type': 'UNSUPPORTED'}}}
self.assertEqual([], cfg._get_session_persistence(config))
def test_expand_expected_codes(self):
exp_codes = ''
self.assertEqual(set([]), cfg._expand_expected_codes(exp_codes))
exp_codes = '200'
self.assertEqual(set(['200']), cfg._expand_expected_codes(exp_codes))
exp_codes = '200, 201'
self.assertEqual(set(['200', '201']),
cfg._expand_expected_codes(exp_codes))
exp_codes = '200, 201,202'
self.assertEqual(set(['200', '201', '202']),
cfg._expand_expected_codes(exp_codes))
exp_codes = '200-202'
self.assertEqual(set(['200', '201', '202']),
cfg._expand_expected_codes(exp_codes))
exp_codes = '200-202, 205'
self.assertEqual(set(['200', '201', '202', '205']),
cfg._expand_expected_codes(exp_codes))
exp_codes = '200, 201-203'
self.assertEqual(set(['200', '201', '202', '203']),
cfg._expand_expected_codes(exp_codes))
exp_codes = '200, 201-203, 205'
self.assertEqual(set(['200', '201', '202', '203', '205']),
cfg._expand_expected_codes(exp_codes))
exp_codes = '201-200, 205'
self.assertEqual(set(['205']), cfg._expand_expected_codes(exp_codes))


@ -1,495 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests import base
from neutron_lbaas.common.cert_manager import cert_manager
from neutron_lbaas.common.tls_utils import cert_parser
from neutron_lbaas.services.loadbalancer import data_models
from neutron_lbaas.services.loadbalancer.drivers.haproxy import jinja_cfg
from neutron_lbaas.tests.unit.services.loadbalancer.drivers.haproxy.\
sample_configs import sample_configs
class TestHaproxyCfg(base.BaseTestCase):
def test_save_config(self):
with mock.patch('neutron_lbaas.services.loadbalancer.drivers.haproxy.'
'jinja_cfg.render_loadbalancer_obj') as r_t, \
mock.patch('neutron.common.utils.replace_file') as replace:
r_t.return_value = 'fake_rendered_template'
lb = mock.Mock()
jinja_cfg.save_config('test_conf_path', lb, 'test_sock_path',
'nogroup',
'fake_state_path')
r_t.assert_called_once_with(lb,
'nogroup',
'test_sock_path',
'fake_state_path')
replace.assert_called_once_with('test_conf_path',
'fake_rendered_template')
def test_get_template(self):
template = jinja_cfg._get_template()
self.assertEqual('haproxy.loadbalancer.j2', template.name)
def test_render_template_tls_termination(self):
lb = sample_configs.sample_loadbalancer_tuple(
proto='TERMINATED_HTTPS', tls=True, sni=True)
fe = ("frontend sample_listener_id_1\n"
" option tcplog\n"
" redirect scheme https if !{ ssl_fc }\n"
" maxconn 98\n"
" option forwardfor\n"
" bind 10.0.0.2:443"
" ssl crt /v2/sample_listener_id_1/fakeCNM.pem"
" crt /v2/sample_listener_id_1\n"
" mode http\n"
" default_backend sample_pool_id_1\n\n")
be = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" cookie SRV insert indirect nocache\n"
" timeout check 31\n"
" option httpchk GET /index.html\n"
" http-check expect rstatus %s\n"
" server sample_member_id_1 10.0.0.99:82"
" weight 13 check inter 30s fall 3 cookie sample_member_id_1\n"
" server sample_member_id_2 10.0.0.98:82"
" weight 13 check inter 30s fall 3 cookie "
"sample_member_id_2\n\n"
% sample_configs.PIPED_CODES)
with mock.patch('os.makedirs'):
with mock.patch('os.listdir'):
with mock.patch.object(jinja_cfg, 'n_utils'):
with mock.patch.object(
jinja_cfg, '_process_tls_certificates') as crt:
crt.return_value = {
'tls_cert': lb.listeners[0]
.default_tls_container,
'sni_certs': [lb.listeners[0]
.sni_containers[0].tls_container]}
rendered_obj = jinja_cfg.render_loadbalancer_obj(
lb, 'nogroup',
'/sock_path',
'/v2')
self.assertEqual(
sample_configs.sample_base_expected_config(
frontend=fe, backend=be),
rendered_obj)
def test_render_template_tls_termination_no_sni(self):
lb = sample_configs.sample_loadbalancer_tuple(
proto='TERMINATED_HTTPS', tls=True)
fe = ("frontend sample_listener_id_1\n"
" option tcplog\n"
" redirect scheme https if !{ ssl_fc }\n"
" maxconn 98\n"
" option forwardfor\n"
" bind 10.0.0.2:443"
" ssl crt /v2/sample_listener_id_1/fakeCNM.pem\n"
" mode http\n"
" default_backend sample_pool_id_1\n\n")
be = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" cookie SRV insert indirect nocache\n"
" timeout check 31\n"
" option httpchk GET /index.html\n"
" http-check expect rstatus %s\n"
" server sample_member_id_1 10.0.0.99:82 "
"weight 13 check inter 30s fall 3 cookie sample_member_id_1\n"
" server sample_member_id_2 10.0.0.98:82 "
"weight 13 check inter 30s fall 3 cookie sample_member_id_2\n\n"
% sample_configs.PIPED_CODES)
with mock.patch('os.makedirs'):
with mock.patch('neutron.common.utils.replace_file'):
with mock.patch('os.listdir'):
with mock.patch.object(jinja_cfg, 'n_utils'):
with mock.patch.object(
jinja_cfg, '_process_tls_certificates') as crt:
crt.return_value = {
'tls_cert': lb.listeners[0]
.default_tls_container,
'sni_certs': []}
rendered_obj = jinja_cfg.render_loadbalancer_obj(
lb, 'nogroup',
'/sock_path',
'/v2')
self.assertEqual(
sample_configs.sample_base_expected_config(
frontend=fe, backend=be),
rendered_obj)
def test_render_template_http(self):
be = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" cookie SRV insert indirect nocache\n"
" timeout check 31\n"
" option httpchk GET /index.html\n"
" http-check expect rstatus %s\n"
" server sample_member_id_1 10.0.0.99:82 "
"weight 13 check inter 30s fall 3 cookie sample_member_id_1\n"
" server sample_member_id_2 10.0.0.98:82 "
"weight 13 check inter 30s fall 3 cookie sample_member_id_2\n\n"
% sample_configs.PIPED_CODES)
rendered_obj = jinja_cfg.render_loadbalancer_obj(
sample_configs.sample_loadbalancer_tuple(),
'nogroup', '/sock_path', '/v2')
self.assertEqual(
sample_configs.sample_base_expected_config(backend=be),
rendered_obj)
def test_render_template_https(self):
fe = ("frontend sample_listener_id_1\n"
" option tcplog\n"
" maxconn 98\n"
" bind 10.0.0.2:443\n"
" mode tcp\n"
" default_backend sample_pool_id_1\n\n")
be = ("backend sample_pool_id_1\n"
" mode tcp\n"
" balance roundrobin\n"
" cookie SRV insert indirect nocache\n"
" timeout check 31\n"
" option httpchk GET /index.html\n"
" http-check expect rstatus %s\n"
" option ssl-hello-chk\n"
" server sample_member_id_1 10.0.0.99:82 "
"weight 13 check inter 30s fall 3 cookie sample_member_id_1\n"
" server sample_member_id_2 10.0.0.98:82 "
"weight 13 check inter 30s fall 3 cookie sample_member_id_2\n\n"
% sample_configs.PIPED_CODES)
rendered_obj = jinja_cfg.render_loadbalancer_obj(
sample_configs.sample_loadbalancer_tuple(proto='HTTPS'),
'nogroup', '/sock_path', '/v2')
self.assertEqual(sample_configs.sample_base_expected_config(
frontend=fe, backend=be), rendered_obj)
def test_render_template_no_monitor_http(self):
be = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" cookie SRV insert indirect nocache\n"
" server sample_member_id_1 10.0.0.99:82 weight 13 "
"cookie sample_member_id_1\n"
" server sample_member_id_2 10.0.0.98:82 weight 13 "
"cookie sample_member_id_2\n\n")
rendered_obj = jinja_cfg.render_loadbalancer_obj(
sample_configs.sample_loadbalancer_tuple(
proto='HTTP', monitor=False),
'nogroup', '/sock_path', '/v2')
self.assertEqual(sample_configs.sample_base_expected_config(
backend=be), rendered_obj)
def test_render_template_no_monitor_https(self):
fe = ("frontend sample_listener_id_1\n"
" option tcplog\n"
" maxconn 98\n"
" bind 10.0.0.2:443\n"
" mode tcp\n"
" default_backend sample_pool_id_1\n\n")
be = ("backend sample_pool_id_1\n"
" mode tcp\n"
" balance roundrobin\n"
" cookie SRV insert indirect nocache\n"
" server sample_member_id_1 10.0.0.99:82 weight 13 "
"cookie sample_member_id_1\n"
" server sample_member_id_2 10.0.0.98:82 weight 13 "
"cookie sample_member_id_2\n\n")
rendered_obj = jinja_cfg.render_loadbalancer_obj(
sample_configs.sample_loadbalancer_tuple(
proto='HTTPS', monitor=False),
'nogroup', '/sock_path', '/v2')
self.assertEqual(sample_configs.sample_base_expected_config(
frontend=fe, backend=be), rendered_obj)
def test_render_template_no_persistence_https(self):
fe = ("frontend sample_listener_id_1\n"
" option tcplog\n"
" maxconn 98\n"
" bind 10.0.0.2:443\n"
" mode tcp\n"
" default_backend sample_pool_id_1\n\n")
be = ("backend sample_pool_id_1\n"
" mode tcp\n"
" balance roundrobin\n"
" server sample_member_id_1 10.0.0.99:82 weight 13\n"
" server sample_member_id_2 10.0.0.98:82 weight 13\n\n")
rendered_obj = jinja_cfg.render_loadbalancer_obj(
sample_configs.sample_loadbalancer_tuple(
proto='HTTPS', monitor=False, persistence=False),
'nogroup', '/sock_path', '/v2')
self.assertEqual(sample_configs.sample_base_expected_config(
frontend=fe, backend=be), rendered_obj)
def test_render_template_no_persistence_http(self):
be = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" server sample_member_id_1 10.0.0.99:82 weight 13\n"
" server sample_member_id_2 10.0.0.98:82 weight 13\n\n")
rendered_obj = jinja_cfg.render_loadbalancer_obj(
sample_configs.sample_loadbalancer_tuple(
proto='HTTP', monitor=False, persistence=False),
'nogroup', '/sock_path', '/v2')
self.assertEqual(sample_configs.sample_base_expected_config(
backend=be), rendered_obj)
def test_render_template_sourceip_persistence(self):
be = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" stick-table type ip size 10k\n"
" stick on src\n"
" timeout check 31\n"
" option httpchk GET /index.html\n"
" http-check expect rstatus %s\n"
" server sample_member_id_1 10.0.0.99:82 "
"weight 13 check inter 30s fall 3\n"
" server sample_member_id_2 10.0.0.98:82 "
"weight 13 check inter 30s fall 3\n\n"
% sample_configs.PIPED_CODES)
rendered_obj = jinja_cfg.render_loadbalancer_obj(
sample_configs.sample_loadbalancer_tuple(
persistence_type='SOURCE_IP'),
'nogroup', '/sock_path', '/v2')
self.assertEqual(
sample_configs.sample_base_expected_config(backend=be),
rendered_obj)
def test_render_template_appsession_persistence(self):
with mock.patch('os.makedirs') as md:
with mock.patch.object(jinja_cfg, 'n_utils'):
md.return_value = '/data/dirs/'
be = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" appsession APP_COOKIE len 56 timeout 3h\n"
" timeout check 31\n"
" option httpchk GET /index.html\n"
" http-check expect rstatus %s\n"
" server sample_member_id_1 10.0.0.99:82 "
"weight 13 check inter 30s fall 3\n"
" server sample_member_id_2 10.0.0.98:82 "
"weight 13 check inter 30s fall 3\n\n"
% sample_configs.PIPED_CODES)
rendered_obj = jinja_cfg.render_loadbalancer_obj(
sample_configs.sample_loadbalancer_tuple(
persistence_type='APP_COOKIE'),
'nogroup', '/sock_path',
'/v2')
self.assertEqual(
sample_configs.sample_base_expected_config(backend=be),
rendered_obj)
def test_retrieve_crt_path(self):
with mock.patch('os.makedirs'):
with mock.patch('os.path.isdir') as isdir:
with mock.patch.object(jinja_cfg, '_retrieve_crt_path') as rcp:
isdir.return_value = True
rcp.return_value = '/v2/loadbalancers/lb_id_1/' \
'cont_id_1.pem'
ret = jinja_cfg._retrieve_crt_path(
'/v2/loadbalancers', 'lb_id_1', 'cont_id_1')
self.assertEqual(
'/v2/loadbalancers/lb_id_1/cont_id_1.pem', ret)
    def test_store_listener_crt(self):
        listener = sample_configs.sample_listener_tuple(tls=True, sni=True)
        with mock.patch('os.makedirs'):
            with mock.patch('neutron.common.utils.replace_file'):
                ret = jinja_cfg._store_listener_crt(
                    '/v2/loadbalancers', listener,
                    listener.default_tls_container)
self.assertEqual(
'/v2/loadbalancers/sample_listener_id_1/fakeCNM.pem',
ret)
def test_process_tls_certificates(self):
sl = sample_configs.sample_listener_tuple(tls=True, sni=True)
tls = data_models.TLSContainer(primary_cn='fakeCN',
certificate='imaCert',
private_key='imaPrivateKey',
intermediates=['imainter1',
'imainter2'])
cert = mock.Mock(spec=cert_manager.Cert)
cert.get_private_key.return_value = tls.private_key
cert.get_certificate.return_value = tls.certificate
cert.get_intermediates.return_value = tls.intermediates
with mock.patch.object(jinja_cfg, '_map_cert_tls_container') as map, \
mock.patch.object(jinja_cfg,
'_store_listener_crt') as store_cert, \
mock.patch.object(cert_parser,
'get_host_names') as get_host_names, \
mock.patch.object(jinja_cfg,
'CERT_MANAGER_PLUGIN') as cert_mgr:
map.return_value = tls
cert_mgr_mock = mock.Mock(spec=cert_manager.CertManager)
cert_mgr_mock.get_cert.return_value = cert
cert_mgr.CertManager.return_value = cert_mgr_mock
get_host_names.return_value = {'cn': 'fakeCN'}
jinja_cfg._process_tls_certificates(sl)
            # Ensure get_cert is called once per container: the default
            # container plus the two SNI containers.
            calls_certs = [
                mock.call(sl.default_tls_container.id),
                mock.call('cont_id_2'),
                mock.call('cont_id_3')]
            self.assertEqual(len(calls_certs),
                             cert_mgr_mock.get_cert.call_count)
            # Ensure store_cert is called once per certificate.
            calls_ac = [mock.call('/v2/', 'sample_listener_id_1', tls)] * 3
            self.assertEqual(len(calls_ac), store_cert.call_count)
def test_get_primary_cn(self):
cert = mock.MagicMock()
with mock.patch.object(cert_parser, 'get_host_names') as cp:
cp.return_value = {'cn': 'fakeCN'}
cn = jinja_cfg._get_primary_cn(cert.get_certificate())
self.assertEqual('fakeCN', cn)
def test_map_cert_tls_container(self):
tls = data_models.TLSContainer(primary_cn='fakeCN',
certificate='imaCert',
private_key='imaPrivateKey',
intermediates=['imainter1',
'imainter2'])
cert = mock.MagicMock()
cert.get_private_key.return_value = tls.private_key
cert.get_certificate.return_value = tls.certificate
cert.get_intermediates.return_value = tls.intermediates
cert.get_private_key_passphrase.return_value = 'passphrase'
with mock.patch.object(cert_parser, 'get_host_names') as cp:
with mock.patch.object(cert_parser, 'dump_private_key') as dp:
cp.return_value = {'cn': 'fakeCN'}
dp.return_value = 'imaPrivateKey'
self.assertEqual(tls.primary_cn,
jinja_cfg._map_cert_tls_container(
cert).primary_cn)
self.assertEqual(tls.certificate,
jinja_cfg._map_cert_tls_container(
cert).certificate)
self.assertEqual(tls.private_key,
jinja_cfg._map_cert_tls_container(
cert).private_key)
self.assertEqual(tls.intermediates,
jinja_cfg._map_cert_tls_container(
cert).intermediates)
def test_build_pem(self):
expected = 'imainter\nimainter2\nimacert\nimakey'
        tls_tuple = sample_configs.sample_tls_container_tuple(
            certificate='imacert', private_key='imakey',
            intermediates=['imainter', 'imainter2'])
        self.assertEqual(expected, jinja_cfg._build_pem(tls_tuple))
def test_transform_session_persistence(self):
in_persistence = sample_configs.sample_session_persistence_tuple()
ret = jinja_cfg._transform_session_persistence(in_persistence)
self.assertEqual(sample_configs.RET_PERSISTENCE, ret)
def test_transform_health_monitor(self):
in_persistence = sample_configs.sample_health_monitor_tuple()
ret = jinja_cfg._transform_health_monitor(in_persistence)
self.assertEqual(sample_configs.RET_MONITOR, ret)
def test_transform_member(self):
in_member = sample_configs.sample_member_tuple('sample_member_id_1',
'10.0.0.99')
ret = jinja_cfg._transform_member(in_member)
self.assertEqual(sample_configs.RET_MEMBER_1, ret)
def test_transform_pool(self):
in_pool = sample_configs.sample_pool_tuple()
ret = jinja_cfg._transform_pool(in_pool)
self.assertEqual(sample_configs.RET_POOL, ret)
def test_transform_pool_admin_state_down(self):
in_pool = sample_configs.sample_pool_tuple(hm_admin_state=False)
ret = jinja_cfg._transform_pool(in_pool)
        # Copy the shared sample dict so mutating it here cannot leak into
        # other tests.
        result = dict(sample_configs.RET_POOL)
        result['health_monitor'] = ''
        self.assertEqual(result, ret)
def test_transform_listener(self):
in_listener = sample_configs.sample_listener_tuple()
ret = jinja_cfg._transform_listener(in_listener, '/v2')
self.assertEqual(sample_configs.RET_LISTENER, ret)
def test_transform_loadbalancer(self):
in_lb = sample_configs.sample_loadbalancer_tuple()
ret = jinja_cfg._transform_loadbalancer(in_lb, '/v2')
self.assertEqual(sample_configs.RET_LB, ret)
def test_include_member(self):
ret = jinja_cfg._include_member(
sample_configs.sample_member_tuple('sample_member_id_1',
'10.0.0.99'))
self.assertTrue(ret)
def test_include_member_invalid_status(self):
ret = jinja_cfg._include_member(
sample_configs.sample_member_tuple('sample_member_id_1',
'10.0.0.99', status='PENDING'))
self.assertFalse(ret)
def test_include_member_invalid_admin_state(self):
ret = jinja_cfg._include_member(
sample_configs.sample_member_tuple('sample_member_id_1',
'10.0.0.99',
admin_state_up=False))
self.assertFalse(ret)
def test_expand_expected_codes(self):
exp_codes = ''
self.assertEqual(set([]), jinja_cfg._expand_expected_codes(exp_codes))
exp_codes = '200'
self.assertEqual(set(['200']),
jinja_cfg._expand_expected_codes(exp_codes))
exp_codes = '200, 201'
self.assertEqual(set(['200', '201']),
jinja_cfg._expand_expected_codes(exp_codes))
exp_codes = '200, 201,202'
self.assertEqual(set(['200', '201', '202']),
jinja_cfg._expand_expected_codes(exp_codes))
exp_codes = '200-202'
self.assertEqual(set(['200', '201', '202']),
jinja_cfg._expand_expected_codes(exp_codes))
exp_codes = '200-202, 205'
self.assertEqual(set(['200', '201', '202', '205']),
jinja_cfg._expand_expected_codes(exp_codes))
exp_codes = '200, 201-203'
self.assertEqual(set(['200', '201', '202', '203']),
jinja_cfg._expand_expected_codes(exp_codes))
exp_codes = '200, 201-203, 205'
self.assertEqual(set(['200', '201', '202', '203', '205']),
jinja_cfg._expand_expected_codes(exp_codes))
exp_codes = '201-200, 205'
self.assertEqual(set(['205']),
jinja_cfg._expand_expected_codes(exp_codes))


@ -1,571 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import exceptions
import six
from neutron_lbaas.services.loadbalancer.drivers.haproxy \
import namespace_driver
from neutron_lbaas.tests import base
class TestHaproxyNSDriver(base.BaseTestCase):
def setUp(self):
super(TestHaproxyNSDriver, self).setUp()
conf = mock.Mock()
conf.haproxy.loadbalancer_state_path = '/the/path'
conf.interface_driver = 'intdriver'
conf.haproxy.user_group = 'test_group'
conf.haproxy.send_gratuitous_arp = 3
self.conf = conf
self.rpc_mock = mock.Mock()
with mock.patch(
'neutron.common.utils.load_class_by_alias_or_classname'):
self.driver = namespace_driver.HaproxyNSDriver(
conf,
self.rpc_mock
)
self.vif_driver = mock.Mock()
self.driver.vif_driver = self.vif_driver
self.fake_config = {
'pool': {'id': 'pool_id', 'status': 'ACTIVE',
'admin_state_up': True},
'vip': {'id': 'vip_id', 'port': {'id': 'port_id'},
'address': '10.0.0.2',
'status': 'ACTIVE', 'admin_state_up': True}
}
def _ip_mock_call(self, ns=None):
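        # Expected IPWrapper() constructor call, scoped to a namespace when
        # one is given.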
kwargs = {}
if ns:
kwargs['namespace'] = ns
return mock.call(**kwargs)
def test_get_name(self):
self.assertEqual(namespace_driver.DRIVER_NAME, self.driver.get_name())
def test_create(self):
with mock.patch.object(self.driver, '_plug') as plug:
with mock.patch.object(self.driver, '_spawn') as spawn:
self.driver.create(self.fake_config)
plug.assert_called_once_with(
'qlbaas-pool_id', {'id': 'port_id'}, '10.0.0.2'
)
spawn.assert_called_once_with(self.fake_config)
def test_update(self):
with mock.patch.object(self.driver, '_get_state_file_path') as gsp, \
mock.patch.object(self.driver, '_spawn') as spawn, \
mock.patch.object(six.moves.builtins, 'open') as mock_open:
mock_open.return_value = ['5']
self.driver.update(self.fake_config)
mock_open.assert_called_once_with(gsp.return_value, 'r')
spawn.assert_called_once_with(self.fake_config, ['-sf', '5'])
def test_spawn(self):
with mock.patch.object(namespace_driver.hacfg,
'save_config') as mock_save, \
mock.patch.object(self.driver,
'_get_state_file_path') as gsp, \
mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
gsp.side_effect = lambda x, y: y
self.driver._spawn(self.fake_config)
mock_save.assert_called_once_with('conf', self.fake_config,
'sock', 'test_group')
cmd = ['haproxy', '-f', 'conf', '-p', 'pid']
ip_wrap.assert_has_calls([
self._ip_mock_call('qlbaas-pool_id'),
mock.call().netns.execute(cmd)
])
def test_undeploy_instance(self):
with mock.patch.object(self.driver, '_get_state_file_path') as gsp, \
mock.patch.object(namespace_driver,
'kill_pids_in_file') as kill, \
mock.patch.object(self.driver, '_unplug') as unplug, \
mock.patch('neutron.agent.linux.ip_lib.'
'IPWrapper') as ip_wrap, \
mock.patch('os.path.isdir') as isdir, \
mock.patch('shutil.rmtree') as rmtree:
gsp.side_effect = lambda x, y: '/pool/' + y
self.driver.pool_to_port_id['pool_id'] = 'port_id'
isdir.return_value = True
self.driver.undeploy_instance('pool_id', delete_namespace=True)
kill.assert_called_once_with('/pool/pid')
unplug.assert_called_once_with('qlbaas-pool_id', 'port_id')
isdir.assert_called_once_with('/pool')
rmtree.assert_called_once_with('/pool')
ip_wrap.assert_has_calls([
self._ip_mock_call('qlbaas-pool_id'),
mock.call().garbage_collect_namespace()
])
def test_undeploy_instance_with_ns_cleanup(self):
with mock.patch.object(self.driver, '_get_state_file_path'), \
mock.patch.object(self.driver, 'vif_driver') as vif, \
mock.patch.object(namespace_driver, 'kill_pids_in_file'), \
mock.patch('neutron.agent.linux.ip_lib.'
'IPWrapper') as ip_wrap, \
mock.patch('os.path.isdir'), \
mock.patch('shutil.rmtree'):
device = mock.Mock()
device_name = 'port_device'
device.name = device_name
ip_wrap.return_value.get_devices.return_value = [device]
self.driver.undeploy_instance('pool_id', cleanup_namespace=True)
vif.unplug.assert_called_once_with(device_name,
namespace='qlbaas-pool_id')
def test_remove_orphans(self):
with mock.patch.object(self.driver, 'exists') as exists, \
mock.patch.object(self.driver,
'undeploy_instance') as undeploy, \
mock.patch('os.listdir') as listdir, \
mock.patch('os.path.exists'):
known = ['known1', 'known2']
unknown = ['unknown1', 'unknown2']
listdir.return_value = known + unknown
exists.side_effect = lambda x: x == 'unknown2'
self.driver.remove_orphans(known)
undeploy.assert_called_once_with('unknown2',
cleanup_namespace=True)
def test_exists(self):
with mock.patch.object(self.driver, '_get_state_file_path') as gsp, \
mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap, \
mock.patch('socket.socket'), \
mock.patch('os.path.exists') as path_exists:
gsp.side_effect = lambda x, y, z: '/pool/' + y
ip_wrap.return_value.netns.exists.return_value = True
path_exists.return_value = True
self.driver.exists('pool_id')
ip_wrap.assert_has_calls([
self._ip_mock_call(),
mock.call().netns.exists('qlbaas-pool_id')
])
self.assertTrue(self.driver.exists('pool_id'))
def test_get_stats(self):
raw_stats = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,'
'dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,'
'act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,'
'sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,'
'check_status,check_code,check_duration,hrsp_1xx,'
'hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n'
'8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,'
'10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0'
',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n'
'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
'32a6c2a3-420a-44c3-955d-86bd2fc6871e,0,0,0,1,,7,1120,'
'224,,0,,0,0,0,0,UP,1,1,0,0,1,2623,303,,1,2,1,,7,,2,0,,'
'1,L7OK,200,98,0,7,0,0,0,0,0,,,,0,0,\n'
'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
'd9aea044-8867-4e80-9875-16fb808fa0f9,0,0,0,2,,12,0,0,,'
'0,,0,0,8,4,DOWN,1,1,0,9,2,308,675,,1,2,2,,4,,2,0,,2,'
'L4CON,,2999,0,0,0,0,0,0,0,,,,0,0,\n')
raw_stats_empty = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,'
'bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,'
'status,weight,act,bck,chkfail,chkdown,lastchg,'
'downtime,qlimit,pid,iid,sid,throttle,lbtot,'
'tracked,type,rate,rate_lim,rate_max,check_status,'
'check_code,check_duration,hrsp_1xx,hrsp_2xx,'
'hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,'
'\n')
with mock.patch.object(self.driver, '_get_state_file_path') as gsp, \
mock.patch('socket.socket') as socket, \
mock.patch('os.path.exists') as path_exists:
gsp.side_effect = lambda x, y, z: '/pool/' + y
path_exists.return_value = True
socket.return_value = socket
socket.recv.return_value = raw_stats
exp_stats = {'connection_errors': '0',
'active_connections': '3',
'current_sessions': '3',
'bytes_in': '7764',
'max_connections': '4',
'max_sessions': '4',
'bytes_out': '2365',
'response_errors': '0',
'total_sessions': '10',
'total_connections': '10',
'members': {
'32a6c2a3-420a-44c3-955d-86bd2fc6871e': {
'status': 'ACTIVE',
'health': 'L7OK',
'failed_checks': '0'
},
'd9aea044-8867-4e80-9875-16fb808fa0f9': {
'status': 'INACTIVE',
'health': 'L4CON',
'failed_checks': '9'
}
}
}
stats = self.driver.get_stats('pool_id')
self.assertEqual(exp_stats, stats)
socket.recv.return_value = raw_stats_empty
self.assertEqual({'members': {}}, self.driver.get_stats('pool_id'))
path_exists.return_value = False
socket.reset_mock()
self.assertEqual({}, self.driver.get_stats('pool_id'))
self.assertFalse(socket.called)
def test_plug(self):
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1'}}]}
test_address = '10.0.0.2'
with mock.patch('neutron.agent.linux.ip_lib.device_exists') as dev_exists, \
mock.patch('netaddr.IPNetwork') as ip_net, \
mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port, test_address)
self.rpc_mock.plug_vip_port.assert_called_once_with(
test_port['id'])
self.assertTrue(dev_exists.called)
self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
'test_interface',
'mac_addr',
namespace='test_ns')
self.vif_driver.init_l3.assert_called_once_with(
'test_interface',
['10.0.0.2/24'],
namespace='test_ns'
)
cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
cmd_arping = ['arping', '-U', '-I',
'test_interface', '-c',
self.conf.haproxy.send_gratuitous_arp, '10.0.0.2']
ip_wrap.assert_has_calls([
self._ip_mock_call('test_ns'),
mock.call().netns.execute(cmd, check_exit_code=False),
mock.call().netns.execute(cmd_arping, check_exit_code=False),
])
dev_exists.return_value = True
self.assertRaises(exceptions.PreexistingDeviceFailure,
self.driver._plug, 'test_ns', test_port,
test_address, False)
def test_plug_not_send_gratuitous_arp(self):
self.conf.haproxy.send_gratuitous_arp = 0
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1'}}]}
test_address = '10.0.0.2'
with mock.patch('neutron.agent.linux.ip_lib.device_exists') as dev_exists, \
mock.patch('netaddr.IPNetwork') as ip_net, \
mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port, test_address)
cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
expected = [
self._ip_mock_call('test_ns'),
mock.call().netns.execute(cmd, check_exit_code=False)]
self.assertEqual(expected, ip_wrap.mock_calls)
def test_plug_no_gw(self):
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24'}}]}
test_address = '10.0.0.2'
with mock.patch('neutron.agent.linux.ip_lib.device_exists') as dev_exists, \
mock.patch('netaddr.IPNetwork') as ip_net, \
mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port, test_address)
self.rpc_mock.plug_vip_port.assert_called_once_with(
test_port['id'])
self.assertTrue(dev_exists.called)
self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
'test_interface',
'mac_addr',
namespace='test_ns')
self.vif_driver.init_l3.assert_called_once_with(
'test_interface',
['10.0.0.2/24'],
namespace='test_ns'
)
self.assertFalse(ip_wrap.called)
dev_exists.return_value = True
self.assertRaises(exceptions.PreexistingDeviceFailure,
self.driver._plug, 'test_ns', test_port,
test_address, False)
def test_plug_gw_in_host_routes(self):
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24',
'host_routes':
[{'destination': '0.0.0.0/0',
'nexthop': '10.0.0.1'}]}}]}
test_address = '10.0.0.2'
with mock.patch('neutron.agent.linux.ip_lib.device_exists') as dev_exists, \
mock.patch('netaddr.IPNetwork') as ip_net, \
mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port, test_address)
self.rpc_mock.plug_vip_port.assert_called_once_with(
test_port['id'])
self.assertTrue(dev_exists.called)
self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
'test_interface',
'mac_addr',
namespace='test_ns')
self.vif_driver.init_l3.assert_called_once_with(
'test_interface',
['10.0.0.2/24'],
namespace='test_ns'
)
cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
ip_wrap.assert_has_calls([
self._ip_mock_call('test_ns'),
mock.call().netns.execute(cmd, check_exit_code=False),
])
def test_unplug(self):
self.vif_driver.get_device_name.return_value = 'test_interface'
self.driver._unplug('test_ns', 'port_id')
self.rpc_mock.unplug_vip_port.assert_called_once_with('port_id')
self.vif_driver.unplug.assert_called_once_with('test_interface', namespace='test_ns')
def test_kill_pids_in_file(self):
with mock.patch('os.path.exists') as path_exists, \
mock.patch.object(six.moves.builtins, 'open') as mock_open, \
mock.patch('neutron.agent.linux.utils.'
'execute') as mock_execute, \
mock.patch.object(namespace_driver.LOG,
'exception') as mock_log:
file_mock = mock.MagicMock()
mock_open.return_value = file_mock
file_mock.__enter__.return_value = file_mock
file_mock.__iter__.return_value = iter(['123'])
path_exists.return_value = False
namespace_driver.kill_pids_in_file('test_path')
path_exists.assert_called_once_with('test_path')
self.assertFalse(mock_open.called)
self.assertFalse(mock_execute.called)
path_exists.return_value = True
mock_execute.side_effect = RuntimeError
namespace_driver.kill_pids_in_file('test_path')
self.assertTrue(mock_log.called)
mock_execute.assert_called_once_with(
['kill', '-9', '123'], run_as_root=True)
def test_get_state_file_path(self):
with mock.patch('os.makedirs') as mkdir:
path = self.driver._get_state_file_path('pool_id', 'conf')
self.assertEqual('/the/path/pool_id/conf', path)
mkdir.assert_called_once_with('/the/path/pool_id', 0o755)
def test_deploy_instance(self):
with mock.patch.object(self.driver, 'exists') as exists:
with mock.patch.object(self.driver, 'update') as update:
self.driver.deploy_instance(self.fake_config)
exists.assert_called_once_with(self.fake_config['pool']['id'])
update.assert_called_once_with(self.fake_config)
def test_deploy_instance_non_existing(self):
with mock.patch.object(self.driver, 'exists') as exists:
with mock.patch.object(self.driver, 'create') as create:
exists.return_value = False
self.driver.deploy_instance(self.fake_config)
exists.assert_called_once_with(self.fake_config['pool']['id'])
create.assert_called_once_with(self.fake_config)
def test_deploy_instance_vip_status_non_active(self):
with mock.patch.object(self.driver, 'exists') as exists:
self.fake_config['vip']['status'] = 'NON_ACTIVE'
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
def test_deploy_instance_vip_admin_state_down(self):
with mock.patch.object(self.driver, 'exists') as exists:
self.fake_config['vip']['admin_state_up'] = False
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
def test_deploy_instance_no_vip(self):
with mock.patch.object(self.driver, 'exists') as exists:
del self.fake_config['vip']
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
def test_deploy_instance_pool_status_non_active(self):
with mock.patch.object(self.driver, 'exists') as exists:
self.fake_config['pool']['status'] = 'NON_ACTIVE'
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
def test_deploy_instance_pool_admin_state_down(self):
with mock.patch.object(self.driver, 'exists') as exists:
with mock.patch.object(self.driver, 'update') as update:
self.fake_config['pool']['admin_state_up'] = False
self.driver.deploy_instance(self.fake_config)
exists.assert_called_once_with(self.fake_config['pool']['id'])
update.assert_called_once_with(self.fake_config)
def test_refresh_device(self):
with mock.patch.object(self.driver, 'deploy_instance') as deploy, \
mock.patch.object(self.driver,
'undeploy_instance') as undeploy:
pool_id = 'pool_id1'
self.driver._refresh_device(pool_id)
self.rpc_mock.get_logical_device.assert_called_once_with(pool_id)
deploy.assert_called_once_with(
self.rpc_mock.get_logical_device.return_value)
self.assertFalse(undeploy.called)
def test_refresh_device_not_deployed(self):
with mock.patch.object(self.driver, 'deploy_instance') as deploy, \
mock.patch.object(self.driver, 'exists') as exists, \
mock.patch.object(self.driver,
'undeploy_instance') as undeploy:
pool_id = 'pool_id1'
deploy.return_value = False
exists.return_value = True
self.driver._refresh_device(pool_id)
undeploy.assert_called_once_with(pool_id)
def test_refresh_device_non_existing(self):
with mock.patch.object(self.driver, 'deploy_instance') as deploy, \
mock.patch.object(self.driver, 'exists') as exists, \
mock.patch.object(self.driver,
'undeploy_instance') as undeploy:
pool_id = 'pool_id1'
deploy.return_value = False
exists.return_value = False
self.driver._refresh_device(pool_id)
self.assertFalse(undeploy.called)
def test_create_vip(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.create_vip({'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_update_vip(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.update_vip({}, {'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_delete_vip(self):
with mock.patch.object(self.driver, 'undeploy_instance') as undeploy:
self.driver.delete_vip({'pool_id': '1'})
undeploy.assert_called_once_with('1')
def test_create_pool(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.create_pool({'id': '1'})
self.assertFalse(refresh.called)
def test_update_pool(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.update_pool({}, {'id': '1'})
refresh.assert_called_once_with('1')
def test_delete_pool_existing(self):
with mock.patch.object(self.driver, 'undeploy_instance') as undeploy, \
mock.patch.object(self.driver, 'exists') as exists:
exists.return_value = True
self.driver.delete_pool({'id': '1'})
undeploy.assert_called_once_with('1', delete_namespace=True)
def test_delete_pool_non_existing(self):
with mock.patch.object(self.driver, 'undeploy_instance') as undeploy, \
mock.patch.object(self.driver, 'exists') as exists:
exists.return_value = False
self.driver.delete_pool({'id': '1'})
self.assertFalse(undeploy.called)
def test_create_member(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.create_member({'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_update_member(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.update_member({}, {'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_delete_member(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.delete_member({'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_create_pool_health_monitor(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.create_pool_health_monitor('', '1')
refresh.assert_called_once_with('1')
def test_update_pool_health_monitor(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.update_pool_health_monitor('', '', '1')
refresh.assert_called_once_with('1')
def test_delete_pool_health_monitor(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.delete_pool_health_monitor('', '1')
refresh.assert_called_once_with('1')
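The kill_pids_in_file() contract exercised above is small and self-contained: skip a missing pid file, otherwise kill each listed pid as root and log, rather than propagate, any failure. A minimal sketch reconstructed from the assertions, assuming oslo_log for the logger:

import os

from neutron.agent.linux import utils
from oslo_log import log as logging

LOG = logging.getLogger(__name__)


def kill_pids_in_file(pid_path):
    # a missing pid file means there is nothing to kill
    if not os.path.exists(pid_path):
        return
    with open(pid_path) as pids:
        for pid in pids:
            pid = pid.strip()
            try:
                utils.execute(['kill', '-9', pid], run_as_root=True)
            except RuntimeError:
                # a process that already exited is logged, not fatal
                LOG.exception('Unable to kill pid %s', pid)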

View File

@ -1,215 +0,0 @@
# Copyright 2014 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests.unit import testlib_api
import requests
from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client
from neutron_lbaas.services.loadbalancer.drivers.netscaler \
import netscaler_driver
NCC_CLIENT_CLASS = ('neutron_lbaas.services.loadbalancer.drivers'
'.netscaler.ncc_client.NSClient')
TESTURI_SCHEME = 'http'
TESTURI_HOSTNAME = '1.1.1.1'
TESTURI_PORT = 4433
TESTURI_PATH = '/ncc_service/1.0'
TESTURI = '%s://%s:%s%s' % (TESTURI_SCHEME, TESTURI_HOSTNAME,
TESTURI_PORT, TESTURI_PATH)
TEST_USERNAME = 'user211'
TEST_PASSWORD = '@30xHl5cT'
TEST_TENANT_ID = '9c5245a2-0432-9d4c-4829-9bd7028603a1'
TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec'
class TestNSClient(testlib_api.WebTestCase):
"""A Unit test for the NetScaler NCC client module."""
def setUp(self):
self.log = mock.patch.object(ncc_client, 'LOG').start()
super(TestNSClient, self).setUp()
# mock the requests.request function call
self.request_method_mock = mock.Mock()
requests.request = self.request_method_mock
self.testclient = self._get_nsclient()
self.testclient.login = mock.Mock()
self.testclient.login.side_effect = self.mock_auth_func(
self.testclient)
nfe_mock = mock.patch.object(
ncc_client.NCCException, "is_not_found_exception").start()
nfe_mock.return_value = True
def mock_auth_func(self, ncc_test_client):
ncc_test_client.auth = "SessId=123456789"
def test_instantiate_nsclient_with_empty_uri(self):
"""Asserts that a call with empty URI will raise an exception."""
self.assertRaises(ncc_client.NCCException, ncc_client.NSClient,
'', TEST_USERNAME, TEST_PASSWORD)
def test_create_resource_with_no_connection(self):
"""Asserts that a call with no connection will raise an exception."""
# mock a connection object that fails to establish a connection
self.request_method_mock.side_effect = (
requests.exceptions.ConnectionError())
resource_path = netscaler_driver.VIPS_RESOURCE
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_create()
# call method under test: create_resource() and assert that
# it raises an exception
self.assertRaises(ncc_client.NCCException,
self.testclient.create_resource,
TEST_TENANT_ID, resource_path,
resource_name, resource_body)
def test_create_resource_with_error(self):
"""Asserts that a failed create call raises an exception."""
# create a mock object to represent a valid http response
# with a failure status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.unavailable
fake_response.headers = []
requests.request.return_value = fake_response
resource_path = netscaler_driver.VIPS_RESOURCE
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_create()
# call method under test: create_resource
# and assert that it raises the expected exception.
self.assertRaises(ncc_client.NCCException,
self.testclient.create_resource,
TEST_TENANT_ID, resource_path,
resource_name, resource_body)
def test_create_resource(self):
"""Asserts that a correct call will succeed."""
# create a valid http response with a successful status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.created
fake_response.headers = []
self.request_method_mock.return_value = fake_response
resource_path = netscaler_driver.VIPS_RESOURCE
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_create()
# call method under test: create_resource()
self.testclient.create_resource(TEST_TENANT_ID, resource_path,
resource_name, resource_body)
# assert that request() was called
# with the expected params.
resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
self.request_method_mock.assert_called_once_with(
'POST',
url=resource_url,
headers=mock.ANY,
data=mock.ANY)
def test_update_resource_with_error(self):
"""Asserts that a failed update call raises an exception."""
# create a valid http response with a failure status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.unavailable
fake_response.headers = []
# obtain the mock object that corresponds to the call of request()
self.request_method_mock.return_value = fake_response
resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
TESTVIP_ID)
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_update()
# call method under test: update_resource() and
# assert that it raises the expected exception.
self.assertRaises(ncc_client.NCCException,
self.testclient.update_resource,
TEST_TENANT_ID, resource_path,
resource_name, resource_body)
def test_update_resource(self):
"""Asserts that a correct update call will succeed."""
# create a valid http response with a successful status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.ok
fake_response.headers = []
# obtain the mock object that corresponds to the call of request()
self.request_method_mock.return_value = fake_response
resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
TESTVIP_ID)
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_update()
# call method under test: update_resource.
self.testclient.update_resource(TEST_TENANT_ID, resource_path,
resource_name, resource_body)
resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
# assert that requests.request() was called with the
# expected params.
self.request_method_mock.assert_called_once_with(
'PUT',
url=resource_url,
headers=mock.ANY,
data=mock.ANY)
def test_delete_resource_with_error(self):
"""Asserts that a failed delete call raises an exception."""
# create a valid http response with a failure status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.unavailable
fake_response.headers = []
self.request_method_mock.return_value = fake_response
resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
TESTVIP_ID)
# call method under test: remove_resource
self.assertRaises(ncc_client.NCCException,
self.testclient.remove_resource,
TEST_TENANT_ID, resource_path)
def test_delete_resource(self):
"""Asserts that a correct delete call will succeed."""
# create a valid http response with a successful status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.ok
fake_response.headers = []
# obtain the mock object that corresponds to the call of request()
self.request_method_mock.return_value = fake_response
resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
TESTVIP_ID)
resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
# call method under test: remove_resource
self.testclient.remove_resource(TEST_TENANT_ID, resource_path)
# assert that requests.request() was called with the
# expected params
self.request_method_mock.assert_called_once_with(
'DELETE',
url=resource_url,
headers=mock.ANY,
data=mock.ANY)
def _get_nsclient(self):
return ncc_client.NSClient(TESTURI, TEST_USERNAME, TEST_PASSWORD)
def _get_testvip_httpbody_for_create(self):
body = {
'name': 'vip1',
'address': '10.0.0.3',
'pool_id': 'da477c13-24cd-4c9f-8c19-757a61ef3b9d',
'protocol': 'HTTP',
'protocol_port': 80,
'admin_state_up': True,
}
return body
def _get_testvip_httpbody_for_update(self):
body = {}
body['name'] = 'updated vip1'
body['admin_state_up'] = False
return body
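Every assertion in this file reduces to one request pattern: build the resource URL from the service URI, issue requests.request(), and convert both connection failures and non-2xx statuses into NCCException. A minimal sketch of that contract (MinimalNCCClient is a hypothetical name, and RuntimeError stands in for the removed NCCException):

import requests


class MinimalNCCClient(object):
    def __init__(self, service_uri, username, password):
        if not service_uri:
            # the real client rejected an empty URI at construction time
            raise RuntimeError('NCC URI is required')
        self.service_uri = service_uri.rstrip('/')
        self.auth = (username, password)

    def _execute(self, method, resource_path, body=None):
        url = '%s/%s' % (self.service_uri, resource_path)
        try:
            response = requests.request(
                method, url=url,
                headers={'Content-Type': 'application/json'}, data=body)
        except requests.exceptions.ConnectionError:
            raise RuntimeError('no connection to %s' % url)
        if response.status_code not in (requests.codes.ok,
                                        requests.codes.created):
            raise RuntimeError('bad status: %d' % response.status_code)
        return response.status_code, response.content

A create maps to _execute('POST', ...) on the collection path, an update to 'PUT' and a delete to 'DELETE' on the member path, which is exactly the method/url/headers/data shape the mocks assert on.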

View File

@ -1,786 +0,0 @@
# Copyright 2014 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron import context
from neutron import manager
from neutron.plugins.common import constants
from neutron_lib import exceptions
from neutron_lbaas.db.loadbalancer import loadbalancer_db
from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client
from neutron_lbaas.services.loadbalancer.drivers.netscaler \
import netscaler_driver
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
LBAAS_DRIVER_CLASS = ('neutron_lbaas.services.loadbalancer.drivers'
'.netscaler.netscaler_driver'
'.NetScalerPluginDriver')
NCC_CLIENT_CLASS = ('neutron_lbaas.services.loadbalancer.drivers'
'.netscaler.ncc_client'
'.NSClient')
LBAAS_PROVIDER_NAME = 'netscaler'
LBAAS_PROVIDER = ('LOADBALANCER:%s:%s:default' %
(LBAAS_PROVIDER_NAME, LBAAS_DRIVER_CLASS))
#Test data
TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec'
TESTPOOL_ID = 'da477c13-24cd-4c9f-8c19-757a61ef3b9d'
TESTMEMBER_ID = '84dea8bc-3416-4fb0-83f9-2ca6e7173bee'
TESTMONITOR_ID = '9b9245a2-0413-4f15-87ef-9a41ef66048c'
TESTVIP_PORT_ID = '327d9662-ade9-4c74-aaf6-c76f145c1180'
TESTPOOL_PORT_ID = '132c1dbb-d3d8-45aa-96e3-71f2ea51651e'
TESTPOOL_SNATIP_ADDRESS = '10.0.0.50'
TESTPOOL_SNAT_PORT = {
'id': TESTPOOL_PORT_ID,
'fixed_ips': [{'ip_address': TESTPOOL_SNATIP_ADDRESS}]
}
TESTVIP_IP = '10.0.1.100'
TESTMEMBER_IP = '10.0.0.5'
class TestLoadBalancerPluginBase(test_db_loadbalancer
.LoadBalancerPluginDbTestCase):
def setUp(self):
# mock the NSClient class (REST client)
client_mock_cls = mock.patch(NCC_CLIENT_CLASS).start()
#mock the REST methods of the NSClient class
self.client_mock_instance = client_mock_cls.return_value
self.create_resource_mock = self.client_mock_instance.create_resource
self.create_resource_mock.side_effect = mock_create_resource_func
self.update_resource_mock = self.client_mock_instance.update_resource
self.update_resource_mock.side_effect = mock_update_resource_func
self.retrieve_resource_mock = (self.client_mock_instance
.retrieve_resource)
self.retrieve_resource_mock.side_effect = mock_retrieve_resource_func
self.remove_resource_mock = self.client_mock_instance.remove_resource
self.remove_resource_mock.side_effect = mock_remove_resource_func
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=LBAAS_PROVIDER)
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestNetScalerPluginDriver(TestLoadBalancerPluginBase):
"""Unit tests for the NetScaler LBaaS driver module."""
def setUp(self):
mock.patch.object(netscaler_driver, 'LOG').start()
super(TestNetScalerPluginDriver, self).setUp()
self.plugin_instance.drivers[LBAAS_PROVIDER_NAME] = (
netscaler_driver.NetScalerPluginDriver(self.plugin_instance))
self.driver = self.plugin_instance.drivers[LBAAS_PROVIDER_NAME]
self.context = context.get_admin_context()
def test_create_vip(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
testvip = self._build_testvip_contents(subnet['subnet'],
pool['pool'])
expectedvip = self._build_expectedvip_contents(
testvip,
subnet['subnet'])
# mock the LBaaS plugin update_status().
self._mock_update_status()
# reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test
self.driver.create_vip(self.context, testvip)
# First, assert that create_resource was called once
# with expected params.
self.create_resource_mock.assert_called_once_with(
None,
netscaler_driver.VIPS_RESOURCE,
netscaler_driver.VIP_RESOURCE,
expectedvip)
#Finally, assert that the vip object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Vip,
expectedvip['id'],
constants.ACTIVE)
def test_create_vip_without_connection(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
testvip = self._build_testvip_contents(subnet['subnet'],
pool['pool'])
expectedvip = self._build_expectedvip_contents(
testvip,
subnet['subnet'])
errorcode = ncc_client.NCCException.CONNECTION_ERROR
self.create_resource_mock.side_effect = (
ncc_client.NCCException(errorcode))
# mock the plugin's update_status()
self._mock_update_status()
# reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.driver.create_vip(self.context, testvip)
# First, assert that create_resource was called once
# with expected params.
self.create_resource_mock.assert_called_once_with(
None,
netscaler_driver.VIPS_RESOURCE,
netscaler_driver.VIP_RESOURCE,
expectedvip)
#Finally, assert that the vip object is in ERROR state
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Vip,
testvip['id'],
constants.ERROR)
def test_update_vip(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
updated_vip = self._build_updated_testvip_contents(
vip['vip'],
subnet['subnet'],
pool['pool'])
expectedvip = self._build_updated_expectedvip_contents(
updated_vip,
subnet['subnet'],
pool['pool'])
# mock the plugin's update_status()
self._mock_update_status()
# reset the update_resource() mock
self.update_resource_mock.reset_mock()
# execute the method under test
self.driver.update_vip(self.context, updated_vip,
updated_vip)
vip_resource_path = "%s/%s" % (
(netscaler_driver.VIPS_RESOURCE,
vip['vip']['id']))
# First, assert that update_resource was called once
# with expected params.
(self.update_resource_mock
.assert_called_once_with(
None,
vip_resource_path,
netscaler_driver.VIP_RESOURCE,
expectedvip))
#Finally, assert that the vip object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Vip,
vip['vip']['id'],
constants.ACTIVE)
def test_delete_vip(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with self.vip(pool=pool, subnet=subnet) as vip, \
mock.patch.object(self.driver.plugin,
'_delete_db_vip') as mock_delete_db_vip:
mock_delete_db_vip.return_value = None
#reset the remove_resource() mock
self.remove_resource_mock.reset_mock()
# execute the method under test
self.driver.delete_vip(self.context, vip['vip'])
vip_resource_path = "%s/%s" % (
(netscaler_driver.VIPS_RESOURCE,
vip['vip']['id']))
# Assert that remove_resource() was called once
# with expected params.
(self.remove_resource_mock
.assert_called_once_with(None, vip_resource_path))
def test_create_pool(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_ports') as mock_get_ports, \
mock.patch.object(self.driver.plugin._core_plugin,
'create_port') as mock_create_port:
mock_get_subnet.return_value = subnet['subnet']
mock_get_ports.return_value = None
mock_create_port.return_value = TESTPOOL_SNAT_PORT
testpool = self._build_testpool_contents(subnet['subnet'])
expectedpool = self._build_expectedpool_contents(testpool,
subnet['subnet'])
#reset the create_resource() mock
self.create_resource_mock.reset_mock()
# mock the plugin's update_status()
self._mock_update_status()
# execute the method under test
self.driver.create_pool(self.context, testpool)
# First, assert that create_resource was called once
# with expected params.
(self.create_resource_mock
.assert_called_once_with(None,
netscaler_driver.POOLS_RESOURCE,
netscaler_driver.POOL_RESOURCE,
expectedpool))
#Finally, assert that the pool object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Pool,
expectedpool['id'],
constants.ACTIVE)
def test_create_pool_with_error(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_ports') as mock_get_ports, \
mock.patch.object(self.driver.plugin._core_plugin,
'create_port') as mock_create_port:
mock_get_subnet.return_value = subnet['subnet']
mock_get_ports.return_value = None
mock_create_port.return_value = TESTPOOL_SNAT_PORT
errorcode = ncc_client.NCCException.CONNECTION_ERROR
self.create_resource_mock.side_effect = (ncc_client
.NCCException(errorcode))
testpool = self._build_testpool_contents(subnet['subnet'])
expectedpool = self._build_expectedpool_contents(testpool,
subnet['subnet'])
# mock the plugin's update_status()
self._mock_update_status()
#reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.driver.create_pool(self.context, testpool)
# Also assert that create_resource was called once
# with expected params.
(self.create_resource_mock
.assert_called_once_with(None,
netscaler_driver.POOLS_RESOURCE,
netscaler_driver.POOL_RESOURCE,
expectedpool))
#Finally, assert that the pool object is in ERROR state
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Pool,
expectedpool['id'],
constants.ERROR)
def test_create_pool_with_snatportcreate_failure(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_ports') as mock_get_ports, \
mock.patch.object(self.driver.plugin._core_plugin,
'create_port') as mock_create_port:
mock_get_subnet.return_value = subnet['subnet']
mock_get_ports.return_value = None
mock_create_port.side_effect = exceptions.NeutronException()
testpool = self._build_testpool_contents(subnet['subnet'])
#reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.assertRaises(exceptions.NeutronException,
self.driver.create_pool,
self.context, testpool)
def test_update_pool(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
updated_pool = self._build_updated_testpool_contents(
pool['pool'],
subnet['subnet'])
expectedpool = self._build_updated_expectedpool_contents(
updated_pool,
subnet['subnet'])
# mock the plugin's update_status()
self._mock_update_status()
# reset the update_resource() mock
self.update_resource_mock.reset_mock()
# execute the method under test.
self.driver.update_pool(self.context, pool['pool'],
updated_pool)
pool_resource_path = "%s/%s" % (
(netscaler_driver.POOLS_RESOURCE,
pool['pool']['id']))
# First, assert that update_resource was called once
# with expected params.
(self.update_resource_mock
.assert_called_once_with(None,
pool_resource_path,
netscaler_driver.POOL_RESOURCE,
expectedpool))
#Finally, assert that the pool object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Pool,
pool['pool']['id'],
constants.ACTIVE)
def test_delete_pool(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool, \
mock.patch.object(self.driver.plugin._core_plugin,
'delete_port') as mock_delete_port, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_ports') as mock_get_ports, \
mock.patch.object(self.driver.plugin,
'get_pools') as mock_get_pools, \
mock.patch.object(self.driver.plugin,
'_delete_db_pool') as mock_delete_db_pool:
mock_delete_port.return_value = None
mock_get_ports.return_value = [{'id': TESTPOOL_PORT_ID}]
mock_get_pools.return_value = []
mock_delete_db_pool.return_value = None
#reset the remove_resource() mock
self.remove_resource_mock.reset_mock()
# execute the method under test.
self.driver.delete_pool(self.context, pool['pool'])
pool_resource_path = "%s/%s" % (
(netscaler_driver.POOLS_RESOURCE,
pool['pool']['id']))
# Assert that remove_resource was called
# once with expected params.
(self.remove_resource_mock
.assert_called_once_with(None, pool_resource_path))
def test_create_member(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
testmember = self._build_testmember_contents(pool['pool'])
expectedmember = self._build_expectedmember_contents(
testmember)
# mock the plugin's update_status()
self._mock_update_status()
#reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.driver.create_member(self.context, testmember)
# First, assert that create_resource was called once
# with expected params.
(self.create_resource_mock
.assert_called_once_with(
None,
netscaler_driver.POOLMEMBERS_RESOURCE,
netscaler_driver.POOLMEMBER_RESOURCE,
expectedmember))
#Finally, assert that the member object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Member,
expectedmember['id'],
constants.ACTIVE)
def test_update_member(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with self.member(pool_id=pool['pool']['id']) as member:
updatedmember = (self._build_updated_testmember_contents(
member['member']))
expectedmember = (self
._build_updated_expectedmember_contents(
updatedmember))
# mock the plugin's update_status()
self._mock_update_status()
# reset the update_resource() mock
self.update_resource_mock.reset_mock()
# execute the method under test
self.driver.update_member(self.context,
member['member'],
updatedmember)
member_resource_path = "%s/%s" % (
(netscaler_driver.POOLMEMBERS_RESOURCE,
member['member']['id']))
# First, assert that update_resource was called once
# with expected params.
(self.update_resource_mock
.assert_called_once_with(
None,
member_resource_path,
netscaler_driver.POOLMEMBER_RESOURCE,
expectedmember))
#Finally, assert that the member object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Member,
member['member']['id'],
constants.ACTIVE)
def test_delete_member(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with self.member(pool_id=pool['pool']['id']) as member, \
mock.patch.object(self.driver.plugin,
'_delete_db_member') as mock_delete_db_member:
mock_delete_db_member.return_value = None
# reset the remove_resource() mock
self.remove_resource_mock.reset_mock()
# execute the method under test
self.driver.delete_member(self.context,
member['member'])
member_resource_path = "%s/%s" % (
(netscaler_driver.POOLMEMBERS_RESOURCE,
member['member']['id']))
# Assert that remove_resource was called once
# with expected params.
(self.remove_resource_mock
.assert_called_once_with(None, member_resource_path))
def test_create_pool_health_monitor(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
testhealthmonitor = self._build_testhealthmonitor_contents(
pool['pool'])
expectedhealthmonitor = (
self._build_expectedhealthmonitor_contents(
testhealthmonitor))
with mock.patch.object(self.driver.plugin,
'update_pool_health_monitor') as mhm:
# reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.driver.create_pool_health_monitor(self.context,
testhealthmonitor,
pool['pool']['id'])
# First, assert that create_resource was called once
# with expected params.
resource_path = "%s/%s/%s" % (
netscaler_driver.POOLS_RESOURCE,
pool['pool']['id'],
netscaler_driver.MONITORS_RESOURCE)
(self.create_resource_mock
.assert_called_once_with(
None,
resource_path,
netscaler_driver.MONITOR_RESOURCE,
expectedhealthmonitor))
# Finally, assert that the healthmonitor object is
# now ACTIVE.
(mhm.assert_called_once_with(
mock.ANY,
expectedhealthmonitor['id'],
pool['pool']['id'],
constants.ACTIVE, ""))
def test_update_pool_health_monitor(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with self.health_monitor(
pool_id=pool['pool']['id']
) as (health_monitor):
updatedhealthmonitor = (
self._build_updated_testhealthmonitor_contents(
health_monitor['health_monitor']))
expectedhealthmonitor = (
self._build_updated_expectedhealthmonitor_contents(
updatedhealthmonitor))
with mock.patch.object(self.driver.plugin,
'update_pool_health_monitor') as mhm:
# reset the update_resource() mock
self.update_resource_mock.reset_mock()
# execute the method under test.
self.driver.update_pool_health_monitor(
self.context,
health_monitor['health_monitor'],
updatedhealthmonitor,
pool['pool']['id'])
monitor_resource_path = "%s/%s" % (
(netscaler_driver.MONITORS_RESOURCE,
health_monitor['health_monitor']['id']))
# First, assert that update_resource was called once
# with expected params.
self.update_resource_mock.assert_called_once_with(
None,
monitor_resource_path,
netscaler_driver.MONITOR_RESOURCE,
expectedhealthmonitor)
#Finally, assert that the health monitor object is now ACTIVE
(mhm.assert_called_once_with(
mock.ANY,
health_monitor['health_monitor']['id'],
pool['pool']['id'],
constants.ACTIVE, ""))
def test_delete_pool_health_monitor(self):
with self.subnet() as subnet, \
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet') as mock_get_subnet:
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with self.health_monitor(
pool_id=pool['pool']['id']) as health_monitor, \
mock.patch.object(self.driver.plugin,
'_delete_db_pool_health_monitor') as \
mock_delete_db_monitor:
mock_delete_db_monitor.return_value = None
# reset the remove_resource() mock
self.remove_resource_mock.reset_mock()
# execute the method under test.
self.driver.delete_pool_health_monitor(
self.context,
health_monitor['health_monitor'],
pool['pool']['id'])
monitor_resource_path = "%s/%s/%s/%s" % (
netscaler_driver.POOLS_RESOURCE,
pool['pool']['id'],
netscaler_driver.MONITORS_RESOURCE,
health_monitor['health_monitor']['id'])
# Assert that remove_resource was called once
# with expected params.
self.remove_resource_mock.assert_called_once_with(
None,
monitor_resource_path)
def _build_testvip_contents(self, subnet, pool):
vip_obj = dict(id=TESTVIP_ID,
name='testvip',
description='a test vip',
tenant_id=self._tenant_id,
subnet_id=subnet['id'],
address=TESTVIP_IP,
port_id=TESTVIP_PORT_ID,
pool_id=pool['id'],
protocol='HTTP',
protocol_port=80,
connection_limit=1000,
admin_state_up=True,
status='PENDING_CREATE',
status_description='')
return vip_obj
def _build_expectedvip_contents(self, testvip, subnet):
expectedvip = dict(id=testvip['id'],
name=testvip['name'],
description=testvip['description'],
tenant_id=testvip['tenant_id'],
subnet_id=testvip['subnet_id'],
address=testvip['address'],
network_id=subnet['network_id'],
port_id=testvip['port_id'],
pool_id=testvip['pool_id'],
protocol=testvip['protocol'],
protocol_port=testvip['protocol_port'],
connection_limit=testvip['connection_limit'],
admin_state_up=testvip['admin_state_up'])
return expectedvip
def _build_updated_testvip_contents(self, testvip, subnet, pool):
#update some updateable fields of the vip
testvip['name'] = 'updated testvip'
testvip['description'] = 'An updated version of test vip'
testvip['connection_limit'] = 2000
return testvip
def _build_updated_expectedvip_contents(self, testvip, subnet, pool):
expectedvip = dict(name=testvip['name'],
description=testvip['description'],
connection_limit=testvip['connection_limit'],
admin_state_up=testvip['admin_state_up'],
pool_id=testvip['pool_id'])
return expectedvip
def _build_testpool_contents(self, subnet):
pool_obj = dict(id=TESTPOOL_ID,
name='testpool',
description='a test pool',
tenant_id=self._tenant_id,
subnet_id=subnet['id'],
protocol='HTTP',
vip_id=None,
admin_state_up=True,
lb_method='ROUND_ROBIN',
status='PENDING_CREATE',
status_description='',
members=[],
health_monitors=[],
health_monitors_status=None,
provider=LBAAS_PROVIDER_NAME)
return pool_obj
def _build_expectedpool_contents(self, testpool, subnet):
expectedpool = dict(id=testpool['id'],
name=testpool['name'],
description=testpool['description'],
tenant_id=testpool['tenant_id'],
subnet_id=testpool['subnet_id'],
network_id=subnet['network_id'],
protocol=testpool['protocol'],
vip_id=testpool['vip_id'],
lb_method=testpool['lb_method'],
snat_ip=TESTPOOL_SNATIP_ADDRESS,
port_id=TESTPOOL_PORT_ID,
admin_state_up=testpool['admin_state_up'])
return expectedpool
def _build_updated_testpool_contents(self, testpool, subnet):
updated_pool = dict(testpool.items())
updated_pool['name'] = 'updated testpool'
updated_pool['description'] = 'An updated version of test pool'
updated_pool['lb_method'] = 'LEAST_CONNECTIONS'
updated_pool['admin_state_up'] = True
updated_pool['provider'] = LBAAS_PROVIDER_NAME
updated_pool['status'] = 'PENDING_UPDATE'
updated_pool['status_description'] = ''
updated_pool['members'] = []
updated_pool["health_monitors"] = []
updated_pool["health_monitors_status"] = None
return updated_pool
def _build_updated_expectedpool_contents(self, testpool, subnet):
expectedpool = dict(name=testpool['name'],
description=testpool['description'],
lb_method=testpool['lb_method'],
admin_state_up=testpool['admin_state_up'])
return expectedpool
def _build_testmember_contents(self, pool):
member_obj = dict(
id=TESTMEMBER_ID,
tenant_id=self._tenant_id,
pool_id=pool['id'],
address=TESTMEMBER_IP,
protocol_port=8080,
weight=2,
admin_state_up=True,
status='PENDING_CREATE',
status_description='')
return member_obj
def _build_expectedmember_contents(self, testmember):
expectedmember = dict(
id=testmember['id'],
tenant_id=testmember['tenant_id'],
pool_id=testmember['pool_id'],
address=testmember['address'],
protocol_port=testmember['protocol_port'],
weight=testmember['weight'],
admin_state_up=testmember['admin_state_up'])
return expectedmember
def _build_updated_testmember_contents(self, testmember):
updated_member = dict(testmember.items())
updated_member.update(
weight=3,
admin_state_up=True,
status='PENDING_CREATE',
status_description=''
)
return updated_member
def _build_updated_expectedmember_contents(self, testmember):
expectedmember = dict(weight=testmember['weight'],
pool_id=testmember['pool_id'],
admin_state_up=testmember['admin_state_up'])
return expectedmember
def _build_testhealthmonitor_contents(self, pool):
monitor_obj = dict(
id=TESTMONITOR_ID,
tenant_id=self._tenant_id,
type='TCP',
delay=10,
timeout=5,
max_retries=3,
admin_state_up=True,
pools=[])
pool_obj = dict(status='PENDING_CREATE',
status_description=None,
pool_id=pool['id'])
monitor_obj['pools'].append(pool_obj)
return monitor_obj
def _build_expectedhealthmonitor_contents(self, testhealthmonitor):
expectedmonitor = dict(id=testhealthmonitor['id'],
tenant_id=testhealthmonitor['tenant_id'],
type=testhealthmonitor['type'],
delay=testhealthmonitor['delay'],
timeout=testhealthmonitor['timeout'],
max_retries=testhealthmonitor['max_retries'],
admin_state_up=(
testhealthmonitor['admin_state_up']))
return expectedmonitor
def _build_updated_testhealthmonitor_contents(self, testmonitor):
updated_monitor = dict(testmonitor.items())
updated_monitor.update(
delay=30,
timeout=3,
max_retries=5,
admin_state_up=True
)
return updated_monitor
def _build_updated_expectedhealthmonitor_contents(self, testmonitor):
expectedmonitor = dict(delay=testmonitor['delay'],
timeout=testmonitor['timeout'],
max_retries=testmonitor['max_retries'],
admin_state_up=testmonitor['admin_state_up'])
return expectedmonitor
def _mock_update_status(self):
#patch the plugin's update_status() method with a mock object
self.mock_update_status_patcher = mock.patch.object(
self.driver.plugin,
'update_status')
self.mock_update_status_obj = self.mock_update_status_patcher.start()
def mock_create_resource_func(*args, **kwargs):
return 201, {}
def mock_update_resource_func(*args, **kwargs):
return 202, {}
def mock_retrieve_resource_func(*args, **kwargs):
return 200, {}
def mock_remove_resource_func(*args, **kwargs):
return 200, {}
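Every driver test in this file enforces the same contract: one REST call to the NCC with the expected payload, then a status flip in the neutron DB, ACTIVE on success and ERROR on an NCCException. Roughly, as a sketch with a hypothetical helper name rather than the removed driver code:

def _create_resource_with_status(client, plugin, context, model,
                                 resource_path, resource_name, payload):
    # push the payload to the backend, then record the outcome in the DB
    try:
        client.create_resource(None, resource_path, resource_name, payload)
        status = 'ACTIVE'
    except Exception:
        status = 'ERROR'
    plugin.update_status(context, model, payload['id'], status)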

View File

@ -1,982 +0,0 @@
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import mock
from neutron import context
from neutron import manager
from neutron.plugins.common import constants
from neutron_lib import constants as n_constants
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import queue as Queue
from neutron_lbaas.extensions import loadbalancer
from neutron_lbaas.services.loadbalancer.drivers.radware import driver
from neutron_lbaas.services.loadbalancer.drivers.radware \
import exceptions as r_exc
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate')
SERVER_DOWN_CODES = (-1, 301, 307)
class QueueMock(Queue.Queue):
def __init__(self, completion_handler):
self.completion_handler = completion_handler
super(QueueMock, self).__init__()
def put_nowait(self, oper):
self.completion_handler(oper)
def _recover_function_mock(action, resource, data, headers, binary=False):
pass
def rest_call_function_mock(action, resource, data, headers, binary=False):
if rest_call_function_mock.RESPOND_WITH_ERROR:
return 400, 'error_status', 'error_description', None
if rest_call_function_mock.RESPOND_WITH_SERVER_DOWN in SERVER_DOWN_CODES:
val = rest_call_function_mock.RESPOND_WITH_SERVER_DOWN
return val, 'error_status', 'error_description', None
if action == 'GET':
return _get_handler(resource)
elif action == 'DELETE':
return _delete_handler(resource)
elif action == 'POST':
return _post_handler(resource, binary)
else:
return 0, None, None, None
def _get_handler(resource):
if resource == GET_200[2]:
if rest_call_function_mock.TEMPLATES_MISSING:
data = jsonutils.loads('[]')
else:
data = jsonutils.loads(
'[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]'
)
return 200, '', '', data
if resource in GET_200:
return 200, '', '', ''
else:
data = jsonutils.loads('{"complete":"True", "success": "True"}')
return 202, '', '', data
def _delete_handler(resource):
return 404, '', '', {'message': 'Not Found'}
def _post_handler(resource, binary):
if re.search(r'/api/workflow/.+/action/.+', resource):
data = jsonutils.loads('{"uri":"some_uri"}')
return 202, '', '', data
elif re.search(r'/api/service\?name=.+', resource):
data = jsonutils.loads('{"links":{"actions":{"provision":"someuri"}}}')
return 201, '', '', data
elif binary:
return 201, '', '', ''
else:
return 202, '', '', ''
RADWARE_PROVIDER = ('LOADBALANCER:radware:neutron_lbaas.services.'
'loadbalancer.drivers.radware.driver.'
'LoadBalancerDriver:default')
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=RADWARE_PROVIDER)
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerPlugin, self).setUp()
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': False})
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': False})
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_SERVER_DOWN': 200})
self.operation_completer_start_mock = mock.Mock(
return_value=None)
self.operation_completer_join_mock = mock.Mock(
return_value=None)
self.driver_rest_call_mock = mock.Mock(
side_effect=rest_call_function_mock)
self.flip_servers_mock = mock.Mock(
return_value=None)
self.recover_mock = mock.Mock(
side_effect=_recover_function_mock)
radware_driver = self.plugin_instance.drivers['radware']
radware_driver.completion_handler.start = (
self.operation_completer_start_mock)
radware_driver.completion_handler.join = (
self.operation_completer_join_mock)
self.orig_call = radware_driver.rest_client.call
self.orig__call = radware_driver.rest_client._call
radware_driver.rest_client.call = self.driver_rest_call_mock
radware_driver.rest_client._call = self.driver_rest_call_mock
radware_driver.rest_client._flip_servers = self.flip_servers_mock
radware_driver.rest_client._recover = self.recover_mock
radware_driver.completion_handler.rest_client.call = (
self.driver_rest_call_mock)
radware_driver.queue = QueueMock(
radware_driver.completion_handler.handle_operation_completion)
self.addCleanup(radware_driver.completion_handler.join)
def test_get_pip(self):
"""Call _get_pip twice and verify that a Port is created once."""
port_dict = {'fixed_ips': [{'subnet_id': '10.10.10.10',
'ip_address': '11.11.11.11'}]}
port_data = {
'tenant_id': 'tenant_id',
'name': 'port_name',
'network_id': 'network_id',
'mac_address': n_constants.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': '',
'device_owner': 'neutron:' + constants.LOADBALANCER,
'fixed_ips': [{'subnet_id': '10.10.10.10'}]
}
self.plugin_instance._core_plugin.get_ports = mock.Mock(
return_value=[])
self.plugin_instance._core_plugin.create_port = mock.Mock(
return_value=port_dict)
radware_driver = self.plugin_instance.drivers['radware']
radware_driver._get_pip(context.get_admin_context(),
'tenant_id', 'port_name',
'network_id', '10.10.10.10')
self.plugin_instance._core_plugin.get_ports.assert_called_once_with(
mock.ANY, filters={'name': ['port_name']})
self.plugin_instance._core_plugin.create_port.assert_called_once_with(
mock.ANY, {'port': port_data})
self.plugin_instance._core_plugin.create_port.reset_mock()
self.plugin_instance._core_plugin.get_ports.reset_mock()
self.plugin_instance._core_plugin.get_ports.return_value = [port_dict]
radware_driver._get_pip(context.get_admin_context(),
'tenant_id', 'port_name',
'network_id', '10.10.10.10')
self.plugin_instance._core_plugin.get_ports.assert_called_once_with(
mock.ANY, filters={'name': ['port_name']})
self.assertFalse(self.plugin_instance._core_plugin.create_port.called)
def test_rest_client_recover_was_called(self):
"""Call the real REST client and verify _recover is called."""
radware_driver = self.plugin_instance.drivers['radware']
radware_driver.rest_client.call = self.orig_call
radware_driver.rest_client._call = self.orig__call
self.assertRaises(r_exc.RESTRequestFailure,
radware_driver._verify_workflow_templates)
self.recover_mock.assert_called_once_with('GET',
'/api/workflowTemplate',
None, None, False)
def test_rest_client_flip_servers(self):
radware_driver = self.plugin_instance.drivers['radware']
server = radware_driver.rest_client.server
sec_server = radware_driver.rest_client.secondary_server
radware_driver.rest_client._flip_servers()
self.assertEqual(server,
radware_driver.rest_client.secondary_server)
self.assertEqual(sec_server,
radware_driver.rest_client.server)
def test_verify_workflow_templates_server_down(self):
"""Test the rest call failure when backend is down."""
for value in SERVER_DOWN_CODES:
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_SERVER_DOWN': value})
self.assertRaises(r_exc.RESTRequestFailure,
self.plugin_instance.drivers['radware'].
_verify_workflow_templates)
def test_verify_workflow_templates(self):
"""Test the rest call failure handling by Exception raising."""
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': True})
self.assertRaises(r_exc.WorkflowMissing,
self.plugin_instance.drivers['radware'].
_verify_workflow_templates)
def test_create_vip_failure(self):
"""Test the rest call failure handling by Exception raising."""
with self.network() as network:
with self.subnet(network=network) as subnet:
with self.pool(do_delete=False,
provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': True})
self.assertRaises(r_exc.RESTRequestFailure,
self.plugin_instance.create_vip,
context.get_admin_context(),
{'vip': vip_data})
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
# Test creation REST calls
calls = [
mock.call('GET', u'/api/service/srv_' +
subnet['subnet']['network_id'], None, None),
mock.call('POST', u'/api/service?name=srv_' +
subnet['subnet']['network_id'] + '&tenant=' +
vip['tenant_id'], mock.ANY,
driver.CREATE_SERVICE_HEADER),
mock.call('GET', u'/api/workflow/l2_l3_' +
subnet['subnet']['network_id'], None, None),
mock.call('POST', '/api/workflow/l2_l3_' +
subnet['subnet']['network_id'] +
'/action/setup_l2_l3',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('POST', 'someuri',
None, driver.PROVISION_HEADER),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l4' +
'?name=' + pool['pool']['id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l2_l3' +
'?name=l2_l3_' + subnet['subnet']['network_id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('GET', '/api/workflow/' +
pool['pool']['id'], None, None)
]
self.driver_rest_call_mock.assert_has_calls(calls,
any_order=True)
# Test DB
new_vip = self.plugin_instance.get_vip(
context.get_admin_context(),
vip['id']
)
self.assertEqual(constants.ACTIVE, new_vip['status'])
# Delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
# Test deletion REST calls
calls = [
mock.call('DELETE', u'/api/workflow/' + pool['pool']['id'],
None, None)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
def test_create_vip_2_leg(self):
"""Test creation of a VIP where Alteon VIP and PIP are different."""
with self.subnet(cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as pool_sub:
with self.pool(provider='radware',
subnet_id=pool_sub['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
name_suffix = '%s_%s' % (subnet['subnet']['network_id'],
pool_sub['subnet']['network_id'])
# Test creation REST calls
calls = [
mock.call('GET', '/api/workflowTemplate', None, None),
mock.call('GET', '/api/service/srv_' + name_suffix,
None, None),
mock.call('POST', '/api/service?name=srv_' +
name_suffix + '&tenant=' + vip['tenant_id'],
mock.ANY, driver.CREATE_SERVICE_HEADER),
mock.call('POST', 'someuri',
None, driver.PROVISION_HEADER),
mock.call('GET', '/api/workflow/l2_l3_' + name_suffix,
None, None),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l2_l3' +
'?name=l2_l3_' + name_suffix,
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/l2_l3_' +
name_suffix + '/action/setup_l2_l3',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('GET', '/api/workflow/' +
pool['pool']['id'], None, None),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l4' +
'?name=' + pool['pool']['id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/' +
pool['pool']['id'] + '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER)
]
self.driver_rest_call_mock.assert_has_calls(calls)
# Test DB
new_vip = self.plugin_instance.get_vip(
context.get_admin_context(),
vip['id']
)
self.assertEqual(constants.ACTIVE, new_vip['status'])
# Test that PIP neutron port was created
pip_port_filter = {
'name': ['pip_' + vip['id']],
}
plugin = manager.NeutronManager.get_plugin()
num_ports = plugin.get_ports_count(
context.get_admin_context(), filters=pip_port_filter)
self.assertGreater(num_ports, 0)
# Delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
# Test deletion REST calls
calls = [
mock.call('DELETE', u'/api/workflow/' +
pool['pool']['id'], None, None)
]
self.driver_rest_call_mock.assert_has_calls(calls)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
do_delete=False,
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
vip_data['status'] = constants.PENDING_UPDATE
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['id'], {'vip': vip_data})
# Test REST calls
calls = [
mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
updated_vip = self.plugin_instance.get_vip(
context.get_admin_context(), vip['id'])
self.assertEqual(constants.ACTIVE, updated_vip['status'])
# delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
def test_update_vip_2_leg(self):
"""Test update of a VIP where Alteon VIP and PIP are different."""
with self.subnet(cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
with self.pool(provider='radware',
subnet_id=pool_subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['id'], {'vip': vip_data})
# Test REST calls
calls = [
mock.call('POST', '/api/workflow/' +
pool['pool']['id'] + '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
]
self.driver_rest_call_mock.assert_has_calls(calls)
updated_vip = self.plugin_instance.get_vip(
context.get_admin_context(), vip['id'])
self.assertEqual(constants.ACTIVE, updated_vip['status'])
# delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
def test_delete_vip_failure(self):
plugin = self.plugin_instance
with self.network() as network:
with self.subnet(network=network) as subnet:
with self.pool(do_delete=False,
provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
with self.member(pool_id=pool['pool']['id'],
do_delete=False) as mem1, \
self.member(pool_id=pool['pool']['id'],
address='192.168.1.101',
do_delete=False) as mem2, \
self.health_monitor(do_delete=False) as hm, \
self.vip(pool=pool, subnet=subnet,
do_delete=False) as vip:
plugin.create_pool_health_monitor(
context.get_admin_context(), hm, pool['pool']['id']
)
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': True})
plugin.delete_vip(
context.get_admin_context(), vip['vip']['id'])
u_vip = plugin.get_vip(
context.get_admin_context(), vip['vip']['id'])
u_pool = plugin.get_pool(
context.get_admin_context(), pool['pool']['id'])
u_mem1 = plugin.get_member(
context.get_admin_context(), mem1['member']['id'])
u_mem2 = plugin.get_member(
context.get_admin_context(), mem2['member']['id'])
u_phm = plugin.get_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'], pool['pool']['id'])
self.assertEqual(constants.ERROR, u_vip['status'])
self.assertEqual(constants.ACTIVE, u_pool['status'])
self.assertEqual(constants.ACTIVE, u_mem1['status'])
self.assertEqual(constants.ACTIVE, u_mem2['status'])
self.assertEqual(constants.ACTIVE, u_phm['status'])
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
do_delete=False,
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
calls = [
mock.call('DELETE', '/api/workflow/' + pool['pool']['id'],
None, None)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(loadbalancer.VipNotFound,
self.plugin_instance.get_vip,
context.get_admin_context(), vip['id'])
def test_delete_vip_2_leg(self):
"""Test deletion of a VIP where Alteon VIP and PIP are different."""
self.driver_rest_call_mock.reset_mock()
with self.subnet(cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
with self.pool(provider='radware',
do_delete=False,
subnet_id=pool_subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
calls = [
mock.call('DELETE', '/api/workflow/' +
pool['pool']['id'], None, None)
]
self.driver_rest_call_mock.assert_has_calls(calls)
# Test that PIP neutron port was deleted
pip_port_filter = {
'name': ['pip_' + vip['id']],
}
plugin = manager.NeutronManager.get_plugin()
num_ports = plugin.get_ports_count(
context.get_admin_context(), filters=pip_port_filter)
self.assertEqual(0, num_ports)
self.assertRaises(loadbalancer.VipNotFound,
self.plugin_instance.get_vip,
context.get_admin_context(), vip['id'])
def test_update_pool(self):
with self.subnet():
with self.pool() as pool:
del pool['pool']['provider']
del pool['pool']['status']
self.plugin_instance.update_pool(
context.get_admin_context(),
pool['pool']['id'], pool)
pool_db = self.plugin_instance.get_pool(
context.get_admin_context(), pool['pool']['id'])
self.assertEqual(constants.PENDING_UPDATE, pool_db['status'])
def test_delete_pool_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
do_delete=False,
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.assertRaises(loadbalancer.PoolInUse,
self.plugin_instance.delete_pool,
context.get_admin_context(),
pool['pool']['id'])
def test_create_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.vip(pool=p, subnet=subnet):
with self.member(pool_id=p['pool']['id']):
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
def test_create_member_on_different_subnets(self):
with self.subnet() as vip_sub, \
self.subnet(cidr='20.0.0.0/24') as pool_sub, \
self.subnet(cidr='30.0.0.0/24') as member_sub, \
self.pool(provider='radware',
subnet_id=pool_sub['subnet']['id']) as pool, \
self.port(subnet=vip_sub,
fixed_ips=[{'ip_address': '10.0.0.2'}]), \
self.port(subnet=pool_sub,
fixed_ips=[{'ip_address': '20.0.0.2'}]), \
self.port(subnet=member_sub,
fixed_ips=[{'ip_address': '30.0.0.2'}]), \
self.member(pool_id=pool['pool']['id'], address='10.0.0.2'), \
self.member(pool_id=pool['pool']['id'], address='20.0.0.2'), \
self.member(pool_id=pool['pool']['id'], address='30.0.0.2'), \
self.vip(pool=pool, subnet=vip_sub):
calls = [
mock.call(
'POST', '/api/workflow/' +
pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
mock_calls = self.driver_rest_call_mock.mock_calls
params = mock_calls[-2][1][2]['parameters']
member_subnet_array = params['member_subnet_array']
member_mask_array = params['member_mask_array']
member_gw_array = params['member_gw_array']
self.assertEqual(['10.0.0.0',
'255.255.255.255',
'30.0.0.0'],
member_subnet_array)
self.assertEqual(['255.255.255.0',
'255.255.255.255',
'255.255.255.0'],
member_mask_array)
self.assertEqual(
[pool_sub['subnet']['gateway_ip'],
'255.255.255.255',
pool_sub['subnet']['gateway_ip']],
member_gw_array)
def test_create_member_on_different_subnet_no_port(self):
with self.subnet() as vip_sub, \
self.subnet(cidr='20.0.0.0/24') as pool_sub, \
self.subnet(cidr='30.0.0.0/24'), \
self.pool(provider='radware',
subnet_id=pool_sub['subnet']['id']) as pool, \
self.member(pool_id=pool['pool']['id'],
address='30.0.0.2'), \
self.vip(pool=pool, subnet=vip_sub):
calls = [
mock.call(
'POST', '/api/workflow/' +
pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
mock_calls = self.driver_rest_call_mock.mock_calls
params = mock_calls[-2][1][2]['parameters']
member_subnet_array = params['member_subnet_array']
member_mask_array = params['member_mask_array']
member_gw_array = params['member_gw_array']
self.assertEqual(['30.0.0.2'],
member_subnet_array)
self.assertEqual(['255.255.255.255'],
member_mask_array)
self.assertEqual([pool_sub['subnet']['gateway_ip']],
member_gw_array)
def test_create_member_on_different_subnet_multiple_ports(self):
cfg.CONF.set_override("allow_overlapping_ips", True)
with self.network() as other_net, \
self.subnet() as vip_sub, \
self.subnet(cidr='20.0.0.0/24') as pool_sub, \
self.subnet(cidr='30.0.0.0/24') as member_sub1, \
self.subnet(network=other_net, cidr='30.0.0.0/24') as member_sub2, \
self.pool(provider='radware',
subnet_id=pool_sub['subnet']['id']) as pool, \
self.port(subnet=member_sub1,
fixed_ips=[{'ip_address': '30.0.0.2'}]), \
self.port(subnet=member_sub2,
fixed_ips=[{'ip_address': '30.0.0.2'}]), \
self.member(pool_id=pool['pool']['id'],
address='30.0.0.2'), \
self.vip(pool=pool, subnet=vip_sub):
calls = [
mock.call(
'POST', '/api/workflow/' +
pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
calls = self.driver_rest_call_mock.mock_calls
params = calls[-2][1][2]['parameters']
m_sub_array = params['member_subnet_array']
m_mask_array = params['member_mask_array']
m_gw_array = params['member_gw_array']
self.assertEqual(['30.0.0.2'],
m_sub_array)
self.assertEqual(['255.255.255.255'],
m_mask_array)
self.assertEqual(
[pool_sub['subnet']['gateway_ip']],
m_gw_array)
def test_update_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.member(pool_id=p['pool']['id']) as member:
with self.vip(pool=p, subnet=subnet):
self.plugin_instance.update_member(
context.get_admin_context(),
member['member']['id'], member
)
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
updated_member = self.plugin_instance.get_member(
context.get_admin_context(),
member['member']['id']
)
self.assertEqual(constants.ACTIVE,
updated_member['status'])
def test_update_member_without_vip(self):
with self.subnet():
with self.pool(provider='radware') as pool:
with self.member(pool_id=pool['pool']['id']) as member:
member['member']['status'] = constants.PENDING_UPDATE
updated_member = self.plugin_instance.update_member(
context.get_admin_context(),
member['member']['id'], member
)
self.assertEqual(constants.PENDING_UPDATE,
updated_member['status'])
def test_delete_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.member(pool_id=p['pool']['id'],
do_delete=False) as m:
with self.vip(pool=p, subnet=subnet):
# Reset the mock and wait to be sure the member changed
# status from PENDING_CREATE to ACTIVE.
self.plugin_instance.delete_member(
context.get_admin_context(),
m['member']['id']
)
name, args, kwargs = (
self.driver_rest_call_mock.mock_calls[-2]
)
deletion_post_graph = str(args[2])
self.assertTrue(re.search(
r'.*\'member_address_array\': \[\].*',
deletion_post_graph
))
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
context.get_admin_context(),
m['member']['id'])
def test_delete_member_without_vip(self):
with self.subnet():
with self.pool(provider='radware') as p:
with self.member(pool_id=p['pool']['id'],
do_delete=False) as m:
self.plugin_instance.delete_member(
context.get_admin_context(), m['member']['id']
)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
context.get_admin_context(),
m['member']['id'])
def test_create_hm_with_vip(self):
with self.subnet() as subnet:
with self.health_monitor() as hm:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.plugin_instance.create_pool_health_monitor(
context.get_admin_context(),
hm, pool['pool']['id']
)
# Test REST calls
calls = [
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
phm = self.plugin_instance.get_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'], pool['pool']['id']
)
self.assertEqual(constants.ACTIVE, phm['status'])
def test_delete_pool_hm_with_vip(self):
with self.subnet() as subnet:
with self.health_monitor(do_delete=False) as hm:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.plugin_instance.create_pool_health_monitor(
context.get_admin_context(),
hm, pool['pool']['id']
)
self.plugin_instance.delete_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'],
pool['pool']['id']
)
name, args, kwargs = (
self.driver_rest_call_mock.mock_calls[-2]
)
deletion_post_graph = str(args[2])
self.assertTrue(re.search(
r'.*\'hm_uuid_array\': \[\].*',
deletion_post_graph
))
calls = [
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(
loadbalancer.PoolMonitorAssociationNotFound,
self.plugin_instance.get_pool_health_monitor,
context.get_admin_context(),
hm['health_monitor']['id'],
pool['pool']['id']
)


@ -1,735 +0,0 @@
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron import context
from neutron.db import servicetype_db as st_db
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.common import constants
from neutron.tests.unit import testlib_api
from oslo_utils import uuidutils
import six
from six import moves
from webob import exc
from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb
from neutron_lbaas.extensions import loadbalancer
from neutron_lbaas.services.loadbalancer.drivers.common \
import agent_driver_base
from neutron_lbaas.tests import base
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
def reset_device_driver():
agent_driver_base.AgentDriverBase.device_driver = None
self.addCleanup(reset_device_driver)
self.mock_importer = mock.patch.object(
agent_driver_base, 'importutils').start()
# needed to reload provider configuration
st_db.ServiceTypeManager._instance = None
agent_driver_base.AgentDriverBase.device_driver = 'dummy'
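# AgentDriverBase cannot be instantiated without a device_driver name,
# so pin a dummy one here; reset_device_driver (registered above)
# undoes this after each test.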
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=('LOADBALANCER:lbaas:neutron_lbaas.services.'
'loadbalancer.drivers.common.agent_driver_base.'
'AgentDriverBase:default'))
# we need access to loaded plugins to modify models
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerCallbacks, self).setUp()
self.callbacks = agent_driver_base.LoadBalancerCallbacks(
self.plugin_instance
)
get_lbaas_agents_patcher = mock.patch(
'neutron_lbaas.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.get_lbaas_agents')
get_lbaas_agents_patcher.start()
def test_get_ready_devices(self):
with self.vip() as vip:
with mock.patch('neutron_lbaas.services.loadbalancer.'
'agent_scheduler.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual([vip['vip']['pool_id']], ready)
def test_get_ready_devices_multiple_vips_and_pools(self):
ctx = context.get_admin_context()
# add 3 pools and 2 vips directly to DB
# to create 2 "ready" devices and one pool without vip
pools = []
for i in moves.range(3):
pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
subnet_id=self._subnet_id,
protocol="HTTP",
lb_method="ROUND_ROBIN",
status=constants.ACTIVE,
admin_state_up=True))
ctx.session.add(pools[i])
vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[0].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip0)
pools[0].vip_id = vip0.id
vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[1].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip1)
pools[1].vip_id = vip1.id
ctx.session.flush()
self.assertEqual(3, ctx.session.query(ldb.Pool).count())
self.assertEqual(2, ctx.session.query(ldb.Vip).count())
with mock.patch('neutron_lbaas.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin'
'.list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {'pools': [{'id': pools[0].id},
{'id': pools[1].id},
{'id': pools[2].id}]}
ready = self.callbacks.get_ready_devices(ctx)
self.assertEqual(3, len(ready))
self.assertIn(pools[0].id, ready)
self.assertIn(pools[1].id, ready)
self.assertIn(pools[2].id, ready)
# cleanup
ctx.session.query(ldb.Pool).delete()
ctx.session.query(ldb.Vip).delete()
def test_get_ready_devices_inactive_vip(self):
with self.vip() as vip:
# Set the vip inactive. We need to use the plugin directly
# since status is not tenant mutable.
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['vip']['id'],
{'vip': {'status': constants.INACTIVE}}
)
with mock.patch('neutron_lbaas.services.loadbalancer.'
'agent_scheduler.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual([vip['vip']['pool_id']], ready)
def test_get_ready_devices_inactive_pool(self):
with self.vip() as vip:
# Set the pool inactive. We need to use the plugin directly
# since status is not tenant mutable.
self.plugin_instance.update_pool(
context.get_admin_context(),
vip['vip']['pool_id'],
{'pool': {'status': constants.INACTIVE}}
)
with mock.patch('neutron_lbaas.services.loadbalancer.'
'agent_scheduler.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertFalse(ready)
def test_get_logical_device_non_active(self):
with self.pool() as pool:
ctx = context.get_admin_context()
for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'):
self.plugin_instance.update_status(
ctx, ldb.Pool, pool['pool']['id'], status)
pool['pool']['status'] = status
expected = {
'pool': pool['pool'],
'members': [],
'healthmonitors': [],
'driver': 'dummy'
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id']
)
self.assertEqual(expected, logical_config)
def test_get_logical_device_active(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
# activate objects
self.plugin_instance.update_status(
ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Member, member['member']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE')
# build the expected logical device config
port = self.plugin_instance._core_plugin.get_port(
ctx, vip['vip']['port_id']
)
subnet = self.plugin_instance._core_plugin.get_subnet(
ctx, vip['vip']['subnet_id']
)
port['fixed_ips'][0]['subnet'] = subnet
# reload pool to add members and vip
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id']
)
pool['status'] = constants.ACTIVE
vip['vip']['status'] = constants.ACTIVE
vip['vip']['port'] = port
member['member']['status'] = constants.ACTIVE
expected = {
'pool': pool,
'vip': vip['vip'],
'members': [member['member']],
'healthmonitors': [],
'driver': 'dummy'
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['id']
)
self.assertEqual(expected, logical_config)
def test_get_logical_device_inactive_member(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Member,
member['member']['id'],
'INACTIVE')
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id'])
member['member']['status'] = constants.INACTIVE
self.assertEqual([member['member']],
logical_config['members'])
def test_get_logical_device_pending_create_member(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
member = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('PENDING_CREATE',
member['status'])
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id'])
self.assertEqual([member], logical_config['members'])
def test_get_logical_device_pending_create_health_monitor(self):
with self.health_monitor() as monitor:
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
self.plugin_instance.create_pool_health_monitor(
ctx, monitor, pool['pool']['id'])
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id'])
monitor = self.plugin_instance.get_health_monitor(
ctx, monitor['health_monitor']['id'])
self.assertEqual(
'PENDING_CREATE',
pool['health_monitors_status'][0]['status'])
logical_config = self.callbacks.get_logical_device(
ctx, pool['id'])
self.assertEqual([monitor],
logical_config['healthmonitors'])
def _update_port_test_helper(self, expected, func, **kwargs):
core = self.plugin_instance._core_plugin
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
ctx = context.get_admin_context()
func(ctx, port_id=vip['vip']['port_id'], **kwargs)
db_port = core.get_port(ctx, vip['vip']['port_id'])
for k, v in six.iteritems(expected):
self.assertEqual(v, db_port[k])
def test_plug_vip_port(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'admin_state_up': True
}
self._update_port_test_helper(
exp,
self.callbacks.plug_vip_port,
host='host'
)
def test_plug_vip_port_mock_with_host(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'admin_state_up': True,
portbindings.HOST_ID: 'host'
}
with mock.patch.object(
self.plugin._core_plugin, 'update_port') as mock_update_port:
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
self.callbacks.plug_vip_port(
ctx, port_id=vip['vip']['port_id'], host='host')
mock_update_port.assert_called_once_with(
ctx, vip['vip']['port_id'],
{'port': testlib_api.SubDictMatch(exp)})
def test_unplug_vip_port(self):
exp = {
'device_owner': '',
'device_id': '',
'admin_state_up': False
}
self._update_port_test_helper(
exp,
self.callbacks.unplug_vip_port,
host='host'
)
def test_pool_deployed(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('PENDING_CREATE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('PENDING_CREATE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('PENDING_CREATE', m['status'])
self.callbacks.pool_deployed(ctx, pool['pool']['id'])
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('ACTIVE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('ACTIVE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('ACTIVE', m['status'])
def test_update_status_pool(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('PENDING_CREATE', p['status'])
self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('ACTIVE', p['status'])
def test_update_status_pool_deleted_already(self):
with mock.patch.object(agent_driver_base, 'LOG') as mock_log:
pool_id = 'deleted_pool'
ctx = context.get_admin_context()
self.assertRaises(loadbalancer.PoolNotFound,
self.plugin_instance.get_pool, ctx, pool_id)
self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
self.assertTrue(mock_log.warning.called)
def test_update_status_health_monitor(self):
with self.health_monitor() as hm, \
self.pool() as pool:
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
hm_id = hm['health_monitor']['id']
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('PENDING_CREATE', h['status'])
self.callbacks.update_status(
ctx, 'health_monitor',
{'monitor_id': hm_id, 'pool_id': pool_id}, 'ACTIVE')
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('ACTIVE', h['status'])
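# Note: health monitor status is keyed by the compound id above
# ({'monitor_id': ..., 'pool_id': ...}) because in LBaaS v1 the status
# lives on the pool<->monitor association, not on the monitor itself.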
class TestLoadBalancerAgentApi(base.BaseTestCase):
def setUp(self):
super(TestLoadBalancerAgentApi, self).setUp()
self.api = agent_driver_base.LoadBalancerAgentApi('topic')
def test_init(self):
self.assertEqual('topic', self.api.client.target.topic)
def _call_test_helper(self, method_name, method_args):
with mock.patch.object(self.api.client, 'cast') as rpc_mock, \
mock.patch.object(self.api.client, 'prepare') as prepare_mock:
prepare_mock.return_value = self.api.client
getattr(self.api, method_name)(mock.sentinel.context,
host='host',
**method_args)
prepare_args = {'server': 'host'}
prepare_mock.assert_called_once_with(**prepare_args)
if method_name == 'agent_updated':
method_args = {'payload': method_args}
rpc_mock.assert_called_once_with(mock.sentinel.context, method_name,
**method_args)
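# Every notification is a fire-and-forget cast aimed at one agent:
# prepare(server=host) pins the RPC server, then cast() delivers the
# method asynchronously.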
def test_agent_updated(self):
self._call_test_helper('agent_updated', {'admin_state_up': 'test'})
def test_create_pool(self):
self._call_test_helper('create_pool', {'pool': 'test',
'driver_name': 'dummy'})
def test_update_pool(self):
self._call_test_helper('update_pool', {'old_pool': 'test',
'pool': 'test'})
def test_delete_pool(self):
self._call_test_helper('delete_pool', {'pool': 'test'})
def test_create_vip(self):
self._call_test_helper('create_vip', {'vip': 'test'})
def test_update_vip(self):
self._call_test_helper('update_vip', {'old_vip': 'test',
'vip': 'test'})
def test_delete_vip(self):
self._call_test_helper('delete_vip', {'vip': 'test'})
def test_create_member(self):
self._call_test_helper('create_member', {'member': 'test'})
def test_update_member(self):
self._call_test_helper('update_member', {'old_member': 'test',
'member': 'test'})
def test_delete_member(self):
self._call_test_helper('delete_member', {'member': 'test'})
def test_create_monitor(self):
self._call_test_helper('create_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
def test_update_monitor(self):
self._call_test_helper('update_pool_health_monitor',
{'old_health_monitor': 'test',
'health_monitor': 'test',
'pool_id': 'test'})
def test_delete_monitor(self):
self._call_test_helper('delete_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase):
def setUp(self):
self.log = mock.patch.object(agent_driver_base, 'LOG').start()
api_cls = mock.patch.object(agent_driver_base,
'LoadBalancerAgentApi').start()
super(TestLoadBalancerPluginNotificationWrapper, self).setUp()
self.mock_api = api_cls.return_value
self.mock_get_driver = mock.patch.object(self.plugin_instance,
'_get_driver').start()
self.mock_get_driver.return_value = agent_driver_base.AgentDriverBase(
self.plugin_instance)
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
self.mock_api.create_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
ctx = context.get_admin_context()
old_vip = vip['vip'].copy()
vip['vip'].pop('status')
new_vip = self.plugin_instance.update_vip(
ctx,
vip['vip']['id'],
vip
)
self.mock_api.update_vip.assert_called_once_with(
mock.ANY,
old_vip,
new_vip,
'host'
)
self.assertEqual(
constants.PENDING_UPDATE,
new_vip['status']
)
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet,
do_delete=False) as vip:
ctx = context.get_admin_context()
self.plugin_instance.delete_vip(ctx, vip['vip']['id'])
vip['vip']['status'] = 'PENDING_DELETE'
self.mock_api.delete_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_create_pool(self):
with self.pool() as pool:
self.mock_api.create_pool.assert_called_once_with(
mock.ANY,
pool['pool'],
mock.ANY,
'dummy'
)
def test_update_pool_non_active(self):
with self.pool() as pool:
pool['pool']['status'] = 'INACTIVE'
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, orig_pool, 'host')
def test_update_pool_no_vip_id(self):
with self.pool() as pool:
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, orig_pool, updated, 'host')
def test_update_pool_with_vip_id(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
old_pool = pool['pool'].copy()
old_pool['vip_id'] = vip['vip']['id']
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, old_pool, updated, 'host')
def test_delete_pool(self):
with self.pool(do_delete=False) as pool:
req = self.new_delete_request('pools',
pool['pool']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
pool['pool']['status'] = 'PENDING_DELETE'
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, pool['pool'], 'host')
def test_create_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
self.mock_api.create_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_update_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
ctx = context.get_admin_context()
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.update_member.assert_called_once_with(
mock.ANY, member['member'], updated, 'host')
def test_update_member_new_pool(self):
with self.pool() as pool1:
pool1_id = pool1['pool']['id']
with self.pool() as pool2:
pool2_id = pool2['pool']['id']
with self.member(pool_id=pool1_id) as member:
self.mock_api.create_member.reset_mock()
ctx = context.get_admin_context()
old_member = member['member'].copy()
member['member']['pool_id'] = pool2_id
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, old_member, 'host')
self.mock_api.create_member.assert_called_once_with(
mock.ANY, updated, 'host')
def test_delete_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id,
do_delete=False) as member:
req = self.new_delete_request('members',
member['member']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
member['member']['status'] = 'PENDING_DELETE'
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_create_pool_health_monitor(self):
with self.health_monitor() as hm, \
self.pool() as pool:
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_delete_pool_health_monitor(self):
with self.pool() as pool, \
self.health_monitor() as hm:
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
hm['pools'][0]['status'] = 'PENDING_DELETE'
self.plugin_instance.delete_pool_health_monitor(
ctx, hm['id'], pool_id)
self.mock_api.delete_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_update_health_monitor_associated_with_pool(self):
with self.health_monitor(type='HTTP') as monitor, \
self.pool() as pool:
data = {
'health_monitor': {
'id': monitor['health_monitor']['id'],
'tenant_id': self._tenant_id
}
}
req = self.new_create_request(
'pools',
data,
fmt=self.fmt,
id=pool['pool']['id'],
subresource='health_monitors')
res = req.get_response(self.ext_api)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
# hm now has a ref to the pool with which it is associated
ctx = context.get_admin_context()
hm = self.plugin.get_health_monitor(
ctx, monitor['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
pool['pool']['id'],
'host'
)
self.mock_api.reset_mock()
data = {'health_monitor': {'delay': 20,
'timeout': 20,
'max_retries': 2,
'admin_state_up': False}}
updated = hm.copy()
updated.update(data['health_monitor'])
req = self.new_update_request("health_monitors",
data,
monitor['health_monitor']['id'])
req.get_response(self.ext_api)
self.mock_api.update_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
updated,
pool['pool']['id'],
'host')


@ -1,160 +0,0 @@
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from neutron import context as ncontext
from neutron.plugins.common import constants
from neutron_lib import constants as n_constants
from neutron_lbaas.drivers import driver_mixins
from neutron_lbaas.extensions import loadbalancerv2
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2
class DummyManager(driver_mixins.BaseManagerMixin):
def __init__(self, driver):
super(DummyManager, self).__init__(driver)
self.driver = driver
self._db_delete_method = None
@property
def db_delete_method(self):
return self._db_delete_method
def delete(self, context, obj):
pass
def update(self, context, obj_old, obj):
pass
def create(self, context, obj):
pass
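# DummyManager is the smallest concrete BaseManagerMixin: the CRUD
# hooks are no-ops, so these tests exercise only the
# successful_completion/failed_completion status bookkeeping.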
class TestBaseManager(test_db_loadbalancerv2.LbaasPluginDbTestCase):
def _setup_db_data(self, context):
hm = self.plugin.db.create_healthmonitor(
context, {'admin_state_up': True,
'type': lb_const.HEALTH_MONITOR_HTTP,
'delay': 1, 'timeout': 1, 'max_retries': 1})
lb = self.plugin.db.create_loadbalancer(
context, {'vip_address': '10.0.0.1',
'vip_subnet_id': self.subnet_id,
'admin_state_up': True})
pool = self.plugin.db.create_pool(
context, {'protocol': lb_const.PROTOCOL_HTTP,
'session_persistence': None,
'lb_algorithm': lb_const.LB_METHOD_ROUND_ROBIN,
'admin_state_up': True, 'healthmonitor_id': hm.id,
'loadbalancer_id': lb.id})
self.plugin.db.create_pool_member(
context, {'address': '10.0.0.1', 'protocol_port': 80,
'admin_state_up': True}, pool.id)
listener = self.plugin.db.create_listener(
context, {'protocol_port': 80, 'protocol': lb_const.PROTOCOL_HTTP,
'admin_state_up': True, 'loadbalancer_id': lb.id,
'default_pool_id': pool.id, 'sni_container_ids': []})
return listener
def setUp(self):
super(TestBaseManager, self).setUp()
self.context = ncontext.get_admin_context()
self.driver = mock.Mock()
self.driver.plugin = self.plugin
self.manager = DummyManager(self.driver)
network = self._make_network(self.fmt, 'test-net', True)
self.subnet = self._make_subnet(
self.fmt, network, gateway=n_constants.ATTR_NOT_SPECIFIED,
cidr='10.0.0.0/24')
self.subnet_id = self.subnet['subnet']['id']
self.listener = self._setup_db_data(self.context)
class TestLBManager(TestBaseManager):
def setUp(self):
super(TestLBManager, self).setUp()
self.manager._db_delete_method = self.plugin.db.delete_loadbalancer
def test_success_completion(self):
self.manager.successful_completion(self.context,
self.listener.loadbalancer)
lb = self.plugin.db.get_loadbalancer(self.context,
self.listener.loadbalancer.id)
self.assertEqual(constants.ACTIVE, lb.provisioning_status)
self.assertEqual(lb_const.ONLINE, lb.operating_status)
def test_success_completion_delete(self):
self.plugin.db.delete_listener(self.context, self.listener.id)
self.manager.successful_completion(self.context,
self.listener.loadbalancer,
delete=True)
self.assertRaises(loadbalancerv2.EntityNotFound,
self.plugin.db.get_loadbalancer,
self.context,
self.listener.loadbalancer.id)
def test_failed_completion(self):
self.manager.failed_completion(self.context,
self.listener.loadbalancer)
lb = self.plugin.db.get_loadbalancer(self.context,
self.listener.loadbalancer.id)
self.assertEqual(constants.ERROR, lb.provisioning_status)
self.assertEqual(lb_const.OFFLINE, lb.operating_status)
listener = self.plugin.db.get_listener(self.context, self.listener.id)
self.assertEqual(constants.PENDING_CREATE,
listener.provisioning_status)
self.assertEqual(lb_const.OFFLINE, listener.operating_status)
class TestListenerManager(TestBaseManager):
"""This should also cover Pool, Member, and Health Monitor cases."""
def setUp(self):
super(TestListenerManager, self).setUp()
self.manager._db_delete_method = self.plugin.db.delete_listener
def test_success_completion(self):
self.manager.successful_completion(self.context, self.listener)
listener = self.plugin.db.get_listener(self.context, self.listener.id)
self.assertEqual(constants.ACTIVE, listener.provisioning_status)
self.assertEqual(lb_const.ONLINE, listener.operating_status)
self.assertEqual(constants.ACTIVE,
listener.loadbalancer.provisioning_status)
# because the load balancer's original operating status was OFFLINE
self.assertEqual(lb_const.OFFLINE,
listener.loadbalancer.operating_status)
def test_success_completion_delete(self):
self.manager.successful_completion(self.context,
self.listener,
delete=True)
self.assertRaises(loadbalancerv2.EntityNotFound,
self.plugin.db.get_listener,
self.context,
self.listener.id)
def test_failed_completion(self):
self.manager.failed_completion(self.context, self.listener)
lb = self.plugin.db.get_loadbalancer(self.context,
self.listener.loadbalancer.id)
self.assertEqual(constants.ACTIVE, lb.provisioning_status)
self.assertEqual(lb_const.OFFLINE, lb.operating_status)
listener = self.plugin.db.get_listener(self.context, self.listener.id)
self.assertEqual(constants.ERROR, listener.provisioning_status)
self.assertEqual(lb_const.OFFLINE, listener.operating_status)


@ -1,476 +0,0 @@
# Copyright 2015 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron import context
from neutron import manager
from neutron.plugins.common import constants
from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db
from neutron_lbaas.services.loadbalancer.drivers.vmware import db
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
EDGE_PROVIDER = ('LOADBALANCER:vmwareedge:neutron_lbaas.services.'
'loadbalancer.drivers.vmware.edge_driver.'
'EdgeLoadbalancerDriver:default')
HEALTHMON_ID = 'cb297614-66c9-4048-8838-7e87231569ae'
POOL_ID = 'b3dfb476-6fdf-4ddd-b6bd-e86ae78dc30b'
TENANT_ID = 'f9135d3a908842bd8d785816c2c90d36'
SUBNET_ID = 'c8924d77-ff57-406f-a13c-a8c5def01fc9'
VIP_ID = 'f6393b95-34b0-4299-9001-cbc21e32bf03'
VIP_PORT_ID = '49c547e3-6775-42ea-a607-91e8f1a07432'
MEMBER_ID = '90dacafd-9c11-4af7-9d89-234e2d1fedb1'
EDGE_ID = 'edge-x'
EDGE_POOL_ID = '111'
EDGE_VSE_ID = '222'
APP_PROFILE_ID = '333'
EDGE_MON_ID = '444'
EDGE_FW_RULE_ID = '555'
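# The EDGE_* values above stand in for NSX-v backend identifiers; the
# driver persists them in the nsxv edge mapping tables, and the tests
# below only verify they are threaded through to the mocked nsx_v core
# plugin calls.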
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=EDGE_PROVIDER)
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.service_plugin = loaded_plugins[constants.LOADBALANCER]
self.edge_driver = self.service_plugin.drivers['vmwareedge']
self.service_plugin._core_plugin.nsx_v = mock.Mock()
class TestEdgeLoadBalancerPlugin(TestLoadBalancerPluginBase):
def setUp(self):
super(TestEdgeLoadBalancerPlugin, self).setUp()
self.context = context.get_admin_context()
def test_create_pool_successful(self):
pool = {'id': POOL_ID}
with mock.patch.object(db,
'add_nsxv_edge_pool_mapping') as mock_add_pool, \
mock.patch.object(self.edge_driver,
'pool_successful') as mock_pool_successful:
self.edge_driver.create_pool_successful(self.context,
pool,
EDGE_ID, EDGE_POOL_ID)
mock_add_pool.assert_called_with(self.context, POOL_ID, EDGE_ID,
EDGE_POOL_ID)
mock_pool_successful.assert_called_with(self.context, pool)
def test_delete_pool_successful(self):
pool = {'id': POOL_ID}
with mock.patch.object(self.service_plugin,
'_delete_db_pool') as mock_del_db_pool, \
mock.patch.object(db, 'delete_nsxv_edge_pool_mapping') as \
mock_del_mapping:
self.edge_driver.delete_pool_successful(self.context, pool)
mock_del_db_pool.assert_called_with(self.context, POOL_ID)
mock_del_mapping.assert_called_with(self.context, POOL_ID)
def test_pool_successful(self):
pool = {'id': POOL_ID}
with mock.patch.object(self.service_plugin, 'update_status') as (
mock_update_status):
self.edge_driver.pool_successful(self.context, pool)
mock_update_status.assert_called_with(self.context, lb_db.Pool,
pool['id'], constants.ACTIVE)
def test_pool_failed(self):
pool = {'id': POOL_ID}
with mock.patch.object(self.service_plugin, 'update_status') as (
mock_update_status):
self.edge_driver.pool_failed(self.context, pool)
mock_update_status.assert_called_with(self.context, lb_db.Pool,
pool['id'], constants.ERROR)
def test_create_pool(self):
lbaas_pool = {
'status': 'PENDING_CREATE', 'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP', 'description': '', 'health_monitors': [],
'members': [], 'status_description': None, 'id': POOL_ID,
'vip_id': None, 'name': 'testpool', 'admin_state_up': True,
'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID,
'health_monitors_status': [], 'provider': 'vmwareedge'}
with mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'create_pool') as mock_create_pool:
self.edge_driver.create_pool(self.context, lbaas_pool)
mock_create_pool.assert_called_with(self.context, lbaas_pool)
def test_update_pool(self):
from_pool = {
'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP', 'description': '', 'health_monitors': [],
'members': [], 'status_description': None, 'id': POOL_ID,
'vip_id': None, 'name': 'testpool2', 'admin_state_up': True,
'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID,
'health_monitors_status': [], 'provider': 'vmwareedge'}
to_pool = {
'status': 'PENDING_UPDATE', 'lb_method': 'LEAST_CONNECTIONS',
'protocol': 'HTTP', 'description': '', 'health_monitors': [],
'members': [], 'status_description': None, 'id': POOL_ID,
'vip_id': None, 'name': 'testpool2', 'admin_state_up': True,
'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID,
'health_monitors_status': [], 'provider': 'vmwareedge'}
mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_'
'mapping') as mock_get_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'update_pool') as mock_update_pool:
mock_get_mapping.return_value = mapping
self.edge_driver.update_pool(self.context, from_pool, to_pool)
mock_update_pool.assert_called_with(self.context, from_pool,
to_pool, mapping)
def test_delete_pool(self):
lbaas_pool = {
'status': 'PENDING_CREATE', 'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP', 'description': '', 'health_monitors': [],
'members': [], 'status_description': None, 'id': POOL_ID,
'vip_id': None, 'name': 'testpool', 'admin_state_up': True,
'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID,
'health_monitors_status': [], 'provider': 'vmwareedge'}
mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_'
'mapping') as mock_get_mapping, \
mock.patch.object(self.service_plugin, 'get_pool',
return_value={}), \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'delete_pool') as mock_delete_pool:
mock_get_mapping.return_value = mapping
self.edge_driver.delete_pool(self.context, lbaas_pool)
mock_delete_pool.assert_called_with(self.context, lbaas_pool,
mapping)
def test_create_vip_successful(self):
vip = {'pool_id': POOL_ID}
with mock.patch.object(db, 'add_nsxv_edge_vip_'
'mapping') as mock_add_vip_mapping, \
mock.patch.object(self.edge_driver,
'vip_successful') as mock_vip_successful:
self.edge_driver.create_vip_successful(
self.context, vip, EDGE_ID, APP_PROFILE_ID, EDGE_VSE_ID,
EDGE_FW_RULE_ID)
mock_add_vip_mapping.assert_called_with(
self.context, POOL_ID, EDGE_ID, APP_PROFILE_ID,
EDGE_VSE_ID, EDGE_FW_RULE_ID)
mock_vip_successful.assert_called_with(self.context, vip)
def test_delete_vip_successful(self):
vip = {'pool_id': POOL_ID, 'id': VIP_ID}
with mock.patch.object(db, 'delete_nsxv_edge_vip_'
'mapping') as mock_del_vip_mapping, \
mock.patch.object(self.service_plugin,
'_delete_db_vip') as mock_del_vip:
self.edge_driver.delete_vip_successful(self.context, vip)
mock_del_vip_mapping.assert_called_with(self.context, POOL_ID)
mock_del_vip.assert_called_with(self.context, VIP_ID)
def test_vip_successful(self):
vip = {'pool_id': POOL_ID, 'id': VIP_ID}
with mock.patch.object(self.service_plugin, 'update_status') as (
mock_update_status):
self.edge_driver.vip_successful(self.context, vip)
mock_update_status.assert_called_with(
self.context, lb_db.Vip, VIP_ID, constants.ACTIVE)
def test_vip_failed(self):
vip = {'pool_id': POOL_ID, 'id': VIP_ID}
with mock.patch.object(self.service_plugin, 'update_status') as (
mock_update_status):
self.edge_driver.vip_failed(self.context, vip)
mock_update_status.assert_called_with(
self.context, lb_db.Vip, VIP_ID, constants.ERROR)
def test_create_vip(self):
lbaas_vip = {
'status': 'PENDING_CREATE', 'protocol': 'HTTP',
'description': '', 'address': '10.0.0.8', 'protocol_port': 555,
'port_id': VIP_PORT_ID, 'id': VIP_ID, 'status_description': None,
'name': 'testvip1', 'admin_state_up': True,
'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID,
'connection_limit': -1, 'pool_id': POOL_ID,
'session_persistence': {'type': 'SOURCE_IP'}}
mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_mapping') as mock_get_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'create_vip') as mock_create_vip:
mock_get_mapping.return_value = mapping
self.edge_driver.create_vip(self.context, lbaas_vip)
mock_create_vip.assert_called_with(self.context, lbaas_vip,
mapping)
def test_update_vip(self):
vip_from = {
'status': 'ACTIVE', 'protocol': 'HTTP', 'description': '',
'address': '10.0.0.8', 'protocol_port': 555,
'port_id': VIP_PORT_ID, 'id': VIP_ID, 'status_description': None,
'name': 'testvip1', 'admin_state_up': True,
'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID,
'connection_limit': -1, 'pool_id': POOL_ID,
'session_persistence': {'type': 'SOURCE_IP'}}
vip_to = {
'status': 'PENDING_UPDATE', 'protocol': 'HTTP',
'description': '', 'address': '10.0.0.8', 'protocol_port': 555,
'port_id': VIP_PORT_ID, 'id': VIP_ID, 'status_description': None,
'name': 'testvip1', 'admin_state_up': True,
'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID,
'connection_limit': -1, 'pool_id': POOL_ID,
'session_persistence': {'type': 'HTTP_COOKIE'}}
pool_mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
vip_mapping = {'edge_id': EDGE_ID, 'edge_vse_id': EDGE_VSE_ID,
'edge_app_profile_id': APP_PROFILE_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_mapping') as mock_get_pool_mapping, \
mock.patch.object(db, 'get_nsxv_edge_vip_mapping') as mock_get_vip_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'update_vip') as mock_upd_vip:
mock_get_pool_mapping.return_value = pool_mapping
mock_get_vip_mapping.return_value = vip_mapping
self.edge_driver.update_vip(self.context, vip_from, vip_to)
mock_upd_vip.assert_called_with(self.context, vip_from, vip_to,
pool_mapping, vip_mapping)
def test_delete_vip(self):
lbaas_vip = {
'status': 'PENDING_DELETE', 'protocol': 'HTTP',
'description': '', 'address': '10.0.0.11', 'protocol_port': 555,
'port_id': VIP_PORT_ID, 'id': VIP_ID, 'status_description': None,
'name': 'testvip', 'admin_state_up': True, 'subnet_id': SUBNET_ID,
'tenant_id': TENANT_ID, 'connection_limit': -1,
'pool_id': POOL_ID, 'session_persistence': None}
mapping = {'edge_id': EDGE_ID, 'edge_vse_id': EDGE_VSE_ID,
'edge_app_profile_id': APP_PROFILE_ID,
'edge_fw_rule_id': EDGE_FW_RULE_ID}
with mock.patch.object(db, 'get_nsxv_edge_vip_mapping') as mock_get_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'delete_vip') as mock_del_vip:
mock_get_mapping.return_value = mapping
self.edge_driver.delete_vip(self.context, lbaas_vip)
mock_del_vip.assert_called_with(self.context, lbaas_vip, mapping)
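# Member tests: status transitions go through the service plugin's
# update_status, while create/update/delete are delegated to the NSX-v
# backend with the edge pool mapping.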
def test_member_successful(self):
member = {'id': MEMBER_ID}
with mock.patch.object(self.service_plugin, 'update_status') as (
mock_update_status):
self.edge_driver.member_successful(self.context, member)
mock_update_status.assert_called_with(
self.context, lb_db.Member, member['id'], constants.ACTIVE)
def test_member_failed(self):
member = {'id': MEMBER_ID}
with mock.patch.object(self.service_plugin, 'update_status') as (
mock_update_status):
self.edge_driver.member_failed(self.context, member)
mock_update_status.assert_called_with(
self.context, lb_db.Member, member['id'], constants.ERROR)
def test_create_member(self):
lbaas_member = {
'admin_state_up': True, 'status': 'PENDING_CREATE',
'status_description': None, 'weight': 5, 'address': '10.0.0.4',
'tenant_id': TENANT_ID, 'protocol_port': 555, 'id': MEMBER_ID,
'pool_id': POOL_ID}
mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_mapping') as mock_get_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'create_member') as mock_create_member:
mock_get_mapping.return_value = mapping
self.edge_driver.create_member(self.context, lbaas_member)
mock_create_member.assert_called_with(self.context, lbaas_member,
mapping)
def test_update_member(self):
member_from = {
'admin_state_up': True, 'status': 'PENDING_UPDATE',
'status_description': None, 'weight': 5, 'address': '10.0.0.4',
'tenant_id': TENANT_ID, 'protocol_port': 555, 'id': MEMBER_ID,
'pool_id': POOL_ID}
member_to = {
'admin_state_up': True, 'status': 'ACTIVE',
'status_description': None, 'weight': 10, 'address': '10.0.0.4',
'tenant_id': TENANT_ID, 'protocol_port': 555, 'id': MEMBER_ID,
'pool_id': POOL_ID}
mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_mapping') as mock_get_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'update_member') as mock_update_member:
mock_get_mapping.return_value = mapping
self.edge_driver.update_member(self.context, member_from,
member_to)
mock_update_member.assert_called_with(self.context, member_from,
member_to, mapping)
def test_delete_member(self):
lbaas_member = {
'admin_state_up': True, 'status': 'PENDING_DELETE',
'status_description': None, 'weight': 5, 'address': '10.0.0.4',
'tenant_id': TENANT_ID, 'protocol_port': 555, 'id': MEMBER_ID,
'pool_id': POOL_ID}
mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_mapping') as mock_get_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'delete_member') as mock_delete_member:
mock_get_mapping.return_value = mapping
self.edge_driver.delete_member(self.context, lbaas_member)
mock_delete_member.assert_called_with(self.context, lbaas_member,
mapping)
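# Health monitor tests: the driver keeps a per-edge monitor mapping alongside
# the pool mapping and reports results via update_pool_health_monitor.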
def test_create_pool_health_monitor_successful(self):
hmon = {'id': HEALTHMON_ID}
with mock.patch.object(db, 'add_nsxv_edge_monitor_'
'mapping') as mock_add_pool_mon_mapping, \
mock.patch.object(self.edge_driver,
'pool_health_monitor_'
'successful') as mock_pool_hmon_successful:
self.edge_driver.create_pool_health_monitor_successful(
self.context, hmon, POOL_ID, EDGE_ID, EDGE_MON_ID)
mock_add_pool_mon_mapping.assert_called_with(
self.context, HEALTHMON_ID, EDGE_ID, EDGE_MON_ID)
mock_pool_hmon_successful.assert_called_with(self.context,
hmon, POOL_ID)
def test_delete_pool_health_monitor_successful(self):
hmon = {'id': HEALTHMON_ID, 'pool_id': POOL_ID}
hmon_mapping = {'edge_id': EDGE_ID}
with mock.patch.object(db, 'delete_nsxv_edge_monitor_'
'mapping') as mock_del_pool_hmon_mapping, \
mock.patch.object(self.service_plugin,
'_delete_db_pool_health_'
'monitor') as mock_del_db_pool_hmon:
self.edge_driver.delete_pool_health_monitor_successful(
self.context, hmon, POOL_ID, hmon_mapping)
mock_del_pool_hmon_mapping.assert_called_with(
self.context, HEALTHMON_ID, EDGE_ID)
mock_del_db_pool_hmon.assert_called_with(
self.context, HEALTHMON_ID, POOL_ID)
def test_pool_health_monitor_successful(self):
hmon = {'id': HEALTHMON_ID}
with mock.patch.object(self.service_plugin,
'update_pool_health_monitor') as (
mock_update_hmon):
self.edge_driver.pool_health_monitor_successful(self.context,
hmon, POOL_ID)
mock_update_hmon.assert_called_with(
self.context, HEALTHMON_ID, POOL_ID, constants.ACTIVE, '')
def test_pool_health_monitor_failed(self):
hmon = {'id': HEALTHMON_ID}
with mock.patch.object(self.service_plugin,
'update_pool_health_monitor') as (
mock_update_hmon):
self.edge_driver.pool_health_monitor_failed(self.context, hmon,
POOL_ID)
mock_update_hmon.assert_called_with(
self.context, HEALTHMON_ID, POOL_ID, constants.ERROR, '')
def test_create_pool_health_monitor(self):
hmon = {
'admin_state_up': True, 'tenant_id': TENANT_ID, 'delay': 5,
'max_retries': 5, 'timeout': 5, 'pools': [
{'status': 'PENDING_CREATE', 'status_description': None,
'pool_id': POOL_ID}],
'type': 'PING', 'id': HEALTHMON_ID}
pool_mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_'
'mapping') as mock_get_pool_mapping, \
mock.patch.object(db, 'get_nsxv_edge_monitor_'
'mapping') as mock_get_mon_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'create_pool_health_'
'monitor') as mock_create_pool_hm:
mock_get_pool_mapping.return_value = pool_mapping
mock_get_mon_mapping.return_value = None
self.edge_driver.create_pool_health_monitor(self.context,
hmon, POOL_ID)
mock_create_pool_hm.assert_called_with(self.context, hmon, POOL_ID,
pool_mapping, None)
def test_update_pool_health_monitor(self):
from_hmon = {
'admin_state_up': True, 'tenant_id': TENANT_ID, 'delay': 5,
'max_retries': 5, 'timeout': 5, 'pools': [
{'status': 'PENDING_UPDATE', 'status_description': None,
'pool_id': POOL_ID}],
'type': 'PING', 'id': HEALTHMON_ID}
to_hmon = {
'admin_state_up': True, 'tenant_id': TENANT_ID, 'delay': 5,
'max_retries': 10, 'timeout': 5, 'pools': [
{'status': 'ACTIVE', 'status_description': None,
'pool_id': POOL_ID}],
'type': 'PING', 'id': HEALTHMON_ID}
pool_mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
mon_mapping = {'edge_id': EDGE_ID, 'edge_monitor_id': EDGE_MON_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_'
'mapping') as mock_get_pool_mapping, \
mock.patch.object(db, 'get_nsxv_edge_monitor_'
'mapping') as mock_get_mon_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'update_pool_health_'
'monitor') as mock_upd_pool_hm:
mock_get_pool_mapping.return_value = pool_mapping
mock_get_mon_mapping.return_value = mon_mapping
self.edge_driver.update_pool_health_monitor(
self.context, from_hmon, to_hmon, POOL_ID)
mock_upd_pool_hm.assert_called_with(
self.context, from_hmon, to_hmon, POOL_ID, mon_mapping)
def test_delete_pool_health_monitor(self):
hmon = {
'admin_state_up': True, 'tenant_id': TENANT_ID, 'delay': 5,
'max_retries': 5, 'timeout': 5, 'pools': [
{'status': 'PENDING_DELETE', 'status_description': None,
'pool_id': POOL_ID}],
'type': 'PING', 'id': HEALTHMON_ID}
pool_mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID}
mon_mapping = {'edge_id': EDGE_ID, 'edge_monitor_id': EDGE_MON_ID}
with mock.patch.object(db, 'get_nsxv_edge_pool_'
'mapping') as mock_get_pool_mapping, \
mock.patch.object(db, 'get_nsxv_edge_monitor_'
'mapping') as mock_get_mon_mapping, \
mock.patch.object(self.service_plugin._core_plugin.nsx_v,
'delete_pool_health_'
'monitor') as mock_del_pool_hm:
mock_get_pool_mapping.return_value = pool_mapping
mock_get_mon_mapping.return_value = mon_mapping
self.edge_driver.delete_pool_health_monitor(self.context, hmon,
POOL_ID)
mock_del_pool_hm.assert_called_with(self.context, hmon, POOL_ID,
pool_mapping, mon_mapping)


@ -1,224 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron import context
from neutron.extensions import agent
from neutron import manager
from neutron.plugins.common import constants as plugin_const
from neutron.tests.common import helpers
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_agentschedulers_db
from neutron.tests.unit.extensions import test_agent
from neutron_lib import constants
from oslo_config import cfg
import six
from webob import exc
from neutron_lbaas.extensions import lbaas_agentscheduler
from neutron_lbaas.extensions import loadbalancer
from neutron_lbaas.tests import base
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
LBAAS_HOSTA = 'hosta'
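# Mixin with helpers for the agent-scheduler API: list the pools hosted by a
# given LBaaS agent, and fetch the agent hosting a given pool.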
class AgentSchedulerTestMixIn(test_agentschedulers_db.AgentSchedulerTestMixIn):
def _list_pools_hosted_by_lbaas_agent(self, agent_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (agent_id,
lbaas_agentscheduler.LOADBALANCER_POOLS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _get_lbaas_agent_hosting_pool(self, pool_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/lb/pools/%s/%s.%s" % (pool_id,
lbaas_agentscheduler.LOADBALANCER_AGENT,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
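# Scheduler behaviour tests: a pool created via the API is scheduled onto an
# alive, enabled LBaaS agent, and pool creation raises NoEligibleBackend when
# every agent is disabled or down.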
class LBaaSAgentSchedulerTestCase(test_agent.AgentDBTestMixIn,
AgentSchedulerTestMixIn,
test_db_loadbalancer.LoadBalancerTestMixin,
base.NeutronDbPluginV2TestCase):
fmt = 'json'
plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
def setUp(self):
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
self.saved_attr_map[res] = attrs.copy()
service_plugins = {
'lb_plugin_name': test_db_loadbalancer.DB_LB_PLUGIN_KLASS}
# default provider should support agent scheduling
self.set_override([('LOADBALANCER:lbaas:neutron_lbaas.services.'
'loadbalancer.drivers.haproxy.plugin_driver.'
'HaproxyOnHostPluginDriver:default')])
super(LBaaSAgentSchedulerTestCase, self).setUp(
self.plugin_str, service_plugins=service_plugins)
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
self.adminContext = context.get_admin_context()
# Add the resources to the global attribute map
# This is done here because the setup process won't
# initialize the main API router, which extends
# the global attribute map.
attributes.RESOURCE_ATTRIBUTE_MAP.update(
agent.RESOURCE_ATTRIBUTE_MAP)
self.addCleanup(self.restore_attribute_map)
def restore_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def test_report_states(self):
self._register_agent_states(lbaas_agents=True)
agents = self._list_agents()
self.assertEqual(6, len(agents['agents']))
def test_pool_scheduling_on_pool_creation(self):
self._register_agent_states(lbaas_agents=True)
with self.pool() as pool:
lbaas_agent = self._get_lbaas_agent_hosting_pool(
pool['pool']['id'])
self.assertIsNotNone(lbaas_agent)
self.assertEqual(constants.AGENT_TYPE_LOADBALANCER,
lbaas_agent['agent']['agent_type'])
pools = self._list_pools_hosted_by_lbaas_agent(
lbaas_agent['agent']['id'])
self.assertEqual(1, len(pools['pools']))
self.assertEqual(pool['pool'], pools['pools'][0])
def test_schedule_pool_with_disabled_agent(self):
lbaas_hosta = {
'binary': 'neutron-loadbalancer-agent',
'host': LBAAS_HOSTA,
'topic': 'LOADBALANCER_AGENT',
'configurations': {'device_drivers': ['haproxy_ns']},
'agent_type': constants.AGENT_TYPE_LOADBALANCER}
helpers._register_agent(lbaas_hosta)
with self.pool() as pool:
lbaas_agent = self._get_lbaas_agent_hosting_pool(
pool['pool']['id'])
self.assertIsNotNone(lbaas_agent)
agents = self._list_agents()
self._disable_agent(agents['agents'][0]['id'])
pool = {'pool': {'name': 'test',
'subnet_id': 'test',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'admin_state_up': True,
'tenant_id': 'test',
'description': 'test'}}
lbaas_plugin = manager.NeutronManager.get_service_plugins()[
plugin_const.LOADBALANCER]
self.assertRaises(loadbalancer.NoEligibleBackend,
lbaas_plugin.create_pool, self.adminContext, pool)
pools = lbaas_plugin.get_pools(self.adminContext)
self.assertEqual('ERROR', pools[0]['status'])
self.assertEqual('No eligible backend',
pools[0]['status_description'])
def test_schedule_pool_with_down_agent(self):
lbaas_hosta = {
'binary': 'neutron-loadbalancer-agent',
'host': LBAAS_HOSTA,
'topic': 'LOADBALANCER_AGENT',
'configurations': {'device_drivers': ['haproxy_ns']},
'agent_type': constants.AGENT_TYPE_LOADBALANCER}
helpers._register_agent(lbaas_hosta)
is_agent_down_str = 'neutron.db.agents_db.AgentDbMixin.is_agent_down'
with mock.patch(is_agent_down_str) as mock_is_agent_down:
mock_is_agent_down.return_value = False
with self.pool() as pool:
lbaas_agent = self._get_lbaas_agent_hosting_pool(
pool['pool']['id'])
self.assertIsNotNone(lbaas_agent)
with mock.patch(is_agent_down_str) as mock_is_agent_down:
mock_is_agent_down.return_value = True
pool = {'pool': {'name': 'test',
'subnet_id': 'test',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'provider': 'lbaas',
'admin_state_up': True,
'tenant_id': 'test',
'description': 'test'}}
lbaas_plugin = manager.NeutronManager.get_service_plugins()[
plugin_const.LOADBALANCER]
self.assertRaises(loadbalancer.NoEligibleBackend,
lbaas_plugin.create_pool,
self.adminContext, pool)
pools = lbaas_plugin.get_pools(self.adminContext)
self.assertEqual('ERROR', pools[0]['status'])
self.assertEqual('No eligible backend',
pools[0]['status_description'])
def test_pool_unscheduling_on_pool_deletion(self):
self._register_agent_states(lbaas_agents=True)
with self.pool(do_delete=False) as pool:
lbaas_agent = self._get_lbaas_agent_hosting_pool(
pool['pool']['id'])
self.assertIsNotNone(lbaas_agent)
self.assertEqual(constants.AGENT_TYPE_LOADBALANCER,
lbaas_agent['agent']['agent_type'])
pools = self._list_pools_hosted_by_lbaas_agent(
lbaas_agent['agent']['id'])
self.assertEqual(1, len(pools['pools']))
self.assertEqual(pool['pool'], pools['pools'][0])
req = self.new_delete_request('pools',
pool['pool']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
pools = self._list_pools_hosted_by_lbaas_agent(
lbaas_agent['agent']['id'])
self.assertEqual(0, len(pools['pools']))
def test_pool_scheduling_non_admin_access(self):
self._register_agent_states(lbaas_agents=True)
with self.pool() as pool:
self._get_lbaas_agent_hosting_pool(
pool['pool']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._list_pools_hosted_by_lbaas_agent(
'fake_id',
expected_code=exc.HTTPForbidden.code,
admin_context=False)
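# Re-runs all of the scheduler tests above with LeastPoolAgentScheduler
# configured in place of the default pool scheduler.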
class LeastPoolAgentSchedulerTestCase(LBaaSAgentSchedulerTestCase):
def setUp(self):
# Setting LeastPoolAgentScheduler as scheduler
cfg.CONF.set_override(
'loadbalancer_pool_scheduler_driver',
'neutron_lbaas.services.loadbalancer.'
'agent_scheduler.LeastPoolAgentScheduler')
super(LeastPoolAgentSchedulerTestCase, self).setUp()


@ -23,7 +23,6 @@ from oslo_utils import uuidutils
from webob import exc
from neutron_lbaas.extensions import healthmonitor_max_retries_down as hm_down
from neutron_lbaas.extensions import loadbalancer
from neutron_lbaas.extensions import loadbalancerv2
from neutron_lbaas.extensions import sharedpools
from neutron_lbaas.tests import base
@ -33,461 +32,6 @@ _uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
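# Extension layer tests: each case drives the v1 loadbalancer REST API against
# a mocked plugin, checking serialization, dispatch to the plugin, and
# validation errors (e.g. a negative connection_limit or timeout).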
class TestLoadBalancerExtensionTestCase(base.ExtensionTestCase):
fmt = 'json'
def setUp(self):
super(TestLoadBalancerExtensionTestCase, self).setUp()
self._setUpExtension(
'neutron_lbaas.extensions.loadbalancer.LoadBalancerPluginBase',
constants.LOADBALANCER, loadbalancer.RESOURCE_ATTRIBUTE_MAP,
loadbalancer.Loadbalancer, 'lb', use_quota=True)
def test_vip_create(self):
vip_id = _uuid()
data = {'vip': {'name': 'vip1',
'description': 'descr_vip1',
'subnet_id': _uuid(),
'address': '127.0.0.1',
'protocol_port': 80,
'protocol': 'HTTP',
'pool_id': _uuid(),
'session_persistence': {'type': 'HTTP_COOKIE'},
'connection_limit': 100,
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['vip'])
return_value.update({'status': "ACTIVE", 'id': vip_id})
instance = self.plugin.return_value
instance.create_vip.return_value = return_value
res = self.api.post(_get_path('lb/vips', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_vip.assert_called_with(mock.ANY,
vip=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(return_value, res['vip'])
def test_vip_create_with_connection_limit_smaller_than_min_value(self):
data = {'vip': {'name': 'vip1',
'description': 'descr_vip1',
'subnet_id': _uuid(),
'address': '127.0.0.1',
'protocol_port': 80,
'protocol': 'HTTP',
'pool_id': _uuid(),
'session_persistence': {'type': 'HTTP_COOKIE'},
'connection_limit': -4,
'admin_state_up': True,
'tenant_id': _uuid()}}
res = self.api.post(_get_path('lb/vips', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_vip_list(self):
vip_id = _uuid()
return_value = [{'name': 'vip1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': vip_id}]
instance = self.plugin.return_value
instance.get_vips.return_value = return_value
res = self.api.get(_get_path('lb/vips', fmt=self.fmt))
instance.get_vips.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
def test_vip_update(self):
vip_id = _uuid()
update_data = {'vip': {'admin_state_up': False}}
return_value = {'name': 'vip1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vip_id}
instance = self.plugin.return_value
instance.update_vip.return_value = return_value
res = self.api.put(_get_path('lb/vips', id=vip_id, fmt=self.fmt),
self.serialize(update_data))
instance.update_vip.assert_called_with(mock.ANY, vip_id,
vip=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(return_value, res['vip'])
def test_vip_update_with_connection_limit_smaller_than_min_value(self):
vip_id = _uuid()
data = {'vip': {'connection_limit': -4}}
res = self.api.put(_get_path('lb/vips', id=vip_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_vip_get(self):
vip_id = _uuid()
return_value = {'name': 'vip1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vip_id}
instance = self.plugin.return_value
instance.get_vip.return_value = return_value
res = self.api.get(_get_path('lb/vips', id=vip_id, fmt=self.fmt))
instance.get_vip.assert_called_with(mock.ANY, vip_id,
fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(return_value, res['vip'])
def test_vip_delete(self):
self._test_entity_delete('vip')
def test_pool_create(self):
pool_id = _uuid()
hm_id = _uuid()
data = {'pool': {'name': 'pool1',
'description': 'descr_pool1',
'subnet_id': _uuid(),
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN',
'health_monitors': [hm_id],
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['pool'])
return_value['provider'] = 'lbaas'
return_value.update({'status': "ACTIVE", 'id': pool_id})
instance = self.plugin.return_value
instance.create_pool.return_value = return_value
res = self.api.post(_get_path('lb/pools', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
data['pool']['provider'] = n_constants.ATTR_NOT_SPECIFIED
instance.create_pool.assert_called_with(mock.ANY,
pool=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(return_value, res['pool'])
def test_pool_list(self):
pool_id = _uuid()
return_value = [{'name': 'pool1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': pool_id}]
instance = self.plugin.return_value
instance.get_pools.return_value = return_value
res = self.api.get(_get_path('lb/pools', fmt=self.fmt))
instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
def test_pool_update(self):
pool_id = _uuid()
update_data = {'pool': {'admin_state_up': False}}
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': pool_id}
instance = self.plugin.return_value
instance.update_pool.return_value = return_value
res = self.api.put(_get_path('lb/pools', id=pool_id, fmt=self.fmt),
self.serialize(update_data))
instance.update_pool.assert_called_with(mock.ANY, pool_id,
pool=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(return_value, res['pool'])
def test_pool_get(self):
pool_id = _uuid()
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': pool_id}
instance = self.plugin.return_value
instance.get_pool.return_value = return_value
res = self.api.get(_get_path('lb/pools', id=pool_id, fmt=self.fmt))
instance.get_pool.assert_called_with(mock.ANY, pool_id,
fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(return_value, res['pool'])
def test_pool_delete(self):
self._test_entity_delete('pool')
def test_pool_stats(self):
pool_id = _uuid()
stats = {'stats': 'dummy'}
instance = self.plugin.return_value
instance.stats.return_value = stats
path = _get_path('lb/pools', id=pool_id,
action="stats", fmt=self.fmt)
res = self.api.get(path)
instance.stats.assert_called_with(mock.ANY, pool_id)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('stats', res)
self.assertEqual(stats['stats'], res['stats'])
def test_member_create(self):
member_id = _uuid()
data = {'member': {'pool_id': _uuid(),
'address': '127.0.0.1',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['member'])
return_value.update({'status': "ACTIVE", 'id': member_id})
instance = self.plugin.return_value
instance.create_member.return_value = return_value
res = self.api.post(_get_path('lb/members', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_member.assert_called_with(mock.ANY,
member=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(return_value, res['member'])
def test_member_list(self):
member_id = _uuid()
return_value = [{'name': 'member1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': member_id}]
instance = self.plugin.return_value
instance.get_members.return_value = return_value
res = self.api.get(_get_path('lb/members', fmt=self.fmt))
instance.get_members.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
def test_member_update(self):
member_id = _uuid()
update_data = {'member': {'admin_state_up': False}}
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': member_id}
instance = self.plugin.return_value
instance.update_member.return_value = return_value
res = self.api.put(_get_path('lb/members', id=member_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_member.assert_called_with(mock.ANY, member_id,
member=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(return_value, res['member'])
def test_member_get(self):
member_id = _uuid()
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': member_id}
instance = self.plugin.return_value
instance.get_member.return_value = return_value
res = self.api.get(_get_path('lb/members', id=member_id,
fmt=self.fmt))
instance.get_member.assert_called_with(mock.ANY, member_id,
fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(return_value, res['member'])
def test_member_delete(self):
self._test_entity_delete('member')
def test_health_monitor_create(self):
health_monitor_id = _uuid()
data = {'health_monitor': {'type': 'HTTP',
'delay': 2,
'timeout': 1,
'max_retries': 3,
'http_method': 'GET',
'url_path': '/path',
'expected_codes': '200-300',
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['health_monitor'])
return_value.update({'status': "ACTIVE", 'id': health_monitor_id})
instance = self.plugin.return_value
instance.create_health_monitor.return_value = return_value
res = self.api.post(_get_path('lb/health_monitors',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_health_monitor.assert_called_with(mock.ANY,
health_monitor=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(return_value, res['health_monitor'])
def test_health_monitor_create_with_timeout_negative(self):
data = {'health_monitor': {'type': 'HTTP',
'delay': 2,
'timeout': -1,
'max_retries': 3,
'http_method': 'GET',
'url_path': '/path',
'expected_codes': '200-300',
'admin_state_up': True,
'tenant_id': _uuid()}}
res = self.api.post(_get_path('lb/health_monitors',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_health_monitor_list(self):
health_monitor_id = _uuid()
return_value = [{'type': 'HTTP',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': health_monitor_id}]
instance = self.plugin.return_value
instance.get_health_monitors.return_value = return_value
res = self.api.get(_get_path('lb/health_monitors', fmt=self.fmt))
instance.get_health_monitors.assert_called_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
def test_health_monitor_update(self):
health_monitor_id = _uuid()
update_data = {'health_monitor': {'admin_state_up': False}}
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': health_monitor_id}
instance = self.plugin.return_value
instance.update_health_monitor.return_value = return_value
res = self.api.put(_get_path('lb/health_monitors',
id=health_monitor_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, health_monitor=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(return_value, res['health_monitor'])
def test_health_monitor_get(self):
health_monitor_id = _uuid()
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': health_monitor_id}
instance = self.plugin.return_value
instance.get_health_monitor.return_value = return_value
res = self.api.get(_get_path('lb/health_monitors',
id=health_monitor_id,
fmt=self.fmt))
instance.get_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(return_value, res['health_monitor'])
def test_health_monitor_delete(self):
self._test_entity_delete('health_monitor')
def test_create_pool_health_monitor(self):
health_monitor_id = _uuid()
data = {'health_monitor': {'id': health_monitor_id,
'tenant_id': _uuid()}}
return_value = copy.copy(data['health_monitor'])
instance = self.plugin.return_value
instance.create_pool_health_monitor.return_value = return_value
res = self.api.post('/lb/pools/id1/health_monitors',
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_pool_health_monitor.assert_called_with(
mock.ANY, pool_id='id1', health_monitor=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(return_value, res['health_monitor'])
def test_delete_pool_health_monitor(self):
health_monitor_id = _uuid()
res = self.api.delete('/lb/pools/id1/health_monitors/%s' %
health_monitor_id)
instance = self.plugin.return_value
instance.delete_pool_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, pool_id='id1')
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
class TestLoadBalancerExtensionV2TestCase(base.ExtensionTestCase):
fmt = 'json'


@ -44,10 +44,7 @@ class LBaaSQuotaExtensionDbTestCase(base.QuotaExtensionTestCase):
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['vip'])
self.assertEqual(10, quota['quota']['pool'])
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(-1, quota['quota']['extra1'])
self.assertEqual(10, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])
@ -61,10 +58,7 @@ class LBaaSQuotaExtensionDbTestCase(base.QuotaExtensionTestCase):
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['vip'])
self.assertEqual(10, quota['quota']['pool'])
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(10, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])
self.assertEqual(-1, quota['quota']['healthmonitor'])
@ -77,10 +71,7 @@ class LBaaSQuotaExtensionDbTestCase(base.QuotaExtensionTestCase):
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['vip'])
self.assertEqual(10, quota['quota']['pool'])
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(10, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])
self.assertEqual(-1, quota['quota']['healthmonitor'])
@ -131,10 +122,7 @@ class LBaaSQuotaExtensionDbTestCase(base.QuotaExtensionTestCase):
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['vip'])
self.assertEqual(100, quota['quota']['pool'])
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(100, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])
self.assertEqual(-1, quota['quota']['healthmonitor'])
@ -155,10 +143,7 @@ class LBaaSQuotaExtensionCfgTestCase(base.QuotaExtensionTestCase):
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['vip'])
self.assertEqual(10, quota['quota']['pool'])
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(-1, quota['quota']['extra1'])
self.assertEqual(10, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])


@ -32,20 +32,9 @@ setup-hooks =
[entry_points]
console_scripts =
neutron-lbaas-agent = neutron_lbaas.cmd.lbaas_agent:main
neutron-lbaasv2-agent = neutron_lbaas.cmd.lbaasv2_agent:main
device_drivers =
# These are kept for backwards compatibility with Juno loadbalancer service provider configuration values
neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver = neutron_lbaas.services.loadbalancer.drivers.a10networks.driver_v1:ThunderDriver
neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver = neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver:HaproxyOnHostPluginDriver
neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver = neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver:HaproxyNSDriver
neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver = neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver:NetScalerPluginDriver
neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver = neutron_lbaas.services.loadbalancer.drivers.radware.driver:LoadBalancerDriver
loadbalancer_schedulers =
neutron_lbaas.agent_scheduler.ChanceScheduler = neutron_lbaas.agent_scheduler:ChanceScheduler
pool_schedulers =
neutron.services.loadbalancer.agent_scheduler.ChanceScheduler = neutron_lbaas.services.loadbalancer.agent_scheduler:ChanceScheduler
neutron.services.loadbalancer.agent_scheduler.LeastPoolAgentScheduler = neutron_lbaas.services.loadbalancer.agent_scheduler:LeastPoolAgentScheduler
neutron.service_plugins =
lbaasv2 = neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2
neutron.db.alembic_migrations =