Neutron LBaaS v2 Radware driver implementation
Change-Id: I4af1ef5390bc70692ce363a73bd899fb3add6db3 Implements: blueprint radware-lbaas-driver
This commit is contained in:
parent
9e1d5612c7
commit
442ad83097
|
@ -51,7 +51,8 @@
|
|||
# This is multiline option
|
||||
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
|
||||
service_provider=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
|
||||
# service_provider = LOADBALANCER:Radware:neutron_lbaas.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
|
||||
# service_provider = LOADBALANCER:radware:neutron_lbaas.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
|
||||
# service_provider = LOADBALANCERV2:radwarev2:neutron_lbaas.drivers.radware.v2_driver.RadwareLBaaSV2Driver:default
|
||||
# service_provider=LOADBALANCER:NetScaler:neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
|
||||
# service_provider=LOADBALANCER:Embrane:neutron_lbaas.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
|
||||
# service_provider = LOADBALANCER:A10Networks:neutron_lbaas.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
|
||||
|
|
|
@ -20,6 +20,32 @@
|
|||
#l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True
|
||||
#l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2
|
||||
|
||||
[radwarev2]
|
||||
#vdirect_address = 0.0.0.0
|
||||
#ha_secondary_address=
|
||||
#vdirect_user = vDirect
|
||||
#vdirect_password = radware
|
||||
#service_ha_pair = False
|
||||
#service_throughput = 1000
|
||||
#service_ssl_throughput = 100
|
||||
#service_compression_throughput = 100
|
||||
#service_cache = 20
|
||||
#service_adc_type = VA
|
||||
#service_adc_version=
|
||||
#service_session_mirroring_enabled = False
|
||||
#service_isl_vlan = -1
|
||||
#service_resource_pool_ids = []
|
||||
#workflow_template_name = os_lb_v2
|
||||
#child_workflow_template_names = [manage_l3]
|
||||
#workflow_params = twoleg_enabled: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True, data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2
|
||||
#workflow_action_name = apply
|
||||
#stats_action_name = stats
|
||||
|
||||
[radwarev2_debug]
|
||||
#provision_service = True
|
||||
#configure_l3 = True
|
||||
#configure_l4 = True
|
||||
|
||||
[netscaler_driver]
|
||||
#netscaler_ncc_uri = https://ncc_server.acme.org/ncc/v1/api
|
||||
#netscaler_ncc_username = admin
|
||||
|
|
|
@ -0,0 +1,251 @@
|
|||
# Copyright 2015, Radware LTD. All rights reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.common import log as call_log
|
||||
from oslo_config import cfg
|
||||
|
||||
from neutron_lbaas.drivers import driver_base
|
||||
|
||||
|
||||
# Driver version string reported by this provider.
VERSION = "K1.0.0"

# Options controlling connectivity to the vDirect server and the
# properties of the ADC service provisioned for tenants. Registered
# below under the [radwarev2] section.
driver_opts = [
    cfg.StrOpt('vdirect_address',
               help=_('IP address of vDirect server.')),
    cfg.StrOpt('ha_secondary_address',
               help=_('IP address of secondary vDirect server.')),
    cfg.StrOpt('vdirect_user',
               default='vDirect',
               help=_('vDirect user name.')),
    cfg.StrOpt('vdirect_password',
               default='radware',
               help=_('vDirect user password.')),
    cfg.StrOpt('service_adc_type',
               default="VA",
               help=_('Service ADC type. Default: VA.')),
    cfg.StrOpt('service_adc_version',
               default="",
               help=_('Service ADC version.')),
    cfg.BoolOpt('service_ha_pair',
                default=False,
                help=_('Enables or disables the Service HA pair. '
                       'Default: False.')),
    cfg.IntOpt('service_throughput',
               default=1000,
               help=_('Service throughput. Default: 1000.')),
    cfg.IntOpt('service_ssl_throughput',
               default=100,
               help=_('Service SSL throughput. Default: 100.')),
    cfg.IntOpt('service_compression_throughput',
               default=100,
               help=_('Service compression throughput. Default: 100.')),
    cfg.IntOpt('service_cache',
               default=20,
               help=_('Size of service cache. Default: 20.')),
    cfg.ListOpt('service_resource_pool_ids',
                default=[],
                help=_('Resource pool IDs.')),
    cfg.IntOpt('service_isl_vlan',
               default=-1,
               help=_('A required VLAN for the interswitch link to use.')),
    cfg.BoolOpt('service_session_mirroring_enabled',
                default=False,
                help=_('Enable or disable Alteon interswitch link for '
                       'stateful session failover. Default: False.')),
    cfg.StrOpt('workflow_template_name',
               default='os_lb_v2',
               help=_('Name of the workflow template. Default: os_lb_v2.')),
    cfg.ListOpt('child_workflow_template_names',
                default=['manage_l3'],
                # Fixed: the two help fragments previously concatenated
                # without a separating space ("used.Default:").
                help=_('Name of child workflow templates used. '
                       'Default: manage_l3.')),
    cfg.DictOpt('workflow_params',
                default={"twoleg_enabled": "_REPLACE_",
                         "ha_network_name": "HA-Network",
                         "ha_ip_pool_name": "default",
                         "allocate_ha_vrrp": True,
                         "allocate_ha_ips": True,
                         "data_port": 1,
                         "data_ip_address": "192.168.200.99",
                         "data_ip_mask": "255.255.255.0",
                         "gateway": "192.168.200.1",
                         "ha_port": 2},
                help=_('Parameter for l2_l3 workflow constructor.')),
    cfg.StrOpt('workflow_action_name',
               default='apply',
               help=_('Name of the workflow action. '
                      'Default: apply.')),
    cfg.StrOpt('stats_action_name',
               default='stats',
               help=_('Name of the workflow action for statistics. '
                      'Default: stats.'))
]

# Debug switches that allow disabling parts of the provisioning flow.
# Registered below under the [radwarev2_debug] section.
driver_debug_opts = [
    cfg.BoolOpt('provision_service',
                default=True,
                help=_('Provision ADC service?')),
    cfg.BoolOpt('configure_l3',
                default=True,
                # Fixed typo in help text: "Configule" -> "Configure".
                help=_('Configure ADC with L3 parameters?')),
    cfg.BoolOpt('configure_l4',
                default=True,
                help=_('Configure ADC with L4 parameters?'))
]

cfg.CONF.register_opts(driver_opts, "radwarev2")
cfg.CONF.register_opts(driver_debug_opts, "radwarev2_debug")
|
||||
|
||||
|
||||
class RadwareLBaaSBaseV2Driver(driver_base.LoadBalancerBaseDriver):
    """Base LBaaS v2 provider driver for Radware.

    Wires the per-entity manager classes defined in this module into the
    generic ``LoadBalancerBaseDriver`` skeleton; each manager delegates
    the actual provisioning work back to the driver instance.
    """

    def __init__(self, plugin):
        super(RadwareLBaaSBaseV2Driver, self).__init__(plugin)

        # One manager per LBaaS v2 object type.
        self.load_balancer = LoadBalancerManager(self)
        self.listener = ListenerManager(self)
        self.pool = PoolManager(self)
        self.member = MemberManager(self)
        self.health_monitor = HealthMonitorManager(self)
|
||||
|
||||
|
||||
class LoadBalancerManager(driver_base.BaseLoadBalancerManager):
    """Load balancer operations for the Radware v2 provider.

    A vDirect workflow is only touched once one already exists for the
    load balancer (or, for refresh, once there is something deployable);
    otherwise operations complete immediately.
    """

    @call_log.log
    def create(self, context, lb):
        # Nothing is provisioned for a bare LB; just acknowledge it.
        self.successful_completion(context, lb)

    @call_log.log
    def update(self, context, old_lb, lb):
        if not self.driver.workflow_exists(old_lb):
            self.successful_completion(context, lb)
            return
        self.driver.execute_workflow(
            context, self, lb, old_data_model=old_lb)

    @call_log.log
    def delete(self, context, lb):
        self.driver.remove_workflow(context, self, lb)

    @call_log.log
    def refresh(self, context, lb):
        # Deployable only when some listener has a pool with members.
        deployable = lb.listeners and any(
            l.default_pool and l.default_pool.members
            for l in lb.listeners)
        if deployable:
            self.driver.execute_workflow(context, self, lb)
        else:
            self.successful_completion(context, lb)

    @call_log.log
    def stats(self, context, lb):
        if not self.driver.workflow_exists(lb):
            self.successful_completion(context, lb)
            return
        return self.driver.get_stats(context, lb)
|
||||
|
||||
|
||||
class ListenerManager(driver_base.BaseListenerManager):
    """Listener operations for the Radware v2 provider."""

    @call_log.log
    def create(self, context, listener):
        # A listener alone is not deployable; complete immediately.
        self.successful_completion(context, listener)

    @call_log.log
    def update(self, context, old_listener, listener):
        if not self.driver.workflow_exists(old_listener.root_loadbalancer):
            self.successful_completion(context, listener)
            return
        self.driver.execute_workflow(
            context, self, listener, old_data_model=old_listener)

    @call_log.log
    def delete(self, context, listener):
        if not self.driver.workflow_exists(listener.root_loadbalancer):
            self.successful_completion(context, listener, delete=True)
            return
        self.driver.execute_workflow(context, self, listener, delete=True)
|
||||
|
||||
|
||||
class PoolManager(driver_base.BasePoolManager):
    """Pool operations for the Radware v2 provider."""

    @call_log.log
    def create(self, context, pool):
        # A pool with no members is not deployable; complete immediately.
        self.successful_completion(context, pool)

    @call_log.log
    def update(self, context, old_pool, pool):
        if not self.driver.workflow_exists(old_pool.root_loadbalancer):
            self.successful_completion(context, pool)
            return
        self.driver.execute_workflow(
            context, self, pool, old_data_model=old_pool)

    @call_log.log
    def delete(self, context, pool):
        if not self.driver.workflow_exists(pool.root_loadbalancer):
            self.successful_completion(context, pool, delete=True)
            return
        self.driver.execute_workflow(context, self, pool, delete=True)
|
||||
|
||||
|
||||
class MemberManager(driver_base.BaseMemberManager):
    """Member operations for the Radware v2 provider.

    Members always make the load balancer graph deployable, so every
    operation unconditionally runs the vDirect workflow.
    """

    @call_log.log
    def create(self, context, member):
        self.driver.execute_workflow(context, self, member)

    @call_log.log
    def update(self, context, old_member, member):
        self.driver.execute_workflow(
            context, self, member, old_data_model=old_member)

    @call_log.log
    def delete(self, context, member):
        self.driver.execute_workflow(context, self, member, delete=True)
|
||||
|
||||
|
||||
class HealthMonitorManager(driver_base.BaseHealthMonitorManager):
    """Health monitor operations for the Radware v2 provider."""

    @call_log.log
    def create(self, context, hm):
        if not self.driver.workflow_exists(hm.root_loadbalancer):
            self.successful_completion(context, hm)
            return
        self.driver.execute_workflow(context, self, hm)

    @call_log.log
    def update(self, context, old_hm, hm):
        if not self.driver.workflow_exists(old_hm.root_loadbalancer):
            self.successful_completion(context, hm)
            return
        self.driver.execute_workflow(
            context, self, hm, old_data_model=old_hm)

    @call_log.log
    def delete(self, context, hm):
        if not self.driver.workflow_exists(hm.root_loadbalancer):
            self.successful_completion(context, hm, delete=True)
            return
        self.driver.execute_workflow(context, self, hm, delete=True)
|
|
@ -0,0 +1,43 @@
|
|||
# Copyright 2015 Radware LTD.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from neutron.i18n import _LE
|
||||
|
||||
from neutron_lbaas.common import exceptions
|
||||
|
||||
|
||||
class RadwareLBaasV2Exception(exceptions.LbaasException):
    """Root of all exceptions raised by the Radware LBaaS v2 provider."""

    message = _LE('An unknown exception occurred in '
                  'Radware LBaaS v2 provider.')
|
||||
|
||||
|
||||
class AuthenticationMissing(RadwareLBaasV2Exception):
    """Raised when vDirect credentials are not configured."""

    message = _LE('vDirect user/password missing. '
                  'Specify in configuration file, under [radwarev2] section')
|
||||
|
||||
|
||||
class WorkflowTemplateMissing(RadwareLBaasV2Exception):
    """Raised when a required workflow template is absent on vDirect."""

    message = _LE('Workflow template %(workflow_template)s is missing '
                  'on vDirect server. Upload missing workflow')
|
||||
|
||||
|
||||
class RESTRequestFailure(RadwareLBaasV2Exception):
    """Raised when a vDirect REST call returns an unexpected status."""

    message = _LE('REST request failed with status %(status)s. '
                  'Reason: %(reason)s, Description: %(description)s. '
                  'Success status codes are %(success_codes)s')
|
||||
|
||||
|
||||
class UnsupportedEntityOperation(RadwareLBaasV2Exception):
    """Raised for operations this provider does not implement."""

    message = _LE('%(operation)s operation is not supported for %(entity)s.')
|
|
@ -0,0 +1,148 @@
|
|||
# Copyright 2015, Radware LTD. All rights reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import base64
|
||||
import httplib
|
||||
|
||||
from neutron.common import log as call_log
|
||||
from neutron.i18n import _LE, _LW
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from neutron_lbaas.drivers.radware import exceptions as r_exc
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Indices into the 4-tuple returned by vDirectRESTClient calls:
# (HTTP status code, reason phrase, raw body string, parsed JSON body).
RESP_STATUS = 0
RESP_REASON = 1
RESP_STR = 2
RESP_DATA = 3
|
||||
|
||||
|
||||
class vDirectRESTClient(object):
    """REST server proxy to Radware vDirect.

    Thin HTTP(S) client that adds basic authentication to every request
    and, when a secondary vDirect server is configured, transparently
    retries a failed or redirected request against it.
    """

    @call_log.log
    def __init__(self,
                 server='localhost',
                 secondary_server=None,
                 user=None,
                 password=None,
                 port=2189,
                 ssl=True,
                 timeout=5000,
                 base_uri=''):
        self.server = server
        self.secondary_server = secondary_server
        self.port = port
        self.ssl = ssl
        self.base_uri = base_uri
        self.timeout = timeout
        # Credentials are mandatory: pre-compute the HTTP basic-auth
        # token once (stripping the newline encodestring appends).
        if user and password:
            self.auth = base64.encodestring('%s:%s' % (user, password))
            self.auth = self.auth.replace('\n', '')
        else:
            raise r_exc.AuthenticationMissing()

        debug_params = {'server': self.server,
                        'sec_server': self.secondary_server,
                        'port': self.port,
                        'ssl': self.ssl}
        LOG.debug('vDirectRESTClient:init server=%(server)s, '
                  'secondary server=%(sec_server)s, '
                  'port=%(port)d, '
                  'ssl=%(ssl)r', debug_params)

    def _flip_servers(self):
        # Swap the active and secondary servers so that the next request
        # goes to the peer.
        # Fixed typo in log message: "Fliping" -> "Flipping".
        LOG.warning(_LW('Flipping servers. Current is: %(server)s, '
                        'switching to %(secondary)s'),
                    {'server': self.server,
                     'secondary': self.secondary_server})
        self.server, self.secondary_server = self.secondary_server, self.server

    def _recover(self, action, resource, data, headers, binary=False):
        # Retry the request on the peer server; with no peer configured
        # report failure using the driver's (-1, None, None, None) tuple.
        if self.server and self.secondary_server:
            self._flip_servers()
            resp = self._call(action, resource, data,
                              headers, binary)
            return resp
        else:
            LOG.error(_LE('REST client is not able to recover '
                          'since only one vDirect server is '
                          'configured.'))
            return -1, None, None, None

    def call(self, action, resource, data, headers, binary=False):
        """Perform a REST request, failing over to the peer if needed.

        Returns a (status, reason, response string, parsed data) tuple.
        A status of -1 means the server could not be reached; 301/307
        means the server is passive in an HA pair - both trigger a single
        retry against the secondary server via _recover().
        """
        resp = self._call(action, resource, data, headers, binary)
        if resp[RESP_STATUS] == -1:
            LOG.warning(_LW('vDirect server is not responding (%s).'),
                        self.server)
            return self._recover(action, resource, data, headers, binary)
        elif resp[RESP_STATUS] in (301, 307):
            LOG.warning(_LW('vDirect server is not active (%s).'),
                        self.server)
            return self._recover(action, resource, data, headers, binary)
        else:
            return resp

    @call_log.log
    def _call(self, action, resource, data, headers, binary=False):
        """Issue one HTTP(S) request and return the response 4-tuple."""
        # Absolute URLs (e.g. a 'uri' field from a previous response) are
        # used verbatim; otherwise the configured base URI is prepended.
        if resource.startswith('http'):
            uri = resource
        else:
            uri = self.base_uri + resource
        if binary:
            body = data
        else:
            body = jsonutils.dumps(data)

        debug_data = 'binary' if binary else body
        debug_data = debug_data if debug_data else 'EMPTY'
        if not headers:
            headers = {'Authorization': 'Basic %s' % self.auth}
        else:
            headers['Authorization'] = 'Basic %s' % self.auth
        conn = None
        if self.ssl:
            conn = httplib.HTTPSConnection(
                self.server, self.port, timeout=self.timeout)
            # NOTE(review): the constructor never returns None, so this
            # guard (and its 0 status, which call() does not treat as a
            # failure) appears unreachable - kept for compatibility.
            if conn is None:
                LOG.error(_LE('vdirectRESTClient: Could not establish HTTPS '
                              'connection'))
                return 0, None, None, None
        else:
            conn = httplib.HTTPConnection(
                self.server, self.port, timeout=self.timeout)
            if conn is None:
                LOG.error(_LE('vdirectRESTClient: Could not establish HTTP '
                              'connection'))
                return 0, None, None, None

        try:
            conn.request(action, uri, body, headers)
            response = conn.getresponse()
            respstr = response.read()
            respdata = respstr
            try:
                respdata = jsonutils.loads(respstr)
            except ValueError:
                # response was not JSON, ignore the exception
                pass
            ret = (response.status, response.reason, respstr, respdata)
        except Exception as e:
            log_dict = {'action': action, 'e': e}
            LOG.error(_LE('vdirectRESTClient: %(action)s failure, %(e)r'),
                      log_dict)
            ret = -1, None, None, None
        conn.close()
        return ret
|
|
@ -0,0 +1,647 @@
|
|||
# Copyright 2015, Radware LTD. All rights reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import netaddr
|
||||
import threading
|
||||
import time
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import log as call_log
|
||||
from neutron import context
|
||||
from neutron.i18n import _LE, _LW, _LI
|
||||
from neutron.plugins.common import constants
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
from six.moves import queue as Queue
|
||||
|
||||
import neutron_lbaas.common.cert_manager
|
||||
from neutron_lbaas.drivers.radware import base_v2_driver
|
||||
from neutron_lbaas.drivers.radware import exceptions as r_exc
|
||||
from neutron_lbaas.drivers.radware import rest_client as rest
|
||||
|
||||
# Certificate backend used to fetch TLS container data for listeners.
CERT_MANAGER_PLUGIN = neutron_lbaas.common.cert_manager.CERT_MANAGER_PLUGIN

# vDirect-specific content types for the different REST payloads.
TEMPLATE_HEADER = {'Content-Type':
                   'application/vnd.com.radware.vdirect.'
                   'template-parameters+json'}
PROVISION_HEADER = {'Content-Type':
                    'application/vnd.com.radware.'
                    'vdirect.status+json'}
CREATE_SERVICE_HEADER = {'Content-Type':
                         'application/vnd.com.radware.'
                         'vdirect.adc-service-specification+json'}

# Fallback values used when a property is absent from a data model.
PROPERTY_DEFAULTS = {'type': 'none',
                     'cookie_name': 'none',
                     'url_path': '/',
                     'http_method': 'GET',
                     'expected_codes': '200',
                     'subnet': '255.255.255.255',
                     'mask': '255.255.255.255',
                     'gw': '255.255.255.255',
                     }
# Attribute names copied from each data model into the objects graph
# that is sent to the vDirect workflow.
LOADBALANCER_PROPERTIES = ['vip_address', 'admin_state_up']
LISTENER_PROPERTIES = ['protocol_port', 'protocol',
                       'connection_limit', 'admin_state_up']
POOL_PROPERTIES = ['protocol', 'lb_algorithm', 'admin_state_up']
MEMBER_PROPERTIES = ['id', 'address', 'protocol_port', 'weight',
                     'admin_state_up', 'subnet', 'mask', 'gw']
SESSION_PERSISTENCY_PROPERTIES = ['type', 'cookie_name']
HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries',
                             'admin_state_up', 'url_path', 'http_method',
                             'expected_codes', 'id']

LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RadwareLBaaSV2Driver(base_v2_driver.RadwareLBaaSBaseV2Driver):
|
||||
    #
    # Assumptions:
    # 1) We have only one workflow that takes care of l2-l4 and service
    #    creation
    # 2) The workflow template exists on the vDirect server
    # 3) The workflow exposes one operation named 'update' (plus ctor and
    #    dtor)
    # 4) The 'update' operation gets the loadbalancer object graph as input
    # 5) The object graph is enhanced by our code before it is sent to the
    #    workflow
    # 6) Async operations are handled by a different thread
    #
|
||||
    def __init__(self, plugin):
        """Build the ADC service template and REST client from config.

        Reads the [radwarev2] and [radwarev2_debug] option groups and
        prepares everything the driver needs: the service-specification
        dict sent to vDirect, the workflow names/parameters, the REST
        client and the (lazily started) completion-handling thread.
        """
        super(RadwareLBaaSV2Driver, self).__init__(plugin)
        rad = cfg.CONF.radwarev2
        rad_debug = cfg.CONF.radwarev2_debug
        self.plugin = plugin
        # Template of the ADC service specification; '_REPLACE_' markers
        # are filled in per-loadbalancer in _create_workflow().
        self.service = {
            "name": "_REPLACE_",
            "tenantId": "_REPLACE_",
            "haPair": rad.service_ha_pair,
            "sessionMirroringEnabled": rad.service_session_mirroring_enabled,
            "primary": {
                "capacity": {
                    "throughput": rad.service_throughput,
                    "sslThroughput": rad.service_ssl_throughput,
                    "compressionThroughput":
                    rad.service_compression_throughput,
                    "cache": rad.service_cache
                },
                "network": {
                    "type": "portgroup",
                    "portgroups": '_REPLACE_'
                },
                "adcType": rad.service_adc_type,
                "acceptableAdc": "Exact"
            }
        }
        if rad.service_resource_pool_ids:
            ids = rad.service_resource_pool_ids
            self.service['resourcePoolIds'] = [
                {'id': id} for id in ids
            ]
        else:
            self.service['resourcePoolIds'] = []

        # NOTE(review): -1 (the option default) is truthy, so islVlan is
        # set whenever the option is non-zero - confirm this is intended.
        if rad.service_isl_vlan:
            self.service['islVlan'] = rad.service_isl_vlan
        self.workflow_template_name = rad.workflow_template_name
        self.child_workflow_template_names = rad.child_workflow_template_names
        self.workflow_params = rad.workflow_params
        self.workflow_action_name = rad.workflow_action_name
        self.stats_action_name = rad.stats_action_name
        vdirect_address = rad.vdirect_address
        sec_server = rad.ha_secondary_address
        self.rest_client = rest.vDirectRESTClient(
            server=vdirect_address,
            secondary_server=sec_server,
            user=rad.vdirect_user,
            password=rad.vdirect_password)
        # Debug switches are merged into the workflow parameters so the
        # vDirect workflow can skip provisioning/configuration steps.
        self.workflow_params['provision_service'] = rad_debug.provision_service
        self.workflow_params['configure_l3'] = rad_debug.configure_l3
        self.workflow_params['configure_l4'] = rad_debug.configure_l4

        # Queue feeding the background thread that polls vDirect for
        # operation completion; the thread is started on first use.
        self.queue = Queue.Queue()
        self.completion_handler = OperationCompletionHandler(self.queue,
                                                            self.rest_client,
                                                            plugin)
        self.workflow_templates_exists = False
        self.completion_handler.setDaemon(True)
        self.completion_handler_started = False
|
||||
|
||||
def _start_completion_handling_thread(self):
|
||||
if not self.completion_handler_started:
|
||||
LOG.info(_LI('Starting operation completion handling thread'))
|
||||
self.completion_handler.start()
|
||||
self.completion_handler_started = True
|
||||
|
||||
    @staticmethod
    def _get_wf_name(lb):
        """Return the vDirect workflow name for load balancer *lb*."""
        return 'LB_' + lb.id
|
||||
|
||||
@call_log.log
|
||||
def _verify_workflow_templates(self):
|
||||
"""Verify the existence of workflows on vDirect server."""
|
||||
resource = '/api/workflowTemplate/'
|
||||
workflow_templates = {self.workflow_template_name: False}
|
||||
for child_wf_name in self.child_workflow_template_names:
|
||||
workflow_templates[child_wf_name] = False
|
||||
response = _rest_wrapper(self.rest_client.call('GET',
|
||||
resource,
|
||||
None,
|
||||
None), [200])
|
||||
for workflow_template in workflow_templates.keys():
|
||||
for template in response:
|
||||
if workflow_template == template['name']:
|
||||
workflow_templates[workflow_template] = True
|
||||
break
|
||||
for template, found in workflow_templates.items():
|
||||
if not found:
|
||||
raise r_exc.WorkflowTemplateMissing(
|
||||
workflow_template=template)
|
||||
|
||||
@call_log.log
|
||||
def workflow_exists(self, lb):
|
||||
"""Create workflow for loadbalancer instance"""
|
||||
wf_name = self._get_wf_name(lb)
|
||||
wf_resource = '/api/workflow/%s' % (wf_name)
|
||||
try:
|
||||
_rest_wrapper(self.rest_client.call(
|
||||
'GET', wf_resource, None, None),
|
||||
[200])
|
||||
except Exception:
|
||||
return False
|
||||
return True
|
||||
|
||||
    @call_log.log
    def _create_workflow(self, lb, lb_network_id, proxy_network_id):
        """Create workflow for loadbalancer instance.

        Instantiates the configured workflow template on the vDirect
        server, passing the ADC service specification built in __init__.
        When the proxy network differs from the LB network a two-legged
        setup is requested with both port groups.
        """

        self._verify_workflow_templates()

        wf_name = self._get_wf_name(lb)
        service = copy.deepcopy(self.service)
        service['tenantId'] = lb.tenant_id
        service['name'] = 'srv_' + lb_network_id

        if lb_network_id != proxy_network_id:
            self.workflow_params["twoleg_enabled"] = True
            service['primary']['network']['portgroups'] = [
                lb_network_id, proxy_network_id]
        else:
            self.workflow_params["twoleg_enabled"] = False
            service['primary']['network']['portgroups'] = [lb_network_id]

        # POST to the template resource instantiates a workflow with the
        # given name and parameters.
        tmpl_resource = '/api/workflowTemplate/%s?name=%s' % (
            self.workflow_template_name, wf_name)
        _rest_wrapper(self.rest_client.call(
            'POST', tmpl_resource,
            {'parameters': dict(self.workflow_params,
                                service_params=service)},
            TEMPLATE_HEADER))
|
||||
|
||||
    @call_log.log
    def get_stats(self, ctx, lb):
        """Run the stats workflow action and return its results.

        First triggers the configured stats action (async, 202), then
        reads the workflow parameters where the action stores its output
        and returns the 'stats' entry.
        """
        wf_name = self._get_wf_name(lb)
        resource = '/api/workflow/%s/action/%s' % (
            wf_name, self.stats_action_name)
        response = _rest_wrapper(self.rest_client.call('POST', resource,
                                 None, TEMPLATE_HEADER), success_codes=[202])
        LOG.debug('stats_action response: %s ', response)

        resource = '/api/workflow/%s/parameters' % (wf_name)
        response = _rest_wrapper(self.rest_client.call('GET', resource,
                                 None, TEMPLATE_HEADER), success_codes=[200])
        LOG.debug('stats_values response: %s ', response)
        return response['stats']
|
||||
|
||||
    @call_log.log
    def execute_workflow(self, ctx, manager, data_model,
                         old_data_model=None, delete=False):
        """Run the 'update' workflow action for the LB graph of data_model.

        Creates the workflow first if it does not exist, posts the full
        objects graph to the workflow action and queues an
        OperationAttributes entry so the completion thread can report the
        asynchronous result back through *manager*.
        """
        lb = data_model.root_loadbalancer

        # Get possible proxy subnet.
        # Proxy subnet equals to LB subnet if no proxy
        # is necessary.
        # Get subnet id of any member located on different than
        # loadbalancer's network. If returned subnet id is the subnet id
        # of loadbalancer - all members are accessible from loadbalancer's
        # network, meaning no second leg or static routes are required.
        # Otherwise, create proxy port on found member's subnet and get its
        # address as a proxy address for loadbalancer instance
        lb_subnet = self.plugin.db._core_plugin.get_subnet(
            ctx, lb.vip_subnet_id)
        proxy_subnet = lb_subnet
        proxy_port_subnet_id = self._get_proxy_port_subnet_id(lb)
        if proxy_port_subnet_id == lb.vip_subnet_id:
            proxy_port_address = lb.vip_address
        else:
            proxy_port_address = self._create_proxy_port_and_get_address(
                ctx, lb, proxy_port_subnet_id)
            proxy_subnet = self.plugin.db._core_plugin.get_subnet(
                ctx, proxy_port_subnet_id)

        # Check if workflow exist, create if not
        if not self.workflow_exists(lb):
            self._create_workflow(lb,
                                  lb_subnet['network_id'],
                                  proxy_subnet['network_id'])

        # Build objects graph
        objects_graph = self._build_objects_graph(ctx, lb, data_model,
                                                  proxy_port_address,
                                                  proxy_subnet)
        LOG.debug("Radware vDirect LB object graph is " + str(objects_graph))

        wf_name = self._get_wf_name(lb)
        resource = '/api/workflow/%s/action/%s' % (
            wf_name, self.workflow_action_name)
        response = _rest_wrapper(self.rest_client.call('POST', resource,
                                 {'parameters': objects_graph},
                                 TEMPLATE_HEADER), success_codes=[202])
        LOG.debug('_update_workflow response: %s ', response)

        # Hand the async operation over to the completion thread.
        oper = OperationAttributes(
            manager, response['uri'], lb,
            data_model, old_data_model,
            delete=delete)

        LOG.debug('Pushing operation %s to the queue', oper)
        self._start_completion_handling_thread()
        self.queue.put_nowait(oper)
|
||||
|
||||
    def remove_workflow(self, ctx, manager, lb):
        """Delete the LB's vDirect workflow and its proxy port.

        A 404 means the workflow is already gone: delete the proxy port
        and complete immediately. Otherwise queue an operation so the
        completion thread reports the asynchronous deletion result.
        """
        wf_name = self._get_wf_name(lb)
        LOG.debug('Remove the workflow %s' % wf_name)
        resource = '/api/workflow/%s' % (wf_name)
        rest_return = self.rest_client.call('DELETE', resource, None, None)
        response = _rest_wrapper(rest_return, [204, 202, 404])
        if rest_return[rest.RESP_STATUS] in [404]:
            # Workflow never existed or was already removed - only the
            # neutron-side proxy port needs cleaning up.
            try:
                self._delete_proxy_port(ctx, lb)
                LOG.debug('Proxy port for LB %s was deleted', lb.id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Proxy port deletion for LB %s '
                                  'failed'), lb.id)
            manager.successful_completion(ctx, lb)
        else:
            oper = OperationAttributes(
                manager, response['uri'], lb,
                lb, old_data_model=None,
                delete=True)

            self._start_completion_handling_thread()
            self.queue.put_nowait(oper)
|
||||
|
||||
def _build_objects_graph(self, ctx, lb, data_model,
                         proxy_port_address, proxy_subnet):
    """Iterate over the LB model starting from the root lb entity
    and build its JSON representation for vDirect.

    The graph mirrors the LBaaS v2 object tree: loadbalancer ->
    listeners -> default_pool -> (healthmonitor, sessionpersistence,
    members).  Missing attributes fall back to PROPERTY_DEFAULTS.
    """
    graph = {}
    for prop in LOADBALANCER_PROPERTIES:
        graph[prop] = getattr(lb, prop, PROPERTY_DEFAULTS.get(prop))

    # The proxy (pip) port address computed by the caller.
    graph['pip_address'] = proxy_port_address

    graph['listeners'] = []
    for listener in lb.listeners:
        # NOTE(review): `break` stops processing ALL remaining listeners
        # as soon as one listener has no pool/members; if the intent was
        # to skip only that listener, `continue` would be expected -
        # TODO confirm against the driver's provisioning semantics.
        if not listener.default_pool or \
            not listener.default_pool.members:
            break
        listener_dict = {}
        for prop in LISTENER_PROPERTIES:
            listener_dict[prop] = getattr(
                listener, prop, PROPERTY_DEFAULTS.get(prop))

        # Attach the default TLS certificate material, if any.
        if listener.default_tls_container_id:
            default_cert = CERT_MANAGER_PLUGIN.CertManager.get_cert(
                listener.default_tls_container_id,
                service_name='Neutron LBaaS v2 Radware provider')
            cert_dict = {
                'certificate': default_cert.get_certificate(),
                'intermediates': default_cert.get_intermediates(),
                'private_key': default_cert.get_private_key(),
                'passphrase': default_cert.get_private_key_passphrase()}
            listener_dict['default_tls_certificate'] = cert_dict

        # Attach SNI certificates, preserving their positions.
        if listener.sni_containers:
            listener_dict['sni_tls_certificates'] = []
            for sni_container in listener.sni_containers:
                sni_cert = CERT_MANAGER_PLUGIN.CertManager.get_cert(
                    sni_container.tls_container_id,
                    service_name='Neutron LBaaS v2 Radware provider')
                listener_dict['sni_tls_certificates'].append(
                    {'position': sni_container.position,
                    'certificate': sni_cert.get_certificate(),
                    'intermediates': sni_cert.get_intermediates(),
                    'private_key': sni_cert.get_private_key(),
                    'passphrase': sni_cert.get_private_key_passphrase()})

        if listener.default_pool:
            pool_dict = {}
            for prop in POOL_PROPERTIES:
                pool_dict[prop] = getattr(
                    listener.default_pool, prop,
                    PROPERTY_DEFAULTS.get(prop))

            if listener.default_pool.healthmonitor:
                hm_dict = {}
                for prop in HEALTH_MONITOR_PROPERTIES:
                    hm_dict[prop] = getattr(
                        listener.default_pool.healthmonitor, prop,
                        PROPERTY_DEFAULTS.get(prop))
                pool_dict['healthmonitor'] = hm_dict

            if listener.default_pool.sessionpersistence:
                sess_pers_dict = {}
                for prop in SESSION_PERSISTENCY_PROPERTIES:
                    sess_pers_dict[prop] = getattr(
                        listener.default_pool.sessionpersistence, prop,
                        PROPERTY_DEFAULTS.get(prop))
                pool_dict['sessionpersistence'] = sess_pers_dict

            pool_dict['members'] = []
            for member in listener.default_pool.members:
                member_dict = {}
                for prop in MEMBER_PROPERTIES:
                    member_dict[prop] = getattr(
                        member, prop,
                        PROPERTY_DEFAULTS.get(prop))
                # Members reachable only through the proxy subnet's
                # gateway need explicit static-route data added.
                if (proxy_port_address != lb.vip_address and
                    netaddr.IPAddress(member.address)
                    not in netaddr.IPNetwork(proxy_subnet['cidr'])):
                    self._accomplish_member_static_route_data(
                        ctx, member, member_dict,
                        proxy_subnet['gateway_ip'])
                pool_dict['members'].append(member_dict)

            listener_dict['default_pool'] = pool_dict
        graph['listeners'].append(listener_dict)
    return graph
|
||||
|
||||
def _get_lb_proxy_port_name(self, lb):
|
||||
return 'proxy_' + lb.id
|
||||
|
||||
def _get_proxy_port_subnet_id(self, lb):
|
||||
"""Look for at least one member of any listener's pool
|
||||
that is located on subnet different than loabalancer's subnet.
|
||||
If such member found, return its subnet id.
|
||||
Otherwise, return loadbalancer's subnet id
|
||||
"""
|
||||
for listener in lb.listeners:
|
||||
if listener.default_pool:
|
||||
for member in listener.default_pool.members:
|
||||
if lb.vip_subnet_id != member.subnet_id:
|
||||
return member.subnet_id
|
||||
return lb.vip_subnet_id
|
||||
|
||||
def _create_proxy_port_and_get_address(self,
                                       ctx, lb, proxy_port_subnet_id):
    """Check if proxy port was created earlier.

    If not, create a new port on the proxy subnet.

    :param ctx: neutron request context
    :param lb: loadbalancer data model owning the port
    :param proxy_port_subnet_id: subnet to allocate the port on
    :returns: the port's IP address on proxy_port_subnet_id
    :raises Exception: when the port has no IP on the requested subnet
    """
    proxy_port_name = self._get_lb_proxy_port_name(lb)
    ports = self.plugin.db._core_plugin.get_ports(
        ctx, filters={'name': [proxy_port_name], })
    if not ports:
        # Create pip port. Use the subnet
        # determined before by _get_pip_port_subnet_id() function
        proxy_port_subnet = self.plugin.db._core_plugin.get_subnet(
            ctx, proxy_port_subnet_id)
        proxy_port_data = {
            'tenant_id': lb.tenant_id,
            'name': proxy_port_name,
            'network_id': proxy_port_subnet['network_id'],
            'mac_address': attributes.ATTR_NOT_SPECIFIED,
            'admin_state_up': False,
            'device_id': '',
            'device_owner': 'neutron:' + constants.LOADBALANCERV2,
            'fixed_ips': [{'subnet_id': proxy_port_subnet_id}]
        }
        proxy_port = self.plugin.db._core_plugin.create_port(
            ctx, {'port': proxy_port_data})
    else:
        proxy_port = ports[0]

    ips_on_subnet = [ip for ip in proxy_port['fixed_ips']
                     if ip['subnet_id'] == proxy_port_subnet_id]
    if not ips_on_subnet:
        # Interpolate the subnet id into the message; passing it as a
        # second Exception argument (logging-style) never formats it.
        raise Exception(_('Could not find or allocate '
                          'IP address on subnet id %s for proxy port') %
                        proxy_port_subnet_id)
    else:
        return ips_on_subnet[0]['ip_address']
|
||||
|
||||
def _delete_proxy_port(self, ctx, lb):
    """Delete every neutron port serving as this LB's proxy port.

    Deletion failures are logged and swallowed: the port may already
    have been deleted by other means.
    """
    proxy_port_name = self._get_lb_proxy_port_name(lb)
    matching_ports = self.plugin.db._core_plugin.get_ports(
        ctx, filters={'name': [proxy_port_name]})
    for port in matching_ports:
        try:
            self.plugin.db._core_plugin.delete_port(ctx, port['id'])
        except Exception as exception:
            # stop exception propagation, port may have
            # been deleted by other means
            LOG.warning(_LW('proxy port deletion failed: %r'),
                        exception)
|
||||
|
||||
def _accomplish_member_static_route_data(self,
                                         ctx, member, member_data, proxy_gateway_ip):
    """Fill static-route fields for a member behind the proxy gateway.

    When exactly one neutron port matches the member's address (and
    tenant), the route covers that port's whole subnet; otherwise it
    degrades to a host route for the member address alone.  The route
    always points at the proxy subnet's gateway.

    :param ctx: neutron request context
    :param member: member data model
    :param member_data: mutable dict being built for the vDirect graph;
        'subnet', 'mask' (subnet case only) and 'gw' keys are set here
    :param proxy_gateway_ip: gateway IP of the proxy subnet
    """
    member_ports = self.plugin.db._core_plugin.get_ports(
        ctx,
        filters={'fixed_ips': {'ip_address': [member.address]},
                 'tenant_id': [member.tenant_id]})
    if len(member_ports) == 1:
        # Consistency fix: every other call in this module goes through
        # self.plugin.db._core_plugin; `self.plugin._core_plugin` looks
        # like a typo'd access path.
        member_subnet = self.plugin.db._core_plugin.get_subnet(
            ctx,
            member_ports[0]['fixed_ips'][0]['subnet_id'])
        member_network = netaddr.IPNetwork(member_subnet['cidr'])
        member_data['subnet'] = str(member_network.network)
        member_data['mask'] = str(member_network.netmask)
    else:
        # Ambiguous or unknown port: route only the member's address.
        member_data['subnet'] = member_data['address']
    member_data['gw'] = proxy_gateway_ip
|
||||
|
||||
|
||||
class OperationCompletionHandler(threading.Thread):

    """Update DB with operation status or delete the entity from DB.

    Consumes OperationAttributes objects from a queue, polls vDirect
    for each operation's completion and, once done, reports success or
    failure back through the operation's manager.
    """

    def __init__(self, queue, rest_client, plugin):
        threading.Thread.__init__(self)
        self.queue = queue
        self.rest_client = rest_client
        self.plugin = plugin
        # Signals run() to exit; set by join().
        self.stoprequest = threading.Event()
        # Countdown of operations to handle before a one-second rest.
        self.opers_to_handle_before_rest = 0

    def join(self, timeout=None):
        """Request the worker loop to stop, then join the thread."""
        self.stoprequest.set()
        super(OperationCompletionHandler, self).join(timeout)

    def handle_operation_completion(self, oper):
        """Poll vDirect once for *oper*; finalize it if complete.

        :returns: True when the operation reached a terminal state.
        """
        result = self.rest_client.call('GET',
                                       oper.operation_url,
                                       None,
                                       None)
        # Bug fix: '%(uri)' / '%(result)' lacked the 's' conversion and
        # raised ValueError whenever DEBUG logging was enabled.
        LOG.debug('Operation completion requested %(uri)s and got: '
                  '%(result)s',
                  {'uri': oper.operation_url, 'result': result})
        completed = result[rest.RESP_DATA]['complete']
        # Bug fix: a stray trailing comma made `reason` a 1-tuple,
        # corrupting the failure message below.
        reason = result[rest.RESP_REASON]
        description = result[rest.RESP_STR]
        if completed:
            # operation is done - update the DB with the status
            # or delete the entire graph from DB
            success = result[rest.RESP_DATA]['success']
            sec_to_completion = time.time() - oper.creation_time
            debug_data = {'oper': oper,
                          'sec_to_completion': sec_to_completion,
                          'success': success}
            LOG.debug('Operation %(oper)s is completed after '
                      '%(sec_to_completion)d sec '
                      'with success status: %(success)s :',
                      debug_data)
            if not success:
                # failure - log it and set the return ERROR as DB state
                if reason or description:
                    msg = 'Reason:%s. Description:%s' % (reason, description)
                else:
                    msg = "unknown"
                error_params = {"operation": oper, "msg": msg}
                LOG.error(_LE(
                    'Operation %(operation)s failed. Reason: %(msg)s'),
                    error_params)
                oper.status = constants.ERROR
                OperationCompletionHandler._run_post_failure_function(oper)
            else:
                oper.status = constants.ACTIVE
                OperationCompletionHandler._run_post_success_function(oper)

        return completed

    def run(self):
        """Worker loop: drain the queue until join() is called."""
        while not self.stoprequest.is_set():
            try:
                oper = self.queue.get(timeout=1)

                # Get the current queue size (N) and set the counter with it.
                # Handle N operations with no intermission.
                # Once N operations handled, get the size again and repeat.
                if self.opers_to_handle_before_rest <= 0:
                    self.opers_to_handle_before_rest = self.queue.qsize() + 1

                LOG.debug('Operation consumed from the queue: %s', oper)
                # check the status - if oper is done: update the db,
                # else push the oper again to the queue
                if not self.handle_operation_completion(oper):
                    LOG.debug('Operation %s is not completed yet..', oper)
                    # Not completed - push to the queue again
                    self.queue.put_nowait(oper)

                self.queue.task_done()
                self.opers_to_handle_before_rest -= 1

                # Take one second rest before start handling
                # new operations or operations handled before
                if self.opers_to_handle_before_rest <= 0:
                    time.sleep(1)

            except Queue.Empty:
                continue
            except Exception:
                LOG.error(_LE(
                    "Exception was thrown inside OperationCompletionHandler"))

    @staticmethod
    def _run_post_success_function(oper):
        """Report successful completion through the operation's manager."""
        try:
            ctx = context.get_admin_context(load_admin_roles=False)
            oper.manager.successful_completion(ctx, oper.data_model,
                                               delete=oper.delete)
            LOG.debug('Post-operation success function completed '
                      'for operation %s',
                      repr(oper))
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Post-operation success function failed '
                              'for operation %s'),
                          repr(oper))

    @staticmethod
    def _run_post_failure_function(oper):
        """Report failed completion through the operation's manager."""
        try:
            ctx = context.get_admin_context(load_admin_roles=False)
            oper.manager.failed_completion(ctx, oper.data_model)
            LOG.debug('Post-operation failure function completed '
                      'for operation %s',
                      repr(oper))
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Post-operation failure function failed '
                              'for operation %s'),
                          repr(oper))
|
||||
|
||||
|
||||
class OperationAttributes(object):

    """Holds the attributes of a single in-flight vDirect operation.

    Records which manager to notify, the vDirect URL to poll, the
    affected data models, and when the operation was created.
    """

    def __init__(self,
                 manager,
                 operation_url,
                 lb,
                 data_model=None,
                 old_data_model=None,
                 delete=False):
        self.manager = manager
        self.operation_url = operation_url
        self.lb = lb
        self.data_model = data_model
        self.old_data_model = old_data_model
        self.delete = delete
        # Timestamp used later to report time-to-completion.
        self.creation_time = time.time()

    def __repr__(self):
        body = ', '.join(
            '%s = %r' % pair for pair in self.__dict__.items())
        return '<%s: {%s}>' % (type(self).__name__, body)
|
||||
|
||||
|
||||
def _rest_wrapper(response, success_codes=None):
    """Wrap a REST call and make sure a valid status is returned.

    :param response: tuple as returned by the rest client
        (status, reason, description, data), or a falsy value when the
        call produced nothing at all
    :param success_codes: HTTP statuses accepted as success
        (defaults to [202])
    :returns: the response data on success
    :raises r_exc.RESTRequestFailure: on missing response or a status
        outside success_codes
    """
    success_codes = success_codes or [202]
    if not response:
        raise r_exc.RESTRequestFailure(
            status=-1,
            reason="Unknown",
            description="Unknown",
            success_codes=success_codes
        )
    elif response[rest.RESP_STATUS] not in success_codes:
        raise r_exc.RESTRequestFailure(
            status=response[rest.RESP_STATUS],
            reason=response[rest.RESP_REASON],
            description=response[rest.RESP_STR],
            success_codes=success_codes
        )
    else:
        # Lazy log args and message typo ("respone") fixed.
        LOG.debug("Response: %s", response)
        return response[rest.RESP_DATA]
|
|
@ -0,0 +1,864 @@
|
|||
# Copyright 2015 Radware LTD. All rights reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import copy
|
||||
import mock
|
||||
import re
|
||||
|
||||
from neutron import context
|
||||
from neutron import manager
|
||||
from neutron.plugins.common import constants
|
||||
from oslo_config import cfg
|
||||
from oslo_serialization import jsonutils
|
||||
from six.moves import queue as Queue
|
||||
|
||||
from neutron_lbaas.common.cert_manager import cert_manager
|
||||
from neutron_lbaas.drivers.radware import exceptions as r_exc
|
||||
from neutron_lbaas.drivers.radware import v2_driver
|
||||
from neutron_lbaas.services.loadbalancer import constants as lb_const
|
||||
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2
|
||||
|
||||
GET_200 = ('/api/workflow/', '/api/workflowTemplate')
|
||||
SERVER_DOWN_CODES = (-1, 301, 307)
|
||||
|
||||
|
||||
class QueueMock(Queue.Queue):
    """Queue stand-in that runs the completion handler synchronously.

    Instead of enqueueing, put_nowait() invokes the supplied handler
    immediately, so tests observe completion side effects inline.
    """

    def __init__(self, completion_handler):
        super(QueueMock, self).__init__()
        self.completion_handler = completion_handler

    def put_nowait(self, oper):
        """Run the handler on *oper* instead of queueing it."""
        self.completion_handler(oper)
|
||||
|
||||
|
||||
def _recover_function_mock(action, resource, data, headers, binary=False):
|
||||
pass
|
||||
|
||||
|
||||
def rest_call_function_mock(action, resource, data, headers, binary=False):
    """Mock REST entry point driven by attributes set on this function.

    Tests toggle RESPOND_WITH_ERROR / RESPOND_WITH_SERVER_DOWN /
    WORKFLOW_MISSING / WF_TEMPLATES_TO_RETURN on the function object to
    steer the canned responses.
    """
    if rest_call_function_mock.RESPOND_WITH_ERROR:
        return 400, 'error_status', 'error_description', None
    if rest_call_function_mock.RESPOND_WITH_SERVER_DOWN in SERVER_DOWN_CODES:
        down_code = rest_call_function_mock.RESPOND_WITH_SERVER_DOWN
        return down_code, 'error_status', 'error_description', None

    dispatch = {
        'GET': lambda: _get_handler(resource),
        'DELETE': lambda: _delete_handler(resource),
        'POST': lambda: _post_handler(resource, binary),
    }
    handler = dispatch.get(action)
    if handler is None:
        return 0, None, None, None
    return handler()
|
||||
|
||||
|
||||
def _get_handler(resource):
    """Mock GET dispatch keyed on the requested resource path."""
    # Fresh dict per call, matching the previous per-call JSON parse.
    workflow_state = {'complete': 'True', 'success': 'True'}

    # Workflow-template listing.
    if resource.startswith(GET_200[1]):
        return 200, '', '', rest_call_function_mock.WF_TEMPLATES_TO_RETURN

    # Workflow queries.
    if resource.startswith(GET_200[0]):
        if rest_call_function_mock.WORKFLOW_MISSING:
            return 404, '', '', dict(workflow_state)
        if resource.endswith('parameters'):
            stats = {'bytes_in': 100,
                     'total_connections': 2, 'active_connections': 1,
                     'bytes_out': 200}
            return 200, '', '', {'stats': stats}
        return 200, '', '', ''

    if resource.startswith(GET_200):
        return 200, '', '', ''
    return 202, '', '', dict(workflow_state)
|
||||
|
||||
|
||||
def _delete_handler(resource):
|
||||
return 404, '', '', {'message': 'Not Found'}
|
||||
|
||||
|
||||
def _post_handler(resource, binary):
|
||||
if re.search(r'/api/workflow/.+/action/.+', resource):
|
||||
data = jsonutils.loads('{"uri":"some_uri"}')
|
||||
return 202, '', '', data
|
||||
elif re.search(r'/api/service\?name=.+', resource):
|
||||
data = jsonutils.loads('{"links":{"actions":{"provision":"someuri"}}}')
|
||||
return 201, '', '', data
|
||||
elif binary:
|
||||
return 201, '', '', ''
|
||||
else:
|
||||
return 202, '', '', ''
|
||||
|
||||
# service_provider entry that wires the Radware v2 driver into LBaaS v2.
RADWARE_PROVIDER = ('LOADBALANCERV2:radwarev2:neutron_lbaas.'
                    'drivers.radware.v2_driver.'
                    'RadwareLBaaSV2Driver:default')

# Expected vDirect service (ADC instance) creation parameters;
# "_REPLACE_" placeholders are substituted per test.
WF_SRV_PARAMS = {
    "name": "_REPLACE_", "tenantId": "_REPLACE_", "haPair": False,
    "sessionMirroringEnabled": False, "islVlan": -1,
    "primary": {
        "capacity": {
            "throughput": 1000, "sslThroughput": 100,
            "compressionThroughput": 100, "cache": 20},
        "network": {
            "type": "portgroup", "portgroups": "_REPLACE_"},
        "adcType": "VA", "acceptableAdc": "Exact"},
    "resourcePoolIds": []}

# Expected parameters for the workflow-creation POST.
WF_CREATE_PARAMS = {'parameters':
    {"provision_service": True, "configure_l3": True, "configure_l4": True,
    "twoleg_enabled": False, "ha_network_name": "HA-Network",
    "ha_ip_pool_name": "default", "allocate_ha_vrrp": True,
    "allocate_ha_ips": True, "data_port": 1,
    "data_ip_address": "192.168.200.99", "data_ip_mask": "255.255.255.0",
    "gateway": "192.168.200.1", "ha_port": 2}}
# Expected apply-action payload for a loadbalancer with no listeners.
WF_APPLY_EMPTY_LB_PARAMS = {'parameters': {
    'loadbalancer': {'listeners': [], 'admin_state_up': True,
    'pip_address': u'10.0.0.2', 'vip_address': u'10.0.0.2'}}}
|
||||
|
||||
|
||||
class TestLBaaSDriverBase(
    test_db_loadbalancerv2.LbaasPluginDbTestCase):
    """Base test case: load the LBaaS v2 plugin with the Radware provider
    and expose the plugin instance and driver to subclasses.
    """

    def setUp(self):
        super(TestLBaaSDriverBase, self).setUp(
            lbaas_provider=RADWARE_PROVIDER)

        # Grab the loaded LBaaS v2 service plugin and the Radware driver
        # registered under the 'radwarev2' provider name.
        loaded_plugins = manager.NeutronManager().get_service_plugins()
        self.plugin_instance = loaded_plugins[constants.LOADBALANCERV2]
        self.driver = self.plugin_instance.drivers['radwarev2']
|
||||
|
||||
|
||||
class TestLBaaSDriverRestClient(TestLBaaSDriverBase):
    """Tests for the driver's REST client failover/recovery behavior."""

    def setUp(self):
        super(TestLBaaSDriverRestClient, self).setUp()

        self.flip_servers_mock = mock.Mock(
            return_value=None)
        self.recover_mock = mock.Mock(
            side_effect=_recover_function_mock)

        # Keep the originals so individual tests can restore them.
        self.orig_recover = self.driver.rest_client._recover
        self.orig_flip_servers = self.driver.rest_client._flip_servers
        self.driver.rest_client._flip_servers = self.flip_servers_mock
        self.driver.rest_client._recover = self.recover_mock

    def test_recover_was_called(self):
        """Call REST client which fails and verify _recover is called."""
        self.driver.rest_client.call('GET', '/api/workflowTemplate',
                                     None, None)
        self.recover_mock.assert_called_once_with('GET',
                                                  '/api/workflowTemplate',
                                                  None, None, False)

    def test_flip_servers(self):
        """A failing call with real recovery swaps primary/secondary."""
        server = self.driver.rest_client.server
        sec_server = self.driver.rest_client.secondary_server
        # Restore the real recovery path so the flip actually happens.
        self.driver.rest_client._recover = self.orig_recover
        self.driver.rest_client.call('GET', '/api/workflowTemplate',
                                     None, None)
        self.flip_servers_mock.assert_called_once()
        self.assertEqual(server, self.driver.rest_client.secondary_server)
        self.assertEqual(sec_server, self.driver.rest_client.server)
|
||||
|
||||
|
||||
class CertMock(cert_manager.Cert):
    """Minimal Cert implementation returning fixed strings for tests."""

    def __init__(self, cert_container):
        # The container argument is ignored; all values are canned.
        pass

    def get_certificate(self):
        return "certificate"

    def get_intermediates(self):
        return "intermediates"

    def get_private_key(self):
        return "private_key"

    def get_private_key_passphrase(self):
        return "private_key_passphrase"
|
||||
|
||||
|
||||
class TestLBaaSDriver(TestLBaaSDriverBase):
|
||||
def setUp(self):
    """Wire the driver to the mocked REST layer and a synchronous queue."""
    super(TestLBaaSDriver, self).setUp()

    # By default every template the driver needs is reported present.
    templates_to_return = [{'name': self.driver.workflow_template_name}]
    for t in self.driver.child_workflow_template_names:
        templates_to_return.append({'name': t})
    # Steer rest_call_function_mock via attributes on the function.
    rest_call_function_mock.__dict__.update(
        {'RESPOND_WITH_ERROR': False, 'WORKFLOW_MISSING': True,
         'WORKFLOW_TEMPLATE_MISSING': True,
         'RESPOND_WITH_SERVER_DOWN': 200,
         'WF_TEMPLATES_TO_RETURN': templates_to_return})

    self.operation_completer_start_mock = mock.Mock(
        return_value=None)
    self.operation_completer_join_mock = mock.Mock(
        return_value=None)
    self.driver_rest_call_mock = mock.Mock(
        side_effect=rest_call_function_mock)
    self.flip_servers_mock = mock.Mock(
        return_value=None)
    self.recover_mock = mock.Mock(
        side_effect=_recover_function_mock)

    # Prevent the real completion thread from starting and route every
    # REST call (driver and completion handler) through the mock.
    self.driver.completion_handler.start = (
        self.operation_completer_start_mock)
    self.driver.completion_handler.join = (
        self.operation_completer_join_mock)
    self.driver.rest_client.call = self.driver_rest_call_mock
    self.driver.rest_client._call = self.driver_rest_call_mock
    self.driver.completion_handler.rest_client.call = (
        self.driver_rest_call_mock)

    # Synchronous queue: completions are handled inline on put_nowait.
    self.driver.queue = QueueMock(
        self.driver.completion_handler.handle_operation_completion)

    self.addCleanup(self.driver.completion_handler.join)
|
||||
|
||||
def test_verify_workflow_templates(self):
    """_verify_workflow_templates fails iff the main template is absent."""
    # Only the child templates are present - verification must raise.
    templates_to_return = []
    for t in self.driver.child_workflow_template_names:
        templates_to_return.append({'name': t})
    rest_call_function_mock.__dict__.update(
        {'WF_TEMPLATES_TO_RETURN': templates_to_return})
    message = r_exc.WorkflowTemplateMissing.message % \
        {'workflow_template': self.driver.workflow_template_name}
    # Bug fix: the original try/except silently PASSED when no
    # exception was raised; assertRaises enforces that it is raised.
    with self.assertRaises(r_exc.WorkflowTemplateMissing) as caught:
        self.driver._verify_workflow_templates()
    self.assertEqual(caught.exception.msg, message)

    # With the main template added, verification must succeed.
    templates_to_return.append(
        {'name': self.driver.workflow_template_name})
    rest_call_function_mock.__dict__.update(
        {'WF_TEMPLATES_TO_RETURN': templates_to_return})
    try:
        self.driver._verify_workflow_templates()
    except r_exc.WorkflowTemplateMissing:
        self.fail('_verify_workflow_templates raised unexpectedly')
|
||||
|
||||
def test_wf_created_on_first_member_creation(self):
    """The vDirect workflow is only created once a first member exists."""
    with self.subnet(cidr='10.0.0.0/24') as vip_sub:
        with self.loadbalancer(subnet=vip_sub) as lb:
            with self.listener(
                loadbalancer_id=lb['loadbalancer']['id']) as listener:
                with self.pool(
                    protocol=lb_const.PROTOCOL_HTTP,
                    listener_id=listener['listener']['id']) as pool:
                    # No member yet: no REST traffic expected.
                    self.driver_rest_call_mock.assert_has_calls([])
                    with self.member(pool_id=pool['pool']['id'],
                                     subnet=vip_sub, address='10.0.1.10'):
                        # First member triggers workflow provisioning.
                        self.driver_rest_call_mock.assert_called_once()
|
||||
|
||||
def test_wf_deleted_on_lb_deletion(self):
    """Deleting the LB tree ends with a DELETE of its workflow."""
    with self.subnet(cidr='10.0.0.0/24') as vip_sub:
        with self.loadbalancer(subnet=vip_sub) as lb:
            get_calls = [
                mock.call('GET', u'/api/workflow/LB_' +
                          lb['loadbalancer']['id'], None, None)]
            with self.listener(
                loadbalancer_id=lb['loadbalancer']['id']) as listener:
                with self.pool(
                    protocol=lb_const.PROTOCOL_HTTP,
                    listener_id=listener['listener']['id']) as pool:
                    with self.member(pool_id=pool['pool']['id'],
                                     subnet=vip_sub, address='10.0.1.10'):
                        self.driver_rest_call_mock.reset_mock()
                        # Workflow now "exists" for subsequent lookups.
                        rest_call_function_mock.__dict__.update(
                            {'WORKFLOW_MISSING': False})

                    # Each teardown step (member, pool, listener) checks
                    # the workflow before re-applying the graph.
                    self.driver_rest_call_mock.assert_has_calls(get_calls)
                    self.driver_rest_call_mock.reset_mock()
                self.driver_rest_call_mock.assert_has_calls(get_calls)
                self.driver_rest_call_mock.reset_mock()
            self.driver_rest_call_mock.assert_has_calls(get_calls)
            self.driver_rest_call_mock.reset_mock()
            # Finally the LB deletion removes the workflow itself.
            self.driver_rest_call_mock.assert_any_call(
                'DELETE', u'/api/workflow/LB_' + lb['loadbalancer']['id'],
                None, None)
|
||||
|
||||
def test_lb_crud(self):
    """End-to-end create/update/refresh/delete of a loadbalancer tree."""
    with self.subnet(cidr='10.0.0.0/24') as s:
        with self.loadbalancer(subnet=s, no_delete=True) as lb:
            lb_id = lb['loadbalancer']['id']
            with self.listener(loadbalancer_id=lb_id) as l:
                with self.pool(
                    protocol=lb_const.PROTOCOL_HTTP,
                    listener_id=l['listener']['id']) as p:
                    # Before any member exists nothing is provisioned.
                    self.driver_rest_call_mock.assert_has_calls([])

                    self.plugin_instance.update_loadbalancer(
                        context.get_admin_context(),
                        lb_id, {'loadbalancer': lb})
                    self.driver_rest_call_mock.assert_has_calls([])

                    lb_db = self.plugin_instance.db.get_loadbalancer(
                        context.get_admin_context(),
                        lb_id)
                    self.driver.load_balancer.refresh(
                        context.get_admin_context(), lb_db)
                    self.driver_rest_call_mock.assert_has_calls([])

                    with self.member(
                        no_delete=True, pool_id=p['pool']['id'],
                        subnet=s, address='10.0.1.10') as m:

                        # Expected member entry in the vDirect graph.
                        m_data = {
                            "id": m['member']['id'],
                            "address": "10.0.1.10",
                            "protocol_port": 80,
                            "weight": 1, "admin_state_up": True,
                            "subnet": "255.255.255.255",
                            "mask": "255.255.255.255",
                            "gw": "255.255.255.255",
                            "admin_state_up": True}
                        wf_apply_params = {'parameters': {
                            'listeners': [{
                                "admin_state_up": True,
                                "protocol_port": 80,
                                "protocol": lb_const.PROTOCOL_HTTP,
                                "connection_limit": -1,
                                "admin_state_up": True,
                                "default_pool": {
                                    "protocol": lb_const.PROTOCOL_HTTP,
                                    "lb_algorithm":
                                        "ROUND_ROBIN",
                                    "admin_state_up": True,
                                    "members": [m_data]}}],
                            "admin_state_up": True,
                            "pip_address": "10.0.0.2",
                            "vip_address": "10.0.0.2"}}
                        # First member: workflow creation + graph apply.
                        calls = [
                            mock.call(
                                'POST', '/api/workflowTemplate/' +
                                'os_lb_v2?name=LB_' + lb_id, mock.ANY,
                                v2_driver.TEMPLATE_HEADER),
                            mock.call(
                                'POST',
                                '/api/workflow/LB_' + lb_id +
                                '/action/apply',
                                wf_apply_params,
                                v2_driver.TEMPLATE_HEADER)
                        ]

                        self.driver_rest_call_mock.assert_has_calls(calls)
                        self.driver_rest_call_mock.reset_mock()
                        rest_call_function_mock.__dict__.update(
                            {'WORKFLOW_MISSING': False})

                        # Workflow exists now: updates re-apply only.
                        calls = [
                            mock.call(
                                'POST',
                                '/api/workflow/LB_' + lb_id +
                                '/action/apply',
                                wf_apply_params,
                                v2_driver.TEMPLATE_HEADER)
                        ]
                        self.plugin_instance.update_loadbalancer(
                            context.get_admin_context(),
                            lb_id, {'loadbalancer': lb})
                        self.driver_rest_call_mock.assert_has_calls(calls)
                        self.driver_rest_call_mock.reset_mock()

                        lb_db = self.plugin_instance.db.get_loadbalancer(
                            context.get_admin_context(), lb_id)
                        self.driver.load_balancer.refresh(
                            context.get_admin_context(), lb_db)
                        self.driver_rest_call_mock.assert_has_calls(calls)
                        self.driver_rest_call_mock.reset_mock()

                        # LB deletion removes the workflow.
                        self.plugin_instance.delete_loadbalancer(
                            context.get_admin_context(), lb_id)
                        self.driver_rest_call_mock.assert_any_call(
                            'DELETE', '/api/workflow/LB_' + lb_id,
                            None, None)
|
||||
|
||||
def test_lb_stats(self):
    """stats() returns the statistics reported by the workflow mock."""
    with self.subnet(cidr='10.0.0.0/24') as s:
        with self.loadbalancer(subnet=s) as lb:
            lb_id = lb['loadbalancer']['id']
            with self.listener(loadbalancer_id=lb_id) as l:
                with self.pool(
                    protocol=lb_const.PROTOCOL_HTTP,
                    listener_id=l['listener']['id']) as p:
                    with self.member(
                        no_delete=True, pool_id=p['pool']['id'],
                        subnet=s, address='10.0.1.10'):

                        # Stats are served from the workflow parameters
                        # endpoint, so the workflow must "exist".
                        rest_call_function_mock.__dict__.update(
                            {'WORKFLOW_MISSING': False})

                        stats = self.plugin_instance.stats(
                            context.get_admin_context(), lb_id,)
                        self.assertEqual(stats, {'stats': {'bytes_in': 100,
                            'total_connections': 2,
                            'active_connections': 1, 'bytes_out': 200}})
|
||||
|
||||
def test_member_crud(self):
    """Create, update and delete a member; verify DB state and REST calls."""
    # NOTE(review): "admin_state_up" appears twice in this literal (and
    # in m_data below); the duplicate is harmless but dead - candidate
    # for cleanup.
    expected = {
        "address": "10.0.1.10",
        "protocol_port": 80,
        "weight": 1,
        "admin_state_up": True,
        "tenant_id": "test-tenant",
        "admin_state_up": True,
        "provisioning_status": "ACTIVE",
        "operating_status": "ONLINE"}
    with self.subnet(cidr='10.0.0.0/24') as s:
        with self.loadbalancer(subnet=s) as lb:
            lb_id = lb['loadbalancer']['id']
            with self.listener(loadbalancer_id=lb_id) as l:
                with self.pool(
                    protocol=lb_const.PROTOCOL_HTTP,
                    listener_id=l['listener']['id']) as p:
                    with self.member(
                        no_delete=True, pool_id=p['pool']['id'],
                        subnet=s, address='10.0.1.10') as m:

                        expected["id"] = m['member']['id']
                        expected["pool_id"] = p['pool']['id']
                        expected["subnet_id"] = s['subnet']['id']
                        # Member entry expected in the applied graph.
                        m_data = {
                            "id": m['member']['id'],
                            "address": "10.0.1.10",
                            "protocol_port": 80,
                            "weight": 1, "admin_state_up": True,
                            "subnet": "255.255.255.255",
                            "mask": "255.255.255.255",
                            "gw": "255.255.255.255",
                            "admin_state_up": True}
                        wf_apply_params = {'parameters': {
                            'listeners': [{
                                "admin_state_up": True,
                                "protocol_port": 80,
                                "protocol": lb_const.PROTOCOL_HTTP,
                                "connection_limit": -1,
                                "admin_state_up": True,
                                "default_pool": {
                                    "protocol": lb_const.PROTOCOL_HTTP,
                                    "lb_algorithm":
                                        "ROUND_ROBIN",
                                    "admin_state_up": True,
                                    "members": [m_data]}}],
                            "admin_state_up": True,
                            "pip_address": "10.0.0.2",
                            "vip_address": "10.0.0.2"}}
                        # Member creation provisions workflow + graph.
                        calls = [
                            mock.call(
                                'POST', '/api/workflowTemplate/' +
                                'os_lb_v2?name=LB_' + lb_id, mock.ANY,
                                v2_driver.TEMPLATE_HEADER),
                            mock.call(
                                'POST',
                                '/api/workflow/LB_' + lb_id +
                                '/action/apply',
                                wf_apply_params,
                                v2_driver.TEMPLATE_HEADER)
                        ]

                        self.driver_rest_call_mock.assert_has_calls(calls)
                        member = self.plugin_instance.db.get_pool_member(
                            context.get_admin_context(),
                            m['member']['id']).to_dict(pool=False)
                        self.assertEqual(member, expected)

                        # Update: weight change must round-trip via DB.
                        member['weight'] = 2
                        expected['weight'] = 2
                        self.plugin_instance.update_pool_member(
                            context.get_admin_context(),
                            m['member']['id'], p['pool']['id'],
                            {'member': member})
                        member_u = self.plugin_instance.db.get_pool_member(
                            context.get_admin_context(),
                            m['member']['id']).to_dict(pool=False)
                        self.assertEqual(member_u, expected)

                        # Delete: LB must end up ACTIVE again.
                        self.plugin_instance.delete_pool_member(
                            context.get_admin_context(),
                            m['member']['id'], p['pool']['id'])
                        lb = self.plugin_instance.db.get_loadbalancer(
                            context.get_admin_context(),
                            lb_id).to_dict(listener=False)
                        self.assertEqual(lb['provisioning_status'],
                                         'ACTIVE')
|
||||
|
||||
def test_build_objects_with_tls(self):
    """Verify the vDirect workflow graph built for a TLS listener.

    Creates a TERMINATED_HTTPS listener with a default TLS container
    and two SNI containers (certificate retrieval is mocked), then
    checks that the driver POSTs the workflow-template creation and
    the apply action carrying the expected certificate data.
    """
    with self.subnet(cidr='10.0.0.0/24') as vip_sub:
        with self.loadbalancer(subnet=vip_sub) as lb:
            lb_id = lb['loadbalancer']['id']
            with contextlib.nested(
                mock.patch('neutron_lbaas.services.loadbalancer.plugin.'
                           'cert_parser', autospec=True),
                mock.patch('neutron_lbaas.services.loadbalancer.plugin.'
                           'CERT_MANAGER_PLUGIN.CertManager',
                           autospec=True)
            ) as (cert_parser_mock, cert_manager_mock):
                # Stub certificate access so no real certificate
                # manager back-end is needed.
                cert_mock = mock.Mock(spec=cert_manager.Cert)
                cert_mock.get_certificate.return_value = 'certificate'
                cert_mock.get_intermediates.return_value = 'intermediates'
                cert_mock.get_private_key.return_value = 'private_key'
                cert_mock.get_private_key_passphrase.return_value = \
                    'private_key_passphrase'
                cert_manager_mock.get_cert.return_value = cert_mock
                cert_parser_mock.validate_cert.return_value = True

                with self.listener(
                    protocol=lb_const.PROTOCOL_TERMINATED_HTTPS,
                    loadbalancer_id=lb_id,
                    default_tls_container_id='def1',
                    sni_container_ids=['sni1', 'sni2']) as listener:
                    with self.pool(
                        protocol=lb_const.PROTOCOL_HTTP,
                        listener_id=listener['listener']['id']) as pool:
                        with self.member(pool_id=pool['pool']['id'],
                                         subnet=vip_sub,
                                         address='10.0.1.10') as m:

                            # Expected service-creation parameters:
                            # one port group, the VIP network.
                            wf_srv_params = copy.deepcopy(WF_SRV_PARAMS)
                            wf_params = copy.deepcopy(WF_CREATE_PARAMS)

                            wf_srv_params['name'] = 'srv_' + (
                                vip_sub['subnet']['network_id'])
                            wf_srv_params['tenantId'] = self._tenant_id
                            wf_srv_params['primary']['network'][
                                'portgroups'] = [vip_sub['subnet'][
                                    'network_id']]
                            wf_params['parameters']['service_params'] = (
                                wf_srv_params)

                            # NOTE: the original literal repeated the
                            # 'admin_state_up' key with the same value;
                            # the duplicate was dropped (runtime dict
                            # is unchanged).
                            m_data = {
                                "id": m['member']['id'],
                                "address": "10.0.1.10",
                                "protocol_port": 80,
                                "weight": 1,
                                "admin_state_up": True,
                                "subnet": "255.255.255.255",
                                "mask": "255.255.255.255",
                                "gw": "255.255.255.255"}
                            default_tls_cert_data = {
                                'certificate': 'certificate',
                                'intermediates': 'intermediates',
                                'private_key': 'private_key',
                                'passphrase': 'private_key_passphrase'}
                            sni1_tls_cert_data = {
                                'position': 0,
                                'certificate': 'certificate',
                                'intermediates': 'intermediates',
                                'private_key': 'private_key',
                                'passphrase': 'private_key_passphrase'}
                            sni2_tls_cert_data = {
                                'position': 1,
                                'certificate': 'certificate',
                                'intermediates': 'intermediates',
                                'private_key': 'private_key',
                                'passphrase': 'private_key_passphrase'}
                            wf_apply_one_leg_params = {'parameters': {
                                'listeners': [{
                                    "admin_state_up": True,
                                    "protocol_port": 80,
                                    "protocol":
                                        lb_const.PROTOCOL_TERMINATED_HTTPS,
                                    "connection_limit": -1,
                                    "default_pool": {
                                        "protocol": lb_const.PROTOCOL_HTTP,
                                        "lb_algorithm": "ROUND_ROBIN",
                                        "admin_state_up": True,
                                        "members": [m_data]},
                                    "default_tls_certificate":
                                        default_tls_cert_data,
                                    "sni_tls_certificates": [
                                        sni1_tls_cert_data,
                                        sni2_tls_cert_data]}],
                                "admin_state_up": True,
                                "pip_address": "10.0.0.2",
                                "vip_address": "10.0.0.2"}}

                            calls = [
                                mock.call('GET',
                                          '/api/workflow/LB_' + lb_id,
                                          None, None),
                                mock.call(
                                    'POST',
                                    '/api/workflowTemplate/' +
                                    'os_lb_v2?name=LB_' + lb_id,
                                    wf_params,
                                    v2_driver.TEMPLATE_HEADER),
                                mock.call(
                                    'POST',
                                    '/api/workflow/LB_' + lb_id +
                                    '/action/apply',
                                    wf_apply_one_leg_params,
                                    v2_driver.TEMPLATE_HEADER)
                            ]
                            self.driver_rest_call_mock.assert_has_calls(
                                calls, any_order=True)
def test_build_objects_graph_one_leg(self):
    """Verify the workflow graph built for a one-leg load balancer.

    The VIP and both members share a single subnet, so the expected
    graph carries degenerate subnet/mask/gateway values for the
    members and the service uses one port group.
    """
    with self.subnet(cidr='10.0.0.0/24') as vip_sub:
        with self.loadbalancer(subnet=vip_sub) as lb:
            lb_id = lb['loadbalancer']['id']
            with self.listener(loadbalancer_id=lb_id) as listener:
                with self.pool(
                    protocol='HTTP',
                    listener_id=listener['listener']['id']) as pool:
                    with contextlib.nested(
                        self.member(pool_id=pool['pool']['id'],
                                    subnet=vip_sub, address='10.0.1.10'),
                        self.member(pool_id=pool['pool']['id'],
                                    subnet=vip_sub, address='10.0.1.20')
                    ) as (member1, member2):

                        # Expected service-creation parameters.
                        wf_srv_params = copy.deepcopy(WF_SRV_PARAMS)
                        wf_params = copy.deepcopy(WF_CREATE_PARAMS)

                        wf_srv_params['name'] = 'srv_' + (
                            vip_sub['subnet']['network_id'])
                        wf_srv_params['tenantId'] = self._tenant_id
                        wf_srv_params['primary']['network'][
                            'portgroups'] = [vip_sub['subnet'][
                                'network_id']]
                        wf_params['parameters']['service_params'] = (
                            wf_srv_params)

                        def expected_member(member, address):
                            # One-leg members get placeholder
                            # subnet/mask/gw values.
                            # NOTE: the original literals repeated the
                            # 'admin_state_up' key with the same value;
                            # the duplicate was dropped (runtime dict
                            # is unchanged).
                            return {
                                "id": member['member']['id'],
                                "address": address,
                                "protocol_port": 80,
                                "weight": 1,
                                "admin_state_up": True,
                                "subnet": "255.255.255.255",
                                "mask": "255.255.255.255",
                                "gw": "255.255.255.255"}

                        member1_data = expected_member(
                            member1, "10.0.1.10")
                        member2_data = expected_member(
                            member2, "10.0.1.20")
                        wf_apply_one_leg_params = {'parameters': {
                            'listeners': [{
                                "admin_state_up": True,
                                "protocol_port": 80,
                                "protocol": "HTTP",
                                "connection_limit": -1,
                                "default_pool": {
                                    "protocol": "HTTP",
                                    "lb_algorithm": "ROUND_ROBIN",
                                    "admin_state_up": True,
                                    "members": [
                                        member1_data, member2_data]}}],
                            "admin_state_up": True,
                            "pip_address": "10.0.0.2",
                            "vip_address": "10.0.0.2"}}

                        calls = [
                            mock.call('GET', '/api/workflow/LB_' + lb_id,
                                      None, None),
                            mock.call(
                                'POST',
                                '/api/workflowTemplate/' +
                                'os_lb_v2?name=LB_' + lb_id,
                                wf_params,
                                v2_driver.TEMPLATE_HEADER),
                            mock.call(
                                'POST',
                                '/api/workflow/LB_' + lb_id +
                                '/action/apply',
                                wf_apply_one_leg_params,
                                v2_driver.TEMPLATE_HEADER)
                        ]
                        self.driver_rest_call_mock.assert_has_calls(
                            calls, any_order=True)
def test_build_objects_graph_two_legs_full(self):
    """Verify the full workflow graph for a two-leg configuration.

    The member lives on a different subnet than the VIP, so the
    driver must enable two-leg mode, add the member network to the
    service port groups, and allocate a proxy IP (pip) on the member
    network. A health monitor and APP_COOKIE session persistence are
    attached to exercise the complete graph.
    """
    with contextlib.nested(
        self.subnet(cidr='10.0.0.0/24'),
        self.subnet(cidr='20.0.0.0/24'),
        self.subnet(cidr='30.0.0.0/24')
    ) as (vip_sub, member_sub1, member_sub2):
        with self.loadbalancer(subnet=vip_sub) as lb:
            lb_id = lb['loadbalancer']['id']
            with self.listener(loadbalancer_id=lb_id) as listener:
                with self.pool(
                    protocol='HTTP',
                    listener_id=listener['listener']['id'],
                    session_persistence={
                        'type': "APP_COOKIE",
                        'cookie_name': 'sessionId'}) as pool:
                    with self.healthmonitor(
                        type='HTTP', pool_id=pool['pool']['id']) as hm:
                        with self.member(
                            pool_id=pool['pool']['id'],
                            subnet=member_sub1,
                            address='20.0.1.10') as member:

                            wf_params = copy.deepcopy(WF_CREATE_PARAMS)
                            wf_srv_params = copy.deepcopy(
                                WF_SRV_PARAMS)
                            wf_srv_params['name'] = (
                                'srv_' + vip_sub['subnet'][
                                    'network_id'])
                            wf_srv_params['tenantId'] = self._tenant_id
                            # Two port groups: the VIP network plus
                            # the member network.
                            wf_srv_params['primary']['network'][
                                'portgroups'] = [
                                    vip_sub['subnet']['network_id'],
                                    member_sub1['subnet']['network_id']]
                            wf_params['parameters'][
                                'twoleg_enabled'] = True
                            wf_params['parameters'][
                                'service_params'] = (wf_srv_params)
                            # NOTE: the original literals below each
                            # repeated the 'admin_state_up' key with
                            # the same value; the duplicates were
                            # dropped (runtime dicts are unchanged).
                            hm_data = {
                                "admin_state_up": True,
                                "id": hm['healthmonitor']['id'],
                                "type": "HTTP", "delay": 1,
                                "timeout": 1,
                                "max_retries": 1,
                                "url_path": "/", "http_method": "GET",
                                "expected_codes": '200'}
                            sp_data = {
                                "type": "APP_COOKIE",
                                "cookie_name": "sessionId"}
                            m_data = {
                                "id": member['member']['id'],
                                "address": "20.0.1.10",
                                "protocol_port": 80,
                                "weight": 1,
                                "admin_state_up": True,
                                "subnet": "20.0.1.10",
                                "mask": "255.255.255.255",
                                "gw": "20.0.0.1"}
                            wf_apply_full_params = {'parameters': {
                                'listeners': [{
                                    "admin_state_up": True,
                                    "protocol_port": 80,
                                    "protocol": "HTTP",
                                    "connection_limit": -1,
                                    "default_pool": {
                                        "protocol": "HTTP",
                                        "lb_algorithm":
                                            "ROUND_ROBIN",
                                        "admin_state_up": True,
                                        "healthmonitor": hm_data,
                                        "sessionpersistence":
                                            sp_data,
                                        "members": [m_data]}}],
                                "admin_state_up": True,
                                "pip_address": "20.0.0.2",
                                "vip_address": "10.0.0.2"}}
                            calls = [
                                mock.call(
                                    'GET',
                                    '/api/workflow/LB_' + lb_id,
                                    None, None),
                                mock.call(
                                    'POST', '/api/workflowTemplate/' +
                                    'os_lb_v2?name=LB_' + lb_id,
                                    wf_params,
                                    v2_driver.TEMPLATE_HEADER),
                                mock.call(
                                    'POST', '/api/workflow/LB_' +
                                    lb_id + '/action/apply',
                                    wf_apply_full_params,
                                    v2_driver.TEMPLATE_HEADER),
                                mock.call('GET', 'some_uri',
                                          None, None)]
                            self.driver_rest_call_mock.\
                                assert_has_calls(
                                    calls, any_order=True)
class TestLBaaSDriverDebugOptions(TestLBaaSDriverBase):
    """Exercise the driver with L3/L4 configuration disabled.

    Overrides the ``radwarev2_debug`` options so the driver builds the
    workflow graph with ``configure_l3``/``configure_l4`` set to False.
    """

    def setUp(self):
        # The overrides must be in place BEFORE the base setUp
        # instantiates the driver, so it reads the debug options.
        cfg.CONF.set_override('configure_l3', False,
                              group='radwarev2_debug')
        cfg.CONF.set_override('configure_l4', False,
                              group='radwarev2_debug')
        super(TestLBaaSDriverDebugOptions, self).setUp()

        # Advertise the driver's own workflow templates as present on
        # the (mocked) vDirect server.
        templates_to_return = [{'name': self.driver.workflow_template_name}]
        for t in self.driver.child_workflow_template_names:
            templates_to_return.append({'name': t})
        # rest_call_function_mock is a module-level callable; its
        # behavior is steered through attributes set on its __dict__.
        rest_call_function_mock.__dict__.update(
            {'RESPOND_WITH_ERROR': False, 'WORKFLOW_MISSING': True,
             'WORKFLOW_TEMPLATE_MISSING': True,
             'RESPOND_WITH_SERVER_DOWN': 200,
             'WF_TEMPLATES_TO_RETURN': templates_to_return})

        # Mocks for the completion handler and REST transport.
        self.operation_completer_start_mock = mock.Mock(
            return_value=None)
        self.operation_completer_join_mock = mock.Mock(
            return_value=None)
        self.driver_rest_call_mock = mock.Mock(
            side_effect=rest_call_function_mock)
        self.flip_servers_mock = mock.Mock(
            return_value=None)
        self.recover_mock = mock.Mock(
            side_effect=_recover_function_mock)

        # Wire the mocks into the driver so no real vDirect calls or
        # background threads are made.
        self.driver.completion_handler.start = (
            self.operation_completer_start_mock)
        self.driver.completion_handler.join = (
            self.operation_completer_join_mock)
        self.driver.rest_client.call = self.driver_rest_call_mock
        self.driver.rest_client._call = self.driver_rest_call_mock
        self.driver.completion_handler.rest_client.call = (
            self.driver_rest_call_mock)

        # Synchronous queue stand-in: completions are handled inline.
        self.driver.queue = QueueMock(
            self.driver.completion_handler.handle_operation_completion)

    def test_debug_options(self):
        """Workflow creation carries configure_l3/l4 = False."""
        with self.subnet(cidr='10.0.0.0/24') as s:
            with self.loadbalancer(subnet=s) as lb:
                lb_id = lb['loadbalancer']['id']
                with self.listener(loadbalancer_id=lb_id) as l:
                    with self.pool(
                        protocol='HTTP',
                        listener_id=l['listener']['id']) as p:
                        with self.member(
                            pool_id=p['pool']['id'],
                            subnet=s, address='10.0.1.10'):
                            # Build the expected workflow-creation
                            # parameters, including the debug flags.
                            wf_srv_params = copy.deepcopy(WF_SRV_PARAMS)
                            wf_params = copy.deepcopy(WF_CREATE_PARAMS)

                            wf_srv_params['name'] = 'srv_' + (
                                s['subnet']['network_id'])
                            wf_srv_params['tenantId'] = self._tenant_id
                            wf_srv_params['primary']['network'][
                                'portgroups'] = [s['subnet'][
                                    'network_id']]
                            wf_params['parameters']['service_params'] = (
                                wf_srv_params)
                            wf_params['parameters']['configure_l3'] = False
                            wf_params['parameters']['configure_l4'] = False
                            calls = [
                                mock.call('GET', '/api/workflow/LB_' + lb_id,
                                          None, None),
                                mock.call(
                                    'POST',
                                    '/api/workflowTemplate/' +
                                    'os_lb_v2?name=LB_' + lb_id,
                                    wf_params,
                                    v2_driver.TEMPLATE_HEADER)
                            ]
                            self.driver_rest_call_mock.assert_has_calls(
                                calls, any_order=True)
Loading…
Reference in New Issue