amphorav1 removal

Remove the amphorav1 provider: it was deprecated in Zed and can now be
removed in Bobcat (2023.2).

Change-Id: I2ecfc0f40549d80b3058b76c619ff4ef35aadb97
Gregory Thiemonge 2023-03-01 15:46:38 +01:00
parent 1794a61899
commit 6c0515c988
66 changed files with 29 additions and 20481 deletions
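
Reviewer context: provider drivers are looked up by name through stevedore
entry points, so this removal also drops the entry point that made the
'amphorav1' name resolvable. A minimal standalone sketch of that lookup
pattern (the 'octavia.api.drivers' namespace is an assumption about
setup.cfg, not part of this diff):

# Hypothetical sketch, not Octavia code: resolving a provider name to a
# driver class via stevedore. After this commit, name='amphorav1' fails
# to load, while 'amphora'/'amphorav2' still resolve.
from stevedore import driver as stevedore_driver

def load_provider_driver(name):
    return stevedore_driver.DriverManager(
        namespace='octavia.api.drivers',  # assumed entry-point namespace
        name=name,
        invoke_on_load=True,
    ).driver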

View File

@@ -27,8 +27,6 @@ sys.path.insert(0, os.path.abspath('.'))
from tools import create_flow_docs
# Generate our flow diagrams
-create_flow_docs.generate(
-'tools/flow-list.txt', 'doc/source/contributor/devref/flow_diagrams')
create_flow_docs.generate(
'tools/flow-list-v2.txt', 'doc/source/contributor/devref/flow_diagrams_v2')

View File

@@ -8,18 +8,6 @@ controller needs to take while managing load balancers.
This document is meant as a reference for the key flows used in the
Octavia controller.
-.. toctree::
-:maxdepth: 1
-flow_diagrams/AmphoraFlows.rst
-flow_diagrams/HealthMonitorFlows.rst
-flow_diagrams/L7PolicyFlows.rst
-flow_diagrams/L7RuleFlows.rst
-flow_diagrams/ListenerFlows.rst
-flow_diagrams/LoadBalancerFlows.rst
-flow_diagrams/MemberFlows.rst
-flow_diagrams/PoolFlows.rst
The following are flow diagrams for the **amphora V2** driver.
.. toctree::

View File

@@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -1,547 +0,0 @@
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jsonschema import exceptions as js_exceptions
from jsonschema import validate
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from stevedore import driver as stevedore_driver
from octavia_lib.api.drivers import data_models as driver_dm
from octavia_lib.api.drivers import exceptions
from octavia_lib.api.drivers import provider_base as driver_base
from octavia_lib.common import constants as lib_consts
from octavia.api.drivers.amphora_driver import availability_zone_schema
from octavia.api.drivers.amphora_driver import flavor_schema
from octavia.api.drivers import utils as driver_utils
from octavia.common import constants as consts
from octavia.common import data_models
from octavia.common import rpc
from octavia.common import utils
from octavia.db import api as db_apis
from octavia.db import repositories
from octavia.network import base as network_base
CONF = cfg.CONF
CONF.import_group('oslo_messaging', 'octavia.common.config')
LOG = logging.getLogger(__name__)
AMPHORA_SUPPORTED_LB_ALGORITHMS = [
consts.LB_ALGORITHM_ROUND_ROBIN,
consts.LB_ALGORITHM_SOURCE_IP,
consts.LB_ALGORITHM_LEAST_CONNECTIONS]
AMPHORA_SUPPORTED_PROTOCOLS = [
lib_consts.PROTOCOL_TCP,
lib_consts.PROTOCOL_HTTP,
lib_consts.PROTOCOL_HTTPS,
lib_consts.PROTOCOL_TERMINATED_HTTPS,
lib_consts.PROTOCOL_PROXY,
lib_consts.PROTOCOL_PROXYV2,
lib_consts.PROTOCOL_UDP,
lib_consts.PROTOCOL_SCTP,
lib_consts.PROTOCOL_PROMETHEUS,
]
VALID_L7POLICY_LISTENER_PROTOCOLS = [
lib_consts.PROTOCOL_HTTP,
lib_consts.PROTOCOL_TERMINATED_HTTPS
]
class AmphoraProviderDriver(driver_base.ProviderDriver):
def __init__(self):
super().__init__()
topic = cfg.CONF.oslo_messaging.topic
self.target = messaging.Target(
namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT,
topic=topic, version="1.0", fanout=False)
self.client = rpc.get_client(self.target)
self.repositories = repositories.Repositories()
def _validate_pool_algorithm(self, pool):
if pool.lb_algorithm not in AMPHORA_SUPPORTED_LB_ALGORITHMS:
msg = ('Amphora provider does not support %s algorithm.'
% pool.lb_algorithm)
raise exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
def _validate_listener_protocol(self, listener):
if listener.protocol not in AMPHORA_SUPPORTED_PROTOCOLS:
msg = ('Amphora provider does not support %s protocol. '
'Supported: %s'
% (listener.protocol,
", ".join(AMPHORA_SUPPORTED_PROTOCOLS)))
raise exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
def _validate_alpn_protocols(self, obj):
if not obj.alpn_protocols:
return
supported = consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS
not_supported = set(obj.alpn_protocols) - set(supported)
if not_supported:
msg = ('Amphora provider does not support %s ALPN protocol(s). '
'Supported: %s'
% (", ".join(not_supported), ", ".join(supported)))
raise exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
# Load Balancer
def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary,
additional_vip_dicts=None):
if additional_vip_dicts:
msg = 'Amphora v1 driver does not support additional_vips.'
raise exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
vip_obj = driver_utils.provider_vip_dict_to_vip_obj(vip_dictionary)
lb_obj = data_models.LoadBalancer(id=loadbalancer_id,
project_id=project_id, vip=vip_obj)
network_driver = utils.get_network_driver()
vip_network = network_driver.get_network(
vip_dictionary[lib_consts.VIP_NETWORK_ID])
if not vip_network.port_security_enabled:
message = "Port security must be enabled on the VIP network."
raise exceptions.DriverError(user_fault_string=message,
operator_fault_string=message)
try:
# allocate_vip returns (vip, add_vips), skipping the 2nd element
# as amphorav1 doesn't support add_vips
vip = network_driver.allocate_vip(lb_obj)[0]
except network_base.AllocateVIPException as e:
message = str(e)
if getattr(e, 'orig_msg', None) is not None:
message = e.orig_msg
raise exceptions.DriverError(user_fault_string=message,
operator_fault_string=message)
LOG.info('Amphora provider created VIP port %s for load balancer %s.',
vip.port_id, loadbalancer_id)
return driver_utils.vip_dict_to_provider_dict(vip.to_dict()), []
# TODO(johnsom) convert this to octavia_lib constant flavor
# once octavia is transitioned to use octavia_lib
def loadbalancer_create(self, loadbalancer):
if loadbalancer.flavor == driver_dm.Unset:
loadbalancer.flavor = None
if loadbalancer.availability_zone == driver_dm.Unset:
loadbalancer.availability_zone = None
payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id,
consts.FLAVOR: loadbalancer.flavor,
consts.AVAILABILITY_ZONE: loadbalancer.availability_zone}
self.client.cast({}, 'create_load_balancer', **payload)
def loadbalancer_delete(self, loadbalancer, cascade=False):
loadbalancer_id = loadbalancer.loadbalancer_id
payload = {consts.LOAD_BALANCER_ID: loadbalancer_id,
'cascade': cascade}
self.client.cast({}, 'delete_load_balancer', **payload)
def loadbalancer_failover(self, loadbalancer_id):
payload = {consts.LOAD_BALANCER_ID: loadbalancer_id}
self.client.cast({}, 'failover_load_balancer', **payload)
def loadbalancer_update(self, old_loadbalancer, new_loadbalancer):
# Adapt the provider data model to the queue schema
lb_dict = new_loadbalancer.to_dict()
if 'admin_state_up' in lb_dict:
lb_dict['enabled'] = lb_dict.pop('admin_state_up')
lb_id = lb_dict.pop('loadbalancer_id')
# Put the qos_policy_id back under the vip element the controller
# expects
vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None)
if vip_qos_policy_id:
vip_dict = {"qos_policy_id": vip_qos_policy_id}
lb_dict["vip"] = vip_dict
payload = {consts.LOAD_BALANCER_ID: lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
self.client.cast({}, 'update_load_balancer', **payload)
# Listener
def listener_create(self, listener):
self._validate_listener_protocol(listener)
self._validate_alpn_protocols(listener)
payload = {consts.LISTENER_ID: listener.listener_id}
self.client.cast({}, 'create_listener', **payload)
def listener_delete(self, listener):
listener_id = listener.listener_id
payload = {consts.LISTENER_ID: listener_id}
self.client.cast({}, 'delete_listener', **payload)
def listener_update(self, old_listener, new_listener):
self._validate_alpn_protocols(new_listener)
listener_dict = new_listener.to_dict()
if 'admin_state_up' in listener_dict:
listener_dict['enabled'] = listener_dict.pop('admin_state_up')
listener_id = listener_dict.pop('listener_id')
if 'client_ca_tls_container_ref' in listener_dict:
listener_dict['client_ca_tls_container_id'] = listener_dict.pop(
'client_ca_tls_container_ref')
listener_dict.pop('client_ca_tls_container_data', None)
if 'client_crl_container_ref' in listener_dict:
listener_dict['client_crl_container_id'] = listener_dict.pop(
'client_crl_container_ref')
listener_dict.pop('client_crl_container_data', None)
payload = {consts.LISTENER_ID: listener_id,
consts.LISTENER_UPDATES: listener_dict}
self.client.cast({}, 'update_listener', **payload)
# Pool
def pool_create(self, pool):
self._validate_pool_algorithm(pool)
self._validate_alpn_protocols(pool)
payload = {consts.POOL_ID: pool.pool_id}
self.client.cast({}, 'create_pool', **payload)
def pool_delete(self, pool):
pool_id = pool.pool_id
payload = {consts.POOL_ID: pool_id}
self.client.cast({}, 'delete_pool', **payload)
def pool_update(self, old_pool, new_pool):
self._validate_alpn_protocols(new_pool)
if new_pool.lb_algorithm:
self._validate_pool_algorithm(new_pool)
pool_dict = new_pool.to_dict()
if 'admin_state_up' in pool_dict:
pool_dict['enabled'] = pool_dict.pop('admin_state_up')
pool_id = pool_dict.pop('pool_id')
if 'tls_container_ref' in pool_dict:
pool_dict['tls_certificate_id'] = pool_dict.pop(
'tls_container_ref')
pool_dict.pop('tls_container_data', None)
if 'ca_tls_container_ref' in pool_dict:
pool_dict['ca_tls_certificate_id'] = pool_dict.pop(
'ca_tls_container_ref')
pool_dict.pop('ca_tls_container_data', None)
if 'crl_container_ref' in pool_dict:
pool_dict['crl_container_id'] = pool_dict.pop('crl_container_ref')
pool_dict.pop('crl_container_data', None)
payload = {consts.POOL_ID: pool_id,
consts.POOL_UPDATES: pool_dict}
self.client.cast({}, 'update_pool', **payload)
# Member
def member_create(self, member):
pool_id = member.pool_id
db_pool = self.repositories.pool.get(db_apis.get_session(),
id=pool_id)
self._validate_members(db_pool, [member])
payload = {consts.MEMBER_ID: member.member_id}
self.client.cast({}, 'create_member', **payload)
def member_delete(self, member):
member_id = member.member_id
payload = {consts.MEMBER_ID: member_id}
self.client.cast({}, 'delete_member', **payload)
def member_update(self, old_member, new_member):
member_dict = new_member.to_dict()
if 'admin_state_up' in member_dict:
member_dict['enabled'] = member_dict.pop('admin_state_up')
member_id = member_dict.pop('member_id')
payload = {consts.MEMBER_ID: member_id,
consts.MEMBER_UPDATES: member_dict}
self.client.cast({}, 'update_member', **payload)
def member_batch_update(self, pool_id, members):
# The DB should not have updated yet, so we can still use the pool
db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id)
self._validate_members(db_pool, members)
old_members = db_pool.members
old_member_ids = [m.id for m in old_members]
# The driver will always pass objects with IDs.
new_member_ids = [m.member_id for m in members]
# Find members that are brand new or updated
new_members = []
updated_members = []
for m in members:
if m.member_id not in old_member_ids:
new_members.append(m)
else:
member_dict = m.to_dict(render_unsets=False)
member_dict['id'] = member_dict.pop('member_id')
if 'address' in member_dict:
member_dict['ip_address'] = member_dict.pop('address')
if 'admin_state_up' in member_dict:
member_dict['enabled'] = member_dict.pop('admin_state_up')
updated_members.append(member_dict)
# Find members that are deleted
deleted_members = []
for m in old_members:
if m.id not in new_member_ids:
deleted_members.append(m)
payload = {'old_member_ids': [m.id for m in deleted_members],
'new_member_ids': [m.member_id for m in new_members],
'updated_members': updated_members}
self.client.cast({}, 'batch_update_members', **payload)
def _validate_members(self, db_pool, members):
if db_pool.protocol in consts.LVS_PROTOCOLS:
# For SCTP/UDP LBs, check that we are not mixing IPv4 and IPv6
for member in members:
member_is_ipv6 = utils.is_ipv6(member.address)
for listener in db_pool.listeners:
lb = listener.load_balancer
vip_is_ipv6 = utils.is_ipv6(lb.vip.ip_address)
if member_is_ipv6 != vip_is_ipv6:
msg = ("This provider doesn't support mixing IPv4 and "
"IPv6 addresses for its VIP and members in {} "
"load balancers.".format(db_pool.protocol))
raise exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
# Health Monitor
def health_monitor_create(self, healthmonitor):
payload = {consts.HEALTH_MONITOR_ID: healthmonitor.healthmonitor_id}
self.client.cast({}, 'create_health_monitor', **payload)
def health_monitor_delete(self, healthmonitor):
healthmonitor_id = healthmonitor.healthmonitor_id
payload = {consts.HEALTH_MONITOR_ID: healthmonitor_id}
self.client.cast({}, 'delete_health_monitor', **payload)
def health_monitor_update(self, old_healthmonitor, new_healthmonitor):
healthmon_dict = new_healthmonitor.to_dict()
if 'admin_state_up' in healthmon_dict:
healthmon_dict['enabled'] = healthmon_dict.pop('admin_state_up')
if 'max_retries_down' in healthmon_dict:
healthmon_dict['fall_threshold'] = healthmon_dict.pop(
'max_retries_down')
if 'max_retries' in healthmon_dict:
healthmon_dict['rise_threshold'] = healthmon_dict.pop(
'max_retries')
healthmon_id = healthmon_dict.pop('healthmonitor_id')
payload = {consts.HEALTH_MONITOR_ID: healthmon_id,
consts.HEALTH_MONITOR_UPDATES: healthmon_dict}
self.client.cast({}, 'update_health_monitor', **payload)
# L7 Policy
def l7policy_create(self, l7policy):
db_listener = self.repositories.listener.get(db_apis.get_session(),
id=l7policy.listener_id)
if db_listener.protocol not in VALID_L7POLICY_LISTENER_PROTOCOLS:
msg = ('%s protocol listeners do not support L7 policies' % (
db_listener.protocol))
raise exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
payload = {consts.L7POLICY_ID: l7policy.l7policy_id}
self.client.cast({}, 'create_l7policy', **payload)
def l7policy_delete(self, l7policy):
l7policy_id = l7policy.l7policy_id
payload = {consts.L7POLICY_ID: l7policy_id}
self.client.cast({}, 'delete_l7policy', **payload)
def l7policy_update(self, old_l7policy, new_l7policy):
l7policy_dict = new_l7policy.to_dict()
if 'admin_state_up' in l7policy_dict:
l7policy_dict['enabled'] = l7policy_dict.pop('admin_state_up')
l7policy_id = l7policy_dict.pop('l7policy_id')
payload = {consts.L7POLICY_ID: l7policy_id,
consts.L7POLICY_UPDATES: l7policy_dict}
self.client.cast({}, 'update_l7policy', **payload)
# L7 Rule
def l7rule_create(self, l7rule):
payload = {consts.L7RULE_ID: l7rule.l7rule_id}
self.client.cast({}, 'create_l7rule', **payload)
def l7rule_delete(self, l7rule):
l7rule_id = l7rule.l7rule_id
payload = {consts.L7RULE_ID: l7rule_id}
self.client.cast({}, 'delete_l7rule', **payload)
def l7rule_update(self, old_l7rule, new_l7rule):
l7rule_dict = new_l7rule.to_dict()
if 'admin_state_up' in l7rule_dict:
l7rule_dict['enabled'] = l7rule_dict.pop('admin_state_up')
l7rule_id = l7rule_dict.pop('l7rule_id')
payload = {consts.L7RULE_ID: l7rule_id,
consts.L7RULE_UPDATES: l7rule_dict}
self.client.cast({}, 'update_l7rule', **payload)
# Flavor
def get_supported_flavor_metadata(self):
"""Returns the valid flavor metadata keys and descriptions.
This extracts the valid flavor metadata keys and descriptions
from the JSON validation schema and returns it as a dictionary.
:return: Dictionary of flavor metadata keys and descriptions.
:raises DriverError: An unexpected error occurred.
"""
try:
props = flavor_schema.SUPPORTED_FLAVOR_SCHEMA['properties']
return {k: v.get('description', '') for k, v in props.items()}
except Exception as e:
raise exceptions.DriverError(
user_fault_string='Failed to get the supported flavor '
'metadata due to: {}'.format(str(e)),
operator_fault_string='Failed to get the supported flavor '
'metadata due to: {}'.format(str(e)))
def validate_flavor(self, flavor_dict):
"""Validates flavor profile data.
This will validate a flavor profile dataset against the flavor
settings the amphora driver supports.
:param flavor_dict: The flavor dictionary to validate.
:type flavor_dict: dict
:return: None
:raises DriverError: An unexpected error occurred.
:raises UnsupportedOptionError: If the driver does not support
one of the flavor settings.
"""
try:
validate(flavor_dict, flavor_schema.SUPPORTED_FLAVOR_SCHEMA)
except js_exceptions.ValidationError as e:
error_object = ''
if e.relative_path:
error_object = '{} '.format(e.relative_path[0])
raise exceptions.UnsupportedOptionError(
user_fault_string='{0}{1}'.format(error_object, e.message),
operator_fault_string=str(e))
except Exception as e:
raise exceptions.DriverError(
user_fault_string='Failed to validate the flavor metadata '
'due to: {}'.format(str(e)),
operator_fault_string='Failed to validate the flavor metadata '
'due to: {}'.format(str(e)))
compute_flavor = flavor_dict.get(consts.COMPUTE_FLAVOR, None)
if compute_flavor:
compute_driver = stevedore_driver.DriverManager(
namespace='octavia.compute.drivers',
name=CONF.controller_worker.compute_driver,
invoke_on_load=True
).driver
# TODO(johnsom) Fix this to raise a NotFound error
# when the octavia-lib supports it.
compute_driver.validate_flavor(compute_flavor)
amp_image_tag = flavor_dict.get(consts.AMP_IMAGE_TAG, None)
if amp_image_tag:
image_driver = stevedore_driver.DriverManager(
namespace='octavia.image.drivers',
name=CONF.controller_worker.image_driver,
invoke_on_load=True
).driver
try:
image_driver.get_image_id_by_tag(
amp_image_tag, CONF.controller_worker.amp_image_owner_id)
except Exception as e:
raise exceptions.NotFound(
user_fault_string='Failed to find an image with tag {} '
'due to: {}'.format(
amp_image_tag, str(e)),
operator_fault_string='Failed to find an image with tag '
'{} due to: {}'.format(
amp_image_tag, str(e)))
# Availability Zone
def get_supported_availability_zone_metadata(self):
"""Returns the valid availability zone metadata keys and descriptions.
This extracts the valid availability zone metadata keys and
descriptions from the JSON validation schema and returns it as a
dictionary.
:return: Dictionary of availability zone metadata keys and descriptions
:raises DriverError: An unexpected error occurred.
"""
try:
props = (
availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA[
'properties'])
return {k: v.get('description', '') for k, v in props.items()}
except Exception as e:
raise exceptions.DriverError(
user_fault_string='Failed to get the supported availability '
'zone metadata due to: {}'.format(str(e)),
operator_fault_string='Failed to get the supported '
'availability zone metadata due to: '
'{}'.format(str(e)))
def validate_availability_zone(self, availability_zone_dict):
"""Validates availability zone profile data.
This will validate an availability zone profile dataset against the
availability zone settings the amphora driver supports.
:param availability_zone_dict: The availability zone dict to validate.
:type availability_zone_dict: dict
:return: None
:raises DriverError: An unexpected error occurred.
:raises UnsupportedOptionError: If the driver does not support
one of the availability zone settings.
"""
try:
validate(
availability_zone_dict,
availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA)
except js_exceptions.ValidationError as e:
error_object = ''
if e.relative_path:
error_object = '{} '.format(e.relative_path[0])
raise exceptions.UnsupportedOptionError(
user_fault_string='{0}{1}'.format(error_object, e.message),
operator_fault_string=str(e))
except Exception as e:
raise exceptions.DriverError(
user_fault_string='Failed to validate the availability zone '
'metadata due to: {}'.format(str(e)),
operator_fault_string='Failed to validate the availability '
'zone metadata due to: {}'.format(str(e))
)
compute_zone = availability_zone_dict.get(consts.COMPUTE_ZONE, None)
if compute_zone:
compute_driver = stevedore_driver.DriverManager(
namespace='octavia.compute.drivers',
name=CONF.controller_worker.compute_driver,
invoke_on_load=True
).driver
# TODO(johnsom) Fix this to raise a NotFound error
# when the octavia-lib supports it.
compute_driver.validate_availability_zone(compute_zone)
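
Reviewer note: every operation in the driver above follows the same shape:
flatten the provider object into a plain payload dict, then fire-and-forget
with cast() so the API process never waits on the controller worker. A
standalone sketch of that pattern (illustrative names; a real transport_url
must be configured for it to connect):

# Standalone sketch of the payload-then-cast pattern used throughout
# AmphoraProviderDriver above. Topic name is illustrative.
from oslo_config import cfg
import oslo_messaging as messaging

transport = messaging.get_rpc_transport(cfg.CONF)  # needs transport_url
target = messaging.Target(topic='octavia_prov', version='1.0', fanout=False)
client = messaging.RPCClient(transport, target)

def loadbalancer_delete(loadbalancer_id, cascade=False):
    payload = {'load_balancer_id': loadbalancer_id, 'cascade': cascade}
    # cast() returns immediately; a consumer service picks the message
    # off the queue and runs the actual delete flow.
    client.cast({}, 'delete_load_balancer', **payload)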

View File

@@ -124,12 +124,8 @@ class FailoverController(base.BaseController):
def __init__(self, amp_id):
super().__init__()
-if CONF.api_settings.default_provider_driver == constants.AMPHORAV1:
-topic = cfg.CONF.oslo_messaging.topic
-version = "1.0"
-else:
-topic = constants.TOPIC_AMPHORA_V2
-version = "2.0"
+topic = constants.TOPIC_AMPHORA_V2
+version = "2.0"
self.target = messaging.Target(
namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT,
topic=topic, version=version, fanout=False)
@@ -170,12 +166,8 @@ class AmphoraUpdateController(base.BaseController):
def __init__(self, amp_id):
super().__init__()
-if CONF.api_settings.default_provider_driver == constants.AMPHORAV1:
-topic = cfg.CONF.oslo_messaging.topic
-version = "1.0"
-else:
-topic = constants.TOPIC_AMPHORA_V2
-version = "2.0"
+topic = constants.TOPIC_AMPHORA_V2
+version = "2.0"
self.target = messaging.Target(
namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT,
topic=topic, version=version, fanout=False)

View File

@@ -20,7 +20,6 @@ from oslo_config import cfg
from oslo_reports import guru_meditation_report as gmr
from octavia.common import service as octavia_service
-from octavia.controller.queue.v1 import consumer as consumer_v1
from octavia.controller.queue.v2 import consumer as consumer_v2
from octavia import version
@@ -33,8 +32,6 @@ def main():
gmr.TextGuruMeditation.setup_autorun(version)
sm = cotyledon.ServiceManager()
-sm.add(consumer_v1.ConsumerService, workers=CONF.controller_worker.workers,
-args=(CONF,))
sm.add(consumer_v2.ConsumerService,
workers=CONF.controller_worker.workers, args=(CONF,))
oslo_config_glue.setup(sm, CONF, reload_method="mutate")

View File

@@ -796,8 +796,6 @@ RBAC_ROLES_DEPRECATED_REASON = (
# PROVIDERS
OCTAVIA = 'octavia'
AMPHORAV2 = 'amphorav2'
-# Deprecated in Z, to be removed
-AMPHORAV1 = 'amphorav1'
# systemctl commands
DISABLE = 'disable'

View File

@@ -23,7 +23,6 @@ from oslo_log import log as logging
from oslo_utils import excutils
from octavia.common import constants
-from octavia.controller.worker.v1 import controller_worker as cw1
from octavia.controller.worker.v2 import controller_worker as cw2
from octavia.db import api as db_api
from octavia.db import repositories as repo
@@ -58,10 +57,7 @@ def update_stats_on_done(stats, fut):
class HealthManager(object):
def __init__(self, exit_event):
-if CONF.api_settings.default_provider_driver == constants.AMPHORAV1:
-self.cw = cw1.ControllerWorker()
-else:
-self.cw = cw2.ControllerWorker()
+self.cw = cw2.ControllerWorker()
self.threads = CONF.health_manager.failover_threads
self.executor = futures.ThreadPoolExecutor(max_workers=self.threads)
self.amp_repo = repo.AmphoraRepository()
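
The dispatch pattern kept here submits failovers to a thread pool and
tracks results through a done callback (the update_stats_on_done helper
named in the hunk header). A simplified standalone sketch, assuming the
callback is bound with functools.partial:

# Simplified sketch of the executor/done-callback pattern used by
# HealthManager. The submitted lambda stands in for a failover job.
import functools
from concurrent import futures

def update_stats_on_done(stats, fut):
    stats['completed'] += 1
    if fut.exception() is not None:
        stats['failed'] += 1

stats = {'completed': 0, 'failed': 0}
executor = futures.ThreadPoolExecutor(max_workers=10)
fut = executor.submit(lambda: None)  # stand-in for cw.failover_amphora(...)
fut.add_done_callback(functools.partial(update_stats_on_done, stats))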

View File

@@ -19,8 +19,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from sqlalchemy.orm import exc as sqlalchemy_exceptions
-from octavia.common import constants
-from octavia.controller.worker.v1 import controller_worker as cw1
from octavia.controller.worker.v2 import controller_worker as cw2
from octavia.db import api as db_api
from octavia.db import repositories as repo
@@ -78,10 +76,7 @@ class DatabaseCleanup(object):
class CertRotation(object):
def __init__(self):
self.threads = CONF.house_keeping.cert_rotate_threads
-if CONF.api_settings.default_provider_driver == constants.AMPHORAV1:
-self.cw = cw1.ControllerWorker()
-else:
-self.cw = cw2.ControllerWorker()
+self.cw = cw2.ControllerWorker()
def rotate(self):
"""Check the amphora db table for expiring auth certs."""

View File

@@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -1,64 +0,0 @@
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cotyledon
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from octavia.common import rpc
from octavia.controller.queue.v1 import endpoints
LOG = logging.getLogger(__name__)
class ConsumerService(cotyledon.Service):
def __init__(self, worker_id, conf):
super().__init__(worker_id)
self.conf = conf
self.topic = conf.oslo_messaging.topic
self.server = conf.host
self.endpoints = []
self.access_policy = dispatcher.DefaultRPCAccessPolicy
self.message_listener = None
def run(self):
LOG.info('Starting consumer...')
target = messaging.Target(topic=self.topic, server=self.server,
fanout=False)
self.endpoints = [endpoints.Endpoints()]
self.message_listener = rpc.get_server(
target, self.endpoints,
executor='threading',
access_policy=self.access_policy
)
self.message_listener.start()
def terminate(self):
if self.message_listener:
LOG.info('Stopping consumer...')
self.message_listener.stop()
LOG.info('Consumer successfully stopped. Waiting for final '
'messages to be processed...')
self.message_listener.wait()
if self.endpoints:
LOG.info('Shutting down endpoint worker executors...')
for e in self.endpoints:
try:
e.worker.executor.shutdown()
except AttributeError:
pass
super().terminate()
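
For reference, the run/terminate life cycle above is the standard cotyledon
plus oslo.messaging pairing. A self-contained sketch of the same shape (not
Octavia code; topic/server names are made up, and a reachable transport_url
is required to actually serve):

# Self-contained sketch of a cotyledon-managed RPC consumer mirroring
# the deleted v1 ConsumerService.
import cotyledon
from oslo_config import cfg
import oslo_messaging as messaging

class EchoEndpoint(object):
    def echo(self, context, msg):
        return msg

class DemoConsumer(cotyledon.Service):
    def __init__(self, worker_id, conf):
        super().__init__(worker_id)
        self.conf = conf
        self.server = None

    def run(self):
        transport = messaging.get_rpc_transport(self.conf)
        target = messaging.Target(topic='demo_topic', server='demo_host')
        self.server = messaging.get_rpc_server(
            transport, target, [EchoEndpoint()], executor='threading')
        self.server.start()

    def terminate(self):
        if self.server:
            self.server.stop()
            self.server.wait()  # drain in-flight messages before exit
        super().terminate()

if __name__ == '__main__':
    sm = cotyledon.ServiceManager()
    sm.add(DemoConsumer, workers=2, args=(cfg.CONF,))
    sm.run()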

View File

@@ -1,160 +0,0 @@
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from stevedore import driver as stevedore_driver
from octavia.common import constants
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Endpoints(object):
# API version history:
# 1.0 - Initial version.
target = messaging.Target(
namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT,
version='1.1')
def __init__(self):
self.worker = stevedore_driver.DriverManager(
namespace='octavia.plugins',
name=CONF.octavia_plugins,
invoke_on_load=True
).driver
def create_load_balancer(self, context, load_balancer_id,
flavor=None, availability_zone=None):
LOG.info('Creating load balancer \'%s\'...', load_balancer_id)
self.worker.create_load_balancer(load_balancer_id, flavor,
availability_zone)
def update_load_balancer(self, context, load_balancer_id,
load_balancer_updates):
LOG.info('Updating load balancer \'%s\'...', load_balancer_id)
self.worker.update_load_balancer(load_balancer_id,
load_balancer_updates)
def delete_load_balancer(self, context, load_balancer_id, cascade=False):
LOG.info('Deleting load balancer \'%s\'...', load_balancer_id)
self.worker.delete_load_balancer(load_balancer_id, cascade)
def failover_load_balancer(self, context, load_balancer_id):
LOG.info('Failing over amphora in load balancer \'%s\'...',
load_balancer_id)
self.worker.failover_loadbalancer(load_balancer_id)
def failover_amphora(self, context, amphora_id):
LOG.info('Failing over amphora \'%s\'...',
amphora_id)
self.worker.failover_amphora(amphora_id)
def create_listener(self, context, listener_id):
LOG.info('Creating listener \'%s\'...', listener_id)
self.worker.create_listener(listener_id)
def update_listener(self, context, listener_id, listener_updates):
LOG.info('Updating listener \'%s\'...', listener_id)
self.worker.update_listener(listener_id, listener_updates)
def delete_listener(self, context, listener_id):
LOG.info('Deleting listener \'%s\'...', listener_id)
self.worker.delete_listener(listener_id)
def create_pool(self, context, pool_id):
LOG.info('Creating pool \'%s\'...', pool_id)
self.worker.create_pool(pool_id)
def update_pool(self, context, pool_id, pool_updates):
LOG.info('Updating pool \'%s\'...', pool_id)
self.worker.update_pool(pool_id, pool_updates)
def delete_pool(self, context, pool_id):
LOG.info('Deleting pool \'%s\'...', pool_id)
self.worker.delete_pool(pool_id)
def create_health_monitor(self, context, health_monitor_id):
LOG.info('Creating health monitor \'%s\'...', health_monitor_id)
self.worker.create_health_monitor(health_monitor_id)
def update_health_monitor(self, context, health_monitor_id,
health_monitor_updates):
LOG.info('Updating health monitor \'%s\'...', health_monitor_id)
self.worker.update_health_monitor(health_monitor_id,
health_monitor_updates)
def delete_health_monitor(self, context, health_monitor_id):
LOG.info('Deleting health monitor \'%s\'...', health_monitor_id)
self.worker.delete_health_monitor(health_monitor_id)
def create_member(self, context, member_id):
LOG.info('Creating member \'%s\'...', member_id)
self.worker.create_member(member_id)
def update_member(self, context, member_id, member_updates):
LOG.info('Updating member \'%s\'...', member_id)
self.worker.update_member(member_id, member_updates)
def batch_update_members(self, context, old_member_ids, new_member_ids,
updated_members):
updated_member_ids = [m.get('id') for m in updated_members]
LOG.info(
'Batch updating members: old=\'%(old)s\', new=\'%(new)s\', '
'updated=\'%(updated)s\'...',
{'old': old_member_ids, 'new': new_member_ids,
'updated': updated_member_ids})
self.worker.batch_update_members(
old_member_ids, new_member_ids, updated_members)
def delete_member(self, context, member_id):
LOG.info('Deleting member \'%s\'...', member_id)
self.worker.delete_member(member_id)
def create_l7policy(self, context, l7policy_id):
LOG.info('Creating l7policy \'%s\'...', l7policy_id)
self.worker.create_l7policy(l7policy_id)
def update_l7policy(self, context, l7policy_id, l7policy_updates):
LOG.info('Updating l7policy \'%s\'...', l7policy_id)
self.worker.update_l7policy(l7policy_id, l7policy_updates)
def delete_l7policy(self, context, l7policy_id):
LOG.info('Deleting l7policy \'%s\'...', l7policy_id)
self.worker.delete_l7policy(l7policy_id)
def create_l7rule(self, context, l7rule_id):
LOG.info('Creating l7rule \'%s\'...', l7rule_id)
self.worker.create_l7rule(l7rule_id)
def update_l7rule(self, context, l7rule_id, l7rule_updates):
LOG.info('Updating l7rule \'%s\'...', l7rule_id)
self.worker.update_l7rule(l7rule_id, l7rule_updates)
def delete_l7rule(self, context, l7rule_id):
LOG.info('Deleting l7rule \'%s\'...', l7rule_id)
self.worker.delete_l7rule(l7rule_id)
def update_amphora_agent_config(self, context, amphora_id):
LOG.info('Updating amphora \'%s\' agent configuration...',
amphora_id)
self.worker.update_amphora_agent_config(amphora_id)
def delete_amphora(self, context, amphora_id):
LOG.info('Deleting amphora \'%s\'...', amphora_id)
self.worker.delete_amphora(amphora_id)

View File

@@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

File diff suppressed because it is too large

View File

@@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -1,610 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_config import cfg
from oslo_log import log as logging
from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.controller.worker.v1.tasks import cert_task
from octavia.controller.worker.v1.tasks import compute_tasks
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import network_tasks
from octavia.controller.worker.v1.tasks import retry_tasks
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class AmphoraFlows(object):
def get_create_amphora_flow(self):
"""Creates a flow to create an amphora.
:returns: The flow for creating the amphora
"""
create_amphora_flow = linear_flow.Flow(constants.CREATE_AMPHORA_FLOW)
create_amphora_flow.add(database_tasks.CreateAmphoraInDB(
provides=constants.AMPHORA_ID))
create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask(
requires=constants.AMPHORA_ID))
create_amphora_flow.add(cert_task.GenerateServerPEMTask(
provides=constants.SERVER_PEM))
create_amphora_flow.add(
database_tasks.UpdateAmphoraDBCertExpiration(
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
create_amphora_flow.add(compute_tasks.CertComputeCreate(
requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
constants.SERVER_GROUP_ID, constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR, constants.AVAILABILITY_ZONE),
provides=constants.COMPUTE_ID))
create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB(
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
create_amphora_flow.add(compute_tasks.ComputeActiveWait(
requires=(constants.COMPUTE_ID, constants.AMPHORA_ID),
provides=constants.COMPUTE_OBJ))
create_amphora_flow.add(database_tasks.UpdateAmphoraInfo(
requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ),
provides=constants.AMPHORA))
create_amphora_flow.add(
amphora_driver_tasks.AmphoraComputeConnectivityWait(
requires=constants.AMPHORA))
create_amphora_flow.add(database_tasks.ReloadAmphora(
requires=constants.AMPHORA_ID,
provides=constants.AMPHORA))
create_amphora_flow.add(amphora_driver_tasks.AmphoraFinalize(
requires=constants.AMPHORA))
create_amphora_flow.add(database_tasks.MarkAmphoraReadyInDB(
requires=constants.AMPHORA))
return create_amphora_flow
def _get_post_map_lb_subflow(self, prefix, role):
"""Set amphora type after mapped to lb."""
sf_name = prefix + '-' + constants.POST_MAP_AMP_TO_LB_SUBFLOW
post_map_amp_to_lb = linear_flow.Flow(
sf_name)
post_map_amp_to_lb.add(database_tasks.ReloadAmphora(
name=sf_name + '-' + constants.RELOAD_AMPHORA,
requires=constants.AMPHORA_ID,
provides=constants.AMPHORA))
post_map_amp_to_lb.add(amphora_driver_tasks.AmphoraConfigUpdate(
name=sf_name + '-' + constants.AMPHORA_CONFIG_UPDATE_TASK,
requires=(constants.AMPHORA, constants.FLAVOR)))
if role == constants.ROLE_MASTER:
post_map_amp_to_lb.add(database_tasks.MarkAmphoraMasterInDB(
name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_BACKUP:
post_map_amp_to_lb.add(database_tasks.MarkAmphoraBackupInDB(
name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_STANDALONE:
post_map_amp_to_lb.add(database_tasks.MarkAmphoraStandAloneInDB(
name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB,
requires=constants.AMPHORA))
return post_map_amp_to_lb
def _get_create_amp_for_lb_subflow(self, prefix, role):
"""Create a new amphora for lb."""
sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW
create_amp_for_lb_subflow = linear_flow.Flow(sf_name)
create_amp_for_lb_subflow.add(database_tasks.CreateAmphoraInDB(
name=sf_name + '-' + constants.CREATE_AMPHORA_INDB,
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORA_ID))
create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask(
name=sf_name + '-' + constants.GENERATE_SERVER_PEM,
provides=constants.SERVER_PEM))
create_amp_for_lb_subflow.add(
database_tasks.UpdateAmphoraDBCertExpiration(
name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION,
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY,
constants.SERVER_GROUP_ID,
constants.FLAVOR, constants.AVAILABILITY_ZONE),
provides=constants.COMPUTE_ID))
create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId(
name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID,
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBootingInDB(
name=sf_name + '-' + constants.MARK_AMPHORA_BOOTING_INDB,
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
create_amp_for_lb_subflow.add(compute_tasks.ComputeActiveWait(
name=sf_name + '-' + constants.COMPUTE_WAIT,
requires=(constants.COMPUTE_ID, constants.AMPHORA_ID,
constants.AVAILABILITY_ZONE),
provides=constants.COMPUTE_OBJ))
create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraInfo(
name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO,
requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ),
provides=constants.AMPHORA))
create_amp_for_lb_subflow.add(
amphora_driver_tasks.AmphoraComputeConnectivityWait(
name=sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT,
requires=constants.AMPHORA))
create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize(
name=sf_name + '-' + constants.AMPHORA_FINALIZE,
requires=constants.AMPHORA))
create_amp_for_lb_subflow.add(
database_tasks.MarkAmphoraAllocatedInDB(
name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB,
requires=(constants.AMPHORA, constants.LOADBALANCER_ID)))
create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora(
name=sf_name + '-' + constants.RELOAD_AMPHORA,
requires=constants.AMPHORA_ID,
provides=constants.AMPHORA))
if role == constants.ROLE_MASTER:
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB(
name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_BACKUP:
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB(
name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_STANDALONE:
create_amp_for_lb_subflow.add(
database_tasks.MarkAmphoraStandAloneInDB(
name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB,
requires=constants.AMPHORA))
return create_amp_for_lb_subflow
def get_amphora_for_lb_subflow(
self, prefix, role=constants.ROLE_STANDALONE):
return self._get_create_amp_for_lb_subflow(prefix, role)
def get_delete_amphora_flow(
self, amphora,
retry_attempts=CONF.controller_worker.amphora_delete_retries,
retry_interval=(
CONF.controller_worker.amphora_delete_retry_interval)):
"""Creates a subflow to delete an amphora and it's port.
This flow is idempotent and safe to retry.
:param amphora: An amphora object.
:param retry_attempts: The number of times the flow is retried.
:param retry_interval: The time to wait, in seconds, between retries.
:returns: The subflow for deleting the amphora.
:raises AmphoraNotFound: The referenced Amphora was not found.
"""
delete_amphora_flow = linear_flow.Flow(
name=constants.DELETE_AMPHORA_FLOW + '-' + amphora.id,
retry=retry_tasks.SleepingRetryTimesController(
name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' +
amphora.id,
attempts=retry_attempts, interval=retry_interval))
delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(
database_tasks.MarkAmphoraPendingDeleteInDB(
name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy(
name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(compute_tasks.ComputeDelete(
name=constants.DELETE_AMPHORA + '-' + amphora.id,
inject={constants.AMPHORA: amphora,
constants.PASSIVE_FAILURE: True}))
delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring(
name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
name=constants.MARK_AMPHORA_DELETED + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
if amphora.vrrp_port_id:
delete_amphora_flow.add(network_tasks.DeletePort(
name=(constants.DELETE_PORT + '-' + str(amphora.id) + '-' +
str(amphora.vrrp_port_id)),
inject={constants.PORT_ID: amphora.vrrp_port_id,
constants.PASSIVE_FAILURE: True}))
# TODO(johnsom) What about cleaning up any member ports?
# maybe we should get the list of attached ports prior to delete
# and call delete on them here. Fix this as part of
# https://storyboard.openstack.org/#!/story/2007077
return delete_amphora_flow
def get_vrrp_subflow(self, prefix, timeout_dict=None,
create_vrrp_group=True):
sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
vrrp_subflow = linear_flow.Flow(sf_name)
# Optimization for failover flow. No reason to call this
# when configuring the secondary amphora.
if create_vrrp_group:
vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB(
name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
requires=constants.LOADBALANCER_ID))
vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE_NETWORK_CONFIG))
# VRRP update needs to be run on all amphora to update
# their peer configurations. So parallelize this with an
# unordered subflow.
update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow')
# We have three tasks to run in order, per amphora
amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow')
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict},
provides=constants.AMP_VRRP_INT))
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE,
requires=(constants.LOADBALANCER_ID,
constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
constants.AMP_VRRP_INT),
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict}))
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
name=sf_name + '-0-' + constants.AMP_VRRP_START,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict}))
amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow')
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict},
provides=constants.AMP_VRRP_INT))
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE,
requires=(constants.LOADBALANCER_ID,
constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
constants.AMP_VRRP_INT),
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
name=sf_name + '-1-' + constants.AMP_VRRP_START,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
update_amps_subflow.add(amp_0_subflow)
update_amps_subflow.add(amp_1_subflow)
vrrp_subflow.add(update_amps_subflow)
return vrrp_subflow
def cert_rotate_amphora_flow(self):
"""Implement rotation for amphora's cert.
1. Create a new certificate
2. Upload the cert to amphora
3. Update the amphora's DB record with the new certificate info
4. Update the cert_busy flag to be false after rotation
:returns: The flow for updating an amphora
"""
rotated_amphora_flow = linear_flow.Flow(
constants.CERT_ROTATE_AMPHORA_FLOW)
rotated_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
requires=constants.AMPHORA))
# create a new certificate, the returned value is the newly created
# certificate
rotated_amphora_flow.add(cert_task.GenerateServerPEMTask(
provides=constants.SERVER_PEM))
# update it in amphora task
rotated_amphora_flow.add(amphora_driver_tasks.AmphoraCertUpload(
requires=(constants.AMPHORA, constants.SERVER_PEM)))
# update the newly created certificate info to amphora
rotated_amphora_flow.add(database_tasks.UpdateAmphoraDBCertExpiration(
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
# update the cert_busy flag to be false after rotation
rotated_amphora_flow.add(database_tasks.UpdateAmphoraCertBusyToFalse(
requires=constants.AMPHORA))
return rotated_amphora_flow
def update_amphora_config_flow(self):
"""Creates a flow to update the amphora agent configuration.
:returns: The flow for updating an amphora
"""
update_amphora_flow = linear_flow.Flow(
constants.UPDATE_AMPHORA_CONFIG_FLOW)
update_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
requires=constants.AMPHORA))
update_amphora_flow.add(amphora_driver_tasks.AmphoraConfigUpdate(
requires=(constants.AMPHORA, constants.FLAVOR)))
return update_amphora_flow
def get_amphora_for_lb_failover_subflow(
self, prefix, role=constants.ROLE_STANDALONE,
failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False):
"""Creates a new amphora that will be used in a failover flow.
:requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer
:provides: amphora_id, amphora
:param prefix: The flow name prefix to use on the flow and tasks.
:param role: The role this amphora will have in the topology.
:param failed_amp_vrrp_port_id: The base port ID of the failed amp.
:param is_vrrp_ipv6: True if the base port IP is IPv6.
:return: A Taskflow sub-flow that will create the amphora.
"""
sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW
amp_for_failover_flow = linear_flow.Flow(sf_name)
# Try to allocate or boot an amphora instance (unconfigured)
amp_for_failover_flow.add(self.get_amphora_for_lb_subflow(
prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW,
role=role))
# Create the VIP base (aka VRRP) port for the amphora.
amp_for_failover_flow.add(network_tasks.CreateVIPBasePort(
name=prefix + '-' + constants.CREATE_VIP_BASE_PORT,
requires=(constants.VIP, constants.VIP_SG_ID,
constants.AMPHORA_ID),
provides=constants.BASE_PORT))
# Attach the VIP base (aka VRRP) port to the amphora.
amp_for_failover_flow.add(compute_tasks.AttachPort(
name=prefix + '-' + constants.ATTACH_PORT,
requires=(constants.AMPHORA, constants.PORT),
rebind={constants.PORT: constants.BASE_PORT}))
# Update the amphora database record with the VIP base port info.
amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails(
name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS,
requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT)))
# Make sure the amphora in the flow storage is up to date
# or the vrrp_ip will be empty
amp_for_failover_flow.add(database_tasks.ReloadAmphora(
name=prefix + '-' + constants.RELOAD_AMPHORA,
requires=constants.AMPHORA_ID, provides=constants.AMPHORA))
# Update the amphora networking for the plugged VIP port
amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID(
name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID,
requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
provides=constants.AMPHORAE_NETWORK_CONFIG))
# Disable the base (vrrp) port on the failed amphora
# This prevents a DAD failure when bringing up the new amphora.
# Keepalived will handle this for act/stdby.
if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and
is_vrrp_ipv6):
amp_for_failover_flow.add(network_tasks.AdminDownPort(
name=prefix + '-' + constants.ADMIN_DOWN_PORT,
inject={constants.PORT_ID: failed_amp_vrrp_port_id}))
amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG,
requires=(constants.AMPHORA, constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
# Plug member ports
amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta(
name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA,
requires=(constants.LOADBALANCER, constants.AMPHORA,
constants.AVAILABILITY_ZONE),
provides=constants.DELTA))
amp_for_failover_flow.add(network_tasks.HandleNetworkDelta(
name=prefix + '-' + constants.HANDLE_NETWORK_DELTA,
requires=(constants.AMPHORA, constants.DELTA),
provides=constants.UPDATED_PORTS))
amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG,
requires=(constants.LOADBALANCER, constants.UPDATED_PORTS)))
return amp_for_failover_flow
def get_failover_amphora_flow(self, failed_amphora, lb_amp_count):
"""Get a Taskflow flow to failover an amphora.
1. Build a replacement amphora.
2. Delete the old amphora.
3. Update the amphorae listener configurations.
4. Update the VRRP configurations if needed.
:param failed_amphora: The amphora object to failover.
:param lb_amp_count: The number of amphora on this load balancer.
:returns: The flow that will provide the failover.
"""
failover_amp_flow = linear_flow.Flow(
constants.FAILOVER_AMPHORA_FLOW)
# Revert amphora to status ERROR if this flow goes wrong
failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
if failed_amphora.role in (constants.ROLE_MASTER,
constants.ROLE_BACKUP):
amp_role = 'master_or_backup'
elif failed_amphora.role == constants.ROLE_STANDALONE:
amp_role = 'standalone'
else:
amp_role = 'undefined'
LOG.info("Performing failover for amphora: %s",
{"id": failed_amphora.id,
"load_balancer_id": failed_amphora.load_balancer_id,
"lb_network_ip": failed_amphora.lb_network_ip,
"compute_id": failed_amphora.compute_id,
"role": amp_role})
failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID(
requires=constants.LOADBALANCER_ID,
provides=constants.VIP_SG_ID))
is_vrrp_ipv6 = False
if failed_amphora.load_balancer_id:
if failed_amphora.vrrp_ip:
is_vrrp_ipv6 = utils.is_ipv6(failed_amphora.vrrp_ip)
# Get a replacement amphora and plug all of the networking.
#
# Do this early as the compute services have been observed to be
# unreliable. The community decided the chance that deleting first
# would open resources for an instance is less likely than the
# compute service failing to boot an instance for other reasons.
# TODO(johnsom) Move this back out to run for spares after
# delete amphora API is available.
failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow(
prefix=constants.FAILOVER_LOADBALANCER_FLOW,
role=failed_amphora.role,
failed_amp_vrrp_port_id=failed_amphora.vrrp_port_id,
is_vrrp_ipv6=is_vrrp_ipv6))
failover_amp_flow.add(
self.get_delete_amphora_flow(
failed_amphora,
retry_attempts=CONF.controller_worker.amphora_delete_retries,
retry_interval=(
CONF.controller_worker.amphora_delete_retry_interval)))
failover_amp_flow.add(
database_tasks.DisableAmphoraHealthMonitoring(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
if not failed_amphora.load_balancer_id:
# This is an unallocated amphora (bogus), we are done.
return failover_amp_flow
failover_amp_flow.add(database_tasks.GetLoadBalancer(
requires=constants.LOADBALANCER_ID,
inject={constants.LOADBALANCER_ID:
failed_amphora.load_balancer_id},
provides=constants.LOADBALANCER))
failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
name=constants.GET_AMPHORAE_FROM_LB,
requires=constants.LOADBALANCER_ID,
inject={constants.LOADBALANCER_ID:
failed_amphora.load_balancer_id},
provides=constants.AMPHORAE))
# Setup timeouts for our requests to the amphorae
timeout_dict = {
constants.CONN_MAX_RETRIES:
CONF.haproxy_amphora.active_connection_max_retries,
constants.CONN_RETRY_INTERVAL:
CONF.haproxy_amphora.active_connection_retry_interval}
# Listeners update needs to be run on all amphora to update
# their peer configurations. So parallelize this with an
# unordered subflow.
update_amps_subflow = unordered_flow.Flow(
constants.UPDATE_AMPS_SUBFLOW)
for amp_index in range(0, lb_amp_count):
update_amps_subflow.add(
amphora_driver_tasks.AmphoraIndexListenerUpdate(
name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE,
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: amp_index,
constants.TIMEOUT_DICT: timeout_dict}))
failover_amp_flow.add(update_amps_subflow)
# Configure and enable keepalived in the amphora
if lb_amp_count == 2:
failover_amp_flow.add(
self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW,
timeout_dict, create_vrrp_group=False))
# Reload the listener. This needs to be done here because
# it will create the required haproxy check scripts for
# the VRRP deployed above.
# A "U" or newer amphora-agent will remove the need for this
# task here.
# TODO(johnsom) Remove this in the "W" cycle
reload_listener_subflow = unordered_flow.Flow(
constants.AMPHORA_LISTENER_RELOAD_SUBFLOW)
for amp_index in range(0, lb_amp_count):
reload_listener_subflow.add(
amphora_driver_tasks.AmphoraIndexListenersReload(
name=(str(amp_index) + '-' +
constants.AMPHORA_RELOAD_LISTENER),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: amp_index,
constants.TIMEOUT_DICT: timeout_dict}))
failover_amp_flow.add(reload_listener_subflow)
# Remove any extraneous ports
# Note: Nova sometimes fails to delete ports attached to an instance.
# For example, if you create an LB with a listener, then
# 'openstack server delete' the amphora, you will see the vrrp
# port attached to that instance will remain after the instance
# is deleted.
# TODO(johnsom) Fix this as part of
# https://storyboard.openstack.org/#!/story/2007077
# Mark LB ACTIVE
failover_amp_flow.add(
database_tasks.MarkLBActiveInDB(mark_subobjects=True,
requires=constants.LOADBALANCER))
return failover_amp_flow
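
All of the flows above are built from the same taskflow primitives: Task
subclasses with execute/revert hooks, composed into linear (and unordered)
flows whose data dependencies the engine resolves by name. A minimal
standalone sketch of that pattern (not Octavia code):

# Minimal taskflow sketch: tasks declare what they provide/require, the
# engine wires the values through, and revert() is the rollback hook
# invoked when a later task fails.
from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow

class AllocateAmphora(task.Task):
    default_provides = 'amphora_id'

    def execute(self):
        return 'amp-0001'

    def revert(self, *args, **kwargs):
        print('rolling back AllocateAmphora')

class BootAmphora(task.Task):
    def execute(self, amphora_id):  # requirement inferred from signature
        print('booting %s' % amphora_id)

flow = linear_flow.Flow('create-amphora-sketch')
flow.add(AllocateAmphora(), BootAmphora())
engines.run(flow)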

View File

@@ -1,105 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow
from octavia.common import constants
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import model_tasks
class HealthMonitorFlows(object):
def get_create_health_monitor_flow(self):
"""Create a flow to create a health monitor
:returns: The flow for creating a health monitor
"""
create_hm_flow = linear_flow.Flow(constants.CREATE_HEALTH_MONITOR_FLOW)
create_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask(
requires=[constants.HEALTH_MON,
constants.LISTENERS,
constants.LOADBALANCER]))
create_hm_flow.add(database_tasks.MarkHealthMonitorPendingCreateInDB(
requires=constants.HEALTH_MON))
create_hm_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
create_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB(
requires=constants.HEALTH_MON))
create_hm_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL))
create_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return create_hm_flow
def get_delete_health_monitor_flow(self):
"""Create a flow to delete a health monitor
:returns: The flow for deleting a health monitor
"""
delete_hm_flow = linear_flow.Flow(constants.DELETE_HEALTH_MONITOR_FLOW)
delete_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask(
requires=[constants.HEALTH_MON,
constants.LISTENERS,
constants.LOADBALANCER]))
delete_hm_flow.add(database_tasks.MarkHealthMonitorPendingDeleteInDB(
requires=constants.HEALTH_MON))
delete_hm_flow.add(model_tasks.
DeleteModelObject(rebind={constants.OBJECT:
constants.HEALTH_MON}))
delete_hm_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
delete_hm_flow.add(database_tasks.DeleteHealthMonitorInDB(
requires=constants.HEALTH_MON))
delete_hm_flow.add(database_tasks.DecrementHealthMonitorQuota(
requires=constants.HEALTH_MON))
delete_hm_flow.add(
database_tasks.UpdatePoolMembersOperatingStatusInDB(
requires=constants.POOL,
inject={constants.OPERATING_STATUS: constants.NO_MONITOR}))
delete_hm_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL))
delete_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return delete_hm_flow
def get_update_health_monitor_flow(self):
"""Create a flow to update a health monitor
:returns: The flow for updating a health monitor
"""
update_hm_flow = linear_flow.Flow(constants.UPDATE_HEALTH_MONITOR_FLOW)
update_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask(
requires=[constants.HEALTH_MON,
constants.LISTENERS,
constants.LOADBALANCER]))
update_hm_flow.add(database_tasks.MarkHealthMonitorPendingUpdateInDB(
requires=constants.HEALTH_MON))
update_hm_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
update_hm_flow.add(database_tasks.UpdateHealthMonInDB(
requires=[constants.HEALTH_MON, constants.UPDATE_DICT]))
update_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB(
requires=constants.HEALTH_MON))
update_hm_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL))
update_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return update_hm_flow
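
All of the flows in this module open with a lifecycle task whose real work happens in revert(): if any later task raises, taskflow unwinds the flow in reverse order and the lifecycle task flips the object to ERROR. A rough sketch of that pattern, with a plain dict standing in for the database record (names here are illustrative, not the Octavia tasks):

from taskflow import engines, task
from taskflow.patterns import linear_flow


class ToErrorOnRevert(task.Task):
    def execute(self, health_mon):
        pass  # nothing to do on the forward pass

    def revert(self, health_mon, *args, **kwargs):
        # Runs only if a later task in the flow raised.
        health_mon['provisioning_status'] = 'ERROR'


class PushConfig(task.Task):
    def execute(self, health_mon):
        raise RuntimeError('amphora unreachable')


flow = linear_flow.Flow('create-health-monitor')
flow.add(ToErrorOnRevert(), PushConfig())
hm = {'provisioning_status': 'PENDING_CREATE'}
try:
    engines.run(flow, store={'health_mon': hm})
except RuntimeError:
    pass
print(hm['provisioning_status'])  # ERROR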

@@ -1,94 +0,0 @@
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow
from octavia.common import constants
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import model_tasks
class L7PolicyFlows(object):
def get_create_l7policy_flow(self):
"""Create a flow to create an L7 policy
:returns: The flow for creating an L7 policy
"""
create_l7policy_flow = linear_flow.Flow(constants.CREATE_L7POLICY_FLOW)
create_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask(
requires=[constants.L7POLICY,
constants.LISTENERS,
constants.LOADBALANCER]))
create_l7policy_flow.add(database_tasks.MarkL7PolicyPendingCreateInDB(
requires=constants.L7POLICY))
create_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
create_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB(
requires=constants.L7POLICY))
create_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return create_l7policy_flow
def get_delete_l7policy_flow(self):
"""Create a flow to delete an L7 policy
:returns: The flow for deleting an L7 policy
"""
delete_l7policy_flow = linear_flow.Flow(constants.DELETE_L7POLICY_FLOW)
delete_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask(
requires=[constants.L7POLICY,
constants.LISTENERS,
constants.LOADBALANCER]))
delete_l7policy_flow.add(database_tasks.MarkL7PolicyPendingDeleteInDB(
requires=constants.L7POLICY))
delete_l7policy_flow.add(model_tasks.DeleteModelObject(
rebind={constants.OBJECT: constants.L7POLICY}))
delete_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
delete_l7policy_flow.add(database_tasks.DeleteL7PolicyInDB(
requires=constants.L7POLICY))
delete_l7policy_flow.add(database_tasks.DecrementL7policyQuota(
requires=constants.L7POLICY))
delete_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return delete_l7policy_flow
def get_update_l7policy_flow(self):
"""Create a flow to update an L7 policy
:returns: The flow for updating an L7 policy
"""
update_l7policy_flow = linear_flow.Flow(constants.UPDATE_L7POLICY_FLOW)
update_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask(
requires=[constants.L7POLICY,
constants.LISTENERS,
constants.LOADBALANCER]))
update_l7policy_flow.add(database_tasks.MarkL7PolicyPendingUpdateInDB(
requires=constants.L7POLICY))
update_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
update_l7policy_flow.add(database_tasks.UpdateL7PolicyInDB(
requires=[constants.L7POLICY, constants.UPDATE_DICT]))
update_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB(
requires=constants.L7POLICY))
update_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return update_l7policy_flow

@@ -1,100 +0,0 @@
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow
from octavia.common import constants
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import model_tasks
class L7RuleFlows(object):
def get_create_l7rule_flow(self):
"""Create a flow to create an L7 rule
:returns: The flow for creating an L7 rule
"""
create_l7rule_flow = linear_flow.Flow(constants.CREATE_L7RULE_FLOW)
create_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask(
requires=[constants.L7RULE,
constants.LISTENERS,
constants.LOADBALANCER]))
create_l7rule_flow.add(database_tasks.MarkL7RulePendingCreateInDB(
requires=constants.L7RULE))
create_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
create_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB(
requires=constants.L7RULE))
create_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB(
requires=constants.L7POLICY))
create_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return create_l7rule_flow
def get_delete_l7rule_flow(self):
"""Create a flow to delete an L7 rule
:returns: The flow for deleting an L7 rule
"""
delete_l7rule_flow = linear_flow.Flow(constants.DELETE_L7RULE_FLOW)
delete_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask(
requires=[constants.L7RULE,
constants.LISTENERS,
constants.LOADBALANCER]))
delete_l7rule_flow.add(database_tasks.MarkL7RulePendingDeleteInDB(
requires=constants.L7RULE))
delete_l7rule_flow.add(model_tasks.DeleteModelObject(
rebind={constants.OBJECT: constants.L7RULE}))
delete_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
delete_l7rule_flow.add(database_tasks.DeleteL7RuleInDB(
requires=constants.L7RULE))
delete_l7rule_flow.add(database_tasks.DecrementL7ruleQuota(
requires=constants.L7RULE))
delete_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB(
requires=constants.L7POLICY))
delete_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return delete_l7rule_flow
def get_update_l7rule_flow(self):
"""Create a flow to update an L7 rule
:returns: The flow for updating an L7 rule
"""
update_l7rule_flow = linear_flow.Flow(constants.UPDATE_L7RULE_FLOW)
update_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask(
requires=[constants.L7RULE,
constants.LISTENERS,
constants.LOADBALANCER]))
update_l7rule_flow.add(database_tasks.MarkL7RulePendingUpdateInDB(
requires=constants.L7RULE))
update_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
update_l7rule_flow.add(database_tasks.UpdateL7RuleInDB(
requires=[constants.L7RULE, constants.UPDATE_DICT]))
update_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB(
requires=constants.L7RULE))
update_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB(
requires=constants.L7POLICY))
update_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return update_l7rule_flow

@@ -1,128 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow
from octavia.common import constants
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import network_tasks
class ListenerFlows(object):
def get_create_listener_flow(self):
"""Create a flow to create a listener
:returns: The flow for creating a listener
"""
create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)
create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
create_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
create_listener_flow.add(network_tasks.UpdateVIP(
requires=constants.LOADBALANCER))
create_listener_flow.add(database_tasks.
MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENERS]))
return create_listener_flow
def get_create_all_listeners_flow(self):
"""Create a flow to create all listeners
:returns: The flow for creating all listeners
"""
create_all_listeners_flow = linear_flow.Flow(
constants.CREATE_LISTENERS_FLOW)
create_all_listeners_flow.add(
database_tasks.GetListenersFromLoadbalancer(
requires=constants.LOADBALANCER,
provides=constants.LISTENERS))
create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer(
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
create_all_listeners_flow.add(network_tasks.UpdateVIP(
requires=constants.LOADBALANCER))
return create_all_listeners_flow
def get_delete_listener_flow(self):
"""Create a flow to delete a listener
:returns: The flow for deleting a listener
"""
delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask(
requires=constants.LISTENER))
delete_listener_flow.add(amphora_driver_tasks.ListenerDelete(
requires=constants.LISTENER))
delete_listener_flow.add(network_tasks.UpdateVIPForDelete(
requires=constants.LOADBALANCER))
delete_listener_flow.add(database_tasks.DeleteListenerInDB(
requires=constants.LISTENER))
delete_listener_flow.add(database_tasks.DecrementListenerQuota(
requires=constants.LISTENER))
delete_listener_flow.add(database_tasks.MarkLBActiveInDB(
requires=constants.LOADBALANCER))
return delete_listener_flow
def get_delete_listener_internal_flow(self, listener_name):
"""Create a flow to delete a listener and l7policies internally
(will skip deletion on the amp and marking LB active)
:returns: The flow for deleting a listener
"""
delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
# Should cascade delete all L7 policies
delete_listener_flow.add(network_tasks.UpdateVIPForDelete(
name='delete_update_vip_' + listener_name,
requires=constants.LOADBALANCER))
delete_listener_flow.add(database_tasks.DeleteListenerInDB(
name='delete_listener_in_db_' + listener_name,
requires=constants.LISTENER,
rebind={constants.LISTENER: listener_name}))
delete_listener_flow.add(database_tasks.DecrementListenerQuota(
name='decrement_listener_quota_' + listener_name,
requires=constants.LISTENER,
rebind={constants.LISTENER: listener_name}))
return delete_listener_flow
def get_update_listener_flow(self):
"""Create a flow to update a listener
:returns: The flow for updating a listener
"""
update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW)
update_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
update_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
update_listener_flow.add(network_tasks.UpdateVIP(
requires=constants.LOADBALANCER))
update_listener_flow.add(database_tasks.UpdateListenerInDB(
requires=[constants.LISTENER, constants.UPDATE_DICT]))
update_listener_flow.add(database_tasks.
MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENERS]))
return update_listener_flow

@@ -1,686 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_config import cfg
from oslo_log import log as logging
from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow
from octavia.common import constants
from octavia.common import exceptions
from octavia.common import utils
from octavia.controller.worker.v1.flows import amphora_flows
from octavia.controller.worker.v1.flows import listener_flows
from octavia.controller.worker.v1.flows import member_flows
from octavia.controller.worker.v1.flows import pool_flows
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.controller.worker.v1.tasks import compute_tasks
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import network_tasks
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class LoadBalancerFlows(object):
def __init__(self):
self.amp_flows = amphora_flows.AmphoraFlows()
self.listener_flows = listener_flows.ListenerFlows()
self.pool_flows = pool_flows.PoolFlows()
self.member_flows = member_flows.MemberFlows()
def get_create_load_balancer_flow(self, topology, listeners=None):
"""Creates a conditional graph flow that allocates a loadbalancer.
:raises InvalidTopology: Invalid topology specified
:return: The graph flow for creating a loadbalancer.
"""
f_name = constants.CREATE_LOADBALANCER_FLOW
lb_create_flow = linear_flow.Flow(f_name)
lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(
requires=constants.LOADBALANCER_ID))
# allocate VIP
lb_create_flow.add(database_tasks.ReloadLoadBalancer(
name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER
))
lb_create_flow.add(network_tasks.AllocateVIP(
requires=constants.LOADBALANCER,
provides=constants.VIP))
lb_create_flow.add(database_tasks.UpdateVIPAfterAllocation(
requires=(constants.LOADBALANCER_ID, constants.VIP),
provides=constants.LOADBALANCER))
lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup(
requires=constants.LOADBALANCER_ID))
lb_create_flow.add(network_tasks.GetSubnetFromVIP(
requires=constants.LOADBALANCER,
provides=constants.SUBNET))
if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
lb_create_flow.add(*self._create_active_standby_topology())
elif topology == constants.TOPOLOGY_SINGLE:
lb_create_flow.add(*self._create_single_topology())
else:
LOG.error("Unknown topology: %s. Unable to build load balancer.",
topology)
raise exceptions.InvalidTopology(topology=topology)
post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
lb_create_flow.add(
self.get_post_lb_amp_association_flow(post_amp_prefix, topology))
if listeners:
lb_create_flow.add(*self._create_listeners_flow())
lb_create_flow.add(
database_tasks.MarkLBActiveInDB(
mark_subobjects=True,
requires=constants.LOADBALANCER
)
)
return lb_create_flow
def _create_single_topology(self):
sf_name = (constants.ROLE_STANDALONE + '-' +
constants.AMP_PLUG_NET_SUBFLOW)
amp_for_lb_net_flow = linear_flow.Flow(sf_name)
amp_for_lb_flow = self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_STANDALONE,
role=constants.ROLE_STANDALONE)
amp_for_lb_net_flow.add(amp_for_lb_flow)
amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name))
return amp_for_lb_net_flow
def _create_active_standby_topology(
self, lf_name=constants.CREATE_LOADBALANCER_FLOW):
# When we boot up amphora for an active/standby topology,
# we should leverage the Nova anti-affinity capabilities
# to place the amphora on different hosts, also we need to check
# if anti-affinity-flag is enabled or not:
anti_affinity = CONF.nova.enable_anti_affinity
flows = []
if anti_affinity:
# we need to create a server group first
flows.append(
compute_tasks.NovaServerGroupCreate(
name=lf_name + '-' +
constants.CREATE_SERVER_GROUP_FLOW,
requires=(constants.LOADBALANCER_ID),
provides=constants.SERVER_GROUP_ID))
# update server group id in lb table
flows.append(
database_tasks.UpdateLBServerGroupInDB(
name=lf_name + '-' +
constants.UPDATE_LB_SERVERGROUPID_FLOW,
requires=(constants.LOADBALANCER_ID,
constants.SERVER_GROUP_ID)))
f_name = constants.CREATE_LOADBALANCER_FLOW
amps_flow = unordered_flow.Flow(f_name)
master_sf_name = (constants.ROLE_MASTER + '-' +
constants.AMP_PLUG_NET_SUBFLOW)
master_amp_sf = linear_flow.Flow(master_sf_name)
master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER))
master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name))
backup_sf_name = (constants.ROLE_BACKUP + '-' +
constants.AMP_PLUG_NET_SUBFLOW)
backup_amp_sf = linear_flow.Flow(backup_sf_name)
backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP))
backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name))
amps_flow.add(master_amp_sf, backup_amp_sf)
return flows + [amps_flow]
def _get_amp_net_subflow(self, sf_name):
flows = []
flows.append(network_tasks.PlugVIPAmpphora(
name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
requires=(constants.LOADBALANCER, constants.AMPHORA,
constants.SUBNET),
provides=constants.AMP_DATA))
flows.append(network_tasks.ApplyQosAmphora(
name=sf_name + '-' + constants.APPLY_QOS_AMP,
requires=(constants.LOADBALANCER, constants.AMP_DATA,
constants.UPDATE_DICT)))
flows.append(database_tasks.UpdateAmphoraVIPData(
name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA,
requires=constants.AMP_DATA))
flows.append(database_tasks.ReloadAmphora(
name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP,
requires=constants.AMPHORA_ID,
provides=constants.AMPHORA))
flows.append(database_tasks.ReloadLoadBalancer(
name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
flows.append(network_tasks.GetAmphoraNetworkConfigs(
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
requires=(constants.LOADBALANCER, constants.AMPHORA),
provides=constants.AMPHORA_NETWORK_CONFIG))
flows.append(amphora_driver_tasks.AmphoraPostVIPPlug(
name=sf_name + '-' + constants.AMP_POST_VIP_PLUG,
rebind={constants.AMPHORAE_NETWORK_CONFIG:
constants.AMPHORA_NETWORK_CONFIG},
requires=(constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
return flows
def _create_listeners_flow(self):
flows = []
flows.append(
database_tasks.ReloadLoadBalancer(
name=constants.RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER
)
)
flows.append(
network_tasks.CalculateDelta(
requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE),
provides=constants.DELTAS
)
)
flows.append(
network_tasks.HandleNetworkDeltas(
requires=constants.DELTAS, provides=constants.UPDATED_PORTS
)
)
flows.append(
network_tasks.GetAmphoraeNetworkConfigs(
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE_NETWORK_CONFIG
)
)
flows.append(
amphora_driver_tasks.AmphoraePostNetworkPlug(
requires=(constants.LOADBALANCER, constants.UPDATED_PORTS,
constants.AMPHORAE_NETWORK_CONFIG)
)
)
flows.append(
self.listener_flows.get_create_all_listeners_flow()
)
return flows
def get_post_lb_amp_association_flow(self, prefix, topology):
"""Reload the loadbalancer and create networking subflows for
created/allocated amphorae.
:return: Post amphorae association subflow
"""
sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
post_create_LB_flow = linear_flow.Flow(sf_name)
post_create_LB_flow.add(
database_tasks.ReloadLoadBalancer(
name=sf_name + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
post_create_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE))
vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix)
post_create_LB_flow.add(vrrp_subflow)
post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
return post_create_LB_flow
def _get_delete_listeners_flow(self, lb):
"""Sets up an internal delete flow
Because taskflow doesn't support loops, we store each listener
we want to delete in the store part and then rebind
:param lb: load balancer
:return: (flow, store) -- flow for the deletion and store with all
the listeners stored properly
"""
listeners_delete_flow = unordered_flow.Flow('listener_delete_flow')
store = {}
for listener in lb.listeners:
listener_name = 'listener_' + listener.id
store[listener_name] = listener
listeners_delete_flow.add(
self.listener_flows.get_delete_listener_internal_flow(
listener_name))
return (listeners_delete_flow, store)
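
As the docstring notes, taskflow has no loop construct, so each listener is placed in the store under a unique key and rebind maps that key onto the task's generic 'listener' argument. A bare-bones sketch of the same store-and-rebind trick (names are illustrative):

from taskflow import engines, task
from taskflow.patterns import unordered_flow


class DeleteListener(task.Task):
    def execute(self, listener):
        print('deleting listener %s' % listener['id'])


flow = unordered_flow.Flow('listener-delete-flow')
store = {}
for listener_id in ('a1', 'b2'):
    key = 'listener_' + listener_id
    store[key] = {'id': listener_id}
    # rebind maps the store key onto the task's 'listener' argument.
    flow.add(DeleteListener(name='delete_' + key,
                            rebind={'listener': key}))

engines.run(flow, store=store)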
def get_delete_load_balancer_flow(self, lb):
"""Creates a flow to delete a load balancer.
:returns: The flow for deleting a load balancer
"""
return self._get_delete_load_balancer_flow(lb, False)
def _get_delete_pools_flow(self, lb):
"""Sets up an internal delete flow
Because taskflow doesn't support loops, we store each pool
we want to delete in the store part and then rebind
:param lb: load balancer
:return: (flow, store) -- flow for the deletion and store with all
the pools stored properly
"""
pools_delete_flow = unordered_flow.Flow('pool_delete_flow')
store = {}
for pool in lb.pools:
pool_name = 'pool' + pool.id
store[pool_name] = pool
pools_delete_flow.add(
self.pool_flows.get_delete_pool_flow_internal(
pool_name))
return (pools_delete_flow, store)
def _get_delete_load_balancer_flow(self, lb, cascade):
store = {}
delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW)
delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
requires=constants.LOADBALANCER))
delete_LB_flow.add(compute_tasks.NovaServerGroupDelete(
requires=constants.SERVER_GROUP_ID))
delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy(
requires=constants.LOADBALANCER))
if cascade:
(listeners_delete, store) = self._get_delete_listeners_flow(lb)
(pools_delete, pool_store) = self._get_delete_pools_flow(lb)
store.update(pool_store)
delete_LB_flow.add(pools_delete)
delete_LB_flow.add(listeners_delete)
delete_LB_flow.add(network_tasks.UnplugVIP(
requires=constants.LOADBALANCER))
delete_LB_flow.add(network_tasks.DeallocateVIP(
requires=constants.LOADBALANCER))
delete_LB_flow.add(compute_tasks.DeleteAmphoraeOnLoadBalancer(
requires=constants.LOADBALANCER))
delete_LB_flow.add(database_tasks.MarkLBAmphoraeDeletedInDB(
requires=constants.LOADBALANCER))
delete_LB_flow.add(database_tasks.DisableLBAmphoraeHealthMonitoring(
requires=constants.LOADBALANCER))
delete_LB_flow.add(database_tasks.MarkLBDeletedInDB(
requires=constants.LOADBALANCER))
delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(
requires=constants.LOADBALANCER))
return (delete_LB_flow, store)
def get_cascade_delete_load_balancer_flow(self, lb):
"""Creates a flow to delete a load balancer.
:returns: The flow for deleting a load balancer
"""
return self._get_delete_load_balancer_flow(lb, True)
def get_update_load_balancer_flow(self):
"""Creates a flow to update a load balancer.
:returns: The flow for updating a load balancer
"""
update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW)
update_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
requires=constants.LOADBALANCER))
update_LB_flow.add(network_tasks.ApplyQos(
requires=(constants.LOADBALANCER, constants.UPDATE_DICT)))
update_LB_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
update_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
update_LB_flow.add(database_tasks.MarkLBActiveInDB(
requires=constants.LOADBALANCER))
return update_LB_flow
def get_failover_LB_flow(self, amps, lb):
"""Failover a load balancer.
1. Validate the VIP port is correct and present.
2. Build a replacement amphora.
3. Delete the failed amphora.
4. Configure the replacement amphora listeners.
5. Configure VRRP for the listeners.
6. Build the second replacement amphora.
7. Delete the second failed amphora.
8. Delete any extraneous amphora.
9. Configure the listeners on the new amphorae.
10. Configure the VRRP on the new amphorae.
11. Reload the listener configurations to pick up VRRP changes.
12. Mark the load balancer back to ACTIVE.
:returns: The flow that will provide the failover.
"""
# Pick one amphora to be failed over if any exist.
failed_amp = None
if amps:
failed_amp = amps.pop()
failover_LB_flow = linear_flow.Flow(
constants.FAILOVER_LOADBALANCER_FLOW)
# Revert LB to provisioning_status ERROR if this flow goes wrong
failover_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
requires=constants.LOADBALANCER))
# Setup timeouts for our requests to the amphorae
timeout_dict = {
constants.CONN_MAX_RETRIES:
CONF.haproxy_amphora.active_connection_max_retries,
constants.CONN_RETRY_INTERVAL:
CONF.haproxy_amphora.active_connection_retry_interval}
if failed_amp:
if failed_amp.role in (constants.ROLE_MASTER,
constants.ROLE_BACKUP):
amp_role = 'master_or_backup'
elif failed_amp.role == constants.ROLE_STANDALONE:
amp_role = 'standalone'
else:
amp_role = 'undefined'
LOG.info("Performing failover for amphora: %s",
{"id": failed_amp.id,
"load_balancer_id": lb.id,
"lb_network_ip": failed_amp.lb_network_ip,
"compute_id": failed_amp.compute_id,
"role": amp_role})
failover_LB_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
# Check that the VIP port exists and is ok
failover_LB_flow.add(
network_tasks.AllocateVIPforFailover(
requires=constants.LOADBALANCER, provides=constants.VIP))
# Update the database with the VIP information
failover_LB_flow.add(database_tasks.UpdateVIPAfterAllocation(
requires=(constants.LOADBALANCER_ID, constants.VIP),
provides=constants.LOADBALANCER))
# Make sure the SG has the correct rules and re-apply it to the
# VIP port. The SG is not enforced on the VIP port, but applying
# it there helps lock the SG as in use.
failover_LB_flow.add(network_tasks.UpdateVIPSecurityGroup(
requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID))
new_amp_role = constants.ROLE_STANDALONE
if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
new_amp_role = constants.ROLE_BACKUP
# Get a replacement amphora and plug all of the networking.
#
# Do this early as the compute services have been observed to be
# unreliable. The community decided the chance that deleting first
# would open resources for an instance is less likely than the compute
# service failing to boot an instance for other reasons.
if failed_amp:
failed_vrrp_is_ipv6 = False
if failed_amp.vrrp_ip:
failed_vrrp_is_ipv6 = utils.is_ipv6(failed_amp.vrrp_ip)
failover_LB_flow.add(
self.amp_flows.get_amphora_for_lb_failover_subflow(
prefix=constants.FAILOVER_LOADBALANCER_FLOW,
role=new_amp_role,
failed_amp_vrrp_port_id=failed_amp.vrrp_port_id,
is_vrrp_ipv6=failed_vrrp_is_ipv6))
else:
failover_LB_flow.add(
self.amp_flows.get_amphora_for_lb_failover_subflow(
prefix=constants.FAILOVER_LOADBALANCER_FLOW,
role=new_amp_role))
if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB(
name=constants.MARK_AMP_BACKUP_INDB,
requires=constants.AMPHORA))
# Delete the failed amp
if failed_amp:
failover_LB_flow.add(
self.amp_flows.get_delete_amphora_flow(failed_amp))
# Update the data stored in the flow from the database
failover_LB_flow.add(database_tasks.ReloadLoadBalancer(
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
# Configure the listener(s)
# We will run update on this amphora again later if this is
# an active/standby load balancer because we want this amp
# functional as soon as possible. It must run again to update
# the configurations for the new peers.
failover_LB_flow.add(amphora_driver_tasks.AmpListenersUpdate(
name=constants.AMP_LISTENER_UPDATE,
requires=(constants.LOADBALANCER, constants.AMPHORA),
inject={constants.TIMEOUT_DICT: timeout_dict}))
# Bring up the new "backup" amphora VIP now to reduce the outage
# on the final failover. This dropped the outage from 8-9 seconds
# to less than one in my lab.
# This does mean some steps have to be repeated later to reconfigure
# for the second amphora as a peer.
if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
failover_LB_flow.add(database_tasks.CreateVRRPGroupForLB(
name=new_amp_role + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
requires=constants.LOADBALANCER_ID))
failover_LB_flow.add(network_tasks.GetAmphoraNetworkConfigsByID(
name=(new_amp_role + '-' +
constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID),
requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
provides=constants.FIRST_AMP_NETWORK_CONFIGS))
failover_LB_flow.add(
amphora_driver_tasks.AmphoraUpdateVRRPInterface(
name=new_amp_role + '-' + constants.AMP_UPDATE_VRRP_INTF,
requires=constants.AMPHORA,
inject={constants.TIMEOUT_DICT: timeout_dict},
provides=constants.FIRST_AMP_VRRP_INTERFACE))
failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPUpdate(
name=new_amp_role + '-' + constants.AMP_VRRP_UPDATE,
requires=(constants.LOADBALANCER_ID, constants.AMPHORA),
rebind={constants.AMPHORAE_NETWORK_CONFIG:
constants.FIRST_AMP_NETWORK_CONFIGS,
constants.AMP_VRRP_INT:
constants.FIRST_AMP_VRRP_INTERFACE},
inject={constants.TIMEOUT_DICT: timeout_dict}))
failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPStart(
name=new_amp_role + '-' + constants.AMP_VRRP_START,
requires=constants.AMPHORA,
inject={constants.TIMEOUT_DICT: timeout_dict}))
# Start the listener. This needs to be done here because
# it will create the required haproxy check scripts for
# the VRRP deployed above.
# A "V" or newer amphora-agent will remove the need for this
# task here.
# TODO(johnsom) Remove this in the "X" cycle
failover_LB_flow.add(amphora_driver_tasks.ListenersStart(
name=new_amp_role + '-' + constants.AMP_LISTENER_START,
requires=(constants.LOADBALANCER, constants.AMPHORA)))
# #### Work on standby amphora if needed #####
new_amp_role = constants.ROLE_MASTER
failed_amp = None
if amps:
failed_amp = amps.pop()
if failed_amp:
if failed_amp.role in (constants.ROLE_MASTER,
constants.ROLE_BACKUP):
amp_role = 'master_or_backup'
elif failed_amp.role == constants.ROLE_STANDALONE:
amp_role = 'standalone'
else:
amp_role = 'undefined'
LOG.info("Performing failover for amphora: %s",
{"id": failed_amp.id,
"load_balancer_id": lb.id,
"lb_network_ip": failed_amp.lb_network_ip,
"compute_id": failed_amp.compute_id,
"role": amp_role})
failover_LB_flow.add(
database_tasks.MarkAmphoraPendingDeleteInDB(
name=(new_amp_role + '-' +
constants.MARK_AMPHORA_PENDING_DELETE),
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy(
name=(new_amp_role + '-' +
constants.MARK_AMPHORA_HEALTH_BUSY),
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
# Get a replacement amphora and plug all of the networking.
#
# Do this early as the compute services have been observed to be
# unreliable. The community decided the chance that deleting first
# would open resources for an instance is less likely than the
# compute service failing to boot an instance for other reasons.
failover_LB_flow.add(
self.amp_flows.get_amphora_for_lb_failover_subflow(
prefix=(new_amp_role + '-' +
constants.FAILOVER_LOADBALANCER_FLOW),
role=new_amp_role))
failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB(
name=constants.MARK_AMP_MASTER_INDB,
requires=constants.AMPHORA))
# Delete the failed amp
if failed_amp:
failover_LB_flow.add(
self.amp_flows.get_delete_amphora_flow(
failed_amp))
failover_LB_flow.add(
database_tasks.DisableAmphoraHealthMonitoring(
name=(new_amp_role + '-' +
constants.DISABLE_AMP_HEALTH_MONITORING),
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
# Remove any extraneous amphora
# Note: This runs in all topology situations.
# It should run before the act/stdby final listener update so
# that we don't bother attempting to update dead amphorae.
delete_extra_amps_flow = unordered_flow.Flow(
constants.DELETE_EXTRA_AMPHORAE_FLOW)
for amp in amps:
LOG.debug('Found extraneous amphora %s on load balancer %s. '
'Deleting.', amp.id, lb.id)
delete_extra_amps_flow.add(
self.amp_flows.get_delete_amphora_flow(amp))
failover_LB_flow.add(delete_extra_amps_flow)
if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
# Update the data stored in the flow from the database
failover_LB_flow.add(database_tasks.ReloadLoadBalancer(
name=new_amp_role + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
failover_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
name=new_amp_role + '-' + constants.GET_AMPHORAE_FROM_LB,
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE))
# The listener update needs to run on all amphorae to update
# their peer configurations, so parallelize this with an
# unordered subflow.
update_amps_subflow = unordered_flow.Flow(
constants.UPDATE_AMPS_SUBFLOW)
# Set up parallel flows for each amp. We don't know the new amp
# details at flow creation time, so set up a subflow for each
# amp on the LB; each task indexes into the list of amphorae
# to find the one it should work on.
update_amps_subflow.add(
amphora_driver_tasks.AmphoraIndexListenerUpdate(
name=(constants.AMPHORA + '-0-' +
constants.AMP_LISTENER_UPDATE),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict}))
update_amps_subflow.add(
amphora_driver_tasks.AmphoraIndexListenerUpdate(
name=(constants.AMPHORA + '-1-' +
constants.AMP_LISTENER_UPDATE),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
failover_LB_flow.add(update_amps_subflow)
# Configure and enable keepalived in the amphora
failover_LB_flow.add(self.amp_flows.get_vrrp_subflow(
new_amp_role + '-' + constants.GET_VRRP_SUBFLOW,
timeout_dict, create_vrrp_group=False))
# #### End of standby ####
# Reload the listener. This needs to be done here because
# it will create the required haproxy check scripts for
# the VRRP deployed above.
# A "V" or newer amphora-agent will remove the need for this
# task here.
# TODO(johnsom) Remove this in the "X" cycle
failover_LB_flow.add(
amphora_driver_tasks.AmphoraIndexListenersReload(
name=(new_amp_role + '-' +
constants.AMPHORA_RELOAD_LISTENER),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
# Remove any extraneous ports
# Note: Nova sometimes fails to delete ports attached to an instance.
# For example, if you create an LB with a listener, then
# 'openstack server delete' the amphora, you will see the vrrp
# port attached to that instance will remain after the instance
# is deleted.
# TODO(johnsom) Fix this as part of
# https://storyboard.openstack.org/#!/story/2007077
# Mark LB ACTIVE
failover_LB_flow.add(
database_tasks.MarkLBActiveInDB(mark_subobjects=True,
requires=constants.LOADBALANCER))
return failover_LB_flow

@@ -1,230 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow
from octavia.common import constants
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import model_tasks
from octavia.controller.worker.v1.tasks import network_tasks
class MemberFlows(object):
def get_create_member_flow(self):
"""Create a flow to create a member
:returns: The flow for creating a member
"""
create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW)
create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
requires=[constants.MEMBER,
constants.LISTENERS,
constants.LOADBALANCER,
constants.POOL]))
create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB(
requires=constants.MEMBER))
create_member_flow.add(network_tasks.CalculateDelta(
requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE),
provides=constants.DELTAS))
create_member_flow.add(network_tasks.HandleNetworkDeltas(
requires=(constants.DELTAS, constants.LOADBALANCER),
provides=constants.UPDATED_PORTS))
create_member_flow.add(network_tasks.GetAmphoraeNetworkConfigs(
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE_NETWORK_CONFIG))
create_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
requires=(constants.LOADBALANCER, constants.UPDATED_PORTS,
constants.AMPHORAE_NETWORK_CONFIG)))
create_member_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
create_member_flow.add(database_tasks.MarkMemberActiveInDB(
requires=constants.MEMBER))
create_member_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL))
create_member_flow.add(database_tasks.
MarkLBAndListenersActiveInDB(
requires=(constants.LOADBALANCER,
constants.LISTENERS)))
return create_member_flow
def get_delete_member_flow(self):
"""Create a flow to delete a member
:returns: The flow for deleting a member
"""
delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW)
delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
requires=[constants.MEMBER,
constants.LISTENERS,
constants.LOADBALANCER,
constants.POOL]))
delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB(
requires=constants.MEMBER))
delete_member_flow.add(network_tasks.CalculateDelta(
requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE),
provides=constants.DELTAS))
delete_member_flow.add(network_tasks.HandleNetworkDeltas(
requires=(constants.DELTAS, constants.LOADBALANCER),
provides=constants.UPDATED_PORTS))
delete_member_flow.add(network_tasks.GetAmphoraeNetworkConfigs(
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE_NETWORK_CONFIG))
delete_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
requires=(constants.LOADBALANCER, constants.UPDATED_PORTS,
constants.AMPHORAE_NETWORK_CONFIG)))
delete_member_flow.add(model_tasks.
DeleteModelObject(rebind={constants.OBJECT:
constants.MEMBER}))
delete_member_flow.add(database_tasks.DeleteMemberInDB(
requires=constants.MEMBER))
delete_member_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
delete_member_flow.add(database_tasks.DecrementMemberQuota(
requires=constants.MEMBER))
delete_member_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL))
delete_member_flow.add(database_tasks.
MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENERS]))
return delete_member_flow
def get_update_member_flow(self):
"""Create a flow to update a member
:returns: The flow for updating a member
"""
update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW)
update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
requires=[constants.MEMBER,
constants.LISTENERS,
constants.LOADBALANCER,
constants.POOL]))
update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB(
requires=constants.MEMBER))
update_member_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
update_member_flow.add(database_tasks.UpdateMemberInDB(
requires=[constants.MEMBER, constants.UPDATE_DICT]))
update_member_flow.add(database_tasks.MarkMemberActiveInDB(
requires=constants.MEMBER))
update_member_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL))
update_member_flow.add(database_tasks.
MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENERS]))
return update_member_flow
def get_batch_update_members_flow(self, old_members, new_members,
updated_members):
"""Create a flow to batch update members
:returns: The flow for batch updating members
"""
batch_update_members_flow = linear_flow.Flow(
constants.BATCH_UPDATE_MEMBERS_FLOW)
unordered_members_flow = unordered_flow.Flow(
constants.UNORDERED_MEMBER_UPDATES_FLOW)
unordered_members_active_flow = unordered_flow.Flow(
constants.UNORDERED_MEMBER_ACTIVE_FLOW)
# Delete old members
unordered_members_flow.add(
lifecycle_tasks.MembersToErrorOnRevertTask(
inject={constants.MEMBERS: old_members},
name='{flow}-deleted'.format(
flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
for m in old_members:
unordered_members_flow.add(
model_tasks.DeleteModelObject(
inject={constants.OBJECT: m},
name='{flow}-{id}'.format(
id=m.id, flow=constants.DELETE_MODEL_OBJECT_FLOW)))
unordered_members_flow.add(database_tasks.DeleteMemberInDB(
inject={constants.MEMBER: m},
name='{flow}-{id}'.format(
id=m.id, flow=constants.DELETE_MEMBER_INDB)))
unordered_members_flow.add(database_tasks.DecrementMemberQuota(
inject={constants.MEMBER: m},
name='{flow}-{id}'.format(
id=m.id, flow=constants.DECREMENT_MEMBER_QUOTA_FLOW)))
# Create new members
unordered_members_flow.add(
lifecycle_tasks.MembersToErrorOnRevertTask(
inject={constants.MEMBERS: new_members},
name='{flow}-created'.format(
flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
for m in new_members:
unordered_members_active_flow.add(
database_tasks.MarkMemberActiveInDB(
inject={constants.MEMBER: m},
name='{flow}-{id}'.format(
id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB)))
# Update existing members
unordered_members_flow.add(
lifecycle_tasks.MembersToErrorOnRevertTask(
# updated_members is a list of (obj, dict), only pass `obj`
inject={constants.MEMBERS: [m[0] for m in updated_members]},
name='{flow}-updated'.format(
flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
for m, um in updated_members:
um.pop('id', None)
unordered_members_active_flow.add(
database_tasks.MarkMemberActiveInDB(
inject={constants.MEMBER: m},
name='{flow}-{id}'.format(
id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB)))
batch_update_members_flow.add(unordered_members_flow)
# Done, do real updates
batch_update_members_flow.add(network_tasks.CalculateDelta(
requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE),
provides=constants.DELTAS))
batch_update_members_flow.add(network_tasks.HandleNetworkDeltas(
requires=(constants.DELTAS, constants.LOADBALANCER),
provides=constants.UPDATED_PORTS))
batch_update_members_flow.add(network_tasks.GetAmphoraeNetworkConfigs(
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE_NETWORK_CONFIG))
batch_update_members_flow.add(
amphora_driver_tasks.AmphoraePostNetworkPlug(
requires=(constants.LOADBALANCER, constants.UPDATED_PORTS,
constants.AMPHORAE_NETWORK_CONFIG)))
# Update the Listener (this makes the changes active on the Amp)
batch_update_members_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
# Mark all the members ACTIVE here, then pool then LB/Listeners
batch_update_members_flow.add(unordered_members_active_flow)
batch_update_members_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL))
batch_update_members_flow.add(
database_tasks.MarkLBAndListenersActiveInDB(
requires=(constants.LOADBALANCER,
constants.LISTENERS)))
return batch_update_members_flow
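
The batch flow above leans on linear_flow ordering between unordered stages: member deletes and creates may run in any order, then a single ListenersUpdate pushes the new configuration, and only then are members marked ACTIVE. A compact sketch of staging unordered subflows inside a linear flow (the Step task is illustrative):

from taskflow import engines, task
from taskflow.patterns import linear_flow, unordered_flow


class Step(task.Task):
    def execute(self):
        print(self.name)


batch = linear_flow.Flow('batch-update-members')
mutate = unordered_flow.Flow('mutate-members')
mutate.add(Step(name='delete-m1'), Step(name='create-m2'))
activate = unordered_flow.Flow('mark-members-active')
activate.add(Step(name='activate-m2'))

# Stages run in order; tasks inside each stage may run in parallel.
batch.add(mutate, Step(name='listeners-update'), activate)
engines.run(batch)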

@@ -1,127 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow
from octavia.common import constants
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import model_tasks
class PoolFlows(object):
def get_create_pool_flow(self):
"""Create a flow to create a pool
:returns: The flow for creating a pool
"""
create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW)
create_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask(
requires=[constants.POOL,
constants.LISTENERS,
constants.LOADBALANCER]))
create_pool_flow.add(database_tasks.MarkPoolPendingCreateInDB(
requires=constants.POOL))
create_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
create_pool_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL))
create_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return create_pool_flow
def get_delete_pool_flow(self):
"""Create a flow to delete a pool
:returns: The flow for deleting a pool
"""
delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW)
delete_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask(
requires=[constants.POOL,
constants.LISTENERS,
constants.LOADBALANCER]))
delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB(
requires=constants.POOL))
delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota(
requires=constants.POOL, provides=constants.POOL_CHILD_COUNT))
delete_pool_flow.add(model_tasks.DeleteModelObject(
rebind={constants.OBJECT: constants.POOL}))
delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
delete_pool_flow.add(database_tasks.DeletePoolInDB(
requires=constants.POOL))
delete_pool_flow.add(database_tasks.DecrementPoolQuota(
requires=[constants.POOL, constants.POOL_CHILD_COUNT]))
delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return delete_pool_flow
def get_delete_pool_flow_internal(self, name):
"""Create a flow to delete a pool, etc.
:returns: The flow for deleting a pool
"""
delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW)
# health monitor should cascade
# members should cascade
delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB(
name='mark_pool_pending_delete_in_db_' + name,
requires=constants.POOL,
rebind={constants.POOL: name}))
delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota(
name='count_pool_children_for_quota_' + name,
requires=constants.POOL,
provides=constants.POOL_CHILD_COUNT,
rebind={constants.POOL: name}))
delete_pool_flow.add(model_tasks.DeleteModelObject(
name='delete_model_object_' + name,
rebind={constants.OBJECT: name}))
delete_pool_flow.add(database_tasks.DeletePoolInDB(
name='delete_pool_in_db_' + name,
requires=constants.POOL,
rebind={constants.POOL: name}))
delete_pool_flow.add(database_tasks.DecrementPoolQuota(
name='decrement_pool_quota_' + name,
requires=[constants.POOL, constants.POOL_CHILD_COUNT],
rebind={constants.POOL: name}))
return delete_pool_flow
def get_update_pool_flow(self):
"""Create a flow to update a pool
:returns: The flow for updating a pool
"""
update_pool_flow = linear_flow.Flow(constants.UPDATE_POOL_FLOW)
update_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask(
requires=[constants.POOL,
constants.LISTENERS,
constants.LOADBALANCER]))
update_pool_flow.add(database_tasks.MarkPoolPendingUpdateInDB(
requires=constants.POOL))
update_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER))
update_pool_flow.add(database_tasks.UpdatePoolInDB(
requires=[constants.POOL, constants.UPDATE_DICT]))
update_pool_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL))
update_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return update_pool_flow

@@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@@ -1,453 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cryptography import fernet
from oslo_config import cfg
from oslo_log import log as logging
from stevedore import driver as stevedore_driver
from taskflow import task
from taskflow.types import failure
from octavia.amphorae.backends.agent import agent_jinja_cfg
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker import task_utils as task_utilities
from octavia.db import api as db_apis
from octavia.db import repositories as repo
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class BaseAmphoraTask(task.Task):
"""Base task to load drivers common to the tasks."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.amphora_driver = stevedore_driver.DriverManager(
namespace='octavia.amphora.drivers',
name=CONF.controller_worker.amphora_driver,
invoke_on_load=True
).driver
self.amphora_repo = repo.AmphoraRepository()
self.listener_repo = repo.ListenerRepository()
self.loadbalancer_repo = repo.LoadBalancerRepository()
self.task_utils = task_utilities.TaskUtils()
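
BaseAmphoraTask resolves the amphora driver through stevedore, which looks the configured name up as a setuptools entry point in the 'octavia.amphora.drivers' namespace and, with invoke_on_load=True, instantiates it. A minimal sketch of that lookup; 'amphora_noop_driver' is assumed to be the registered name of Octavia's no-op driver:

from stevedore import driver as stevedore_driver

manager = stevedore_driver.DriverManager(
    namespace='octavia.amphora.drivers',
    name='amphora_noop_driver',  # assumed entry point name
    invoke_on_load=True)
amphora_driver = manager.driver  # the loaded and instantiated driver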
class AmpListenersUpdate(BaseAmphoraTask):
"""Task to update the listeners on one amphora."""
def execute(self, loadbalancer, amphora, timeout_dict=None):
# Note, we don't want this to cause a revert as it may be used
# in a failover flow with both amps failing. Skip it and let
# health manager fix it.
try:
# Make sure we have a fresh load balancer object
loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer.id)
self.amphora_driver.update_amphora_listeners(
loadbalancer, amphora, timeout_dict)
except Exception as e:
LOG.error('Failed to update listeners on amphora %s. Skipping '
'this amphora as it is failing to update due to: %s',
amphora.id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.ERROR)
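
Unlike the lifecycle tasks, which rely on revert() to flag errors, this task deliberately swallows exceptions: raising inside execute() would unwind the whole failover flow, so it logs, marks the amphora ERROR, and lets the health manager repair it later. A toy sketch of that catch-and-flag pattern (names are illustrative):

from taskflow import engines, task
from taskflow.patterns import linear_flow


def push_config(amphora):
    # Stand-in for the driver call; simulate a dead amphora.
    raise RuntimeError('connection timed out')


class BestEffortUpdate(task.Task):
    def execute(self, amphora):
        try:
            push_config(amphora)
        except Exception as e:
            # Swallow the error so the rest of the failover flow keeps
            # going; flag the amphora for the health manager instead.
            print('skipping %s: %s' % (amphora['id'], e))
            amphora['status'] = 'ERROR'


flow = linear_flow.Flow('best-effort-update')
flow.add(BestEffortUpdate())
engines.run(flow, store={'amphora': {'id': 'amp-a', 'status': 'ALLOCATED'}})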
class AmphoraIndexListenerUpdate(BaseAmphoraTask):
"""Task to update the listeners on one amphora."""
def execute(self, loadbalancer, amphora_index, amphorae,
timeout_dict=None):
# Note, we don't want this to cause a revert as it may be used
# in a failover flow with both amps failing. Skip it and let
# health manager fix it.
try:
# Make sure we have a fresh load balancer object
loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer.id)
self.amphora_driver.update_amphora_listeners(
loadbalancer, amphorae[amphora_index], timeout_dict)
except Exception as e:
amphora_id = amphorae[amphora_index].id
LOG.error('Failed to update listeners on amphora %s. Skipping '
'this amphora as it is failing to update due to: %s',
amphora_id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR)
class ListenersUpdate(BaseAmphoraTask):
"""Task to update amphora with all specified listeners' configurations."""
def execute(self, loadbalancer):
"""Execute updates per listener for an amphora."""
self.amphora_driver.update(loadbalancer)
def revert(self, loadbalancer, *args, **kwargs):
"""Handle failed listeners updates."""
LOG.warning("Reverting listeners updates.")
for listener in loadbalancer.listeners:
self.task_utils.mark_listener_prov_status_error(listener.id)
class ListenersStart(BaseAmphoraTask):
"""Task to start all listeners on the vip."""
def execute(self, loadbalancer, amphora=None):
"""Execute listener start routines for listeners on an amphora."""
if loadbalancer.listeners:
self.amphora_driver.start(loadbalancer, amphora)
LOG.debug("Started the listeners on the vip")
def revert(self, loadbalancer, *args, **kwargs):
"""Handle failed listeners starts."""
LOG.warning("Reverting listeners starts.")
for listener in loadbalancer.listeners:
self.task_utils.mark_listener_prov_status_error(listener.id)
class AmphoraIndexListenersReload(BaseAmphoraTask):
"""Task to reload all listeners on an amphora."""
def execute(self, loadbalancer, amphora_index, amphorae,
timeout_dict=None):
"""Execute listener reload routines for listeners on an amphora."""
if loadbalancer.listeners:
try:
self.amphora_driver.reload(
loadbalancer, amphorae[amphora_index], timeout_dict)
except Exception as e:
amphora_id = amphorae[amphora_index].id
LOG.warning('Failed to reload listeners on amphora %s. '
'Skipping this amphora as it is failing to '
'reload due to: %s', amphora_id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR)
class ListenerDelete(BaseAmphoraTask):
"""Task to delete the listener on the vip."""
def execute(self, listener):
"""Execute listener delete routines for an amphora."""
# TODO(rm_work): This is only relevant because of UDP listeners now.
self.amphora_driver.delete(listener)
LOG.debug("Deleted the listener on the vip")
def revert(self, listener, *args, **kwargs):
"""Handle a failed listener delete."""
LOG.warning("Reverting listener delete.")
self.task_utils.mark_listener_prov_status_error(listener.id)
class AmphoraGetInfo(BaseAmphoraTask):
"""Task to get information on an amphora."""
def execute(self, amphora):
"""Execute get_info routine for an amphora."""
self.amphora_driver.get_info(amphora)
class AmphoraGetDiagnostics(BaseAmphoraTask):
"""Task to get diagnostics on the amphora and the loadbalancers."""
def execute(self, amphora):
"""Execute get_diagnostic routine for an amphora."""
self.amphora_driver.get_diagnostics(amphora)
class AmphoraFinalize(BaseAmphoraTask):
"""Task to finalize the amphora before any listeners are configured."""
def execute(self, amphora):
"""Execute finalize_amphora routine."""
self.amphora_driver.finalize_amphora(amphora)
LOG.debug("Finalized the amphora.")
def revert(self, result, amphora, *args, **kwargs):
"""Handle a failed amphora finalize."""
if isinstance(result, failure.Failure):
return
LOG.warning("Reverting amphora finalize.")
self.task_utils.mark_amphora_status_error(amphora.id)
class AmphoraPostNetworkPlug(BaseAmphoraTask):
"""Task to notify the amphora post network plug."""
def execute(self, amphora, ports, amphora_network_config):
"""Execute post_network_plug routine."""
for port in ports:
self.amphora_driver.post_network_plug(
amphora, port, amphora_network_config)
LOG.debug("post_network_plug called on compute instance "
"%(compute_id)s for port %(port_id)s",
{"compute_id": amphora.compute_id, "port_id": port.id})
def revert(self, result, amphora, *args, **kwargs):
"""Handle a failed post network plug."""
if isinstance(result, failure.Failure):
return
LOG.warning("Reverting post network plug.")
self.task_utils.mark_amphora_status_error(amphora.id)
class AmphoraePostNetworkPlug(BaseAmphoraTask):
"""Task to notify the amphorae post network plug."""
def execute(self, loadbalancer, updated_ports, amphorae_network_config):
"""Execute post_network_plug routine."""
amp_post_plug = AmphoraPostNetworkPlug()
        # We need to make sure we have the fresh list of amphorae
amphorae = self.amphora_repo.get_all(
db_apis.get_session(), load_balancer_id=loadbalancer.id,
status=constants.AMPHORA_ALLOCATED)[0]
for amphora in amphorae:
if amphora.id in updated_ports:
amp_post_plug.execute(amphora, updated_ports[amphora.id],
amphorae_network_config[amphora.id])
def revert(self, result, loadbalancer, updated_ports, *args, **kwargs):
"""Handle a failed post network plug."""
if isinstance(result, failure.Failure):
return
LOG.warning("Reverting post network plug.")
amphorae = self.amphora_repo.get_all(
db_apis.get_session(), load_balancer_id=loadbalancer.id,
status=constants.AMPHORA_ALLOCATED)[0]
for amphora in amphorae:
self.task_utils.mark_amphora_status_error(amphora.id)
class AmphoraPostVIPPlug(BaseAmphoraTask):
"""Task to notify the amphora post VIP plug."""
def execute(self, amphora, loadbalancer, amphorae_network_config):
"""Execute post_vip_routine."""
self.amphora_driver.post_vip_plug(
amphora, loadbalancer, amphorae_network_config)
LOG.debug("Notified amphora of vip plug")
def revert(self, result, amphora, loadbalancer, *args, **kwargs):
"""Handle a failed amphora vip plug notification."""
if isinstance(result, failure.Failure):
return
LOG.warning("Reverting post vip plug.")
self.task_utils.mark_amphora_status_error(amphora.id)
class AmphoraePostVIPPlug(BaseAmphoraTask):
"""Task to notify the amphorae post VIP plug."""
def execute(self, loadbalancer, amphorae_network_config):
"""Execute post_vip_plug across the amphorae."""
amp_post_vip_plug = AmphoraPostVIPPlug()
for amphora in loadbalancer.amphorae:
amp_post_vip_plug.execute(amphora,
loadbalancer,
amphorae_network_config)
class AmphoraCertUpload(BaseAmphoraTask):
"""Upload a certificate to the amphora."""
def execute(self, amphora, server_pem):
"""Execute cert_update_amphora routine."""
LOG.debug("Upload cert in amphora REST driver")
key = utils.get_compatible_server_certs_key_passphrase()
fer = fernet.Fernet(key)
self.amphora_driver.upload_cert_amp(amphora, fer.decrypt(server_pem))
class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
"""Task to get and update the VRRP interface device name from amphora."""
def execute(self, amphora, timeout_dict=None):
try:
interface = self.amphora_driver.get_interface_from_ip(
amphora, amphora.vrrp_ip, timeout_dict=timeout_dict)
except Exception as e:
# This can occur when an active/standby LB has no listener
LOG.error('Failed to get amphora VRRP interface on amphora '
'%s. Skipping this amphora as it is failing due to: '
'%s', amphora.id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.ERROR)
return None
self.amphora_repo.update(db_apis.get_session(), amphora.id,
vrrp_interface=interface)
return interface
class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask):
"""Task to get and update the VRRP interface device name from amphora."""
def execute(self, amphora_index, amphorae, timeout_dict=None):
amphora_id = amphorae[amphora_index].id
try:
interface = self.amphora_driver.get_interface_from_ip(
amphorae[amphora_index], amphorae[amphora_index].vrrp_ip,
timeout_dict=timeout_dict)
except Exception as e:
# This can occur when an active/standby LB has no listener
LOG.error('Failed to get amphora VRRP interface on amphora '
'%s. Skipping this amphora as it is failing due to: '
'%s', amphora_id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR)
return None
self.amphora_repo.update(db_apis.get_session(), amphora_id,
vrrp_interface=interface)
return interface
class AmphoraVRRPUpdate(BaseAmphoraTask):
"""Task to update the VRRP configuration of an amphora."""
def execute(self, loadbalancer_id, amphorae_network_config, amphora,
amp_vrrp_int, timeout_dict=None):
"""Execute update_vrrp_conf."""
loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer_id)
# Note, we don't want this to cause a revert as it may be used
# in a failover flow with both amps failing. Skip it and let
# health manager fix it.
amphora.vrrp_interface = amp_vrrp_int
try:
self.amphora_driver.update_vrrp_conf(
loadbalancer, amphorae_network_config, amphora, timeout_dict)
except Exception as e:
LOG.error('Failed to update VRRP configuration amphora %s. '
'Skipping this amphora as it is failing to update due '
'to: %s', amphora.id, str(e))
            self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                     status=constants.ERROR)
            return
        LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora.id)
class AmphoraIndexVRRPUpdate(BaseAmphoraTask):
"""Task to update the VRRP configuration of an amphora."""
def execute(self, loadbalancer_id, amphorae_network_config, amphora_index,
amphorae, amp_vrrp_int, timeout_dict=None):
"""Execute update_vrrp_conf."""
loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer_id)
# Note, we don't want this to cause a revert as it may be used
# in a failover flow with both amps failing. Skip it and let
# health manager fix it.
amphora_id = amphorae[amphora_index].id
amphorae[amphora_index].vrrp_interface = amp_vrrp_int
try:
self.amphora_driver.update_vrrp_conf(
loadbalancer, amphorae_network_config, amphorae[amphora_index],
timeout_dict)
except Exception as e:
LOG.error('Failed to update VRRP configuration amphora %s. '
'Skipping this amphora as it is failing to update due '
'to: %s', amphora_id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR)
return
LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id)
class AmphoraVRRPStart(BaseAmphoraTask):
"""Task to start keepalived on an amphora.
This will reload keepalived if it is already running.
"""
def execute(self, amphora, timeout_dict=None):
self.amphora_driver.start_vrrp_service(amphora, timeout_dict)
LOG.debug("Started VRRP on amphora %s.", amphora.id)
class AmphoraIndexVRRPStart(BaseAmphoraTask):
"""Task to start keepalived on an amphora.
This will reload keepalived if it is already running.
"""
def execute(self, amphora_index, amphorae, timeout_dict=None):
amphora_id = amphorae[amphora_index].id
try:
self.amphora_driver.start_vrrp_service(amphorae[amphora_index],
timeout_dict)
except Exception as e:
LOG.error('Failed to start VRRP on amphora %s. '
'Skipping this amphora as it is failing to start due '
'to: %s', amphora_id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR)
return
LOG.debug("Started VRRP on amphora %s.", amphorae[amphora_index].id)
class AmphoraComputeConnectivityWait(BaseAmphoraTask):
"""Task to wait for the compute instance to be up."""
def execute(self, amphora):
"""Execute get_info routine for an amphora until it responds."""
try:
amp_info = self.amphora_driver.get_info(amphora)
            LOG.debug('Successfully connected to amphora %s: %s',
amphora.id, amp_info)
except driver_except.TimeOutException:
LOG.error("Amphora compute instance failed to become reachable. "
"This either means the compute driver failed to fully "
"boot the instance inside the timeout interval or the "
"instance is not reachable via the lb-mgmt-net.")
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.ERROR)
raise
class AmphoraConfigUpdate(BaseAmphoraTask):
"""Task to push a new amphora agent configuration to the amphora."""
def execute(self, amphora, flavor):
# Extract any flavor based settings
if flavor:
topology = flavor.get(constants.LOADBALANCER_TOPOLOGY,
CONF.controller_worker.loadbalancer_topology)
else:
topology = CONF.controller_worker.loadbalancer_topology
# Build the amphora agent config
agent_cfg_tmpl = agent_jinja_cfg.AgentJinjaTemplater()
agent_config = agent_cfg_tmpl.build_agent_config(amphora.id, topology)
# Push the new configuration to the amphora
try:
self.amphora_driver.update_amphora_agent_config(amphora,
agent_config)
except driver_except.AmpDriverNotImplementedError:
LOG.error('Amphora %s does not support agent configuration '
'update. Please update the amphora image for this '
'amphora. Skipping.', amphora.id)

View File

@ -1,51 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cryptography import fernet
from oslo_config import cfg
from stevedore import driver as stevedore_driver
from taskflow import task
from octavia.common import utils
CONF = cfg.CONF
class BaseCertTask(task.Task):
"""Base task to load drivers common to the tasks."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cert_generator = stevedore_driver.DriverManager(
namespace='octavia.cert_generator',
name=CONF.certificates.cert_generator,
invoke_on_load=True,
).driver
class GenerateServerPEMTask(BaseCertTask):
"""Create the server certs for the agent comm
Use the amphora_id for the CN
"""
def execute(self, amphora_id):
cert = self.cert_generator.generate_cert_key_pair(
cn=amphora_id,
validity=CONF.certificates.cert_validity_time)
key = utils.get_compatible_server_certs_key_passphrase()
fer = fernet.Fernet(key)
return fer.encrypt(cert.certificate + cert.private_key)
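# Illustrative sketch, not from the original module: the PEM bundle
# returned above travels between tasks Fernet-encrypted, and
# CertComputeCreate later decrypts it with the same passphrase-derived key.
# A self-contained round trip, assuming a freshly generated key in place of
# the configured passphrase:
#
#     from cryptography import fernet
#
#     key = fernet.Fernet.generate_key()      # stand-in for the passphrase
#     fer = fernet.Fernet(key)
#     token = fer.encrypt(b'...cert+key...')  # what this task returns
#     assert fer.decrypt(token) == b'...cert+key...'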

View File

@ -1,335 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
from cryptography import fernet
from oslo_config import cfg
from oslo_log import log as logging
from stevedore import driver as stevedore_driver
from taskflow import task
from taskflow.types import failure
import tenacity
from octavia.amphorae.backends.agent import agent_jinja_cfg
from octavia.common import constants
from octavia.common import exceptions
from octavia.common.jinja.logging import logging_jinja_cfg
from octavia.common.jinja import user_data_jinja_cfg
from octavia.common import utils
from octavia.controller.worker import amphora_rate_limit
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class BaseComputeTask(task.Task):
"""Base task to load drivers common to the tasks."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.compute = stevedore_driver.DriverManager(
namespace='octavia.compute.drivers',
name=CONF.controller_worker.compute_driver,
invoke_on_load=True
).driver
self.rate_limit = amphora_rate_limit.AmphoraBuildRateLimit()
class ComputeCreate(BaseComputeTask):
"""Create the compute instance for a new amphora."""
def execute(self, amphora_id, server_group_id, config_drive_files=None,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
ports=None, flavor=None, availability_zone=None):
"""Create an amphora
:returns: an amphora
"""
ports = ports or []
network_ids = CONF.controller_worker.amp_boot_network_list[:]
config_drive_files = config_drive_files or {}
user_data = None
LOG.debug("Compute create execute for amphora with id %s", amphora_id)
user_data_config_drive = CONF.controller_worker.user_data_config_drive
key_name = CONF.controller_worker.amp_ssh_key_name
# Apply an Octavia flavor customizations
if flavor:
topology = flavor.get(constants.LOADBALANCER_TOPOLOGY,
CONF.controller_worker.loadbalancer_topology)
amp_compute_flavor = flavor.get(
constants.COMPUTE_FLAVOR, CONF.controller_worker.amp_flavor_id)
amp_image_tag = flavor.get(
constants.AMP_IMAGE_TAG, CONF.controller_worker.amp_image_tag)
else:
topology = CONF.controller_worker.loadbalancer_topology
amp_compute_flavor = CONF.controller_worker.amp_flavor_id
amp_image_tag = CONF.controller_worker.amp_image_tag
if availability_zone:
amp_availability_zone = availability_zone.get(
constants.COMPUTE_ZONE)
amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK)
if amp_network:
network_ids = [amp_network]
else:
amp_availability_zone = None
try:
if CONF.haproxy_amphora.build_rate_limit != -1:
self.rate_limit.add_to_build_request_queue(
amphora_id, build_type_priority)
agent_cfg = agent_jinja_cfg.AgentJinjaTemplater()
config_drive_files['/etc/octavia/amphora-agent.conf'] = (
agent_cfg.build_agent_config(amphora_id, topology))
logging_cfg = logging_jinja_cfg.LoggingJinjaTemplater(
CONF.amphora_agent.logging_template_override)
config_drive_files['/etc/rsyslog.d/10-rsyslog.conf'] = (
logging_cfg.build_logging_config())
udtemplater = user_data_jinja_cfg.UserDataJinjaCfg()
user_data = udtemplater.build_user_data_config(
config_drive_files if user_data_config_drive else {})
if user_data_config_drive:
config_drive_files = None
compute_id = self.compute.build(
name="amphora-" + amphora_id,
amphora_flavor=amp_compute_flavor,
image_tag=amp_image_tag,
image_owner=CONF.controller_worker.amp_image_owner_id,
key_name=key_name,
sec_groups=CONF.controller_worker.amp_secgroup_list,
network_ids=network_ids,
port_ids=[port.id for port in ports],
config_drive_files=config_drive_files,
user_data=user_data,
server_group_id=server_group_id,
availability_zone=amp_availability_zone)
LOG.info("Server created with id: %s for amphora id: %s",
compute_id, amphora_id)
return compute_id
except Exception:
LOG.exception("Compute create for amphora id: %s failed",
amphora_id)
raise
def revert(self, result, amphora_id, *args, **kwargs):
"""This method will revert the creation of the
amphora. So it will just delete it in this flow
"""
if isinstance(result, failure.Failure):
return
compute_id = result
LOG.warning("Reverting compute create for amphora with id "
"%(amp)s and compute id: %(comp)s",
{'amp': amphora_id, 'comp': compute_id})
try:
self.compute.delete(compute_id)
except Exception:
LOG.exception("Reverting compute create failed")
class CertComputeCreate(ComputeCreate):
def execute(self, amphora_id, server_pem, server_group_id,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
ports=None, flavor=None, availability_zone=None):
"""Create an amphora
:returns: an amphora
"""
# load client certificate
with open(CONF.controller_worker.client_ca,
'r', encoding='utf-8') as client_ca:
ca = client_ca.read()
key = utils.get_compatible_server_certs_key_passphrase()
fer = fernet.Fernet(key)
config_drive_files = {
'/etc/octavia/certs/server.pem': fer.decrypt(
server_pem).decode('utf-8'),
'/etc/octavia/certs/client_ca.pem': ca}
return super().execute(
amphora_id, config_drive_files=config_drive_files,
build_type_priority=build_type_priority,
server_group_id=server_group_id, ports=ports, flavor=flavor,
availability_zone=availability_zone)
class DeleteAmphoraeOnLoadBalancer(BaseComputeTask):
"""Delete the amphorae on a load balancer.
Iterate through amphorae, deleting them
"""
def execute(self, loadbalancer):
for amp in loadbalancer.amphorae:
# The compute driver will already handle NotFound
try:
self.compute.delete(amp.compute_id)
except Exception:
LOG.exception("Compute delete for amphora id: %s failed",
amp.id)
raise
class ComputeDelete(BaseComputeTask):
@tenacity.retry(retry=tenacity.retry_if_exception_type(),
stop=tenacity.stop_after_attempt(CONF.compute.max_retries),
wait=tenacity.wait_exponential(
multiplier=CONF.compute.retry_backoff,
min=CONF.compute.retry_interval,
max=CONF.compute.retry_max), reraise=True)
def execute(self, amphora, passive_failure=False):
if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
LOG.debug('Compute delete execute for amphora with ID %s and '
'compute ID: %s', amphora.id, amphora.compute_id)
else:
LOG.warning('Retrying compute delete of %s attempt %s of %s.',
amphora.compute_id,
self.execute.retry.statistics[
constants.ATTEMPT_NUMBER],
self.execute.retry.stop.max_attempt_number)
# Let the Taskflow engine know we are working and alive
# Don't use get with a default for 'attempt_number', we need to fail
# if that number is missing.
self.update_progress(
self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
self.execute.retry.stop.max_attempt_number)
try:
self.compute.delete(amphora.compute_id)
except Exception:
if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
self.execute.retry.stop.max_attempt_number):
LOG.warning('Compute delete for amphora id: %s failed. '
'Retrying.', amphora.id)
raise
if passive_failure:
LOG.exception('Compute delete for compute ID: %s on amphora '
'ID: %s failed. This resource will be abandoned '
'and should manually be cleaned up once the '
'compute service is functional.',
amphora.compute_id, amphora.id)
else:
LOG.exception('Compute delete for compute ID: %s on amphora '
'ID: %s failed. The compute service has failed. '
'Aborting and reverting.', amphora.compute_id,
amphora.id)
raise
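# Illustrative sketch, not from the original module: ComputeDelete relies
# on tenacity for retries with exponential backoff. A minimal stand-alone
# equivalent with hard-coded numbers (the real values come from
# CONF.compute.*):
#
#     import tenacity
#
#     @tenacity.retry(retry=tenacity.retry_if_exception_type(),
#                     stop=tenacity.stop_after_attempt(5),
#                     wait=tenacity.wait_exponential(multiplier=1,
#                                                    min=1, max=10),
#                     reraise=True)
#     def delete_with_retries(compute, compute_id):
#         compute.delete(compute_id)   # retried on any exception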
class ComputeActiveWait(BaseComputeTask):
"""Wait for the compute driver to mark the amphora active."""
def execute(self, compute_id, amphora_id, availability_zone):
"""Wait for the compute driver to mark the amphora active
:raises: Generic exception if the amphora is not active
:returns: An amphora object
"""
if availability_zone:
amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK)
else:
amp_network = None
for i in range(CONF.controller_worker.amp_active_retries):
amp, fault = self.compute.get_amphora(compute_id, amp_network)
if amp.status == constants.ACTIVE:
if CONF.haproxy_amphora.build_rate_limit != -1:
self.rate_limit.remove_from_build_req_queue(amphora_id)
return amp
if amp.status == constants.ERROR:
raise exceptions.ComputeBuildException(fault=fault)
time.sleep(CONF.controller_worker.amp_active_wait_sec)
raise exceptions.ComputeWaitTimeoutException(id=compute_id)
class NovaServerGroupCreate(BaseComputeTask):
def execute(self, loadbalancer_id):
"""Create a server group by nova client api
:param loadbalancer_id: will be used for server group's name
:param policy: will used for server group's policy
:raises: Generic exception if the server group is not created
:returns: server group's id
"""
name = 'octavia-lb-' + loadbalancer_id
server_group = self.compute.create_server_group(
name, CONF.nova.anti_affinity_policy)
LOG.debug("Server Group created with id: %s for load balancer id: "
"%s", server_group.id, loadbalancer_id)
return server_group.id
def revert(self, result, *args, **kwargs):
"""This method will revert the creation of the
:param result: here it refers to server group id
"""
server_group_id = result
LOG.warning("Reverting server group create with id:%s",
server_group_id)
try:
self.compute.delete_server_group(server_group_id)
except Exception as e:
LOG.error("Failed to delete server group. Resources may "
"still be in use for server group: %(sg)s due to "
"error: %(except)s",
{'sg': server_group_id, 'except': str(e)})
class NovaServerGroupDelete(BaseComputeTask):
def execute(self, server_group_id):
if server_group_id is not None:
self.compute.delete_server_group(server_group_id)
else:
return
class AttachPort(BaseComputeTask):
def execute(self, amphora, port):
"""Attach a port to an amphora instance.
:param amphora: The amphora to attach the port to.
:param port: The port to attach to the amphora.
:returns: None
"""
LOG.debug('Attaching port: %s to compute: %s',
port.id, amphora.compute_id)
self.compute.attach_network_or_port(amphora.compute_id,
port_id=port.id)
def revert(self, amphora, port, *args, **kwargs):
"""Revert our port attach.
:param amphora: The amphora to detach the port from.
:param port: The port to attach to the amphora.
"""
LOG.warning('Reverting port: %s attach to compute: %s',
port.id, amphora.compute_id)
try:
self.compute.detach_port(amphora.compute_id, port.id)
except Exception as e:
LOG.error('Failed to detach port %s from compute %s for revert '
'due to %s.', port.id, amphora.compute_id, str(e))

File diff suppressed because it is too large

View File

@ -1,173 +0,0 @@
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from taskflow import task
from octavia.controller.worker import task_utils as task_utilities
class BaseLifecycleTask(task.Task):
"""Base task to instansiate common classes."""
def __init__(self, **kwargs):
self.task_utils = task_utilities.TaskUtils()
super().__init__(**kwargs)
class AmphoraIDToErrorOnRevertTask(BaseLifecycleTask):
"""Task to checkpoint Amphora lifecycle milestones."""
def execute(self, amphora_id):
pass
def revert(self, amphora_id, *args, **kwargs):
self.task_utils.mark_amphora_status_error(amphora_id)
class AmphoraToErrorOnRevertTask(AmphoraIDToErrorOnRevertTask):
"""Task to checkpoint Amphora lifecycle milestones."""
def execute(self, amphora):
pass
def revert(self, amphora, *args, **kwargs):
super().revert(amphora.id)
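# Illustrative sketch, not from the original module: all of these lifecycle
# tasks are no-ops on execute and act only on revert, which taskflow calls
# when a later task in the flow fails. A hypothetical minimal version of
# the pattern:
#
#     from taskflow import task
#
#     class MarkErrorOnRevert(task.Task):
#         def execute(self, thing_id):
#             pass                    # nothing to do on the way forward
#
#         def revert(self, thing_id, *args, **kwargs):
#             # stand-in for the task_utils repository update
#             print('marking %s ERROR' % thing_id)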
class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask):
"""Task to set a member to ERROR on revert."""
def execute(self, health_mon, listeners, loadbalancer):
pass
def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs):
self.task_utils.mark_health_mon_prov_status_error(health_mon.id)
self.task_utils.mark_pool_prov_status_active(health_mon.pool_id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(listener.id)
class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
"""Task to set a l7policy to ERROR on revert."""
def execute(self, l7policy, listeners, loadbalancer):
pass
def revert(self, l7policy, listeners, loadbalancer, *args, **kwargs):
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(listener.id)
class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
"""Task to set a l7rule to ERROR on revert."""
def execute(self, l7rule, listeners, loadbalancer):
pass
def revert(self, l7rule, listeners, loadbalancer, *args, **kwargs):
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
self.task_utils.mark_l7policy_prov_status_active(l7rule.l7policy_id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(listener.id)
class ListenerToErrorOnRevertTask(BaseLifecycleTask):
"""Task to set a listener to ERROR on revert."""
def execute(self, listener):
pass
def revert(self, listener, *args, **kwargs):
self.task_utils.mark_listener_prov_status_error(listener.id)
self.task_utils.mark_loadbalancer_prov_status_active(
listener.load_balancer.id)
class ListenersToErrorOnRevertTask(BaseLifecycleTask):
"""Task to set listeners to ERROR on revert."""
def execute(self, listeners, loadbalancer):
pass
def revert(self, listeners, loadbalancer, *args, **kwargs):
self.task_utils.mark_loadbalancer_prov_status_active(
loadbalancer.id)
for listener in listeners:
self.task_utils.mark_listener_prov_status_error(listener.id)
class LoadBalancerIDToErrorOnRevertTask(BaseLifecycleTask):
"""Task to set the load balancer to ERROR on revert."""
def execute(self, loadbalancer_id):
pass
def revert(self, loadbalancer_id, *args, **kwargs):
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)
class LoadBalancerToErrorOnRevertTask(LoadBalancerIDToErrorOnRevertTask):
"""Task to set the load balancer to ERROR on revert."""
def execute(self, loadbalancer):
pass
def revert(self, loadbalancer, *args, **kwargs):
super().revert(loadbalancer.id)
class MemberToErrorOnRevertTask(BaseLifecycleTask):
"""Task to set a member to ERROR on revert."""
def execute(self, member, listeners, loadbalancer, pool):
pass
def revert(self, member, listeners, loadbalancer, pool, *args, **kwargs):
self.task_utils.mark_member_prov_status_error(member.id)
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(listener.id)
self.task_utils.mark_pool_prov_status_active(pool.id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
class MembersToErrorOnRevertTask(BaseLifecycleTask):
"""Task to set members to ERROR on revert."""
def execute(self, members, listeners, loadbalancer, pool):
pass
def revert(self, members, listeners, loadbalancer, pool, *args, **kwargs):
for m in members:
self.task_utils.mark_member_prov_status_error(m.id)
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(listener.id)
self.task_utils.mark_pool_prov_status_active(pool.id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
class PoolToErrorOnRevertTask(BaseLifecycleTask):
"""Task to set a pool to ERROR on revert."""
def execute(self, pool, listeners, loadbalancer):
pass
def revert(self, pool, listeners, loadbalancer, *args, **kwargs):
self.task_utils.mark_pool_prov_status_error(pool.id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(listener.id)

View File

@ -1,41 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow import task
class DeleteModelObject(task.Task):
"""Task to delete an object in a model."""
def execute(self, object):
object.delete()
class UpdateAttributes(task.Task):
"""Task to update an object for changes."""
def execute(self, object, update_dict):
"""Update an object and its associated resources.
Note: This relies on the data_model update() methods to handle complex
objects with nested objects (LoadBalancer.vip,
Pool.session_persistence, etc.)
        :param object: The object to be updated.
:param update_dict: The updates dictionary.
:returns: None
"""
object.update(update_dict)
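# Illustrative note, not from the original module: a hypothetical call
# showing the intent. UpdateAttributes().execute(pool, {'name': 'web-pool'})
# delegates to pool.update({'name': 'web-pool'}), and the data model
# update() methods cascade the change into nested objects such as
# Pool.session_persistence.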

View File

@ -1,970 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from taskflow import task
from taskflow.types import failure
import tenacity
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker import task_utils
from octavia.db import api as db_apis
from octavia.db import repositories
from octavia.network import base
from octavia.network import data_models as n_data_models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BaseNetworkTask(task.Task):
"""Base task to load drivers common to the tasks."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._network_driver = None
self.task_utils = task_utils.TaskUtils()
self.lb_repo = repositories.LoadBalancerRepository()
@property
def network_driver(self):
if self._network_driver is None:
self._network_driver = utils.get_network_driver()
return self._network_driver
class CalculateAmphoraDelta(BaseNetworkTask):
default_provides = constants.DELTA
def execute(self, loadbalancer, amphora, availability_zone):
LOG.debug("Calculating network delta for amphora id: %s", amphora.id)
vip_subnet_to_net_map = {
loadbalancer.vip.subnet_id:
loadbalancer.vip.network_id,
}
# Figure out what networks we want
# seed with lb network(s)
if (availability_zone and
availability_zone.get(constants.MANAGEMENT_NETWORK)):
management_nets = [
availability_zone.get(constants.MANAGEMENT_NETWORK)]
else:
management_nets = CONF.controller_worker.amp_boot_network_list
desired_subnet_to_net_map = {}
for mgmt_net_id in management_nets:
for subnet_id in self.network_driver.get_network(
mgmt_net_id).subnets:
desired_subnet_to_net_map[subnet_id] = mgmt_net_id
desired_subnet_to_net_map.update(vip_subnet_to_net_map)
for pool in loadbalancer.pools:
for member in pool.members:
if (member.subnet_id and
member.provisioning_status !=
constants.PENDING_DELETE):
member_network = self.network_driver.get_subnet(
member.subnet_id).network_id
desired_subnet_to_net_map[member.subnet_id] = (
member_network)
desired_network_ids = set(desired_subnet_to_net_map.values())
desired_subnet_ids = set(desired_subnet_to_net_map)
# Calculate Network deltas
nics = self.network_driver.get_plugged_networks(
amphora.compute_id)
# we don't have two nics in the same network
network_to_nic_map = {nic.network_id: nic for nic in nics}
plugged_network_ids = set(network_to_nic_map)
del_ids = plugged_network_ids - desired_network_ids
delete_nics = [n_data_models.Interface(
network_id=net_id,
port_id=network_to_nic_map[net_id].port_id)
for net_id in del_ids]
add_ids = desired_network_ids - plugged_network_ids
add_nics = [n_data_models.Interface(
network_id=add_net_id,
fixed_ips=[
n_data_models.FixedIP(
subnet_id=subnet_id)
for subnet_id, net_id in desired_subnet_to_net_map.items()
if net_id == add_net_id])
for add_net_id in add_ids]
# Calculate member Subnet deltas
plugged_subnets = {}
for nic in network_to_nic_map.values():
for fixed_ip in nic.fixed_ips or []:
plugged_subnets[fixed_ip.subnet_id] = nic.network_id
plugged_subnet_ids = set(plugged_subnets)
del_subnet_ids = plugged_subnet_ids - desired_subnet_ids
add_subnet_ids = desired_subnet_ids - plugged_subnet_ids
def _subnet_updates(subnet_ids, subnets):
updates = []
for s in subnet_ids:
network_id = subnets[s]
nic = network_to_nic_map.get(network_id)
port_id = nic.port_id if nic else None
updates.append({
constants.SUBNET_ID: s,
constants.NETWORK_ID: network_id,
constants.PORT_ID: port_id
})
return updates
add_subnets = _subnet_updates(add_subnet_ids,
desired_subnet_to_net_map)
del_subnets = _subnet_updates(del_subnet_ids,
plugged_subnets)
delta = n_data_models.Delta(
amphora_id=amphora.id,
compute_id=amphora.compute_id,
add_nics=add_nics, delete_nics=delete_nics,
add_subnets=add_subnets,
delete_subnets=del_subnets)
return delta
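# Illustrative sketch, not from the original module: the delta computation
# above is plain set arithmetic over network IDs. A toy example with
# made-up identifiers:
#
#     desired = {'net-vip', 'net-mgmt', 'net-member-a'}
#     plugged = {'net-mgmt', 'net-stale'}
#     add_ids = desired - plugged  # {'net-vip', 'net-member-a'} -> add_nics
#     del_ids = plugged - desired  # {'net-stale'} -> delete_nics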
class CalculateDelta(BaseNetworkTask):
"""Task to calculate the delta between
the nics on the amphora and the ones
we need. Returns a list for
plumbing them.
"""
default_provides = constants.DELTAS
def execute(self, loadbalancer, availability_zone):
"""Compute which NICs need to be plugged
for the amphora to become operational.
:param loadbalancer: the loadbalancer to calculate deltas for all
amphorae
:param availability_zone: availability zone metadata dict
:returns: dict of octavia.network.data_models.Delta keyed off amphora
id
"""
calculate_amp = CalculateAmphoraDelta()
deltas = {}
for amphora in filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):
delta = calculate_amp.execute(loadbalancer, amphora,
availability_zone)
deltas[amphora.id] = delta
return deltas
class GetPlumbedNetworks(BaseNetworkTask):
"""Task to figure out the NICS on an amphora.
This will likely move into the amphora driver
:returns: Array of networks
"""
default_provides = constants.NICS
def execute(self, amphora):
"""Get plumbed networks for the amphora."""
LOG.debug("Getting plumbed networks for amphora id: %s", amphora.id)
return self.network_driver.get_plugged_networks(amphora.compute_id)
class PlugNetworks(BaseNetworkTask):
"""Task to plug the networks.
This uses the delta to add all missing networks/nics
"""
def execute(self, amphora, delta):
"""Update the amphora networks for the delta."""
LOG.debug("Plug or unplug networks for amphora id: %s", amphora.id)
if not delta:
LOG.debug("No network deltas for amphora id: %s", amphora.id)
return
# add nics
for nic in delta.add_nics:
self.network_driver.plug_network(amphora.compute_id,
nic.network_id)
def revert(self, amphora, delta, *args, **kwargs):
"""Handle a failed network plug by removing all nics added."""
LOG.warning("Unable to plug networks for amp id %s", amphora.id)
if not delta:
return
for nic in delta.add_nics:
try:
self.network_driver.unplug_network(amphora.compute_id,
nic.network_id)
except base.NetworkNotFound:
pass
class UnPlugNetworks(BaseNetworkTask):
"""Task to unplug the networks
Loop over all nics and unplug them
based on delta
"""
def execute(self, amphora, delta):
"""Unplug the networks."""
LOG.debug("Unplug network for amphora")
if not delta:
LOG.debug("No network deltas for amphora id: %s", amphora.id)
return
for nic in delta.delete_nics:
try:
self.network_driver.unplug_network(amphora.compute_id,
nic.network_id)
except base.NetworkNotFound:
LOG.debug("Network %d not found", nic.network_id)
except Exception:
LOG.exception("Unable to unplug network")
# TODO(xgerman) follow up if that makes sense
class GetMemberPorts(BaseNetworkTask):
def execute(self, loadbalancer, amphora):
vip_port = self.network_driver.get_port(loadbalancer.vip.port_id)
member_ports = []
interfaces = self.network_driver.get_plugged_networks(
amphora.compute_id)
for interface in interfaces:
port = self.network_driver.get_port(interface.port_id)
if vip_port.network_id == port.network_id:
continue
port.network = self.network_driver.get_network(port.network_id)
for fixed_ip in port.fixed_ips:
if amphora.lb_network_ip == fixed_ip.ip_address:
break
fixed_ip.subnet = self.network_driver.get_subnet(
fixed_ip.subnet_id)
# Only add the port to the list if the IP wasn't the mgmt IP
else:
member_ports.append(port)
return member_ports
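# Illustrative sketch, not from the original module: GetMemberPorts depends
# on Python's for/else above. The else branch runs only when the loop
# completed without a break, i.e. none of the port's fixed IPs matched the
# management IP:
#
#     for ip in ('10.0.0.5', '10.0.0.6'):   # made-up fixed IPs
#         if ip == '10.0.0.9':              # the management IP
#             break                         # matched -> skip this port
#     else:
#         print('keep port')                # ran because no break fired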
class HandleNetworkDelta(BaseNetworkTask):
"""Task to plug and unplug networks
Plug or unplug networks based on delta
"""
def _fill_port_info(self, port):
port.network = self.network_driver.get_network(port.network_id)
for fixed_ip in port.fixed_ips:
fixed_ip.subnet = self.network_driver.get_subnet(
fixed_ip.subnet_id)
def execute(self, amphora, delta):
"""Handle network plugging based off deltas."""
updated_ports = {}
for nic in delta.add_nics:
subnet_id = nic.fixed_ips[0].subnet_id
interface = self.network_driver.plug_network(
amphora.compute_id, nic.network_id)
port = self.network_driver.get_port(interface.port_id)
            # nova may have plugged undesired subnets (it plugs one of the
            # subnets of the network); we can safely unplug the subnets we
            # don't need, the desired subnet will be added in the
            # 'ADD_SUBNETS' loop.
extra_subnets = [
fixed_ip.subnet_id
for fixed_ip in port.fixed_ips
if fixed_ip.subnet_id != subnet_id]
for subnet_id in extra_subnets:
port = self.network_driver.unplug_fixed_ip(
port_id=interface.port_id, subnet_id=subnet_id)
self._fill_port_info(port)
updated_ports[port.network_id] = port
for update in delta.add_subnets:
network_id = update[constants.NETWORK_ID]
# Get already existing port from Deltas or
# newly created port from updated_ports dict
port_id = (update[constants.PORT_ID] or
updated_ports[network_id].id)
subnet_id = update[constants.SUBNET_ID]
# Avoid duplicated subnets
has_subnet = False
if network_id in updated_ports:
has_subnet = any(
fixed_ip.subnet_id == subnet_id
for fixed_ip in updated_ports[network_id].fixed_ips)
if not has_subnet:
port = self.network_driver.plug_fixed_ip(
port_id=port_id, subnet_id=subnet_id)
self._fill_port_info(port)
updated_ports[network_id] = port
for update in delta.delete_subnets:
network_id = update[constants.NETWORK_ID]
port_id = update[constants.PORT_ID]
subnet_id = update[constants.SUBNET_ID]
port = self.network_driver.unplug_fixed_ip(
port_id=port_id, subnet_id=subnet_id)
self._fill_port_info(port)
            # In neutron, when removing an ipv6 subnet (with slaac) from a
            # port, the request is silently ignored.
# https://bugs.launchpad.net/neutron/+bug/1945156
# When it happens, don't add the port to the updated_ports dict
has_subnet = any(
fixed_ip.subnet_id == subnet_id
for fixed_ip in port.fixed_ips)
if not has_subnet:
updated_ports[network_id] = port
for nic in delta.delete_nics:
network_id = nic.network_id
try:
self.network_driver.unplug_network(
amphora.compute_id, network_id)
except base.NetworkNotFound:
LOG.debug("Network %s not found", network_id)
except Exception:
LOG.exception("Unable to unplug network")
port_id = nic.port_id
try:
self.network_driver.delete_port(port_id)
except Exception:
LOG.exception("Unable to delete the port")
updated_ports.pop(network_id, None)
return {amphora.id: list(updated_ports.values())}
def revert(self, result, amphora, delta, *args, **kwargs):
"""Handle a network plug or unplug failures."""
if isinstance(result, failure.Failure):
return
if not delta:
return
LOG.warning("Unable to plug networks for amp id %s",
delta.amphora_id)
for nic in delta.add_nics:
try:
self.network_driver.unplug_network(delta.compute_id,
nic.network_id)
except Exception:
LOG.exception("Unable to unplug network %s",
nic.network_id)
port_id = nic.port_id
try:
self.network_driver.delete_port(port_id)
except Exception:
LOG.exception("Unable to delete port %s", port_id)
class HandleNetworkDeltas(BaseNetworkTask):
"""Task to plug and unplug networks
Loop through the deltas and plug or unplug
networks based on delta
"""
def execute(self, deltas, loadbalancer):
"""Handle network plugging based off deltas."""
amphorae = {amp.id: amp for amp in loadbalancer.amphorae}
updated_ports = {}
handle_delta = HandleNetworkDelta()
for amp_id, delta in deltas.items():
ret = handle_delta.execute(amphorae[amp_id], delta)
updated_ports.update(ret)
return updated_ports
def revert(self, result, deltas, *args, **kwargs):
"""Handle a network plug or unplug failures."""
if isinstance(result, failure.Failure):
return
if not deltas:
return
        for amp_id, delta in deltas.items():
            if not delta:
                continue
            LOG.warning("Unable to plug networks for amp id %s",
                        delta.amphora_id)
            for nic in delta.add_nics:
try:
self.network_driver.unplug_network(delta.compute_id,
nic.network_id)
except Exception:
LOG.exception("Unable to unplug network %s",
nic.network_id)
port_id = nic.port_id
try:
self.network_driver.delete_port(port_id)
except Exception:
LOG.exception("Unable to delete port %s", port_id)
class PlugVIP(BaseNetworkTask):
"""Task to plumb a VIP."""
def execute(self, loadbalancer):
"""Plumb a vip to an amphora."""
LOG.debug("Plumbing VIP for loadbalancer id: %s", loadbalancer.id)
amps_data = self.network_driver.plug_vip(loadbalancer,
loadbalancer.vip)
return amps_data
def revert(self, result, loadbalancer, *args, **kwargs):
"""Handle a failure to plumb a vip."""
if isinstance(result, failure.Failure):
return
LOG.warning("Unable to plug VIP for loadbalancer id %s",
loadbalancer.id)
try:
# Make sure we have the current port IDs for cleanup
for amp_data in result:
for amphora in filter(
# pylint: disable=cell-var-from-loop
lambda amp: amp.id == amp_data.id,
loadbalancer.amphorae):
amphora.vrrp_port_id = amp_data.vrrp_port_id
amphora.ha_port_id = amp_data.ha_port_id
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
except Exception as e:
LOG.error("Failed to unplug VIP. Resources may still "
"be in use from vip: %(vip)s due to error: %(except)s",
{'vip': loadbalancer.vip.ip_address, 'except': str(e)})
class UpdateVIPSecurityGroup(BaseNetworkTask):
"""Task to setup SG for LB."""
def execute(self, loadbalancer_id):
"""Task to setup SG for LB.
Task is idempotent and safe to retry.
"""
LOG.debug("Setting up VIP SG for load balancer id: %s",
loadbalancer_id)
loadbalancer = self.lb_repo.get(db_apis.get_session(),
id=loadbalancer_id)
sg_id = self.network_driver.update_vip_sg(loadbalancer,
loadbalancer.vip)
LOG.info("Set up VIP SG %s for load balancer %s complete",
sg_id if sg_id else "None", loadbalancer_id)
return sg_id
class GetSubnetFromVIP(BaseNetworkTask):
"""Task to plumb a VIP."""
def execute(self, loadbalancer):
"""Plumb a vip to an amphora."""
LOG.debug("Getting subnet for LB: %s", loadbalancer.id)
subnet = self.network_driver.get_subnet(loadbalancer.vip.subnet_id)
LOG.info("Got subnet %s for load balancer %s",
loadbalancer.vip.subnet_id if subnet else "None",
loadbalancer.id)
return subnet
class PlugVIPAmpphora(BaseNetworkTask):
"""Task to plumb a VIP."""
def execute(self, loadbalancer, amphora, subnet):
"""Plumb a vip to an amphora."""
LOG.debug("Plumbing VIP for amphora id: %s", amphora.id)
amp_data = self.network_driver.plug_aap_port(
loadbalancer, loadbalancer.vip, amphora, subnet)
return amp_data
def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):
"""Handle a failure to plumb a vip."""
if isinstance(result, failure.Failure):
return
LOG.warning("Unable to plug VIP for amphora id %s "
"load balancer id %s",
amphora.id, loadbalancer.id)
try:
amphora.vrrp_port_id = result.vrrp_port_id
amphora.ha_port_id = result.ha_port_id
self.network_driver.unplug_aap_port(loadbalancer.vip,
amphora, subnet)
except Exception as e:
LOG.error('Failed to unplug AAP port. Resources may still be in '
'use for VIP: %s due to error: %s', loadbalancer.vip,
str(e))
class UnplugVIP(BaseNetworkTask):
"""Task to unplug the vip."""
def execute(self, loadbalancer):
"""Unplug the vip."""
LOG.debug("Unplug vip on amphora")
try:
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
except Exception:
LOG.exception("Unable to unplug vip from load balancer %s",
loadbalancer.id)
class AllocateVIP(BaseNetworkTask):
"""Task to allocate a VIP."""
def execute(self, loadbalancer):
"""Allocate a vip to the loadbalancer."""
LOG.debug("Allocating vip port id %s, subnet id %s, ip address %s for "
"load balancer %s",
loadbalancer.vip.port_id,
loadbalancer.vip.subnet_id,
loadbalancer.vip.ip_address,
loadbalancer.id)
        # allocate_vip returns (vip, add_vips); skip the 2nd element as
        # amphorav1 doesn't support add_vips
vip = self.network_driver.allocate_vip(loadbalancer)[0]
LOG.info("Allocated vip with port id %s, subnet id %s, ip address %s "
"for load balancer %s",
loadbalancer.vip.port_id,
loadbalancer.vip.subnet_id,
loadbalancer.vip.ip_address,
loadbalancer.id)
return vip
def revert(self, result, loadbalancer, *args, **kwargs):
"""Handle a failure to allocate vip."""
if isinstance(result, failure.Failure):
LOG.exception("Unable to allocate VIP")
return
vip = result
LOG.warning("Deallocating vip %s", vip.ip_address)
try:
self.network_driver.deallocate_vip(vip)
except Exception as e:
LOG.error("Failed to deallocate VIP. Resources may still "
"be in use from vip: %(vip)s due to error: %(except)s",
{'vip': vip.ip_address, 'except': str(e)})
class AllocateVIPforFailover(AllocateVIP):
"""Task to allocate/validate the VIP for a failover flow."""
def revert(self, result, loadbalancer, *args, **kwargs):
"""Handle a failure to allocate vip."""
if isinstance(result, failure.Failure):
LOG.exception("Unable to allocate VIP")
return
vip = result
LOG.info("Failover revert is not deallocating vip %s because this is "
"a failover.", vip.ip_address)
class DeallocateVIP(BaseNetworkTask):
"""Task to deallocate a VIP."""
def execute(self, loadbalancer):
"""Deallocate a VIP."""
LOG.debug("Deallocating a VIP %s", loadbalancer.vip.ip_address)
# NOTE(blogan): this is kind of ugly but sufficient for now. Drivers
# will need access to the load balancer that the vip is/was attached
# to. However the data model serialization for the vip does not give a
# backref to the loadbalancer if accessed through the loadbalancer.
vip = loadbalancer.vip
vip.load_balancer = loadbalancer
self.network_driver.deallocate_vip(vip)
class UpdateVIP(BaseNetworkTask):
"""Task to update a VIP."""
def execute(self, loadbalancer):
LOG.debug("Updating VIP of load_balancer %s.", loadbalancer.id)
self.network_driver.update_vip(loadbalancer)
class UpdateVIPForDelete(BaseNetworkTask):
"""Task to update a VIP for listener delete flows."""
def execute(self, loadbalancer):
LOG.debug("Updating VIP for listener delete on load_balancer %s.",
loadbalancer.id)
self.network_driver.update_vip(loadbalancer, for_delete=True)
class GetAmphoraNetworkConfigs(BaseNetworkTask):
"""Task to retrieve amphora network details."""
def execute(self, loadbalancer, amphora=None):
LOG.debug("Retrieving vip network details.")
return self.network_driver.get_network_configs(loadbalancer,
amphora=amphora)
class GetAmphoraNetworkConfigsByID(BaseNetworkTask):
"""Task to retrieve amphora network details."""
def execute(self, loadbalancer_id, amphora_id=None):
LOG.debug("Retrieving vip network details.")
amp_repo = repositories.AmphoraRepository()
loadbalancer = self.lb_repo.get(db_apis.get_session(),
id=loadbalancer_id)
amphora = amp_repo.get(db_apis.get_session(), id=amphora_id)
return self.network_driver.get_network_configs(loadbalancer,
amphora=amphora)
class GetAmphoraeNetworkConfigs(BaseNetworkTask):
"""Task to retrieve amphorae network details."""
def execute(self, loadbalancer_id):
LOG.debug("Retrieving vip network details.")
loadbalancer = self.lb_repo.get(db_apis.get_session(),
id=loadbalancer_id)
return self.network_driver.get_network_configs(loadbalancer)
class FailoverPreparationForAmphora(BaseNetworkTask):
"""Task to prepare an amphora for failover."""
def execute(self, amphora):
LOG.debug("Prepare amphora %s for failover.", amphora.id)
self.network_driver.failover_preparation(amphora)
class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask):
"""Task retrieving all the port ids on an amphora, except lb network."""
def execute(self, amphora):
LOG.debug("Retrieve all but the lb network port id on amphora %s.",
amphora.id)
interfaces = self.network_driver.get_plugged_networks(
compute_id=amphora.compute_id)
ports = []
for interface_ in interfaces:
if interface_.port_id not in ports:
port = self.network_driver.get_port(port_id=interface_.port_id)
ips = port.fixed_ips
lb_network = False
for ip in ips:
if ip.ip_address == amphora.lb_network_ip:
lb_network = True
if not lb_network:
ports.append(port)
return ports
class PlugPorts(BaseNetworkTask):
"""Task to plug neutron ports into a compute instance."""
def execute(self, amphora, ports):
for port in ports:
LOG.debug('Plugging port ID: %(port_id)s into compute instance: '
'%(compute_id)s.',
{'port_id': port.id, 'compute_id': amphora.compute_id})
self.network_driver.plug_port(amphora, port)
class ApplyQos(BaseNetworkTask):
"""Apply Quality of Services to the VIP"""
def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id,
is_revert=False, request_qos_id=None):
"""Call network driver to apply QoS Policy on the vrrp ports."""
if not amps_data:
amps_data = loadbalancer.amphorae
amps_data = [amp
for amp in amps_data
if amp.status == constants.AMPHORA_ALLOCATED]
apply_qos = ApplyQosAmphora()
for amp_data in amps_data:
apply_qos._apply_qos_on_vrrp_port(loadbalancer, amp_data,
qos_policy_id)
def execute(self, loadbalancer, amps_data=None, update_dict=None):
"""Apply qos policy on the vrrp ports which are related with vip."""
qos_policy_id = loadbalancer.vip.qos_policy_id
if not qos_policy_id and (
not update_dict or (
'vip' not in update_dict or
'qos_policy_id' not in update_dict['vip'])):
return
self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id)
def revert(self, result, loadbalancer, amps_data=None, update_dict=None,
*args, **kwargs):
"""Handle a failure to apply QoS to VIP"""
request_qos_id = loadbalancer.vip.qos_policy_id
orig_lb = self.task_utils.get_current_loadbalancer_from_db(
loadbalancer.id)
orig_qos_id = orig_lb.vip.qos_policy_id
if request_qos_id != orig_qos_id:
self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id,
is_revert=True,
request_qos_id=request_qos_id)
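# Illustrative sketch, not from the original module: the guard in
# ApplyQos.execute skips QoS work unless the VIP already has a policy or
# the update explicitly touches one, assuming the update_dict shape used by
# the worker:
#
#     update_dict = {'vip': {'qos_policy_id': 'qos-1'}}  # assumed shape
#     touches_qos = bool(update_dict and
#                        'vip' in update_dict and
#                        'qos_policy_id' in update_dict['vip'])
#     # proceed when loadbalancer.vip.qos_policy_id or touches_qos is True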
class ApplyQosAmphora(BaseNetworkTask):
"""Apply Quality of Services to the VIP"""
def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id,
is_revert=False, request_qos_id=None):
"""Call network driver to apply QoS Policy on the vrrp ports."""
try:
self.network_driver.apply_qos_on_port(qos_policy_id,
amp_data.vrrp_port_id)
except Exception:
if not is_revert:
raise
            LOG.warning('Failed to undo qos policy %(qos_id)s '
                        'on vrrp port: %(port)s from '
                        'amphora: %(amp)s',
                        {'qos_id': request_qos_id,
                         'port': amp_data.vrrp_port_id,
                         'amp': amp_data.id})
def execute(self, loadbalancer, amp_data=None, update_dict=None):
"""Apply qos policy on the vrrp ports which are related with vip."""
qos_policy_id = loadbalancer.vip.qos_policy_id
if not qos_policy_id and (
update_dict and (
'vip' not in update_dict or
'qos_policy_id' not in update_dict['vip'])):
return
self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id)
def revert(self, result, loadbalancer, amp_data=None, update_dict=None,
*args, **kwargs):
"""Handle a failure to apply QoS to VIP"""
try:
request_qos_id = loadbalancer.vip.qos_policy_id
orig_lb = self.task_utils.get_current_loadbalancer_from_db(
loadbalancer.id)
orig_qos_id = orig_lb.vip.qos_policy_id
if request_qos_id != orig_qos_id:
self._apply_qos_on_vrrp_port(loadbalancer, amp_data,
orig_qos_id, is_revert=True,
request_qos_id=request_qos_id)
except Exception as e:
LOG.error('Failed to remove QoS policy: %s from port: %s due '
'to error: %s', orig_qos_id, amp_data.vrrp_port_id,
str(e))
class DeletePort(BaseNetworkTask):
"""Task to delete a network port."""
@tenacity.retry(retry=tenacity.retry_if_exception_type(),
stop=tenacity.stop_after_attempt(
CONF.networking.max_retries),
wait=tenacity.wait_exponential(
multiplier=CONF.networking.retry_backoff,
min=CONF.networking.retry_interval,
max=CONF.networking.retry_max), reraise=True)
def execute(self, port_id, passive_failure=False):
"""Delete the network port."""
if port_id is None:
return
if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
LOG.debug("Deleting network port %s", port_id)
else:
LOG.warning('Retrying network port %s delete attempt %s of %s.',
port_id,
self.execute.retry.statistics[
constants.ATTEMPT_NUMBER],
self.execute.retry.stop.max_attempt_number)
# Let the Taskflow engine know we are working and alive
# Don't use get with a default for 'attempt_number', we need to fail
# if that number is missing.
self.update_progress(
self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
self.execute.retry.stop.max_attempt_number)
try:
self.network_driver.delete_port(port_id)
except Exception:
if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
self.execute.retry.stop.max_attempt_number):
LOG.warning('Network port delete for port id: %s failed. '
'Retrying.', port_id)
raise
if passive_failure:
LOG.exception('Network port delete for port ID: %s failed. '
'This resource will be abandoned and should '
'manually be cleaned up once the '
'network service is functional.', port_id)
# Let's at least attempt to disable it so if the instance
# comes back from the dead it doesn't conflict with anything.
try:
self.network_driver.admin_down_port(port_id)
LOG.info('Successfully disabled (admin down) network port '
'%s that failed to delete.', port_id)
except Exception:
LOG.warning('Attempt to disable (admin down) network port '
'%s failed. The network service has failed. '
'Continuing.', port_id)
else:
LOG.exception('Network port delete for port ID: %s failed. '
'The network service has failed. '
'Aborting and reverting.', port_id)
raise
class CreateVIPBasePort(BaseNetworkTask):
"""Task to create the VIP base port for an amphora."""
@tenacity.retry(retry=tenacity.retry_if_exception_type(),
stop=tenacity.stop_after_attempt(
CONF.networking.max_retries),
wait=tenacity.wait_exponential(
multiplier=CONF.networking.retry_backoff,
min=CONF.networking.retry_interval,
max=CONF.networking.retry_max), reraise=True)
def execute(self, vip, vip_sg_id, amphora_id):
port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
fixed_ips = [{constants.SUBNET_ID: vip.subnet_id}]
sg_id = []
if vip_sg_id:
sg_id = [vip_sg_id]
port = self.network_driver.create_port(
vip.network_id, name=port_name, fixed_ips=fixed_ips,
secondary_ips=[vip.ip_address], security_group_ids=sg_id,
qos_policy_id=vip.qos_policy_id)
LOG.info('Created port %s with ID %s for amphora %s',
port_name, port.id, amphora_id)
return port
def revert(self, result, vip, vip_sg_id, amphora_id, *args, **kwargs):
if isinstance(result, failure.Failure):
return
try:
port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
for port in result:
self.network_driver.delete_port(port.id)
LOG.info('Deleted port %s with ID %s for amphora %s due to a '
'revert.', port_name, port.id, amphora_id)
except Exception as e:
LOG.error('Failed to delete port %s. Resources may still be in '
'use for a port intended for amphora %s due to error '
'%s. Search for a port named %s',
result, amphora_id, str(e), port_name)
class AdminDownPort(BaseNetworkTask):
def execute(self, port_id):
try:
self.network_driver.set_port_admin_state_up(port_id, False)
except base.PortNotFound:
return
for i in range(CONF.networking.max_retries):
port = self.network_driver.get_port(port_id)
if port.status == constants.DOWN:
LOG.debug('Disabled port: %s', port_id)
return
LOG.debug('Port %s is %s instead of DOWN, waiting.',
port_id, port.status)
time.sleep(CONF.networking.retry_interval)
LOG.error('Port %s failed to go DOWN. Port status is still %s. '
'Ignoring and continuing.', port_id, port.status)
def revert(self, result, port_id, *args, **kwargs):
if isinstance(result, failure.Failure):
return
try:
self.network_driver.set_port_admin_state_up(port_id, True)
except Exception as e:
LOG.error('Failed to bring port %s admin up on revert due to: %s.',
port_id, str(e))
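# The wait loop in AdminDownPort is a bounded poll: check the status,
# sleep, and give up quietly after max_retries. A generic sketch of that
# shape; get_status and the parameters are placeholders, not Octavia's
# API.
import time


def wait_for_status(get_status, wanted, max_retries=3, interval=1):
    for _ in range(max_retries):
        if get_status() == wanted:
            return True
        time.sleep(interval)
    return False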
class GetVIPSecurityGroupID(BaseNetworkTask):
def execute(self, loadbalancer_id):
sg_name = utils.get_vip_security_group_name(loadbalancer_id)
try:
security_group = self.network_driver.get_security_group(sg_name)
if security_group:
return security_group.id
except base.SecurityGroupNotFound:
with excutils.save_and_reraise_exception() as ctxt:
if self.network_driver.sec_grp_enabled:
LOG.error('VIP security group %s was not found.', sg_name)
else:
ctxt.reraise = False
return None
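# GetVIPSecurityGroupID relies on oslo.utils' save_and_reraise_exception:
# the caught exception is re-raised when the context exits unless the
# handler flips ctxt.reraise. A minimal sketch of that pattern; lookup()
# and its strict flag are hypothetical.
from oslo_utils import excutils


def lookup(strict=True):
    try:
        raise LookupError('not found')
    except LookupError:
        with excutils.save_and_reraise_exception() as ctxt:
            if not strict:
                # Cancel the re-raise and fall through to return None.
                ctxt.reraise = False
    return None


print(lookup(strict=False))  # prints None instead of raising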


@ -1,74 +0,0 @@
# Copyright 2019 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from taskflow import retry
LOG = logging.getLogger(__name__)
class SleepingRetryTimesController(retry.Times):
"""A retry controller to attempt subflow retries a number of times.
This retry controller overrides the Times on_failure to inject a
sleep interval between retries.
It also adds a log message when all of the retries are exhausted.
:param attempts: number of attempts to retry the associated subflow
before giving up
:type attempts: int
:param name: Meaningful name for this atom, should be something that is
distinguishable and understandable for notification,
debugging, storing and any other similar purposes.
:param provides: A set, string or list of items that
this will be providing (or could provide) to others, used
to correlate and associate the thing/s this atom
produces, if it produces anything at all.
:param requires: A set or list of required inputs for this atom's
``execute`` method.
:param rebind: A dict of key/value pairs used to define argument
name conversions for inputs to this atom's ``execute``
method.
:param revert_all: when provided this will cause the full flow to revert
when the number of attempts that have been tried
has been reached (when false, it will only locally
revert the associated subflow)
:type revert_all: bool
:param interval: Interval, in seconds, between retry attempts.
:type interval: int
"""
def __init__(self, attempts=1, name=None, provides=None, requires=None,
auto_extract=True, rebind=None, revert_all=False, interval=1):
super().__init__(
attempts, name, provides, requires, auto_extract, rebind,
revert_all)
self._interval = interval
def on_failure(self, history, *args, **kwargs):
if len(history) < self._attempts:
LOG.warning('%s attempt %s of %s failed. Sleeping %s seconds and '
'retrying.',
self.name[self.name.startswith('retry-') and
len('retry-'):], len(history),
self._attempts, self._interval)
time.sleep(self._interval)
return retry.RETRY
return self._revert_action
def revert(self, history, *args, **kwargs):
LOG.error('%s retries with interval %s seconds have failed for %s. '
'Giving up.', len(history), self._interval, self.name)
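# A hypothetical usage sketch for the controller above: attach it as the
# retry strategy of a taskflow flow so a failing subflow is re-run with a
# pause between attempts. FlakyTask is illustrative only.
from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class FlakyTask(task.Task):
    def execute(self):
        raise RuntimeError('transient failure')


flow = linear_flow.Flow(
    'example-subflow',
    retry=SleepingRetryTimesController(attempts=3, interval=1))
flow.add(FlakyTask())
# engines.run() calls on_failure() between attempts and revert() once the
# attempts are exhausted (the final failure is then re-raised).
engines.run(flow)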


@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@ -1,824 +0,0 @@
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from octavia_lib.api.drivers import data_models as driver_dm
from octavia_lib.api.drivers import exceptions
from oslo_utils import uuidutils
from octavia.api.drivers.amphora_driver.v1 import driver
from octavia.common import constants as consts
from octavia.network import base as network_base
from octavia.tests.common import sample_data_models
from octavia.tests.unit import base
class TestAmphoraDriver(base.TestRpc):
def setUp(self):
super().setUp()
self.amp_driver = driver.AmphoraProviderDriver()
self.sample_data = sample_data_models.SampleDriverDataModels()
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_port(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
mock_net_driver.allocate_vip.return_value = (self.sample_data.db_vip,
[])
provider_vip_dict, add_vips = self.amp_driver.create_vip_port(
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict)
self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict)
self.assertEqual([], add_vips)
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_port_without_port_security_enabled(
self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
network = mock.MagicMock()
network.port_security_enabled = False
mock_net_driver.get_network.return_value = network
mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip
self.assertRaises(exceptions.DriverError,
self.amp_driver.create_vip_port,
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict)
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_port_failed(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
mock_net_driver.allocate_vip.side_effect = (
network_base.AllocateVIPException())
self.assertRaises(exceptions.DriverError,
self.amp_driver.create_vip_port,
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict)
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_with_additional_vips(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip
additional_vips = [{
consts.SUBNET_ID: uuidutils.generate_uuid()
}]
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.create_vip_port,
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict, additional_vips)
# Load Balancer
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_create(self, mock_cast):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_create(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.FLAVOR: None,
consts.AVAILABILITY_ZONE: None}
mock_cast.assert_called_with({}, 'create_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_delete(self, mock_cast):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_delete(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
'cascade': False}
mock_cast.assert_called_with({}, 'delete_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_failover(self, mock_cast):
self.amp_driver.loadbalancer_failover(self.sample_data.lb_id)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id}
mock_cast.assert_called_with({}, 'failover_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update(self, mock_cast):
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id, admin_state_up=True)
lb_dict = {'enabled': True}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update_name(self, mock_cast):
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id, name='Great LB')
lb_dict = {'name': 'Great LB'}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update_qos(self, mock_cast):
qos_policy_id = uuidutils.generate_uuid()
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id,
vip_qos_policy_id=qos_policy_id)
lb_dict = {'vip': {'qos_policy_id': qos_policy_id}}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
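# These update tests all pin the same behavior: only fields that changed
# between the old and new provider objects are cast, with admin_state_up
# renamed to enabled on the wire. An illustrative sketch of that delta
# computation; build_update_dict is not the driver's real helper and
# ignores the provider library's Unset sentinel handling.
from types import SimpleNamespace


def build_update_dict(old_obj, new_obj, fields):
    updates = {}
    for field in fields:
        new_value = getattr(new_obj, field, None)
        old_value = getattr(old_obj, field, None)
        if new_value is not None and new_value != old_value:
            key = 'enabled' if field == 'admin_state_up' else field
            updates[key] = new_value
    return updates


old = SimpleNamespace(name=None, admin_state_up=None)
new = SimpleNamespace(name='Great LB', admin_state_up=True)
print(build_update_dict(old, new, ['name', 'admin_state_up']))
# {'name': 'Great LB', 'enabled': True}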
# Listener
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_create(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id,
protocol=consts.PROTOCOL_HTTPS,
alpn_protocols=consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS)
self.amp_driver.listener_create(provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
mock_cast.assert_called_with({}, 'create_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_create_unsupported_alpn(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id,
protocol=consts.PROTOCOL_HTTPS)
provider_listener.alpn_protocols = ['http/1.1', 'eureka']
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.listener_create,
provider_listener)
mock_cast.assert_not_called()
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_create_unsupported_protocol(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id,
protocol='UNSUPPORTED_PROTO')
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.listener_create,
provider_listener)
mock_cast.assert_not_called()
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_delete(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
self.amp_driver.listener_delete(provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
mock_cast.assert_called_with({}, 'delete_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_update(self, mock_cast):
old_provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id, admin_state_up=False)
listener_dict = {'enabled': False}
self.amp_driver.listener_update(old_provider_listener,
provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
consts.LISTENER_UPDATES: listener_dict}
mock_cast.assert_called_with({}, 'update_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_update_name(self, mock_cast):
old_provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id, name='Great Listener')
listener_dict = {'name': 'Great Listener'}
self.amp_driver.listener_update(old_provider_listener,
provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
consts.LISTENER_UPDATES: listener_dict}
mock_cast.assert_called_with({}, 'update_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_update_unsupported_alpn(self, mock_cast):
old_provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id,
alpn_protocols=['http/1.1', 'eureka'])
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.listener_update,
old_provider_listener,
provider_listener)
# Pool
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_create(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id,
lb_algorithm=consts.LB_ALGORITHM_ROUND_ROBIN,
alpn_protocols=consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS)
self.amp_driver.pool_create(provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id}
mock_cast.assert_called_with({}, 'create_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_create_unsupported_algorithm(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool.lb_algorithm = 'foo'
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_create,
provider_pool)
mock_cast.assert_not_called()
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_create_unsupported_alpn(self, mock_cast):
provider_pool = driver_dm.Pool(pool_id=self.sample_data.pool1_id)
provider_pool.alpn_protocols = ['http/1.1', 'eureka']
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_create,
provider_pool)
mock_cast.assert_not_called()
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_delete(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
self.amp_driver.pool_delete(provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id}
mock_cast.assert_called_with({}, 'delete_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id, admin_state_up=True,
ca_tls_container_data='CA DATA', ca_tls_container_ref='CA REF',
crl_container_data='CRL DATA', crl_container_ref='CRL REF',
description='TEST DESCRIPTION', name='TEST NAME',
lb_algorithm=consts.LB_ALGORITHM_SOURCE_IP,
session_persistence='FAKE SP', tls_container_data='TLS DATA',
tls_container_ref='TLS REF', tls_enabled=False)
pool_dict = {'description': 'TEST DESCRIPTION',
'lb_algorithm': 'SOURCE_IP', 'name': 'TEST NAME',
'session_persistence': 'FAKE SP', 'tls_enabled': False,
'enabled': True, 'tls_certificate_id': 'TLS REF',
'ca_tls_certificate_id': 'CA REF',
'crl_container_id': 'CRL REF'}
self.amp_driver.pool_update(old_provider_pool, provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id,
consts.POOL_UPDATES: pool_dict}
mock_cast.assert_called_with({}, 'update_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update_name(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id, name='Great pool',
admin_state_up=True, tls_enabled=True)
pool_dict = {'name': 'Great pool',
'enabled': True,
'tls_enabled': True}
self.amp_driver.pool_update(old_provider_pool, provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id,
consts.POOL_UPDATES: pool_dict}
mock_cast.assert_called_with({}, 'update_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update_unsupported_algorithm(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool.lb_algorithm = 'foo'
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_update,
old_provider_pool,
provider_pool)
mock_cast.assert_not_called()
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update_unsupported_alpn(self, mock_cast):
old_provider_pool = driver_dm.Pool(pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
listener_id=self.sample_data.pool1_id,
alpn_protocols=['http/1.1', 'eureka'])
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_update,
old_provider_pool,
provider_pool)
# Member
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create(self, mock_cast, mock_pool_get, mock_session):
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
self.amp_driver.member_create(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'create_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create_udp_ipv4(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "192.0.1.1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool_get.return_value = mock_pool
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id,
address="192.0.2.1")
self.amp_driver.member_create(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'create_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "fe80::1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool_get.return_value = mock_pool
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id,
address="192.0.2.1")
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.member_create,
provider_member)
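# The two UDP member tests above send the same payload shape; what flips
# the outcome is whether the member address and the listener's VIP share
# an IP version. A sketch of that check with the stdlib ipaddress module;
# the real driver's helper may differ.
import ipaddress


def mixed_ip_versions(vip_address, member_address):
    return (ipaddress.ip_address(vip_address).version !=
            ipaddress.ip_address(member_address).version)


print(mixed_ip_versions('192.0.1.1', '192.0.2.1'))  # False -> accepted
print(mixed_ip_versions('fe80::1', '192.0.2.1'))    # True -> rejected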
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_delete(self, mock_cast):
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
self.amp_driver.member_delete(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'delete_member', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_update(self, mock_cast):
old_provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id, admin_state_up=True)
member_dict = {'enabled': True}
self.amp_driver.member_update(old_provider_member, provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id,
consts.MEMBER_UPDATES: member_dict}
mock_cast.assert_called_with({}, 'update_member', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_update_name(self, mock_cast):
old_provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id, name='Great member')
member_dict = {'name': 'Great member'}
self.amp_driver.member_update(old_provider_member, provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id,
consts.MEMBER_UPDATES: member_dict}
mock_cast.assert_called_with({}, 'update_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='192.0.2.17', monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'ip_address': '192.0.2.17',
'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'enabled': False,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_no_admin_addr(self, mock_cast,
mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id,
monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_clear_already_empty(
self, mock_cast, mock_pool_get, mock_session):
"""Expect that we will pass an empty payload if directed.
Logic for whether to attempt this is handled above the driver
layer, so our driver is responsible for forwarding the request even
if it is a perceived no-op.
"""
mock_pool = mock.MagicMock()
mock_pool_get.return_value = mock_pool
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, [])
payload = {'old_member_ids': [],
'new_member_ids': [],
'updated_members': []}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
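# An illustrative derivation of the batch payload these tests assert on:
# members only in the database are deleted, members only in the request
# are created, and the intersection is updated. The set arithmetic below
# is an assumption for clarity, not the driver's actual code.
def split_members(db_member_ids, requested_member_ids):
    db_ids = set(db_member_ids)
    req_ids = set(requested_member_ids)
    return (sorted(db_ids - req_ids),   # old_member_ids -> delete
            sorted(req_ids - db_ids),   # new_member_ids -> create
            sorted(db_ids & req_ids))   # updated members


print(split_members(['m1', 'm2'], ['m2', 'm3']))
# (['m1'], ['m3'], ['m2'])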
# Health Monitor
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_create(self, mock_cast):
provider_HM = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
self.amp_driver.health_monitor_create(provider_HM)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
mock_cast.assert_called_with({}, 'create_health_monitor', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_delete(self, mock_cast):
provider_HM = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
self.amp_driver.health_monitor_delete(provider_HM)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
mock_cast.assert_called_with({}, 'delete_health_monitor', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_udp_ipv4(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "192.0.1.1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='192.0.2.17', monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'ip_address': '192.0.2.17',
'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'enabled': False,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "192.0.1.1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='fe80::1', monitor_address='fe80::2',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.member_batch_update,
self.sample_data.pool1_id, prov_members)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_update(self, mock_cast):
old_provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True,
max_retries=1, max_retries_down=2)
hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2}
self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
consts.HEALTH_MONITOR_UPDATES: hm_dict}
mock_cast.assert_called_with({}, 'update_health_monitor', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_update_name(self, mock_cast):
old_provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id, name='Great HM')
hm_dict = {'name': 'Great HM'}
self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
consts.HEALTH_MONITOR_UPDATES: hm_dict}
mock_cast.assert_called_with({}, 'update_health_monitor', **payload)
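# The hm_dict in test_health_monitor_update shows the field renames done
# on the RPC boundary: max_retries -> rise_threshold, max_retries_down ->
# fall_threshold, and admin_state_up -> enabled. A minimal sketch of that
# mapping; HM_RENAMES is illustrative, not the driver's table.
HM_RENAMES = {'max_retries': 'rise_threshold',
              'max_retries_down': 'fall_threshold',
              'admin_state_up': 'enabled'}


def rename_hm_fields(updates):
    return {HM_RENAMES.get(key, key): value
            for key, value in updates.items()}


print(rename_hm_fields({'max_retries': 1, 'max_retries_down': 2}))
# {'rise_threshold': 1, 'fall_threshold': 2}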
# L7 Policy
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.ListenerRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_create(self, mock_cast, mock_listener_get, mock_session):
mock_listener = mock.MagicMock()
mock_listener.protocol = consts.PROTOCOL_HTTP
mock_listener_get.return_value = mock_listener
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
self.amp_driver.l7policy_create(provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
mock_cast.assert_called_with({}, 'create_l7policy', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.ListenerRepository.get')
def test_l7policy_create_invalid_listener_protocol(self, mock_listener_get,
mock_session):
mock_listener = mock.MagicMock()
mock_listener.protocol = consts.PROTOCOL_UDP
mock_listener_get.return_value = mock_listener
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.l7policy_create,
provider_l7policy)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_delete(self, mock_cast):
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
self.amp_driver.l7policy_delete(provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
mock_cast.assert_called_with({}, 'delete_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_update(self, mock_cast):
old_provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True)
l7policy_dict = {'enabled': True}
self.amp_driver.l7policy_update(old_provider_l7policy,
provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
consts.L7POLICY_UPDATES: l7policy_dict}
mock_cast.assert_called_with({}, 'update_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_update_name(self, mock_cast):
old_provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy')
l7policy_dict = {'name': 'Great L7Policy'}
self.amp_driver.l7policy_update(old_provider_l7policy,
provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
consts.L7POLICY_UPDATES: l7policy_dict}
mock_cast.assert_called_with({}, 'update_l7policy', **payload)
# L7 Rules
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_create(self, mock_cast):
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
self.amp_driver.l7rule_create(provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
mock_cast.assert_called_with({}, 'create_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_delete(self, mock_cast):
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
self.amp_driver.l7rule_delete(provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
mock_cast.assert_called_with({}, 'delete_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_update(self, mock_cast):
old_provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True)
l7rule_dict = {'enabled': True}
self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
consts.L7RULE_UPDATES: l7rule_dict}
mock_cast.assert_called_with({}, 'update_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_update_invert(self, mock_cast):
old_provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id, invert=True)
l7rule_dict = {'invert': True}
self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
consts.L7RULE_UPDATES: l7rule_dict}
mock_cast.assert_called_with({}, 'update_l7rule', **payload)
# Flavor
def test_get_supported_flavor_metadata(self):
test_schema = {
"properties": {
"test_name": {"description": "Test description"},
"test_name2": {"description": "Another description"}}}
ref_dict = {"test_name": "Test description",
"test_name2": "Another description"}
# mock out the supported_flavor_metadata
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', test_schema):
result = self.amp_driver.get_supported_flavor_metadata()
self.assertEqual(ref_dict, result)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.get_supported_flavor_metadata)
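# The transformation this test pins down flattens a JSON Schema
# 'properties' block into a name -> description dict. A sketch under that
# assumption; describe_properties is illustrative only.
def describe_properties(schema):
    return {name: spec.get('description', '')
            for name, spec in schema.get('properties', {}).items()}


print(describe_properties({
    "properties": {
        "test_name": {"description": "Test description"},
        "test_name2": {"description": "Another description"}}}))
# {'test_name': 'Test description', 'test_name2': 'Another description'}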
def test_validate_flavor(self):
ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE}
self.amp_driver.validate_flavor(ref_dict)
# Test that a bad flavor metadata value is rejected
ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_flavor,
ref_dict)
# Test bad flavor metadata key
ref_dict = {'bogus': 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_flavor,
ref_dict)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.validate_flavor, 'bogus')
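# Flavor validation maps jsonschema failures onto driver exceptions. A
# standalone sketch with the jsonschema library; FLAVOR_SCHEMA here is a
# hypothetical stand-in, not Octavia's real flavor schema.
from jsonschema import exceptions as js_exceptions
from jsonschema import validate

FLAVOR_SCHEMA = {
    "type": "object",
    "properties": {
        "loadbalancer_topology": {"enum": ["SINGLE", "ACTIVE_STANDBY"]}},
    "additionalProperties": False}

try:
    validate(instance={"loadbalancer_topology": "bogus"},
             schema=FLAVOR_SCHEMA)
except js_exceptions.ValidationError as e:
    print('unsupported option: %s' % e.message)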
# Availability Zone
def test_get_supported_availability_zone_metadata(self):
test_schema = {
"properties": {
"test_name": {"description": "Test description"},
"test_name2": {"description": "Another description"}}}
ref_dict = {"test_name": "Test description",
"test_name2": "Another description"}
# mock out the supported_availability_zone_metadata
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', test_schema):
result = self.amp_driver.get_supported_availability_zone_metadata()
self.assertEqual(ref_dict, result)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
self.assertRaises(
exceptions.DriverError,
self.amp_driver.get_supported_availability_zone_metadata)
def test_validate_availability_zone(self):
with mock.patch('stevedore.driver.DriverManager.driver') as m_driver:
m_driver.validate_availability_zone.return_value = None
ref_dict = {consts.COMPUTE_ZONE: 'my_compute_zone'}
self.amp_driver.validate_availability_zone(ref_dict)
# Test bad availability zone metadata key
ref_dict = {'bogus': 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_availability_zone,
ref_dict)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.validate_availability_zone,
'bogus')


@ -43,15 +43,13 @@ class TestHealthManager(base.TestCase):
super().setUp()
@mock.patch('octavia.db.api.wait_for_connection')
@mock.patch('octavia.controller.worker.v1.controller_worker.'
'ControllerWorker.failover_amphora')
@mock.patch('octavia.controller.worker.v2.controller_worker.'
'ControllerWorker.failover_amphora')
@mock.patch('octavia.db.repositories.AmphoraHealthRepository.'
'get_stale_amphora')
@mock.patch('octavia.db.api.get_session')
def test_health_check_stale_amphora(self, session_mock, get_stale_amp_mock,
failover_mockv2, failover_mock,
failover_mock,
db_wait_mock):
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="health_manager", heartbeat_timeout=5)
@ -87,15 +85,13 @@ class TestHealthManager(base.TestCase):
self.assertRaises(TestException, hm.health_check)
self.assertEqual(4, mock_session.rollback.call_count)
@mock.patch('octavia.controller.worker.v1.controller_worker.'
'ControllerWorker.failover_amphora')
@mock.patch('octavia.controller.worker.v2.controller_worker.'
'ControllerWorker.failover_amphora')
@mock.patch('octavia.db.repositories.AmphoraHealthRepository.'
'get_stale_amphora', return_value=None)
@mock.patch('octavia.db.api.get_session')
def test_health_check_nonstale_amphora(self, session_mock,
get_stale_amp_mock, failover_mockv2,
get_stale_amp_mock,
failover_mock):
get_stale_amp_mock.side_effect = [None, TestException('test')]
@ -104,20 +100,15 @@ class TestHealthManager(base.TestCase):
hm.health_check()
session_mock.assert_called_once_with(autocommit=False)
if CONF.api_settings.default_provider_driver == 'amphorav2':
self.assertFalse(failover_mockv2.called)
else:
self.assertFalse(failover_mock.called)
self.assertFalse(failover_mock.called)
@mock.patch('octavia.controller.worker.v1.controller_worker.'
'ControllerWorker.failover_amphora')
@mock.patch('octavia.controller.worker.v2.controller_worker.'
'ControllerWorker.failover_amphora')
@mock.patch('octavia.db.repositories.AmphoraHealthRepository.'
'get_stale_amphora', return_value=None)
@mock.patch('octavia.db.api.get_session')
def test_health_check_exit(self, session_mock, get_stale_amp_mock,
failover_mockv2, failover_mock):
failover_mock):
get_stale_amp_mock.return_value = None
exit_event = threading.Event()
@ -125,20 +116,15 @@ class TestHealthManager(base.TestCase):
hm.health_check()
session_mock.assert_called_once_with(autocommit=False)
if CONF.api_settings.default_provider_driver == 'amphorav2':
self.assertFalse(failover_mockv2.called)
else:
self.assertFalse(failover_mock.called)
self.assertFalse(failover_mock.called)
@mock.patch('octavia.controller.worker.v1.controller_worker.'
'ControllerWorker.failover_amphora')
@mock.patch('octavia.controller.worker.v2.controller_worker.'
'ControllerWorker.failover_amphora')
@mock.patch('octavia.db.repositories.AmphoraHealthRepository.'
'get_stale_amphora', return_value=None)
@mock.patch('octavia.db.api.get_session')
def test_health_check_db_error(self, session_mock, get_stale_amp_mock,
failover_mockv2, failover_mock):
failover_mock):
get_stale_amp_mock.return_value = None
mock_session = mock.MagicMock()


@ -158,73 +158,12 @@ class TestCertRotation(base.TestCase):
super().setUp()
self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF))
@mock.patch('octavia.controller.worker.v1.controller_worker.'
'ControllerWorker.amphora_cert_rotation')
@mock.patch('octavia.db.repositories.AmphoraRepository.'
'get_cert_expiring_amphora')
@mock.patch('octavia.db.api.get_session')
def test_cert_rotation_expired_amphora_with_exception(self, session,
cert_exp_amp_mock,
amp_cert_mock
):
self.CONF.config(group="api_settings",
default_provider_driver='amphorav1')
amphora = mock.MagicMock()
amphora.id = AMPHORA_ID
session.return_value = session
cert_exp_amp_mock.side_effect = [amphora, TestException(
'break_while')]
cr = house_keeping.CertRotation()
self.assertRaises(TestException, cr.rotate)
amp_cert_mock.assert_called_once_with(AMPHORA_ID)
@mock.patch('octavia.controller.worker.v1.controller_worker.'
'ControllerWorker.amphora_cert_rotation')
@mock.patch('octavia.db.repositories.AmphoraRepository.'
'get_cert_expiring_amphora')
@mock.patch('octavia.db.api.get_session')
def test_cert_rotation_expired_amphora_without_exception(self, session,
cert_exp_amp_mock,
amp_cert_mock
):
self.CONF.config(group="api_settings",
default_provider_driver='amphorav1')
amphora = mock.MagicMock()
amphora.id = AMPHORA_ID
session.return_value = session
cert_exp_amp_mock.side_effect = [amphora, None]
cr = house_keeping.CertRotation()
self.assertIsNone(cr.rotate())
amp_cert_mock.assert_called_once_with(AMPHORA_ID)
@mock.patch('octavia.controller.worker.v1.controller_worker.'
'ControllerWorker.amphora_cert_rotation')
@mock.patch('octavia.db.repositories.AmphoraRepository.'
'get_cert_expiring_amphora')
@mock.patch('octavia.db.api.get_session')
def test_cert_rotation_non_expired_amphora(self, session,
cert_exp_amp_mock,
amp_cert_mock):
self.CONF.config(group="api_settings",
default_provider_driver='amphorav1')
session.return_value = session
cert_exp_amp_mock.return_value = None
cr = house_keeping.CertRotation()
cr.rotate()
self.assertFalse(amp_cert_mock.called)
@mock.patch('octavia.controller.worker.v2.controller_worker.'
'ControllerWorker.amphora_cert_rotation')
@mock.patch('octavia.db.repositories.AmphoraRepository.'
'get_cert_expiring_amphora')
@mock.patch('octavia.db.api.get_session')
def test_cert_rotation_expired_amphora_with_exception_amphorav2(
def test_cert_rotation_expired_amphora_with_exception(
self, session, cert_exp_amp_mock, amp_cert_mock):
self.CONF.config(group="api_settings",
default_provider_driver='amphora')
@ -245,7 +184,7 @@ class TestCertRotation(base.TestCase):
@mock.patch('octavia.db.repositories.AmphoraRepository.'
'get_cert_expiring_amphora')
@mock.patch('octavia.db.api.get_session')
def test_cert_rotation_expired_amphora_without_exception_amphorav2(
def test_cert_rotation_expired_amphora_without_exception(
self, session, cert_exp_amp_mock, amp_cert_mock):
self.CONF.config(group="api_settings",
default_provider_driver='amphora')
@ -265,7 +204,7 @@ class TestCertRotation(base.TestCase):
@mock.patch('octavia.db.repositories.AmphoraRepository.'
'get_cert_expiring_amphora')
@mock.patch('octavia.db.api.get_session')
def test_cert_rotation_non_expired_amphora_amphorav2(
def test_cert_rotation_non_expired_amphora(
self, session, cert_exp_amp_mock, amp_cert_mock):
self.CONF.config(group="api_settings",
default_provider_driver='amphora')


@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@ -1,61 +0,0 @@
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
import oslo_messaging as messaging
from octavia.controller.queue.v1 import consumer
from octavia.controller.queue.v1 import endpoints
from octavia.tests.unit import base
class TestConsumer(base.TestRpc):
def setUp(self):
super().setUp()
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(group="oslo_messaging", topic='foo_topic')
conf.config(host='test-hostname')
self.conf = conf.conf
@mock.patch.object(messaging, 'Target')
@mock.patch.object(endpoints, 'Endpoints')
@mock.patch.object(messaging, 'get_rpc_server')
def test_consumer_run(self, mock_rpc_server, mock_endpoint, mock_target):
mock_rpc_server_rv = mock.Mock()
mock_rpc_server.return_value = mock_rpc_server_rv
mock_endpoint_rv = mock.Mock()
mock_endpoint.return_value = mock_endpoint_rv
mock_target_rv = mock.Mock()
mock_target.return_value = mock_target_rv
consumer.ConsumerService(1, self.conf).run()
mock_target.assert_called_once_with(topic='foo_topic',
server='test-hostname',
fanout=False)
mock_endpoint.assert_called_once_with()
@mock.patch.object(messaging, 'get_rpc_server')
def test_consumer_terminate(self, mock_rpc_server):
mock_rpc_server_rv = mock.Mock()
mock_rpc_server.return_value = mock_rpc_server_rv
cons = consumer.ConsumerService(1, self.conf)
cons.run()
cons.terminate()
mock_rpc_server_rv.stop.assert_called_once_with()
mock_rpc_server_rv.wait.assert_called_once_with()
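# A hedged sketch of the oslo.messaging wiring this consumer test mocks
# out: a Target names the topic and server, endpoint objects expose the
# callable methods, and get_rpc_server() binds them to a transport. The
# endpoint class and its method are placeholders.
from oslo_config import cfg
import oslo_messaging as messaging


class ExampleEndpoint(object):
    def create_load_balancer(self, context, loadbalancer_id,
                             flavor=None, availability_zone=None):
        print('would create %s' % loadbalancer_id)


transport = messaging.get_rpc_transport(cfg.CONF)
target = messaging.Target(topic='foo_topic', server='test-hostname',
                          fanout=False)
server = messaging.get_rpc_server(transport, target, [ExampleEndpoint()],
                                  executor='threading')
server.start()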


@ -1,189 +0,0 @@
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from octavia.controller.queue.v1 import endpoints
from octavia.controller.worker.v1 import controller_worker
from octavia.tests.unit import base
class TestEndpoints(base.TestCase):
def setUp(self):
super().setUp()
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(octavia_plugins='hot_plug_plugin')
mock_class = mock.create_autospec(controller_worker.ControllerWorker)
self.worker_patcher = mock.patch('octavia.controller.queue.v1.'
'endpoints.stevedore_driver')
self.worker_patcher.start().ControllerWorker = mock_class
self.ep = endpoints.Endpoints()
self.context = {}
self.resource_updates = {}
self.resource_id = 1234
self.server_group_id = 3456
self.flavor_id = uuidutils.generate_uuid()
self.availability_zone = uuidutils.generate_uuid()
def test_create_load_balancer(self):
self.ep.create_load_balancer(self.context, self.resource_id,
flavor=self.flavor_id,
availability_zone=self.availability_zone)
self.ep.worker.create_load_balancer.assert_called_once_with(
self.resource_id, self.flavor_id, self.availability_zone)
def test_create_load_balancer_no_flavor_or_az(self):
self.ep.create_load_balancer(self.context, self.resource_id)
self.ep.worker.create_load_balancer.assert_called_once_with(
self.resource_id, None, None)
def test_update_load_balancer(self):
self.ep.update_load_balancer(self.context, self.resource_id,
self.resource_updates)
self.ep.worker.update_load_balancer.assert_called_once_with(
self.resource_id, self.resource_updates)
def test_delete_load_balancer(self):
self.ep.delete_load_balancer(self.context, self.resource_id)
self.ep.worker.delete_load_balancer.assert_called_once_with(
self.resource_id, False)
def test_failover_load_balancer(self):
self.ep.failover_load_balancer(self.context, self.resource_id)
self.ep.worker.failover_loadbalancer.assert_called_once_with(
self.resource_id)
def test_failover_amphora(self):
self.ep.failover_amphora(self.context, self.resource_id)
self.ep.worker.failover_amphora.assert_called_once_with(
self.resource_id)
def test_create_listener(self):
self.ep.create_listener(self.context, self.resource_id)
self.ep.worker.create_listener.assert_called_once_with(
self.resource_id)
def test_update_listener(self):
self.ep.update_listener(self.context, self.resource_id,
self.resource_updates)
self.ep.worker.update_listener.assert_called_once_with(
self.resource_id, self.resource_updates)
def test_delete_listener(self):
self.ep.delete_listener(self.context, self.resource_id)
self.ep.worker.delete_listener.assert_called_once_with(
self.resource_id)
def test_create_pool(self):
self.ep.create_pool(self.context, self.resource_id)
self.ep.worker.create_pool.assert_called_once_with(
self.resource_id)
def test_update_pool(self):
self.ep.update_pool(self.context, self.resource_id,
self.resource_updates)
self.ep.worker.update_pool.assert_called_once_with(
self.resource_id, self.resource_updates)
def test_delete_pool(self):
self.ep.delete_pool(self.context, self.resource_id)
self.ep.worker.delete_pool.assert_called_once_with(
self.resource_id)
def test_create_health_monitor(self):
self.ep.create_health_monitor(self.context, self.resource_id)
self.ep.worker.create_health_monitor.assert_called_once_with(
self.resource_id)
def test_update_health_monitor(self):
self.ep.update_health_monitor(self.context, self.resource_id,
self.resource_updates)
self.ep.worker.update_health_monitor.assert_called_once_with(
self.resource_id, self.resource_updates)
def test_delete_health_monitor(self):
self.ep.delete_health_monitor(self.context, self.resource_id)
self.ep.worker.delete_health_monitor.assert_called_once_with(
self.resource_id)
def test_create_member(self):
self.ep.create_member(self.context, self.resource_id)
self.ep.worker.create_member.assert_called_once_with(
self.resource_id)
def test_update_member(self):
self.ep.update_member(self.context, self.resource_id,
self.resource_updates)
self.ep.worker.update_member.assert_called_once_with(
self.resource_id, self.resource_updates)
def test_batch_update_members(self):
self.ep.batch_update_members(
self.context, [9], [11], [self.resource_updates])
self.ep.worker.batch_update_members.assert_called_once_with(
[9], [11], [self.resource_updates])
def test_delete_member(self):
self.ep.delete_member(self.context, self.resource_id)
self.ep.worker.delete_member.assert_called_once_with(
self.resource_id)
def test_create_l7policy(self):
self.ep.create_l7policy(self.context, self.resource_id)
self.ep.worker.create_l7policy.assert_called_once_with(
self.resource_id)
def test_update_l7policy(self):
self.ep.update_l7policy(self.context, self.resource_id,
self.resource_updates)
self.ep.worker.update_l7policy.assert_called_once_with(
self.resource_id, self.resource_updates)
def test_delete_l7policy(self):
self.ep.delete_l7policy(self.context, self.resource_id)
self.ep.worker.delete_l7policy.assert_called_once_with(
self.resource_id)
def test_create_l7rule(self):
self.ep.create_l7rule(self.context, self.resource_id)
self.ep.worker.create_l7rule.assert_called_once_with(
self.resource_id)
def test_update_l7rule(self):
self.ep.update_l7rule(self.context, self.resource_id,
self.resource_updates)
self.ep.worker.update_l7rule.assert_called_once_with(
self.resource_id, self.resource_updates)
def test_delete_l7rule(self):
self.ep.delete_l7rule(self.context, self.resource_id)
self.ep.worker.delete_l7rule.assert_called_once_with(
self.resource_id)
def test_update_amphora_agent_config(self):
self.ep.update_amphora_agent_config(self.context, self.resource_id)
self.ep.worker.update_amphora_agent_config.assert_called_once_with(
self.resource_id)
def test_delete_amphora(self):
self.ep.delete_amphora(self.context, self.resource_id)
self.ep.worker.delete_amphora.assert_called_once_with(
self.resource_id)


@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@ -1,474 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.common import data_models
from octavia.controller.worker.v1.flows import amphora_flows
import octavia.tests.unit.base as base
AUTH_VERSION = '2'
# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
@mock.patch('octavia.common.utils.get_network_driver')
class TestAmphoraFlows(base.TestCase):
def setUp(self):
super().setUp()
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
self.conf.config(
group="controller_worker",
amphora_driver='amphora_haproxy_rest_driver')
self.conf.config(group="nova", enable_anti_affinity=False)
self.AmpFlow = amphora_flows.AmphoraFlows()
self.amp1 = data_models.Amphora(id=1)
self.amp2 = data_models.Amphora(id=2)
self.amp3 = data_models.Amphora(id=3, status=constants.DELETED)
self.amp4 = data_models.Amphora(id=uuidutils.generate_uuid())
self.lb = data_models.LoadBalancer(
id=4, amphorae=[self.amp1, self.amp2, self.amp3])
def test_get_create_amphora_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow.get_create_amphora_flow()
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(4, len(amp_flow.requires))
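# These flow tests assert on taskflow's symbol bookkeeping: a flow's
# requires/provides sets are derived from its tasks. A tiny sketch of the
# same introspection on a hand-built flow; MakeThing is illustrative.
from taskflow import task
from taskflow.patterns import linear_flow


class MakeThing(task.Task):
    default_provides = 'thing'

    def execute(self, raw_material):
        return 'thing made from %s' % raw_material


flow = linear_flow.Flow('build').add(MakeThing())
print(sorted(flow.requires))  # ['raw_material']
print(sorted(flow.provides))  # ['thing']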
def test_get_create_amphora_flow_cert(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow.get_create_amphora_flow()
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(4, len(amp_flow.requires))
def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_STANDALONE)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_STANDALONE)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_master_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_MASTER)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.conf.config(group="nova", enable_anti_affinity=True)
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_MASTER)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
self.conf.config(group="nova", enable_anti_affinity=False)
def test_get_cert_backup_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_BACKUP)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_bogus_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', 'BOGUS_ROLE')
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.conf.config(group="nova", enable_anti_affinity=True)
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_BACKUP)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
self.conf.config(group="nova", enable_anti_affinity=False)
def test_get_delete_amphora_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow.get_delete_amphora_flow(self.amp4)
self.assertIsInstance(amp_flow, flow.Flow)
# This flow injects the required data at flow compile time.
self.assertEqual(0, len(amp_flow.provides))
self.assertEqual(0, len(amp_flow.requires))
def test_get_failover_flow_act_stdby(self, mock_get_net_driver):
failed_amphora = data_models.Amphora(
id=uuidutils.generate_uuid(), role=constants.ROLE_MASTER,
load_balancer_id=uuidutils.generate_uuid())
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 2)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP, amp_flow.requires)
self.assertIn(constants.UPDATED_PORTS, amp_flow.provides)
self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.BASE_PORT, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.DELTA, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(7, len(amp_flow.requires))
self.assertEqual(13, len(amp_flow.provides))
def test_get_failover_flow_standalone(self, mock_get_net_driver):
failed_amphora = data_models.Amphora(
id=uuidutils.generate_uuid(), role=constants.ROLE_STANDALONE,
load_balancer_id=uuidutils.generate_uuid(), vrrp_ip='2001:3b8::32')
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 1)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP, amp_flow.requires)
self.assertIn(constants.UPDATED_PORTS, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.BASE_PORT, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.DELTA, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(7, len(amp_flow.requires))
self.assertEqual(12, len(amp_flow.provides))
def test_get_failover_flow_bogus_role(self, mock_get_net_driver):
failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(),
role='bogus')
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 1)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(1, len(amp_flow.requires))
self.assertEqual(1, len(amp_flow.provides))
def test_cert_rotate_amphora_flow(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_rotate_flow = self.AmpFlow.cert_rotate_amphora_flow()
self.assertIsInstance(amp_rotate_flow, flow.Flow)
self.assertIn(constants.SERVER_PEM, amp_rotate_flow.provides)
self.assertIn(constants.AMPHORA, amp_rotate_flow.requires)
self.assertEqual(1, len(amp_rotate_flow.provides))
self.assertEqual(2, len(amp_rotate_flow.requires))
def test_get_vrrp_subflow(self, mock_get_net_driver):
vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123')
self.assertIsInstance(vrrp_subflow, flow.Flow)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
self.assertEqual(2, len(vrrp_subflow.provides))
self.assertEqual(2, len(vrrp_subflow.requires))
def test_get_vrrp_subflow_dont_create_vrrp_group(
self, mock_get_net_driver):
vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123',
create_vrrp_group=False)
self.assertIsInstance(vrrp_subflow, flow.Flow)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
self.assertEqual(2, len(vrrp_subflow.provides))
self.assertEqual(2, len(vrrp_subflow.requires))
def test_get_post_map_lb_subflow(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', constants.ROLE_MASTER)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', constants.ROLE_BACKUP)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', constants.ROLE_STANDALONE)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', 'BOGUS_ROLE')
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
def test_update_amphora_config_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow.update_amphora_config_flow()
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertEqual(2, len(amp_flow.requires))
self.assertEqual(0, len(amp_flow.provides))
def test_get_amphora_for_lb_failover_flow_single(self,
mock_get_net_driver):
FAILED_PORT_ID = uuidutils.generate_uuid()
TEST_PREFIX = 'test_prefix'
get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow(
TEST_PREFIX, role=constants.ROLE_STANDALONE,
failed_amp_vrrp_port_id=FAILED_PORT_ID, is_vrrp_ipv6=True)
self.assertIsInstance(get_amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, get_amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires)
self.assertIn(constants.FLAVOR, get_amp_flow.requires)
self.assertIn(constants.LOADBALANCER, get_amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires)
self.assertIn(constants.VIP, get_amp_flow.requires)
self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires)
self.assertIn(constants.UPDATED_PORTS, get_amp_flow.provides)
self.assertIn(constants.AMPHORA, get_amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides)
self.assertIn(constants.BASE_PORT, get_amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides)
self.assertIn(constants.DELTA, get_amp_flow.provides)
self.assertIn(constants.SERVER_PEM, get_amp_flow.provides)
self.assertEqual(8, len(get_amp_flow.requires), get_amp_flow.requires)
self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides)
def test_get_amphora_for_lb_failover_flow_act_stdby(self,
mock_get_net_driver):
TEST_PREFIX = 'test_prefix'
get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow(
TEST_PREFIX, role=constants.ROLE_MASTER)
self.assertIsInstance(get_amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, get_amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires)
self.assertIn(constants.FLAVOR, get_amp_flow.requires)
self.assertIn(constants.LOADBALANCER, get_amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires)
self.assertIn(constants.VIP, get_amp_flow.requires)
self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires)
self.assertIn(constants.UPDATED_PORTS, get_amp_flow.provides)
self.assertIn(constants.AMPHORA, get_amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides)
self.assertIn(constants.BASE_PORT, get_amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides)
self.assertIn(constants.DELTA, get_amp_flow.provides)
self.assertIn(constants.SERVER_PEM, get_amp_flow.provides)
self.assertEqual(8, len(get_amp_flow.requires), get_amp_flow.requires)
self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides)
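
All of these assertions exercise taskflow's symbol bookkeeping: a flow's provides is the union of the symbols its tasks store results under, while its requires is whatever the tasks consume that no earlier task in the flow supplies. A minimal, self-contained sketch of that mechanic (the task and symbol names below are illustrative, not Octavia's):

    from taskflow import task
    from taskflow.patterns import linear_flow


    class AllocatePort(task.Task):
        # The value returned by execute() is stored under this symbol.
        default_provides = 'port_id'

        def execute(self):
            return 'port-1234'


    class PlugPort(task.Task):
        def execute(self, port_id, compute_id):
            # 'port_id' is satisfied by AllocatePort above; 'compute_id'
            # is not, so it surfaces as a requirement of the whole flow.
            pass


    demo = linear_flow.Flow('demo').add(AllocatePort(), PlugPort())
    print(demo.provides)  # {'port_id'}
    print(demo.requires)  # {'compute_id'}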

View File

@ -1,72 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.controller.worker.v1.flows import health_monitor_flows
import octavia.tests.unit.base as base
class TestHealthMonitorFlows(base.TestCase):
def setUp(self):
self.HealthMonitorFlow = health_monitor_flows.HealthMonitorFlows()
super().setUp()
def test_get_create_health_monitor_flow(self):
health_mon_flow = (self.HealthMonitorFlow.
get_create_health_monitor_flow())
self.assertIsInstance(health_mon_flow, flow.Flow)
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
self.assertIn(constants.POOL, health_mon_flow.requires)
self.assertEqual(4, len(health_mon_flow.requires))
self.assertEqual(0, len(health_mon_flow.provides))
def test_get_delete_health_monitor_flow(self):
health_mon_flow = (self.HealthMonitorFlow.
get_delete_health_monitor_flow())
self.assertIsInstance(health_mon_flow, flow.Flow)
self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
self.assertIn(constants.POOL, health_mon_flow.requires)
self.assertEqual(4, len(health_mon_flow.requires))
self.assertEqual(0, len(health_mon_flow.provides))
def test_get_update_health_monitor_flow(self):
health_mon_flow = (self.HealthMonitorFlow.
get_update_health_monitor_flow())
self.assertIsInstance(health_mon_flow, flow.Flow)
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
self.assertIn(constants.UPDATE_DICT, health_mon_flow.requires)
self.assertEqual(5, len(health_mon_flow.requires))
self.assertEqual(0, len(health_mon_flow.provides))

View File

@ -1,67 +0,0 @@
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.controller.worker.v1.flows import l7policy_flows
import octavia.tests.unit.base as base
class TestL7PolicyFlows(base.TestCase):
def setUp(self):
self.L7PolicyFlow = l7policy_flows.L7PolicyFlows()
super().setUp()
def test_get_create_l7policy_flow(self):
l7policy_flow = self.L7PolicyFlow.get_create_l7policy_flow()
self.assertIsInstance(l7policy_flow, flow.Flow)
self.assertIn(constants.LISTENERS, l7policy_flow.requires)
self.assertIn(constants.LOADBALANCER, l7policy_flow.requires)
self.assertEqual(3, len(l7policy_flow.requires))
self.assertEqual(0, len(l7policy_flow.provides))
def test_get_delete_l7policy_flow(self):
l7policy_flow = self.L7PolicyFlow.get_delete_l7policy_flow()
self.assertIsInstance(l7policy_flow, flow.Flow)
self.assertIn(constants.LISTENERS, l7policy_flow.requires)
self.assertIn(constants.LOADBALANCER, l7policy_flow.requires)
self.assertIn(constants.L7POLICY, l7policy_flow.requires)
self.assertEqual(3, len(l7policy_flow.requires))
self.assertEqual(0, len(l7policy_flow.provides))
def test_get_update_l7policy_flow(self):
l7policy_flow = self.L7PolicyFlow.get_update_l7policy_flow()
self.assertIsInstance(l7policy_flow, flow.Flow)
self.assertIn(constants.L7POLICY, l7policy_flow.requires)
self.assertIn(constants.LISTENERS, l7policy_flow.requires)
self.assertIn(constants.LOADBALANCER, l7policy_flow.requires)
self.assertIn(constants.UPDATE_DICT, l7policy_flow.requires)
self.assertEqual(4, len(l7policy_flow.requires))
self.assertEqual(0, len(l7policy_flow.provides))

View File

@ -1,67 +0,0 @@
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.controller.worker.v1.flows import l7rule_flows
import octavia.tests.unit.base as base
class TestL7RuleFlows(base.TestCase):
def setUp(self):
self.L7RuleFlow = l7rule_flows.L7RuleFlows()
super().setUp()
def test_get_create_l7rule_flow(self):
l7rule_flow = self.L7RuleFlow.get_create_l7rule_flow()
self.assertIsInstance(l7rule_flow, flow.Flow)
self.assertIn(constants.LISTENERS, l7rule_flow.requires)
self.assertIn(constants.LOADBALANCER, l7rule_flow.requires)
self.assertEqual(4, len(l7rule_flow.requires))
self.assertEqual(0, len(l7rule_flow.provides))
def test_get_delete_l7rule_flow(self):
l7rule_flow = self.L7RuleFlow.get_delete_l7rule_flow()
self.assertIsInstance(l7rule_flow, flow.Flow)
self.assertIn(constants.LISTENERS, l7rule_flow.requires)
self.assertIn(constants.LOADBALANCER, l7rule_flow.requires)
self.assertIn(constants.L7RULE, l7rule_flow.requires)
self.assertEqual(4, len(l7rule_flow.requires))
self.assertEqual(0, len(l7rule_flow.provides))
def test_get_update_l7rule_flow(self):
l7rule_flow = self.L7RuleFlow.get_update_l7rule_flow()
self.assertIsInstance(l7rule_flow, flow.Flow)
self.assertIn(constants.L7RULE, l7rule_flow.requires)
self.assertIn(constants.LISTENERS, l7rule_flow.requires)
self.assertIn(constants.LOADBALANCER, l7rule_flow.requires)
self.assertIn(constants.UPDATE_DICT, l7rule_flow.requires)
self.assertEqual(5, len(l7rule_flow.requires))
self.assertEqual(0, len(l7rule_flow.provides))

View File

@ -1,91 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.controller.worker.v1.flows import listener_flows
import octavia.tests.unit.base as base
# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
@mock.patch('octavia.common.utils.get_network_driver')
class TestListenerFlows(base.TestCase):
def setUp(self):
self.ListenerFlow = listener_flows.ListenerFlows()
super().setUp()
def test_get_create_listener_flow(self, mock_get_net_driver):
listener_flow = self.ListenerFlow.get_create_listener_flow()
self.assertIsInstance(listener_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER, listener_flow.requires)
self.assertIn(constants.LISTENERS, listener_flow.requires)
self.assertEqual(2, len(listener_flow.requires))
self.assertEqual(0, len(listener_flow.provides))
def test_get_delete_listener_flow(self, mock_get_net_driver):
listener_flow = self.ListenerFlow.get_delete_listener_flow()
self.assertIsInstance(listener_flow, flow.Flow)
self.assertIn(constants.LISTENER, listener_flow.requires)
self.assertIn(constants.LOADBALANCER, listener_flow.requires)
self.assertEqual(2, len(listener_flow.requires))
self.assertEqual(0, len(listener_flow.provides))
def test_get_delete_listener_internal_flow(self, mock_get_net_driver):
listener_flow = self.ListenerFlow.get_delete_listener_internal_flow(
'test-listener')
self.assertIsInstance(listener_flow, flow.Flow)
self.assertIn('test-listener', listener_flow.requires)
self.assertIn(constants.LOADBALANCER, listener_flow.requires)
self.assertEqual(2, len(listener_flow.requires))
self.assertEqual(0, len(listener_flow.provides))
def test_get_update_listener_flow(self, mock_get_net_driver):
listener_flow = self.ListenerFlow.get_update_listener_flow()
self.assertIsInstance(listener_flow, flow.Flow)
self.assertIn(constants.LISTENER, listener_flow.requires)
self.assertIn(constants.LOADBALANCER, listener_flow.requires)
self.assertIn(constants.UPDATE_DICT, listener_flow.requires)
self.assertIn(constants.LISTENERS, listener_flow.requires)
self.assertEqual(4, len(listener_flow.requires))
self.assertEqual(0, len(listener_flow.provides))
def test_get_create_all_listeners_flow(self, mock_get_net_driver):
listeners_flow = self.ListenerFlow.get_create_all_listeners_flow()
self.assertIsInstance(listeners_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER, listeners_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, listeners_flow.requires)
self.assertIn(constants.LOADBALANCER, listeners_flow.provides)
self.assertEqual(2, len(listeners_flow.requires))
self.assertEqual(2, len(listeners_flow.provides))
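
The class-level @mock.patch('octavia.common.utils.get_network_driver') decorator used by this and the other flow test classes patches the target for every test method and injects the resulting mock as a trailing positional argument; that is why each test signature carries mock_get_net_driver even when the body never touches it. A standalone sketch of the pattern against a stdlib target (illustrative names):

    import os
    import unittest
    from unittest import mock


    @mock.patch('os.getcwd')  # applied to every test_* method in the class
    class TestClassLevelPatch(unittest.TestCase):
        def test_cwd_is_mocked(self, mock_getcwd):
            # The mock arrives as the last positional argument.
            mock_getcwd.return_value = '/fake/dir'
            self.assertEqual('/fake/dir', os.getcwd())
            mock_getcwd.assert_called_once_with()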

View File

@ -1,430 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.common import exceptions
from octavia.controller.worker.v1.flows import load_balancer_flows
import octavia.tests.unit.base as base
# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
@mock.patch('octavia.common.utils.get_network_driver')
class TestLoadBalancerFlows(base.TestCase):
def setUp(self):
super().setUp()
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
self.conf.config(
group="controller_worker",
amphora_driver='amphora_haproxy_rest_driver')
self.conf.config(group="nova", enable_anti_affinity=False)
self.LBFlow = load_balancer_flows.LoadBalancerFlows()
def test_get_create_load_balancer_flow(self, mock_get_net_driver):
amp_flow = self.LBFlow.get_create_load_balancer_flow(
constants.TOPOLOGY_SINGLE)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
def test_get_create_active_standby_load_balancer_flow(
self, mock_get_net_driver):
amp_flow = self.LBFlow.get_create_load_balancer_flow(
constants.TOPOLOGY_ACTIVE_STANDBY)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
def test_get_create_anti_affinity_active_standby_load_balancer_flow(
self, mock_get_net_driver):
self.conf.config(group="nova", enable_anti_affinity=True)
self._LBFlow = load_balancer_flows.LoadBalancerFlows()
amp_flow = self._LBFlow.get_create_load_balancer_flow(
constants.TOPOLOGY_ACTIVE_STANDBY)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.SERVER_GROUP_ID, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.conf.config(group="nova", enable_anti_affinity=False)
def test_get_create_bogus_topology_load_balancer_flow(
self, mock_get_net_driver):
self.assertRaises(exceptions.InvalidTopology,
self.LBFlow.get_create_load_balancer_flow,
'BOGUS')
def test_get_delete_load_balancer_flow(self, mock_get_net_driver):
lb_mock = mock.Mock()
listener_mock = mock.Mock()
listener_mock.id = '123'
lb_mock.listeners = [listener_mock]
lb_flow, store = self.LBFlow.get_delete_load_balancer_flow(lb_mock)
self.assertIsInstance(lb_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER, lb_flow.requires)
self.assertIn(constants.SERVER_GROUP_ID, lb_flow.requires)
self.assertEqual(0, len(lb_flow.provides))
self.assertEqual(2, len(lb_flow.requires))
def test_get_delete_load_balancer_flow_cascade(self, mock_get_net_driver):
lb_mock = mock.Mock()
listener_mock = mock.Mock()
listener_mock.id = '123'
lb_mock.listeners = [listener_mock]
pool_mock = mock.Mock()
pool_mock.id = '345'
lb_mock.pools = [pool_mock]
l7_mock = mock.Mock()
l7_mock.id = '678'
listener_mock.l7policies = [l7_mock]
lb_flow, store = self.LBFlow.get_cascade_delete_load_balancer_flow(
lb_mock)
self.assertIsInstance(lb_flow, flow.Flow)
self.assertEqual({'listener_123': listener_mock,
'pool345': pool_mock}, store)
self.assertIn(constants.LOADBALANCER, lb_flow.requires)
self.assertEqual(1, len(lb_flow.provides))
self.assertEqual(4, len(lb_flow.requires))
def test_get_update_load_balancer_flow(self, mock_get_net_driver):
lb_flow = self.LBFlow.get_update_load_balancer_flow()
self.assertIsInstance(lb_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER, lb_flow.requires)
self.assertIn(constants.UPDATE_DICT, lb_flow.requires)
self.assertEqual(0, len(lb_flow.provides))
self.assertEqual(2, len(lb_flow.requires))
def test_get_post_lb_amp_association_flow(self, mock_get_net_driver):
amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
'123', constants.TOPOLOGY_SINGLE)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
# Test Active/Standby path
amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
'123', constants.TOPOLOGY_ACTIVE_STANDBY)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertEqual(4, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
def test_get_create_load_balancer_flows_single_listeners(
self, mock_get_net_driver):
create_flow = (
self.LBFlow.get_create_load_balancer_flow(
constants.TOPOLOGY_SINGLE, True
)
)
self.assertIsInstance(create_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, create_flow.requires)
self.assertIn(constants.UPDATE_DICT, create_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, create_flow.requires)
self.assertIn(constants.FLAVOR, create_flow.requires)
self.assertIn(constants.AVAILABILITY_ZONE, create_flow.requires)
self.assertIn(constants.SERVER_GROUP_ID, create_flow.requires)
self.assertIn(constants.LISTENERS, create_flow.provides)
self.assertIn(constants.SUBNET, create_flow.provides)
self.assertIn(constants.AMPHORA, create_flow.provides)
self.assertIn(constants.AMPHORA_ID, create_flow.provides)
self.assertIn(constants.COMPUTE_ID, create_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, create_flow.provides)
self.assertIn(constants.LOADBALANCER, create_flow.provides)
self.assertIn(constants.DELTAS, create_flow.provides)
self.assertIn(constants.UPDATED_PORTS, create_flow.provides)
self.assertIn(constants.VIP, create_flow.provides)
self.assertIn(constants.AMP_DATA, create_flow.provides)
self.assertIn(constants.SERVER_PEM, create_flow.provides)
self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, create_flow.provides)
self.assertEqual(6, len(create_flow.requires))
self.assertEqual(14, len(create_flow.provides))
def test_get_create_load_balancer_flows_active_standby_listeners(
self, mock_get_net_driver):
create_flow = (
self.LBFlow.get_create_load_balancer_flow(
constants.TOPOLOGY_ACTIVE_STANDBY, True
)
)
self.assertIsInstance(create_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, create_flow.requires)
self.assertIn(constants.UPDATE_DICT, create_flow.requires)
self.assertIn(constants.LISTENERS, create_flow.provides)
self.assertIn(constants.AMPHORA, create_flow.provides)
self.assertIn(constants.AMPHORA_ID, create_flow.provides)
self.assertIn(constants.COMPUTE_ID, create_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, create_flow.provides)
self.assertIn(constants.LOADBALANCER, create_flow.provides)
self.assertIn(constants.DELTAS, create_flow.provides)
self.assertIn(constants.UPDATED_PORTS, create_flow.provides)
self.assertIn(constants.VIP, create_flow.provides)
self.assertIn(constants.AMP_DATA, create_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
create_flow.provides)
self.assertEqual(6, len(create_flow.requires))
self.assertEqual(16, len(create_flow.provides),
create_flow.provides)
def _test_get_failover_LB_flow_single(self, amphorae):
lb_mock = mock.MagicMock()
lb_mock.id = uuidutils.generate_uuid()
lb_mock.topology = constants.TOPOLOGY_SINGLE
failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock)
self.assertIsInstance(failover_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires)
self.assertIn(constants.FLAVOR, failover_flow.requires)
self.assertIn(constants.LOADBALANCER, failover_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires)
self.assertIn(constants.UPDATED_PORTS, failover_flow.provides)
self.assertIn(constants.AMPHORA, failover_flow.provides)
self.assertIn(constants.AMPHORA_ID, failover_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
failover_flow.provides)
self.assertIn(constants.BASE_PORT, failover_flow.provides)
self.assertIn(constants.COMPUTE_ID, failover_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides)
self.assertIn(constants.DELTA, failover_flow.provides)
self.assertIn(constants.LOADBALANCER, failover_flow.provides)
self.assertIn(constants.SERVER_PEM, failover_flow.provides)
self.assertIn(constants.VIP, failover_flow.provides)
self.assertIn(constants.VIP_SG_ID, failover_flow.provides)
self.assertEqual(6, len(failover_flow.requires),
failover_flow.requires)
self.assertEqual(12, len(failover_flow.provides),
failover_flow.provides)
def test_get_failover_LB_flow_no_amps_single(self, mock_get_net_driver):
self._test_get_failover_LB_flow_single([])
def test_get_failover_LB_flow_one_amp_single(self, mock_get_net_driver):
amphora_mock = mock.MagicMock()
amphora_mock.role = constants.ROLE_STANDALONE
amphora_mock.lb_network_id = uuidutils.generate_uuid()
amphora_mock.compute_id = uuidutils.generate_uuid()
amphora_mock.vrrp_port_id = None
amphora_mock.vrrp_ip = None
self._test_get_failover_LB_flow_single([amphora_mock])
def test_get_failover_LB_flow_one_bogus_amp_single(self,
mock_get_net_driver):
amphora_mock = mock.MagicMock()
amphora_mock.role = 'bogus'
amphora_mock.lb_network_id = uuidutils.generate_uuid()
amphora_mock.compute_id = uuidutils.generate_uuid()
amphora_mock.vrrp_port_id = None
amphora_mock.vrrp_ip = None
self._test_get_failover_LB_flow_single([amphora_mock])
def test_get_failover_LB_flow_two_amp_single(self, mock_get_net_driver):
amphora_mock = mock.MagicMock()
amphora2_mock = mock.MagicMock()
amphora2_mock.role = constants.ROLE_STANDALONE
amphora2_mock.lb_network_id = uuidutils.generate_uuid()
amphora2_mock.compute_id = uuidutils.generate_uuid()
amphora2_mock.vrrp_port_id = None
amphora2_mock.vrrp_ip = None
self._test_get_failover_LB_flow_single([amphora_mock, amphora2_mock])
def _test_get_failover_LB_flow_no_amps_act_stdby(self, amphorae):
lb_mock = mock.MagicMock()
lb_mock.id = uuidutils.generate_uuid()
lb_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY
failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock)
self.assertIsInstance(failover_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires)
self.assertIn(constants.FLAVOR, failover_flow.requires)
self.assertIn(constants.LOADBALANCER, failover_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires)
self.assertIn(constants.UPDATED_PORTS, failover_flow.provides)
self.assertIn(constants.AMP_VRRP_INT, failover_flow.provides)
self.assertIn(constants.AMPHORA, failover_flow.provides)
self.assertIn(constants.AMPHORA_ID, failover_flow.provides)
self.assertIn(constants.AMPHORAE, failover_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
failover_flow.provides)
self.assertIn(constants.BASE_PORT, failover_flow.provides)
self.assertIn(constants.COMPUTE_ID, failover_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides)
self.assertIn(constants.DELTA, failover_flow.provides)
self.assertIn(constants.FIRST_AMP_NETWORK_CONFIGS,
failover_flow.provides)
self.assertIn(constants.FIRST_AMP_VRRP_INTERFACE,
failover_flow.provides)
self.assertIn(constants.LOADBALANCER, failover_flow.provides)
self.assertIn(constants.SERVER_PEM, failover_flow.provides)
self.assertIn(constants.VIP, failover_flow.provides)
self.assertIn(constants.VIP_SG_ID, failover_flow.provides)
self.assertEqual(6, len(failover_flow.requires),
failover_flow.requires)
self.assertEqual(16, len(failover_flow.provides),
failover_flow.provides)
def test_get_failover_LB_flow_no_amps_act_stdby(self, mock_get_net_driver):
self._test_get_failover_LB_flow_no_amps_act_stdby([])
def test_get_failover_LB_flow_one_amps_act_stdby(self, mock_get_net_driver):
amphora_mock = mock.MagicMock()
amphora_mock.role = constants.ROLE_MASTER
amphora_mock.lb_network_id = uuidutils.generate_uuid()
amphora_mock.compute_id = uuidutils.generate_uuid()
amphora_mock.vrrp_port_id = None
amphora_mock.vrrp_ip = None
self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock])
def test_get_failover_LB_flow_two_amps_act_stdby(self,
mock_get_net_driver):
amphora_mock = mock.MagicMock()
amphora_mock.role = constants.ROLE_MASTER
amphora_mock.lb_network_id = uuidutils.generate_uuid()
amphora_mock.compute_id = uuidutils.generate_uuid()
amphora_mock.vrrp_port_id = uuidutils.generate_uuid()
amphora_mock.vrrp_ip = '192.0.2.46'
amphora2_mock = mock.MagicMock()
amphora2_mock.role = constants.ROLE_BACKUP
amphora2_mock.lb_network_id = uuidutils.generate_uuid()
amphora2_mock.compute_id = uuidutils.generate_uuid()
amphora2_mock.vrrp_port_id = uuidutils.generate_uuid()
amphora2_mock.vrrp_ip = '2001:db8::46'
self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock,
amphora2_mock])
def test_get_failover_LB_flow_three_amps_act_stdby(self,
mock_get_net_driver):
amphora_mock = mock.MagicMock()
amphora_mock.role = constants.ROLE_MASTER
amphora_mock.lb_network_id = uuidutils.generate_uuid()
amphora_mock.compute_id = uuidutils.generate_uuid()
amphora_mock.vrrp_port_id = uuidutils.generate_uuid()
amphora_mock.vrrp_ip = '192.0.2.46'
amphora2_mock = mock.MagicMock()
amphora2_mock.role = constants.ROLE_BACKUP
amphora2_mock.lb_network_id = uuidutils.generate_uuid()
amphora2_mock.compute_id = uuidutils.generate_uuid()
amphora2_mock.vrrp_port_id = uuidutils.generate_uuid()
amphora2_mock.vrrp_ip = '2001:db8::46'
amphora3_mock = mock.MagicMock()
amphora3_mock.vrrp_ip = None
self._test_get_failover_LB_flow_no_amps_act_stdby(
[amphora_mock, amphora2_mock, amphora3_mock])
def test_get_failover_LB_flow_two_amps_bogus_act_stdby(
self, mock_get_net_driver):
amphora_mock = mock.MagicMock()
amphora_mock.role = 'bogus'
amphora_mock.lb_network_id = uuidutils.generate_uuid()
amphora_mock.compute_id = uuidutils.generate_uuid()
amphora_mock.vrrp_port_id = uuidutils.generate_uuid()
amphora_mock.vrrp_ip = '192.0.2.46'
amphora2_mock = mock.MagicMock()
amphora2_mock.role = constants.ROLE_MASTER
amphora2_mock.lb_network_id = uuidutils.generate_uuid()
amphora2_mock.compute_id = uuidutils.generate_uuid()
amphora2_mock.vrrp_port_id = uuidutils.generate_uuid()
amphora2_mock.vrrp_ip = '2001:db8::46'
self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock,
amphora2_mock])
def test_get_failover_LB_flow_two_amps_standalone_act_stdby(
self, mock_get_net_driver):
amphora_mock = mock.MagicMock()
amphora_mock.role = constants.ROLE_STANDALONE
amphora_mock.lb_network_id = uuidutils.generate_uuid()
amphora_mock.compute_id = uuidutils.generate_uuid()
amphora_mock.vrrp_port_id = uuidutils.generate_uuid()
amphora_mock.vrrp_ip = '192.0.2.46'
amphora2_mock = mock.MagicMock()
amphora2_mock.role = constants.ROLE_MASTER
amphora2_mock.lb_network_id = uuidutils.generate_uuid()
amphora2_mock.compute_id = uuidutils.generate_uuid()
amphora2_mock.vrrp_port_id = uuidutils.generate_uuid()
amphora2_mock.vrrp_ip = '2001:db8::46'
self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock,
amphora2_mock])
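
The anti-affinity variants above override configuration through the oslo.config fixture and then flip the value back explicitly; the fixture would also undo any override during test cleanup. A minimal sketch of that pattern (the option is registered locally here purely for illustration):

    import testtools
    from oslo_config import cfg
    from oslo_config import fixture as oslo_fixture

    CONF = cfg.CONF
    CONF.register_opts(
        [cfg.BoolOpt('enable_anti_affinity', default=False)], group='nova')


    class TestConfigOverride(testtools.TestCase):
        def test_override_is_scoped_to_the_test(self):
            conf = self.useFixture(oslo_fixture.Config(CONF))
            conf.config(group='nova', enable_anti_affinity=True)
            self.assertTrue(CONF.nova.enable_anti_affinity)
            # The fixture's cleanup restores the default (False) afterwards.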

View File

@ -1,106 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.controller.worker.v1.flows import member_flows
import octavia.tests.unit.base as base
# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
@mock.patch('octavia.common.utils.get_network_driver')
class TestMemberFlows(base.TestCase):
def setUp(self):
self.MemberFlow = member_flows.MemberFlows()
super().setUp()
def test_get_create_member_flow(self, mock_get_net_driver):
member_flow = self.MemberFlow.get_create_member_flow()
self.assertIsInstance(member_flow, flow.Flow)
self.assertIn(constants.MEMBER, member_flow.requires)
self.assertIn(constants.LISTENERS, member_flow.requires)
self.assertIn(constants.LOADBALANCER, member_flow.requires)
self.assertIn(constants.POOL, member_flow.requires)
self.assertIn(constants.AVAILABILITY_ZONE, member_flow.requires)
self.assertIn(constants.DELTAS, member_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, member_flow.provides)
self.assertIn(constants.UPDATED_PORTS, member_flow.provides)
self.assertEqual(6, len(member_flow.requires))
self.assertEqual(3, len(member_flow.provides))
def test_get_delete_member_flow(self, mock_get_net_driver):
member_flow = self.MemberFlow.get_delete_member_flow()
self.assertIsInstance(member_flow, flow.Flow)
self.assertIn(constants.MEMBER, member_flow.requires)
self.assertIn(constants.LISTENERS, member_flow.requires)
self.assertIn(constants.LOADBALANCER, member_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, member_flow.requires)
self.assertIn(constants.POOL, member_flow.requires)
self.assertIn(constants.AVAILABILITY_ZONE, member_flow.requires)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, member_flow.provides)
self.assertIn(constants.DELTAS, member_flow.provides)
self.assertIn(constants.UPDATED_PORTS, member_flow.provides)
self.assertEqual(6, len(member_flow.requires))
self.assertEqual(3, len(member_flow.provides))
def test_get_update_member_flow(self, mock_get_net_driver):
member_flow = self.MemberFlow.get_update_member_flow()
self.assertIsInstance(member_flow, flow.Flow)
self.assertIn(constants.MEMBER, member_flow.requires)
self.assertIn(constants.LISTENERS, member_flow.requires)
self.assertIn(constants.LOADBALANCER, member_flow.requires)
self.assertIn(constants.POOL, member_flow.requires)
self.assertIn(constants.UPDATE_DICT, member_flow.requires)
self.assertEqual(5, len(member_flow.requires))
self.assertEqual(0, len(member_flow.provides))
def test_get_batch_update_members_flow(self, mock_get_net_driver):
member_flow = self.MemberFlow.get_batch_update_members_flow(
[], [], [])
self.assertIsInstance(member_flow, flow.Flow)
self.assertIn(constants.LISTENERS, member_flow.requires)
self.assertIn(constants.LOADBALANCER, member_flow.requires)
self.assertIn(constants.POOL, member_flow.requires)
self.assertIn(constants.AVAILABILITY_ZONE, member_flow.requires)
self.assertIn(constants.DELTAS, member_flow.provides)
self.assertIn(constants.UPDATED_PORTS, member_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, member_flow.provides)
self.assertEqual(5, len(member_flow.requires))
self.assertEqual(3, len(member_flow.provides))

View File

@ -1,77 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.controller.worker.v1.flows import pool_flows
import octavia.tests.unit.base as base
class TestPoolFlows(base.TestCase):
def setUp(self):
self.PoolFlow = pool_flows.PoolFlows()
super().setUp()
def test_get_create_pool_flow(self):
pool_flow = self.PoolFlow.get_create_pool_flow()
self.assertIsInstance(pool_flow, flow.Flow)
self.assertIn(constants.LISTENERS, pool_flow.requires)
self.assertIn(constants.LOADBALANCER, pool_flow.requires)
self.assertEqual(3, len(pool_flow.requires))
self.assertEqual(0, len(pool_flow.provides))
def test_get_delete_pool_flow(self):
pool_flow = self.PoolFlow.get_delete_pool_flow()
self.assertIsInstance(pool_flow, flow.Flow)
self.assertIn(constants.LISTENERS, pool_flow.requires)
self.assertIn(constants.LOADBALANCER, pool_flow.requires)
self.assertIn(constants.POOL, pool_flow.requires)
self.assertEqual(3, len(pool_flow.requires))
self.assertEqual(1, len(pool_flow.provides))
def test_get_delete_pool_flow_internal(self):
pool_flow = self.PoolFlow.get_delete_pool_flow_internal('test')
self.assertIsInstance(pool_flow, flow.Flow)
self.assertIn('test', pool_flow.requires)
self.assertEqual(1, len(pool_flow.requires))
self.assertEqual(1, len(pool_flow.provides))
def test_get_update_pool_flow(self):
pool_flow = self.PoolFlow.get_update_pool_flow()
self.assertIsInstance(pool_flow, flow.Flow)
self.assertIn(constants.POOL, pool_flow.requires)
self.assertIn(constants.LISTENERS, pool_flow.requires)
self.assertIn(constants.LOADBALANCER, pool_flow.requires)
self.assertIn(constants.UPDATE_DICT, pool_flow.requires)
self.assertEqual(4, len(pool_flow.requires))
self.assertEqual(0, len(pool_flow.provides))
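
Several revert tests in the final file of this section (test_amphora_finalize, for example) pass a taskflow Failure to simulate the engine reporting that the task's own execute() raised, in which case revert should skip its rollback work. A sketch of that contract with a hypothetical task:

    from taskflow import task
    from taskflow.types import failure


    class FinalizeThing(task.Task):
        def execute(self, thing_id):
            raise RuntimeError('boom')

        def revert(self, result, thing_id, **kwargs):
            # When execute() itself failed, 'result' is a Failure rather
            # than a return value; skip cleanup that presumes success.
            if isinstance(result, failure.Failure):
                print('execute failed:', result.exception_str)
                return
            print('rolling back', thing_id)


    # Simulating what the engine hands to revert() after a failure:
    f = failure.Failure.from_exception(RuntimeError('boom'))
    FinalizeThing().revert(f, 'thing-1')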

View File

@ -1,11 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
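
The file below stacks seven mock.patch decorators on TestAmphoraDriverTasks. Stacked patches are injected bottom-up: the decorator nearest the class (stevedore.driver.DriverManager.driver) becomes the first extra parameter (mock_driver) and the outermost one (AmphoraRepository.update) the last. A compact illustration with stdlib targets (names are hypothetical):

    import os
    import unittest
    from unittest import mock


    @mock.patch('os.path.exists')  # outermost patch -> last injected argument
    @mock.patch('os.getcwd')       # innermost patch -> first injected argument
    class TestStackedPatches(unittest.TestCase):
        def test_bottom_up_injection(self, mock_getcwd, mock_exists):
            mock_getcwd.return_value = '/fake/dir'
            mock_exists.return_value = True
            self.assertTrue(os.path.exists(os.getcwd()))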

View File

@ -1,792 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from cryptography import fernet
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.types import failure
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.db import repositories as repo
import octavia.tests.unit.base as base
AMP_ID = uuidutils.generate_uuid()
COMPUTE_ID = uuidutils.generate_uuid()
LISTENER_ID = uuidutils.generate_uuid()
LB_ID = uuidutils.generate_uuid()
CONN_MAX_RETRIES = 10
CONN_RETRY_INTERVAL = 6
FAKE_CONFIG_FILE = 'fake config file'
_amphora_mock = mock.MagicMock()
_amphora_mock.id = AMP_ID
_amphora_mock.status = constants.AMPHORA_ALLOCATED
_amphora_mock.vrrp_ip = '198.51.100.65'
_load_balancer_mock = mock.MagicMock()
_load_balancer_mock.id = LB_ID
_listener_mock = mock.MagicMock()
_listener_mock.id = LISTENER_ID
_load_balancer_mock.listeners = [_listener_mock]
_vip_mock = mock.MagicMock()
_load_balancer_mock.vip = _vip_mock
_LB_mock = mock.MagicMock()
_amphorae_mock = [_amphora_mock]
_amphora_network_config_mock = mock.MagicMock()
_amphorae_network_config_mock = {
_amphora_mock.id: _amphora_network_config_mock}
_network_mock = mock.MagicMock()
_port_mock = mock.MagicMock()
_ports_mock = [_port_mock]
_session_mock = mock.MagicMock()
@mock.patch('octavia.db.repositories.AmphoraRepository.update')
@mock.patch('octavia.db.repositories.ListenerRepository.update')
@mock.patch('octavia.db.repositories.ListenerRepository.get',
return_value=_listener_mock)
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
@mock.patch('octavia.controller.worker.v1.tasks.amphora_driver_tasks.LOG')
@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID)
@mock.patch('stevedore.driver.DriverManager.driver')
class TestAmphoraDriverTasks(base.TestCase):
def setUp(self):
_LB_mock.amphorae = [_amphora_mock]
_LB_mock.id = LB_ID
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="haproxy_amphora",
active_connection_max_retries=CONN_MAX_RETRIES)
conf.config(group="haproxy_amphora",
active_connection_retry_interval=CONN_RETRY_INTERVAL)
conf.config(group="controller_worker",
loadbalancer_topology=constants.TOPOLOGY_SINGLE)
self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
constants.REQ_READ_TIMEOUT: 2,
constants.CONN_MAX_RETRIES: 3,
constants.CONN_RETRY_INTERVAL: 4}
super().setUp()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amp_listeners_update(self,
mock_lb_repo_get,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
mock_lb_repo_get.return_value = _LB_mock
amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate()
amp_list_update_obj.execute(_load_balancer_mock, _amphora_mock,
self.timeout_dict)
mock_driver.update_amphora_listeners.assert_called_once_with(
_LB_mock, _amphora_mock, self.timeout_dict)
mock_driver.update_amphora_listeners.side_effect = Exception('boom')
amp_list_update_obj.execute(_load_balancer_mock, _amphora_mock,
self.timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, AMP_ID, status=constants.ERROR)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphorae_listeners_update(self,
mock_lb_repo_get,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
mock_lb_repo_get.return_value = _LB_mock
amp_list_update_obj = amphora_driver_tasks.AmphoraIndexListenerUpdate()
amp_list_update_obj.execute(_load_balancer_mock, 0,
[_amphora_mock], self.timeout_dict)
mock_driver.update_amphora_listeners.assert_called_once_with(
_LB_mock, _amphora_mock, self.timeout_dict)
mock_driver.update_amphora_listeners.side_effect = Exception('boom')
amp_list_update_obj.execute(_load_balancer_mock, 0,
[_amphora_mock], self.timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, AMP_ID, status=constants.ERROR)
def test_listener_update(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
listener_update_obj = amphora_driver_tasks.ListenersUpdate()
listener_update_obj.execute(_load_balancer_mock)
mock_driver.update.assert_called_once_with(_load_balancer_mock)
# Test the revert
amp = listener_update_obj.revert(_load_balancer_mock)
repo.ListenerRepository.update.assert_called_once_with(
_session_mock,
id=LISTENER_ID,
provisioning_status=constants.ERROR)
self.assertIsNone(amp)
# Test the revert with exception
repo.ListenerRepository.update.reset_mock()
mock_listener_repo_update.side_effect = Exception('fail')
amp = listener_update_obj.revert(_load_balancer_mock)
repo.ListenerRepository.update.assert_called_once_with(
_session_mock,
id=LISTENER_ID,
provisioning_status=constants.ERROR)
self.assertIsNone(amp)
def test_listeners_update(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
listeners_update_obj = amphora_driver_tasks.ListenersUpdate()
listeners = [data_models.Listener(id='listener1'),
data_models.Listener(id='listener2')]
vip = data_models.Vip(ip_address='10.0.0.1')
lb = data_models.LoadBalancer(id='lb1', listeners=listeners, vip=vip)
listeners_update_obj.execute(lb)
mock_driver.update.assert_called_once_with(lb)
self.assertEqual(1, mock_driver.update.call_count)
# Test the revert
amp = listeners_update_obj.revert(lb)
expected_db_calls = [mock.call(_session_mock,
id=listeners[0].id,
provisioning_status=constants.ERROR),
mock.call(_session_mock,
id=listeners[1].id,
provisioning_status=constants.ERROR)]
repo.ListenerRepository.update.assert_has_calls(expected_db_calls)
self.assertEqual(2, repo.ListenerRepository.update.call_count)
self.assertIsNone(amp)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error')
def test_amphora_index_listeners_reload(
self, mock_prov_status_error, mock_driver, mock_generate_uuid,
mock_log, mock_get_session, mock_listener_repo_get,
mock_listener_repo_update, mock_amphora_repo_update):
amphora_mock = mock.MagicMock()
listeners_reload_obj = (
amphora_driver_tasks.AmphoraIndexListenersReload())
mock_lb = mock.MagicMock()
mock_listener = mock.MagicMock()
mock_listener.id = '12345'
mock_driver.reload.side_effect = [mock.DEFAULT, Exception('boom')]
# Test no listeners
mock_lb.listeners = None
listeners_reload_obj.execute(mock_lb, 0, None)
mock_driver.reload.assert_not_called()
# Test with listeners
mock_driver.reload.reset_mock()
mock_lb.listeners = [mock_listener]
listeners_reload_obj.execute(mock_lb, 0, [amphora_mock],
timeout_dict=self.timeout_dict)
mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock,
self.timeout_dict)
# Test with reload exception
mock_driver.reload.reset_mock()
listeners_reload_obj.execute(mock_lb, 0, [amphora_mock],
timeout_dict=self.timeout_dict)
mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock,
self.timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, amphora_mock.id, status=constants.ERROR)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error')
def test_listeners_start(self,
mock_prov_status_error,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
listeners_start_obj = amphora_driver_tasks.ListenersStart()
mock_lb = mock.MagicMock()
mock_listener = mock.MagicMock()
mock_listener.id = '12345'
# Test no listeners
mock_lb.listeners = None
listeners_start_obj.execute(mock_lb)
mock_driver.start.assert_not_called()
# Test with listeners
mock_driver.start.reset_mock()
mock_lb.listeners = [mock_listener]
listeners_start_obj.execute(mock_lb)
mock_driver.start.assert_called_once_with(mock_lb, None)
# Test revert
mock_lb.listeners = [mock_listener]
listeners_start_obj.revert(mock_lb)
mock_prov_status_error.assert_called_once_with('12345')
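    # ListenerDelete mirrors ListenersUpdate: the driver performs the
    # delete, and revert flags the listener ERROR even when the repository
    # update itself raises.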
def test_listener_delete(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
listener_delete_obj = amphora_driver_tasks.ListenerDelete()
listener_delete_obj.execute(_listener_mock)
mock_driver.delete.assert_called_once_with(_listener_mock)
# Test the revert
amp = listener_delete_obj.revert(_listener_mock)
repo.ListenerRepository.update.assert_called_once_with(
_session_mock,
id=LISTENER_ID,
provisioning_status=constants.ERROR)
self.assertIsNone(amp)
# Test the revert with exception
repo.ListenerRepository.update.reset_mock()
mock_listener_repo_update.side_effect = Exception('fail')
amp = listener_delete_obj.revert(_listener_mock)
repo.ListenerRepository.update.assert_called_once_with(
_session_mock,
id=LISTENER_ID,
provisioning_status=constants.ERROR)
self.assertIsNone(amp)
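    # The next two tasks are thin pass-throughs to the amphora driver.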
def test_amphora_get_info(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphora_get_info_obj = amphora_driver_tasks.AmphoraGetInfo()
amphora_get_info_obj.execute(_amphora_mock)
mock_driver.get_info.assert_called_once_with(
_amphora_mock)
def test_amphora_get_diagnostics(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphora_get_diagnostics_obj = (amphora_driver_tasks.
AmphoraGetDiagnostics())
amphora_get_diagnostics_obj.execute(_amphora_mock)
mock_driver.get_diagnostics.assert_called_once_with(
_amphora_mock)
def test_amphora_finalize(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphora_finalize_obj = amphora_driver_tasks.AmphoraFinalize()
amphora_finalize_obj.execute(_amphora_mock)
mock_driver.finalize_amphora.assert_called_once_with(
_amphora_mock)
# Test revert
amp = amphora_finalize_obj.revert(None, _amphora_mock)
repo.AmphoraRepository.update.assert_called_once_with(
_session_mock,
id=AMP_ID,
status=constants.ERROR)
self.assertIsNone(amp)
# Test revert with exception
repo.AmphoraRepository.update.reset_mock()
mock_amphora_repo_update.side_effect = Exception('fail')
amp = amphora_finalize_obj.revert(None, _amphora_mock)
repo.AmphoraRepository.update.assert_called_once_with(
_session_mock,
id=AMP_ID,
status=constants.ERROR)
self.assertIsNone(amp)
# Test revert when this task failed
repo.AmphoraRepository.update.reset_mock()
amp = amphora_finalize_obj.revert(
failure.Failure.from_exception(Exception('boom')), _amphora_mock)
repo.AmphoraRepository.update.assert_not_called()
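    # The plug tasks below follow the same three-branch revert contract as
    # AmphoraFinalize above: revert() receives this task's result, or a
    # taskflow Failure when this task itself raised.
    #   1. another task failed -> mark the amphora ERROR in the database
    #   2. the repository update raises -> the error is swallowed
    #   3. this task raised (result is a Failure) -> no database update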
def test_amphora_post_network_plug(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphora_post_network_plug_obj = (amphora_driver_tasks.
AmphoraPostNetworkPlug())
amphora_post_network_plug_obj.execute(_amphora_mock, _ports_mock,
_amphora_network_config_mock)
(mock_driver.post_network_plug.
assert_called_once_with)(_amphora_mock, _port_mock,
_amphora_network_config_mock)
# Test revert
amp = amphora_post_network_plug_obj.revert(None, _amphora_mock)
repo.AmphoraRepository.update.assert_called_once_with(
_session_mock,
id=AMP_ID,
status=constants.ERROR)
self.assertIsNone(amp)
# Test revert with exception
repo.AmphoraRepository.update.reset_mock()
mock_amphora_repo_update.side_effect = Exception('fail')
amp = amphora_post_network_plug_obj.revert(None, _amphora_mock)
repo.AmphoraRepository.update.assert_called_once_with(
_session_mock,
id=AMP_ID,
status=constants.ERROR)
self.assertIsNone(amp)
# Test revert when this task failed
repo.AmphoraRepository.update.reset_mock()
amp = amphora_post_network_plug_obj.revert(
failure.Failure.from_exception(Exception('boom')), _amphora_mock)
repo.AmphoraRepository.update.assert_not_called()
@mock.patch('octavia.db.repositories.AmphoraRepository.get_all')
def test_amphorae_post_network_plug(self, mock_amp_get_all, mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
mock_driver.get_network.return_value = _network_mock
_amphora_mock.id = AMP_ID
_amphora_mock.compute_id = COMPUTE_ID
mock_amp_get_all.return_value = [[_amphora_mock], None]
amphora_post_network_plug_obj = (amphora_driver_tasks.
AmphoraePostNetworkPlug())
port_mock = mock.Mock()
_deltas_mock = {_amphora_mock.id: [port_mock]}
amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock,
_amphorae_network_config_mock)
(mock_driver.post_network_plug.
assert_called_once_with(_amphora_mock, port_mock,
_amphora_network_config_mock))
        # Test with a delta keyed to an unknown amphora, so nothing is plugged
mock_driver.post_network_plug.reset_mock()
_deltas_mock = {'0': [port_mock]}
amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock,
_amphora_network_config_mock)
mock_driver.post_network_plug.assert_not_called()
# Test revert
amp = amphora_post_network_plug_obj.revert(None, _LB_mock,
_deltas_mock)
repo.AmphoraRepository.update.assert_called_once_with(
_session_mock,
id=AMP_ID,
status=constants.ERROR)
self.assertIsNone(amp)
# Test revert with exception
repo.AmphoraRepository.update.reset_mock()
mock_amphora_repo_update.side_effect = Exception('fail')
amp = amphora_post_network_plug_obj.revert(None, _LB_mock,
_deltas_mock)
repo.AmphoraRepository.update.assert_called_once_with(
_session_mock,
id=AMP_ID,
status=constants.ERROR)
self.assertIsNone(amp)
# Test revert when this task failed
repo.AmphoraRepository.update.reset_mock()
amp = amphora_post_network_plug_obj.revert(
failure.Failure.from_exception(Exception('boom')), _amphora_mock,
None)
repo.AmphoraRepository.update.assert_not_called()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_amphora_post_vip_plug(self,
mock_loadbalancer_repo_update,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphorae_net_config_mock = mock.Mock()
amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraPostVIPPlug()
amphora_post_vip_plug_obj.execute(_amphora_mock,
_LB_mock,
amphorae_net_config_mock)
mock_driver.post_vip_plug.assert_called_once_with(
_amphora_mock, _LB_mock, amphorae_net_config_mock)
# Test revert
amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock)
repo.AmphoraRepository.update.assert_called_once_with(
_session_mock,
id=AMP_ID,
status=constants.ERROR)
repo.LoadBalancerRepository.update.assert_not_called()
self.assertIsNone(amp)
# Test revert with repo exceptions
repo.AmphoraRepository.update.reset_mock()
repo.LoadBalancerRepository.update.reset_mock()
mock_amphora_repo_update.side_effect = Exception('fail')
mock_loadbalancer_repo_update.side_effect = Exception('fail')
amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock)
repo.AmphoraRepository.update.assert_called_once_with(
_session_mock,
id=AMP_ID,
status=constants.ERROR)
repo.LoadBalancerRepository.update.assert_not_called()
self.assertIsNone(amp)
# Test revert when this task failed
repo.AmphoraRepository.update.reset_mock()
amp = amphora_post_vip_plug_obj.revert(
failure.Failure.from_exception(Exception('boom')), _amphora_mock,
None)
repo.AmphoraRepository.update.assert_not_called()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_amphorae_post_vip_plug(self,
mock_loadbalancer_repo_update,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphorae_net_config_mock = mock.Mock()
amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraePostVIPPlug()
amphora_post_vip_plug_obj.execute(_LB_mock,
amphorae_net_config_mock)
mock_driver.post_vip_plug.assert_called_once_with(
_amphora_mock, _LB_mock, amphorae_net_config_mock)
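    # The controller passes the PEM bundle to this task fernet-encrypted
    # with the server_certs_key_passphrase; AmphoraCertUpload must decrypt
    # it before handing it to the amphora agent.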
def test_amphora_cert_upload(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
key = utils.get_compatible_server_certs_key_passphrase()
fer = fernet.Fernet(key)
pem_file_mock = fer.encrypt(
utils.get_compatible_value('test-pem-file'))
amphora_cert_upload_mock = amphora_driver_tasks.AmphoraCertUpload()
amphora_cert_upload_mock.execute(_amphora_mock, pem_file_mock)
mock_driver.upload_cert_amp.assert_called_once_with(
_amphora_mock, fer.decrypt(pem_file_mock))
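    # The two-item side_effect drives a success pass (the interface name is
    # persisted) followed by a failure pass (the amphora is marked ERROR).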
def test_amphora_update_vrrp_interface(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
FAKE_INTERFACE = 'fake0'
_LB_mock.amphorae = _amphorae_mock
mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE,
Exception('boom')]
timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES,
constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL}
amphora_update_vrrp_interface_obj = (
amphora_driver_tasks.AmphoraUpdateVRRPInterface())
amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict)
mock_driver.get_interface_from_ip.assert_called_once_with(
_amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _amphora_mock.id, vrrp_interface=FAKE_INTERFACE)
# Test with an exception
mock_amphora_repo_update.reset_mock()
amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _amphora_mock.id, status=constants.ERROR)
def test_amphora_index_update_vrrp_interface(
self, mock_driver, mock_generate_uuid, mock_log, mock_get_session,
mock_listener_repo_get, mock_listener_repo_update,
mock_amphora_repo_update):
FAKE_INTERFACE = 'fake0'
_LB_mock.amphorae = _amphorae_mock
mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE,
Exception('boom')]
timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES,
constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL}
amphora_update_vrrp_interface_obj = (
amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface())
amphora_update_vrrp_interface_obj.execute(
0, [_amphora_mock], timeout_dict)
mock_driver.get_interface_from_ip.assert_called_once_with(
_amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _amphora_mock.id, vrrp_interface=FAKE_INTERFACE)
# Test with an exception
mock_amphora_repo_update.reset_mock()
amphora_update_vrrp_interface_obj.execute(
0, [_amphora_mock], timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _amphora_mock.id, status=constants.ERROR)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_vrrp_update(self,
mock_lb_get,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphorae_network_config = mock.MagicMock()
mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT,
Exception('boom')]
mock_lb_get.return_value = _LB_mock
amphora_vrrp_update_obj = (
amphora_driver_tasks.AmphoraVRRPUpdate())
amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config,
_amphora_mock, 'fakeint0')
mock_driver.update_vrrp_conf.assert_called_once_with(
_LB_mock, amphorae_network_config, _amphora_mock, None)
# Test with an exception
mock_amphora_repo_update.reset_mock()
amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config,
_amphora_mock, 'fakeint0')
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _amphora_mock.id, status=constants.ERROR)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_index_vrrp_update(self,
mock_lb_get,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphorae_network_config = mock.MagicMock()
mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT,
Exception('boom')]
mock_lb_get.return_value = _LB_mock
amphora_vrrp_update_obj = (
amphora_driver_tasks.AmphoraIndexVRRPUpdate())
amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config,
0, [_amphora_mock], 'fakeint0',
timeout_dict=self.timeout_dict)
mock_driver.update_vrrp_conf.assert_called_once_with(
_LB_mock, amphorae_network_config, _amphora_mock,
self.timeout_dict)
# Test with an exception
mock_amphora_repo_update.reset_mock()
amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config,
0, [_amphora_mock], 'fakeint0')
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _amphora_mock.id, status=constants.ERROR)
def test_amphora_vrrp_start(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphora_vrrp_start_obj = (
amphora_driver_tasks.AmphoraVRRPStart())
amphora_vrrp_start_obj.execute(_amphora_mock,
timeout_dict=self.timeout_dict)
mock_driver.start_vrrp_service.assert_called_once_with(
_amphora_mock, self.timeout_dict)
def test_amphora_index_vrrp_start(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphora_vrrp_start_obj = (
amphora_driver_tasks.AmphoraIndexVRRPStart())
mock_driver.start_vrrp_service.side_effect = [mock.DEFAULT,
Exception('boom')]
amphora_vrrp_start_obj.execute(0, [_amphora_mock],
timeout_dict=self.timeout_dict)
mock_driver.start_vrrp_service.assert_called_once_with(
_amphora_mock, self.timeout_dict)
# Test with a start exception
mock_driver.start_vrrp_service.reset_mock()
amphora_vrrp_start_obj.execute(0, [_amphora_mock],
timeout_dict=self.timeout_dict)
mock_driver.start_vrrp_service.assert_called_once_with(
_amphora_mock, self.timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _amphora_mock.id, status=constants.ERROR)
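    # AmphoraComputeConnectivityWait polls get_info() until the agent
    # responds; on timeout the amphora is marked ERROR and the exception
    # propagates so the flow can revert.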
def test_amphora_compute_connectivity_wait(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amp_compute_conn_wait_obj = (
amphora_driver_tasks.AmphoraComputeConnectivityWait())
amp_compute_conn_wait_obj.execute(_amphora_mock)
mock_driver.get_info.assert_called_once_with(_amphora_mock)
mock_driver.get_info.side_effect = driver_except.TimeOutException()
self.assertRaises(driver_except.TimeOutException,
amp_compute_conn_wait_obj.execute, _amphora_mock)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, AMP_ID, status=constants.ERROR)
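    # AmphoraConfigUpdate tolerates agents that predate the config-update
    # API (AmpDriverNotImplementedError) but still surfaces timeouts.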
@mock.patch('octavia.amphorae.backends.agent.agent_jinja_cfg.'
'AgentJinjaTemplater.build_agent_config')
def test_amphora_config_update(self,
mock_build_config,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
mock_build_config.return_value = FAKE_CONFIG_FILE
amp_config_update_obj = amphora_driver_tasks.AmphoraConfigUpdate()
mock_driver.update_amphora_agent_config.side_effect = [
None, None, driver_except.AmpDriverNotImplementedError,
driver_except.TimeOutException]
# With Flavor
flavor = {constants.LOADBALANCER_TOPOLOGY:
constants.TOPOLOGY_ACTIVE_STANDBY}
amp_config_update_obj.execute(_amphora_mock, flavor)
mock_build_config.assert_called_once_with(
_amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY)
mock_driver.update_amphora_agent_config.assert_called_once_with(
_amphora_mock, FAKE_CONFIG_FILE)
# With no Flavor
mock_driver.reset_mock()
mock_build_config.reset_mock()
amp_config_update_obj.execute(_amphora_mock, None)
mock_build_config.assert_called_once_with(
_amphora_mock.id, constants.TOPOLOGY_SINGLE)
mock_driver.update_amphora_agent_config.assert_called_once_with(
_amphora_mock, FAKE_CONFIG_FILE)
# With amphora that does not support config update
mock_driver.reset_mock()
mock_build_config.reset_mock()
amp_config_update_obj.execute(_amphora_mock, flavor)
mock_build_config.assert_called_once_with(
_amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY)
mock_driver.update_amphora_agent_config.assert_called_once_with(
_amphora_mock, FAKE_CONFIG_FILE)
# With an unknown exception
mock_driver.reset_mock()
mock_build_config.reset_mock()
self.assertRaises(driver_except.TimeOutException,
amp_config_update_obj.execute,
_amphora_mock, flavor)

View File

@ -1,46 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from cryptography import fernet
from oslo_config import cfg
from octavia.certificates.common import local
from octavia.common import utils
from octavia.controller.worker.v1.tasks import cert_task
import octavia.tests.unit.base as base
CONF = cfg.CONF
class TestCertTasks(base.TestCase):
@mock.patch('stevedore.driver.DriverManager.driver')
def test_execute(self, mock_driver):
key = utils.get_compatible_server_certs_key_passphrase()
fer = fernet.Fernet(key)
dummy_cert = local.LocalCert(
utils.get_compatible_value('test_cert'),
utils.get_compatible_value('test_key'))
mock_driver.generate_cert_key_pair.side_effect = [dummy_cert]
c = cert_task.GenerateServerPEMTask()
pem = c.execute('123')
self.assertEqual(
fer.decrypt(pem),
dummy_cert.get_certificate() +
dummy_cert.get_private_key()
)
mock_driver.generate_cert_key_pair.assert_called_once_with(
cn='123', validity=CONF.certificates.cert_validity_time)

View File

@ -1,634 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from cryptography import fernet
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
import tenacity
from octavia.common import constants
from octavia.common import exceptions
from octavia.common import utils
from octavia.controller.worker.v1.tasks import compute_tasks
from octavia.tests.common import utils as test_utils
import octavia.tests.unit.base as base
AMP_FLAVOR_ID = '10'
AMP_IMAGE_TAG = 'glance_tag'
AMP_SSH_KEY_NAME = None
AMP_NET = [uuidutils.generate_uuid()]
AMP_SEC_GROUPS = []
AMP_WAIT = 12
AMPHORA_ID = uuidutils.generate_uuid()
COMPUTE_ID = uuidutils.generate_uuid()
LB_NET_IP = '192.0.2.1'
PORT_ID = uuidutils.generate_uuid()
SERVER_GROUP_ID = uuidutils.generate_uuid()
class TestException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
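# Module-level mocks shared by the compute task tests below.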
_amphora_mock = mock.MagicMock()
_amphora_mock.id = AMPHORA_ID
_amphora_mock.compute_id = COMPUTE_ID
_load_balancer_mock = mock.MagicMock()
_load_balancer_mock.amphorae = [_amphora_mock]
_port = mock.MagicMock()
_port.id = PORT_ID
class TestComputeTasks(base.TestCase):
def setUp(self):
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
self.conf.config(
group="controller_worker", amp_flavor_id=AMP_FLAVOR_ID)
self.conf.config(
group="controller_worker", amp_image_tag=AMP_IMAGE_TAG)
self.conf.config(
group="controller_worker", amp_ssh_key_name=AMP_SSH_KEY_NAME)
self.conf.config(
group="controller_worker", amp_boot_network_list=AMP_NET)
self.conf.config(
group="controller_worker", amp_active_wait_sec=AMP_WAIT)
self.conf.config(
group="controller_worker", amp_secgroup_list=AMP_SEC_GROUPS)
self.conf.config(group="controller_worker", amp_image_owner_id='')
_amphora_mock.id = AMPHORA_ID
_amphora_mock.status = constants.AMPHORA_ALLOCATED
logging_mock = mock.MagicMock()
compute_tasks.LOG = logging_mock
super().setUp()
@mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.'
'LoggingJinjaTemplater.build_logging_config')
@mock.patch('jinja2.Environment.get_template')
@mock.patch('octavia.amphorae.backends.agent.'
'agent_jinja_cfg.AgentJinjaTemplater.'
'build_agent_config', return_value='test_conf')
@mock.patch('octavia.common.jinja.'
'user_data_jinja_cfg.UserDataJinjaCfg.'
'build_user_data_config', return_value='user_data_conf')
@mock.patch('stevedore.driver.DriverManager.driver')
def test_compute_create(self, mock_driver, mock_ud_conf,
mock_conf, mock_jinja, mock_log_cfg):
image_owner_id = uuidutils.generate_uuid()
self.conf.config(
group="controller_worker", amp_image_owner_id=image_owner_id)
mock_log_cfg.return_value = 'FAKE CFG'
createcompute = compute_tasks.ComputeCreate()
mock_driver.build.return_value = COMPUTE_ID
# Test execute()
        compute_id = createcompute.execute(_amphora_mock.id, ports=[_port],
                                           server_group_id=SERVER_GROUP_ID)
# Validate that the build method was called properly
mock_driver.build.assert_called_once_with(
name="amphora-" + _amphora_mock.id,
amphora_flavor=AMP_FLAVOR_ID,
image_tag=AMP_IMAGE_TAG,
image_owner=image_owner_id,
key_name=AMP_SSH_KEY_NAME,
sec_groups=AMP_SEC_GROUPS,
network_ids=AMP_NET,
port_ids=[PORT_ID],
config_drive_files={'/etc/octavia/'
'amphora-agent.conf': 'test_conf',
'/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'},
user_data='user_data_conf',
            server_group_id=SERVER_GROUP_ID,
availability_zone=None)
# Make sure it returns the expected compute_id
self.assertEqual(COMPUTE_ID, compute_id)
# Test that a build exception is raised
createcompute = compute_tasks.ComputeCreate()
self.assertRaises(TypeError,
createcompute.execute,
_amphora_mock, config_drive_files='test_cert')
# Test revert()
_amphora_mock.compute_id = COMPUTE_ID
createcompute = compute_tasks.ComputeCreate()
createcompute.revert(compute_id, _amphora_mock.id)
# Validate that the delete method was called properly
mock_driver.delete.assert_called_once_with(
COMPUTE_ID)
# Test that a delete exception is not raised
createcompute.revert(COMPUTE_ID, _amphora_mock.id)
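    # The remaining ComputeCreate variants toggle one input at a time:
    # user_data config drive, an explicit availability zone, SSH access
    # disabled, and TLS certificate injection via CertComputeCreate.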
@mock.patch('jinja2.Environment.get_template')
@mock.patch('octavia.amphorae.backends.agent.'
'agent_jinja_cfg.AgentJinjaTemplater.'
'build_agent_config', return_value='test_conf')
@mock.patch('octavia.common.jinja.'
'user_data_jinja_cfg.UserDataJinjaCfg.'
'build_user_data_config', return_value='user_data_conf')
@mock.patch('stevedore.driver.DriverManager.driver')
def test_compute_create_user_data(self, mock_driver,
mock_ud_conf, mock_conf, mock_jinja):
self.conf.config(
group="controller_worker", user_data_config_drive=True)
createcompute = compute_tasks.ComputeCreate()
mock_driver.build.return_value = COMPUTE_ID
# Test execute()
compute_id = createcompute.execute(_amphora_mock.id, ports=[_port],
server_group_id=None)
# Validate that the build method was called properly
mock_driver.build.assert_called_once_with(
name="amphora-" + _amphora_mock.id,
amphora_flavor=AMP_FLAVOR_ID,
image_tag=AMP_IMAGE_TAG,
image_owner='',
key_name=AMP_SSH_KEY_NAME,
sec_groups=AMP_SEC_GROUPS,
network_ids=AMP_NET,
port_ids=[PORT_ID],
config_drive_files=None,
user_data='user_data_conf',
server_group_id=None,
availability_zone=None)
# Make sure it returns the expected compute_id
self.assertEqual(COMPUTE_ID, compute_id)
# Test that a build exception is raised
createcompute = compute_tasks.ComputeCreate()
self.assertRaises(TypeError,
createcompute.execute,
_amphora_mock, config_drive_files='test_cert')
# Test revert()
_amphora_mock.compute_id = COMPUTE_ID
createcompute = compute_tasks.ComputeCreate()
createcompute.revert(compute_id, _amphora_mock.id)
# Validate that the delete method was called properly
mock_driver.delete.assert_called_once_with(
COMPUTE_ID)
# Test that a delete exception is not raised
createcompute.revert(COMPUTE_ID, _amphora_mock.id)
@mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.'
'LoggingJinjaTemplater.build_logging_config')
@mock.patch('jinja2.Environment.get_template')
@mock.patch('octavia.amphorae.backends.agent.'
'agent_jinja_cfg.AgentJinjaTemplater.'
'build_agent_config', return_value='test_conf')
@mock.patch('octavia.common.jinja.'
'user_data_jinja_cfg.UserDataJinjaCfg.'
'build_user_data_config', return_value='user_data_conf')
@mock.patch('stevedore.driver.DriverManager.driver')
def test_compute_create_availability_zone(self, mock_driver, mock_ud_conf,
mock_conf, mock_jinja,
mock_log_cfg):
image_owner_id = uuidutils.generate_uuid()
compute_zone = uuidutils.generate_uuid()
az_dict = {constants.COMPUTE_ZONE: compute_zone}
self.conf.config(
group="controller_worker", amp_image_owner_id=image_owner_id)
mock_log_cfg.return_value = 'FAKE CFG'
createcompute = compute_tasks.ComputeCreate()
mock_driver.build.return_value = COMPUTE_ID
# Test execute()
        compute_id = createcompute.execute(_amphora_mock.id, ports=[_port],
                                           server_group_id=SERVER_GROUP_ID,
                                           availability_zone=az_dict)
# Validate that the build method was called properly
mock_driver.build.assert_called_once_with(
name="amphora-" + _amphora_mock.id,
amphora_flavor=AMP_FLAVOR_ID,
image_tag=AMP_IMAGE_TAG,
image_owner=image_owner_id,
key_name=AMP_SSH_KEY_NAME,
sec_groups=AMP_SEC_GROUPS,
network_ids=AMP_NET,
port_ids=[PORT_ID],
config_drive_files={'/etc/octavia/'
'amphora-agent.conf': 'test_conf',
'/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'},
user_data='user_data_conf',
            server_group_id=SERVER_GROUP_ID,
availability_zone=compute_zone)
# Make sure it returns the expected compute_id
self.assertEqual(COMPUTE_ID, compute_id)
# Test that a build exception is raised
createcompute = compute_tasks.ComputeCreate()
self.assertRaises(TypeError,
createcompute.execute,
_amphora_mock, config_drive_files='test_cert')
# Test revert()
_amphora_mock.compute_id = COMPUTE_ID
createcompute = compute_tasks.ComputeCreate()
createcompute.revert(compute_id, _amphora_mock.id)
# Validate that the delete method was called properly
mock_driver.delete.assert_called_once_with(
COMPUTE_ID)
# Test that a delete exception is not raised
createcompute.revert(COMPUTE_ID, _amphora_mock.id)
@mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.'
'LoggingJinjaTemplater.build_logging_config')
@mock.patch('jinja2.Environment.get_template')
@mock.patch('octavia.amphorae.backends.agent.'
'agent_jinja_cfg.AgentJinjaTemplater.'
'build_agent_config', return_value='test_conf')
@mock.patch('octavia.common.jinja.'
'user_data_jinja_cfg.UserDataJinjaCfg.'
'build_user_data_config', return_value='user_data_conf')
@mock.patch('stevedore.driver.DriverManager.driver')
def test_compute_create_without_ssh_access(
self, mock_driver, mock_user_data_config,
mock_conf, mock_jinja, mock_log_cfg):
createcompute = compute_tasks.ComputeCreate()
mock_driver.build.return_value = COMPUTE_ID
self.conf.config(
group="controller_worker", user_data_config_drive=False)
mock_log_cfg.return_value = 'FAKE CFG'
# Test execute()
        compute_id = createcompute.execute(_amphora_mock.id, ports=[_port],
                                           server_group_id=SERVER_GROUP_ID)
# Validate that the build method was called properly
mock_driver.build.assert_called_once_with(
name="amphora-" + _amphora_mock.id,
amphora_flavor=AMP_FLAVOR_ID,
image_tag=AMP_IMAGE_TAG,
image_owner='',
key_name=None,
sec_groups=AMP_SEC_GROUPS,
network_ids=AMP_NET,
port_ids=[PORT_ID],
config_drive_files={'/etc/octavia/'
'amphora-agent.conf': 'test_conf',
'/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'},
user_data='user_data_conf',
            server_group_id=SERVER_GROUP_ID,
availability_zone=None)
self.assertEqual(COMPUTE_ID, compute_id)
# Test that a build exception is raised
createcompute = compute_tasks.ComputeCreate()
self.assertRaises(TypeError,
createcompute.execute,
_amphora_mock, config_drive_files='test_cert')
# Test revert()
_amphora_mock.compute_id = COMPUTE_ID
createcompute = compute_tasks.ComputeCreate()
createcompute.revert(compute_id, _amphora_mock.id)
# Validate that the delete method was called properly
mock_driver.delete.assert_called_once_with(
COMPUTE_ID)
# Test that a delete exception is not raised
createcompute.revert(COMPUTE_ID, _amphora_mock.id)
@mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.'
'LoggingJinjaTemplater.build_logging_config')
@mock.patch('jinja2.Environment.get_template')
@mock.patch('octavia.amphorae.backends.agent.'
'agent_jinja_cfg.AgentJinjaTemplater.'
'build_agent_config', return_value='test_conf')
@mock.patch('octavia.common.jinja.'
'user_data_jinja_cfg.UserDataJinjaCfg.'
'build_user_data_config', return_value='user_data_conf')
@mock.patch('stevedore.driver.DriverManager.driver')
def test_compute_create_cert(self, mock_driver, mock_ud_conf,
mock_conf, mock_jinja, mock_log_cfg):
createcompute = compute_tasks.CertComputeCreate()
key = utils.get_compatible_server_certs_key_passphrase()
fer = fernet.Fernet(key)
mock_log_cfg.return_value = 'FAKE CFG'
mock_driver.build.return_value = COMPUTE_ID
path = '/etc/octavia/certs/ca_01.pem'
self.useFixture(test_utils.OpenFixture(path, 'test'))
# Test execute()
test_cert = fer.encrypt(
utils.get_compatible_value('test_cert')
)
        compute_id = createcompute.execute(_amphora_mock.id, test_cert,
                                           server_group_id=SERVER_GROUP_ID)
# Validate that the build method was called properly
mock_driver.build.assert_called_once_with(
name="amphora-" + _amphora_mock.id,
amphora_flavor=AMP_FLAVOR_ID,
image_tag=AMP_IMAGE_TAG,
image_owner='',
key_name=AMP_SSH_KEY_NAME,
sec_groups=AMP_SEC_GROUPS,
network_ids=AMP_NET,
port_ids=[],
user_data='user_data_conf',
config_drive_files={
'/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG',
'/etc/octavia/certs/server.pem': fer.decrypt(
test_cert).decode('utf-8'),
'/etc/octavia/certs/client_ca.pem': 'test',
'/etc/octavia/amphora-agent.conf': 'test_conf'},
            server_group_id=SERVER_GROUP_ID,
availability_zone=None)
self.assertEqual(COMPUTE_ID, compute_id)
# Test that a build exception is raised
self.useFixture(test_utils.OpenFixture(path, 'test'))
createcompute = compute_tasks.ComputeCreate()
self.assertRaises(TypeError,
createcompute.execute,
_amphora_mock,
config_drive_files=test_cert)
# Test revert()
_amphora_mock.compute_id = COMPUTE_ID
createcompute = compute_tasks.ComputeCreate()
createcompute.revert(compute_id, _amphora_mock.id)
# Validate that the delete method was called properly
mock_driver.delete.assert_called_once_with(COMPUTE_ID)
# Test that a delete exception is not raised
createcompute.revert(COMPUTE_ID, _amphora_mock.id)
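    # ComputeActiveWait polls the compute driver (time.sleep is patched
    # out) and, when build rate limiting is enabled, releases the amphora
    # from the build-request queue; a DELETED or ERROR instance aborts the
    # wait with a typed exception.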
@mock.patch('octavia.controller.worker.amphora_rate_limit'
'.AmphoraBuildRateLimit.remove_from_build_req_queue')
@mock.patch('stevedore.driver.DriverManager.driver')
@mock.patch('time.sleep')
def test_compute_wait(self,
mock_time_sleep,
mock_driver,
mock_remove_from_build_queue):
self.conf.config(group='haproxy_amphora', build_rate_limit=5)
_amphora_mock.compute_id = COMPUTE_ID
_amphora_mock.status = constants.ACTIVE
_amphora_mock.lb_network_ip = LB_NET_IP
mock_driver.get_amphora.return_value = _amphora_mock, None
computewait = compute_tasks.ComputeActiveWait()
# Test with no AZ
computewait.execute(COMPUTE_ID, AMPHORA_ID, None)
mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID, None)
# Test with AZ
mock_driver.reset_mock()
az = {constants.MANAGEMENT_NETWORK: uuidutils.generate_uuid()}
computewait.execute(COMPUTE_ID, AMPHORA_ID, az)
mock_driver.get_amphora.assert_called_once_with(
COMPUTE_ID, az[constants.MANAGEMENT_NETWORK])
# Test with deleted amp
_amphora_mock.status = constants.DELETED
self.assertRaises(exceptions.ComputeWaitTimeoutException,
computewait.execute,
_amphora_mock, AMPHORA_ID, None)
@mock.patch('octavia.controller.worker.amphora_rate_limit'
'.AmphoraBuildRateLimit.remove_from_build_req_queue')
@mock.patch('stevedore.driver.DriverManager.driver')
@mock.patch('time.sleep')
def test_compute_wait_error_status(self,
mock_time_sleep,
mock_driver,
mock_remove_from_build_queue):
self.conf.config(group='haproxy_amphora', build_rate_limit=5)
_amphora_mock.compute_id = COMPUTE_ID
_amphora_mock.status = constants.ACTIVE
_amphora_mock.lb_network_ip = LB_NET_IP
mock_driver.get_amphora.return_value = _amphora_mock, None
computewait = compute_tasks.ComputeActiveWait()
computewait.execute(COMPUTE_ID, AMPHORA_ID, None)
mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID, None)
_amphora_mock.status = constants.ERROR
self.assertRaises(exceptions.ComputeBuildException,
computewait.execute,
_amphora_mock, AMPHORA_ID, None)
@mock.patch('octavia.controller.worker.amphora_rate_limit'
'.AmphoraBuildRateLimit.remove_from_build_req_queue')
@mock.patch('stevedore.driver.DriverManager.driver')
@mock.patch('time.sleep')
def test_compute_wait_skipped(self,
mock_time_sleep,
mock_driver,
mock_remove_from_build_queue):
_amphora_mock.compute_id = COMPUTE_ID
_amphora_mock.status = constants.ACTIVE
_amphora_mock.lb_network_ip = LB_NET_IP
mock_driver.get_amphora.return_value = _amphora_mock, None
computewait = compute_tasks.ComputeActiveWait()
computewait.execute(COMPUTE_ID, AMPHORA_ID, None)
mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID, None)
mock_remove_from_build_queue.assert_not_called()
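    # The delete tasks: DeleteAmphoraeOnLoadBalancer propagates driver
    # errors, while ComputeDelete retries via tenacity (capped at two
    # attempts here to keep the test fast) and, with passive_failure=True,
    # logs and continues instead of re-raising the final exception.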
@mock.patch('stevedore.driver.DriverManager.driver')
def test_delete_amphorae_on_load_balancer(self, mock_driver):
mock_driver.delete.side_effect = [mock.DEFAULT,
exceptions.OctaviaException('boom')]
delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer()
delete_amps.execute(_load_balancer_mock)
mock_driver.delete.assert_called_once_with(COMPUTE_ID)
# Test compute driver exception is raised
self.assertRaises(exceptions.OctaviaException, delete_amps.execute,
_load_balancer_mock)
@mock.patch('stevedore.driver.DriverManager.driver')
def test_compute_delete(self, mock_driver):
mock_driver.delete.side_effect = [
mock.DEFAULT, exceptions.OctaviaException('boom'),
mock.DEFAULT, exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom')]
delete_compute = compute_tasks.ComputeDelete()
# Limit the retry attempts for the test run to save time
delete_compute.execute.retry.stop = tenacity.stop_after_attempt(2)
delete_compute.execute(_amphora_mock)
mock_driver.delete.assert_called_once_with(COMPUTE_ID)
# Test retry after a compute exception
mock_driver.reset_mock()
delete_compute.execute(_amphora_mock)
mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
mock.call(COMPUTE_ID)])
# Test passive failure
mock_driver.reset_mock()
delete_compute.execute(_amphora_mock, passive_failure=True)
mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
mock.call(COMPUTE_ID)])
# Test non-passive failure
mock_driver.reset_mock()
self.assertRaises(exceptions.OctaviaException, delete_compute.execute,
_amphora_mock, passive_failure=False)
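    # Octavia names server groups "octavia-lb-<lb_id>" and requests the
    # anti-affinity policy; revert deletes the group and swallows any
    # delete error.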
@mock.patch('stevedore.driver.DriverManager.driver')
def test_nova_server_group_create(self, mock_driver):
        nova_server_group_obj = compute_tasks.NovaServerGroupCreate()
server_group_test_id = '6789'
fake_server_group = mock.MagicMock()
fake_server_group.id = server_group_test_id
fake_server_group.policy = 'anti-affinity'
mock_driver.create_server_group.return_value = fake_server_group
# Test execute()
        sg_id = nova_server_group_obj.execute('123')
# Validate that the build method was called properly
mock_driver.create_server_group.assert_called_once_with(
'octavia-lb-123', 'anti-affinity')
# Make sure it returns the expected server group_id
self.assertEqual(server_group_test_id, sg_id)
# Test revert()
        nova_server_group_obj.revert(sg_id)
# Validate that the delete_server_group method was called properly
mock_driver.delete_server_group.assert_called_once_with(sg_id)
# Test revert with exception
mock_driver.reset_mock()
mock_driver.delete_server_group.side_effect = Exception('DelSGExcept')
        nova_server_group_obj.revert(sg_id)
mock_driver.delete_server_group.assert_called_once_with(sg_id)
@mock.patch('stevedore.driver.DriverManager.driver')
    def test_nova_server_group_delete_with_server_group_id(self, mock_driver):
        nova_server_group_obj = compute_tasks.NovaServerGroupDelete()
        sg_id = '6789'
        nova_server_group_obj.execute(sg_id)
        mock_driver.delete_server_group.assert_called_once_with(sg_id)
@mock.patch('stevedore.driver.DriverManager.driver')
    def test_nova_server_group_delete_with_None(self, mock_driver):
        nova_server_group_obj = compute_tasks.NovaServerGroupDelete()
        sg_id = None
        nova_server_group_obj.execute(sg_id)
        self.assertFalse(mock_driver.delete_server_group.called, sg_id)
@mock.patch('stevedore.driver.DriverManager.driver')
def test_attach_port(self, mock_driver):
COMPUTE_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
amphora_mock = mock.MagicMock()
port_mock = mock.MagicMock()
amphora_mock.compute_id = COMPUTE_ID
port_mock.id = PORT_ID
attach_port_obj = compute_tasks.AttachPort()
# Test execute
attach_port_obj.execute(amphora_mock, port_mock)
mock_driver.attach_network_or_port.assert_called_once_with(
COMPUTE_ID, port_id=PORT_ID)
# Test revert
mock_driver.reset_mock()
attach_port_obj.revert(amphora_mock, port_mock)
mock_driver.detach_port.assert_called_once_with(COMPUTE_ID, PORT_ID)
        # Test revert exception
mock_driver.reset_mock()
mock_driver.detach_port.side_effect = [Exception('boom')]
# should not raise
attach_port_obj.revert(amphora_mock, port_mock)

View File

@ -1,415 +0,0 @@
# Copyright 2017 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from oslo_utils import uuidutils
from taskflow.types import failure
from octavia.common import data_models
from octavia.common import exceptions
from octavia.controller.worker.v1.tasks import database_tasks
import octavia.tests.unit.base as base
class TestDatabaseTasksQuota(base.TestCase):
def setUp(self):
self._tf_failure_mock = mock.Mock(spec=failure.Failure)
self.zero_pool_child_count = {'HM': 0, 'member': 0}
super().setUp()
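    # Shared helper: each Decrement*Quota task is exercised the same way.
    # Roughly how taskflow drives the pair (a sketch, not the real engine):
    #
    #     try:
    #         task.execute(obj)        # decrement the project's quota
    #     except Exception:
    #         task.revert(obj, None)   # re-check quota to restore usage
    #
    # Pool tasks additionally take the pre-computed child counts, since
    # deleting a pool also releases health monitor and member quota.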
@mock.patch('octavia.db.api.get_session', return_value='TEST')
@mock.patch('octavia.db.repositories.Repositories.decrement_quota')
@mock.patch('octavia.db.repositories.Repositories.check_quota_met')
def _test_decrement_quota(self,
task,
data_model,
mock_check_quota_met,
mock_decrement_quota,
mock_get_session):
project_id = uuidutils.generate_uuid()
test_object = mock.MagicMock()
test_object.project_id = project_id
# execute without exception
mock_decrement_quota.reset_mock()
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_session = mock.MagicMock()
mock_get_session_local.return_value = mock_session
if data_model == data_models.Pool:
task.execute(test_object, self.zero_pool_child_count)
else:
if data_model == data_models.L7Policy:
test_object.l7rules = []
task.execute(test_object)
mock_decrement_quota.assert_called_once_with(
mock_session, data_model, project_id)
mock_session.commit.assert_called_once_with()
# execute with exception
mock_decrement_quota.reset_mock()
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_session = mock.MagicMock()
mock_get_session_local.return_value = mock_session
mock_decrement_quota.side_effect = (
exceptions.OctaviaException('fail'))
if data_model == data_models.Pool:
self.assertRaises(exceptions.OctaviaException,
task.execute,
test_object,
self.zero_pool_child_count)
else:
self.assertRaises(exceptions.OctaviaException,
task.execute,
test_object)
mock_decrement_quota.assert_called_once_with(
mock_session, data_model, project_id)
mock_session.rollback.assert_called_once_with()
# revert with instance of failure
mock_get_session.reset_mock()
mock_check_quota_met.reset_mock()
if data_model == data_models.Pool:
task.revert(test_object,
self.zero_pool_child_count,
self._tf_failure_mock)
else:
if data_model == data_models.L7Policy:
test_object.l7rules = []
task.revert(test_object, self._tf_failure_mock)
self.assertFalse(mock_get_session.called)
self.assertFalse(mock_check_quota_met.called)
# revert
mock_check_quota_met.reset_mock()
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_session = mock.MagicMock()
mock_lock_session = mock.MagicMock()
mock_get_session_local.side_effect = [mock_session,
mock_lock_session]
if data_model == data_models.Pool:
task.revert(test_object, self.zero_pool_child_count, None)
else:
task.revert(test_object, None)
mock_check_quota_met.assert_called_once_with(
mock_session, mock_lock_session, data_model,
project_id)
mock_lock_session.commit.assert_called_once_with()
# revert with rollback
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_session = mock.MagicMock()
mock_lock_session = mock.MagicMock()
mock_get_session_local.side_effect = [mock_session,
mock_lock_session]
mock_check_quota_met.side_effect = (
exceptions.OctaviaException('fail'))
if data_model == data_models.Pool:
task.revert(test_object, self.zero_pool_child_count, None)
else:
task.revert(test_object, None)
mock_lock_session.rollback.assert_called_once_with()
# revert with db exception
mock_check_quota_met.reset_mock()
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_get_session_local.side_effect = Exception('fail')
if data_model == data_models.Pool:
task.revert(test_object, self.zero_pool_child_count, None)
else:
task.revert(test_object, None)
self.assertFalse(mock_check_quota_met.called)
def test_decrement_health_monitor_quota(self):
task = database_tasks.DecrementHealthMonitorQuota()
data_model = data_models.HealthMonitor
self._test_decrement_quota(task, data_model)
def test_decrement_listener_quota(self):
task = database_tasks.DecrementListenerQuota()
data_model = data_models.Listener
self._test_decrement_quota(task, data_model)
def test_decrement_loadbalancer_quota(self):
task = database_tasks.DecrementLoadBalancerQuota()
data_model = data_models.LoadBalancer
self._test_decrement_quota(task, data_model)
def test_decrement_pool_quota(self):
task = database_tasks.DecrementPoolQuota()
data_model = data_models.Pool
self._test_decrement_quota(task, data_model)
def test_decrement_member_quota(self):
task = database_tasks.DecrementMemberQuota()
data_model = data_models.Member
self._test_decrement_quota(task, data_model)
@mock.patch('octavia.db.repositories.Repositories.decrement_quota')
@mock.patch('octavia.db.repositories.Repositories.check_quota_met')
def test_decrement_pool_quota_pool_children(self,
mock_check_quota_met,
mock_decrement_quota):
pool_child_count = {'HM': 1, 'member': 2}
project_id = uuidutils.generate_uuid()
test_object = mock.MagicMock()
test_object.project_id = project_id
task = database_tasks.DecrementPoolQuota()
mock_session = mock.MagicMock()
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_get_session_local.return_value = mock_session
task.execute(test_object, pool_child_count)
calls = [mock.call(mock_session, data_models.Pool, project_id),
mock.call(mock_session, data_models.HealthMonitor,
project_id),
mock.call(mock_session, data_models.Member, project_id,
quantity=2)]
mock_decrement_quota.assert_has_calls(calls)
mock_session.commit.assert_called_once_with()
# revert
mock_session.reset_mock()
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_lock_session = mock.MagicMock()
mock_get_session_local.side_effect = [mock_session,
mock_lock_session,
mock_lock_session,
mock_lock_session,
mock_lock_session]
task.revert(test_object, pool_child_count, None)
calls = [mock.call(mock_session, mock_lock_session,
data_models.Pool, project_id),
mock.call(mock_session, mock_lock_session,
data_models.HealthMonitor, project_id),
mock.call(mock_session, mock_lock_session,
data_models.Member, project_id),
mock.call(mock_session, mock_lock_session,
data_models.Member, project_id)]
mock_check_quota_met.assert_has_calls(calls)
self.assertEqual(4, mock_lock_session.commit.call_count)
# revert with health monitor quota exception
mock_session.reset_mock()
mock_check_quota_met.side_effect = [None, Exception('fail'), None,
None]
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_lock_session = mock.MagicMock()
mock_get_session_local.side_effect = [mock_session,
mock_lock_session,
mock_lock_session,
mock_lock_session,
mock_lock_session]
task.revert(test_object, pool_child_count, None)
calls = [mock.call(mock_session, mock_lock_session,
data_models.Pool, project_id),
mock.call(mock_session, mock_lock_session,
data_models.HealthMonitor, project_id),
mock.call(mock_session, mock_lock_session,
data_models.Member, project_id),
mock.call(mock_session, mock_lock_session,
data_models.Member, project_id)]
mock_check_quota_met.assert_has_calls(calls)
self.assertEqual(3, mock_lock_session.commit.call_count)
self.assertEqual(1, mock_lock_session.rollback.call_count)
# revert with member quota exception
mock_session.reset_mock()
mock_check_quota_met.side_effect = [None, None, None,
Exception('fail')]
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_lock_session = mock.MagicMock()
mock_get_session_local.side_effect = [mock_session,
mock_lock_session,
mock_lock_session,
mock_lock_session,
mock_lock_session]
task.revert(test_object, pool_child_count, None)
calls = [mock.call(mock_session, mock_lock_session,
data_models.Pool, project_id),
mock.call(mock_session, mock_lock_session,
data_models.HealthMonitor, project_id),
mock.call(mock_session, mock_lock_session,
data_models.Member, project_id),
mock.call(mock_session, mock_lock_session,
data_models.Member, project_id)]
mock_check_quota_met.assert_has_calls(calls)
self.assertEqual(3, mock_lock_session.commit.call_count)
self.assertEqual(1, mock_lock_session.rollback.call_count)
def test_count_pool_children_for_quota(self):
project_id = uuidutils.generate_uuid()
member1 = data_models.Member(id=1, project_id=project_id)
member2 = data_models.Member(id=2, project_id=project_id)
        healthmon = data_models.HealthMonitor(id=1, project_id=project_id)
pool_no_children = data_models.Pool(id=1, project_id=project_id)
pool_1_mem = data_models.Pool(id=1, project_id=project_id,
members=[member1])
        pool_hm = data_models.Pool(id=1, project_id=project_id,
                                   health_monitor=healthmon)
        pool_hm_2_mem = data_models.Pool(id=1, project_id=project_id,
                                         health_monitor=healthmon,
                                         members=[member1, member2])
task = database_tasks.CountPoolChildrenForQuota()
# Test pool with no children
result = task.execute(pool_no_children)
self.assertEqual({'HM': 0, 'member': 0}, result)
# Test pool with one member
result = task.execute(pool_1_mem)
self.assertEqual({'HM': 0, 'member': 1}, result)
# Test pool with health monitor and no members
result = task.execute(pool_hm)
self.assertEqual({'HM': 1, 'member': 0}, result)
# Test pool with health monitor and two members
result = task.execute(pool_hm_2_mem)
self.assertEqual({'HM': 1, 'member': 2}, result)
def test_decrement_l7policy_quota(self):
task = database_tasks.DecrementL7policyQuota()
data_model = data_models.L7Policy
self._test_decrement_quota(task, data_model)
@mock.patch('octavia.db.repositories.Repositories.decrement_quota')
@mock.patch('octavia.db.repositories.Repositories.check_quota_met')
def test_decrement_l7policy_quota_with_children(self,
mock_check_quota_met,
mock_decrement_quota):
project_id = uuidutils.generate_uuid()
test_l7rule1 = mock.MagicMock()
test_l7rule1.project_id = project_id
test_l7rule2 = mock.MagicMock()
test_l7rule2.project_id = project_id
test_object = mock.MagicMock()
test_object.project_id = project_id
test_object.l7rules = [test_l7rule1, test_l7rule2]
task = database_tasks.DecrementL7policyQuota()
mock_session = mock.MagicMock()
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_get_session_local.return_value = mock_session
task.execute(test_object)
calls = [mock.call(mock_session, data_models.L7Policy, project_id),
mock.call(mock_session, data_models.L7Rule, project_id,
quantity=2)]
mock_decrement_quota.assert_has_calls(calls)
mock_session.commit.assert_called_once_with()
# revert
mock_session.reset_mock()
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_lock_session = mock.MagicMock()
mock_get_session_local.side_effect = [mock_session,
mock_lock_session,
mock_lock_session,
mock_lock_session]
task.revert(test_object, None)
calls = [mock.call(mock_session, mock_lock_session,
data_models.L7Policy, project_id),
mock.call(mock_session, mock_lock_session,
data_models.L7Rule, project_id),
mock.call(mock_session, mock_lock_session,
data_models.L7Rule, project_id)]
mock_check_quota_met.assert_has_calls(calls)
self.assertEqual(3, mock_lock_session.commit.call_count)
# revert with l7rule quota exception
mock_session.reset_mock()
mock_check_quota_met.side_effect = [None, None,
Exception('fail')]
with mock.patch('octavia.db.api.'
'get_session') as mock_get_session_local:
mock_lock_session = mock.MagicMock()
mock_get_session_local.side_effect = [mock_session,
mock_lock_session,
mock_lock_session,
mock_lock_session]
task.revert(test_object, None)
calls = [mock.call(mock_session, mock_lock_session,
data_models.L7Policy, project_id),
mock.call(mock_session, mock_lock_session,
data_models.L7Rule, project_id),
mock.call(mock_session, mock_lock_session,
data_models.L7Rule, project_id)]
mock_check_quota_met.assert_has_calls(calls)
self.assertEqual(2, mock_lock_session.commit.call_count)
self.assertEqual(1, mock_lock_session.rollback.call_count)
def test_decrement_l7rule_quota(self):
task = database_tasks.DecrementL7ruleQuota()
data_model = data_models.L7Rule
self._test_decrement_quota(task, data_model)

View File

@ -1,401 +0,0 @@
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_utils import uuidutils
from octavia.controller.worker.v1.tasks import lifecycle_tasks
import octavia.tests.unit.base as base
class TestLifecycleTasks(base.TestCase):
def setUp(self):
self.AMPHORA = mock.MagicMock()
self.AMPHORA_ID = uuidutils.generate_uuid()
self.AMPHORA.id = self.AMPHORA_ID
self.HEALTH_MON = mock.MagicMock()
self.HEALTH_MON_ID = uuidutils.generate_uuid()
self.HEALTH_MON.id = self.HEALTH_MON_ID
self.L7POLICY = mock.MagicMock()
self.L7POLICY_ID = uuidutils.generate_uuid()
self.L7POLICY.id = self.L7POLICY_ID
self.L7RULE = mock.MagicMock()
self.L7RULE_ID = uuidutils.generate_uuid()
self.L7RULE.id = self.L7RULE_ID
self.LISTENER = mock.MagicMock()
self.LISTENER_ID = uuidutils.generate_uuid()
self.LISTENER.id = self.LISTENER_ID
self.LISTENERS = [self.LISTENER]
self.LOADBALANCER = mock.MagicMock()
self.LOADBALANCER_ID = uuidutils.generate_uuid()
self.LOADBALANCER.id = self.LOADBALANCER_ID
self.LISTENER.load_balancer = self.LOADBALANCER
self.MEMBER = mock.MagicMock()
self.MEMBER_ID = uuidutils.generate_uuid()
self.MEMBER.id = self.MEMBER_ID
self.MEMBERS = [self.MEMBER]
self.POOL = mock.MagicMock()
self.POOL_ID = uuidutils.generate_uuid()
self.POOL.id = self.POOL_ID
super().setUp()
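    # Every *ToErrorOnRevertTask follows the same contract: execute() is a
    # no-op placeholder and revert() marks the failed object ERROR while,
    # where applicable, returning parent listeners and the load balancer to
    # ACTIVE. Roughly (a sketch with a placeholder name, not the actual
    # task code):
    #
    #     def revert(self, obj, *args, **kwargs):
    #         self.task_utils.mark_<obj>_prov_status_error(obj.id)
    #         self.task_utils.mark_loadbalancer_prov_status_active(lb.id)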
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'unmark_amphora_health_busy')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_amphora_status_error')
def test_AmphoraIDToErrorOnRevertTask(self, mock_amp_status_error,
mock_amp_health_busy):
amp_id_to_error_on_revert = (lifecycle_tasks.
AmphoraIDToErrorOnRevertTask())
# Execute
amp_id_to_error_on_revert.execute(self.AMPHORA_ID)
self.assertFalse(mock_amp_status_error.called)
# Revert
amp_id_to_error_on_revert.revert(self.AMPHORA_ID)
mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID)
self.assertFalse(mock_amp_health_busy.called)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'unmark_amphora_health_busy')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_amphora_status_error')
def test_AmphoraToErrorOnRevertTask(self, mock_amp_status_error,
mock_amp_health_busy):
amp_to_error_on_revert = lifecycle_tasks.AmphoraToErrorOnRevertTask()
# Execute
amp_to_error_on_revert.execute(self.AMPHORA)
self.assertFalse(mock_amp_status_error.called)
# Revert
amp_to_error_on_revert.revert(self.AMPHORA)
mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID)
self.assertFalse(mock_amp_health_busy.called)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_health_mon_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
def test_HealthMonitorToErrorOnRevertTask(
self,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_health_mon_prov_status_error):
health_mon_to_error_on_revert = (lifecycle_tasks.
HealthMonitorToErrorOnRevertTask())
# Execute
health_mon_to_error_on_revert.execute(self.HEALTH_MON,
self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_health_mon_prov_status_error.called)
# Revert
health_mon_to_error_on_revert.revert(self.HEALTH_MON,
self.LISTENERS,
self.LOADBALANCER)
mock_health_mon_prov_status_error.assert_called_once_with(
self.HEALTH_MON_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_l7policy_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
def test_L7PolicyToErrorOnRevertTask(
self,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_l7policy_prov_status_error):
l7policy_to_error_on_revert = (lifecycle_tasks.
L7PolicyToErrorOnRevertTask())
# Execute
l7policy_to_error_on_revert.execute(self.L7POLICY,
self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_l7policy_prov_status_error.called)
# Revert
l7policy_to_error_on_revert.revert(self.L7POLICY,
self.LISTENERS,
self.LOADBALANCER)
mock_l7policy_prov_status_error.assert_called_once_with(
self.L7POLICY_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_l7rule_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
def test_L7RuleToErrorOnRevertTask(
self,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_l7rule_prov_status_error):
l7rule_to_error_on_revert = (lifecycle_tasks.
L7RuleToErrorOnRevertTask())
# Execute
l7rule_to_error_on_revert.execute(self.L7RULE,
self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_l7rule_prov_status_error.called)
# Revert
l7rule_to_error_on_revert.revert(self.L7RULE,
self.LISTENERS,
self.LOADBALANCER)
mock_l7rule_prov_status_error.assert_called_once_with(
self.L7RULE_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error')
def test_ListenerToErrorOnRevertTask(
self,
mock_listener_prov_status_error,
mock_loadbalancer_prov_status_active):
listener_to_error_on_revert = (lifecycle_tasks.
ListenerToErrorOnRevertTask())
# Execute
listener_to_error_on_revert.execute(self.LISTENER)
self.assertFalse(mock_listener_prov_status_error.called)
# Revert
listener_to_error_on_revert.revert(self.LISTENER)
mock_listener_prov_status_error.assert_called_once_with(
self.LISTENER_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)

    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error')
def test_ListenersToErrorOnRevertTask(
self,
mock_listener_prov_status_error,
mock_loadbalancer_prov_status_active):
listeners_to_error_on_revert = (lifecycle_tasks.
ListenersToErrorOnRevertTask())
# Execute
listeners_to_error_on_revert.execute(self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_listener_prov_status_error.called)
# Revert
listeners_to_error_on_revert.revert(self.LISTENERS,
self.LOADBALANCER)
mock_listener_prov_status_error.assert_called_once_with(
self.LISTENER_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)

    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_error')
def test_LoadBalancerIDToErrorOnRevertTask(
self,
mock_loadbalancer_prov_status_error):
loadbalancer_id_to_error_on_revert = (
lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask())
# Execute
loadbalancer_id_to_error_on_revert.execute(self.LOADBALANCER_ID)
self.assertFalse(mock_loadbalancer_prov_status_error.called)
# Revert
loadbalancer_id_to_error_on_revert.revert(self.LOADBALANCER_ID)
mock_loadbalancer_prov_status_error.assert_called_once_with(
self.LOADBALANCER_ID)

    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_error')
def test_LoadBalancerToErrorOnRevertTask(
self,
mock_loadbalancer_prov_status_error):
loadbalancer_to_error_on_revert = (
lifecycle_tasks.LoadBalancerToErrorOnRevertTask())
# Execute
loadbalancer_to_error_on_revert.execute(self.LOADBALANCER)
self.assertFalse(mock_loadbalancer_prov_status_error.called)
# Revert
loadbalancer_to_error_on_revert.revert(self.LOADBALANCER)
mock_loadbalancer_prov_status_error.assert_called_once_with(
self.LOADBALANCER_ID)

    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_member_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_pool_prov_status_active')
def test_MemberToErrorOnRevertTask(
self,
mock_pool_prov_status_active,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_member_prov_status_error):
member_to_error_on_revert = lifecycle_tasks.MemberToErrorOnRevertTask()
# Execute
member_to_error_on_revert.execute(self.MEMBER,
self.LISTENERS,
self.LOADBALANCER,
self.POOL)
self.assertFalse(mock_member_prov_status_error.called)
# Revert
member_to_error_on_revert.revert(self.MEMBER,
self.LISTENERS,
self.LOADBALANCER,
self.POOL)
mock_member_prov_status_error.assert_called_once_with(
self.MEMBER_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
mock_pool_prov_status_active.assert_called_once_with(
self.POOL_ID)

    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_member_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_pool_prov_status_active')
def test_MembersToErrorOnRevertTask(
self,
mock_pool_prov_status_active,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_member_prov_status_error):
members_to_error_on_revert = (
lifecycle_tasks.MembersToErrorOnRevertTask())
# Execute
members_to_error_on_revert.execute(self.MEMBERS,
self.LISTENERS,
self.LOADBALANCER,
self.POOL)
self.assertFalse(mock_member_prov_status_error.called)
# Revert
members_to_error_on_revert.revert(self.MEMBERS,
self.LISTENERS,
self.LOADBALANCER,
self.POOL)
mock_member_prov_status_error.assert_called_once_with(
self.MEMBER_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
mock_pool_prov_status_active.assert_called_once_with(
self.POOL_ID)

    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_pool_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
def test_PoolToErrorOnRevertTask(
self,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_pool_prov_status_error):
pool_to_error_on_revert = lifecycle_tasks.PoolToErrorOnRevertTask()
# Execute
pool_to_error_on_revert.execute(self.POOL,
self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_pool_prov_status_error.called)
# Revert
pool_to_error_on_revert.revert(self.POOL,
self.LISTENERS,
self.LOADBALANCER)
mock_pool_prov_status_error.assert_called_once_with(
self.POOL_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
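
All of the *ToErrorOnRevertTask classes exercised above share one contract:
execute() is a no-op on the happy path, while revert() marks the failed
object ERROR and returns its parent objects to ACTIVE. A condensed sketch of
that shape, built on the TaskUtils helpers mocked in these tests
(illustrative only, not the Octavia source):

    from taskflow import task

    from octavia.controller.worker import task_utils as task_utilities


    class PoolToErrorOnRevertTask(task.Task):
        """Sketch: pool goes ERROR on revert, parents go back to ACTIVE."""

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.task_utils = task_utilities.TaskUtils()

        def execute(self, pool, listeners, loadbalancer):
            pass  # nothing to record when the flow succeeds

        def revert(self, pool, listeners, loadbalancer, *args, **kwargs):
            self.task_utils.mark_pool_prov_status_error(pool.id)
            self.task_utils.mark_loadbalancer_prov_status_active(
                loadbalancer.id)
            for listener in listeners:
                self.task_utils.mark_listener_prov_status_active(listener.id)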

@ -1,44 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from unittest import mock

from octavia.controller.worker.v1.tasks import model_tasks
import octavia.tests.unit.base as base


class TestObjectUpdateTasks(base.TestCase):
def setUp(self):
self.listener_mock = mock.MagicMock()
self.listener_mock.name = 'TEST'
super().setUp()

    def test_delete_model_object(self):
delete_object = model_tasks.DeleteModelObject()
delete_object.execute(self.listener_mock)
self.listener_mock.delete.assert_called_once_with()

    def test_update_listener(self):
update_attr = model_tasks.UpdateAttributes()
update_attr.execute(self.listener_mock,
{'name': 'TEST2'})
self.listener_mock.update.assert_called_once_with({'name': 'TEST2'})
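
Both tasks verified here are thin pass-throughs to the model object handed
in; they reduce to roughly the following (a sketch, not the removed module):

    from taskflow import task


    class DeleteModelObject(task.Task):
        """Sketch: delete a model object by delegating to it."""

        def execute(self, model_object):
            model_object.delete()


    class UpdateAttributes(task.Task):
        """Sketch: apply an update dict to a model object."""

        def execute(self, model_object, update_dict):
            model_object.update(update_dict)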

@ -1,47 +0,0 @@
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from taskflow import retry

from octavia.controller.worker.v1.tasks import retry_tasks
import octavia.tests.unit.base as base


class TestRetryTasks(base.TestCase):
def setUp(self):
super().setUp()

    @mock.patch('time.sleep')
def test_sleeping_retry_times_controller(self, mock_sleep):
retry_ctrlr = retry_tasks.SleepingRetryTimesController(
attempts=2, name='test_retry')
# Test on_failure that should RETRY
history = ['boom']
result = retry_ctrlr.on_failure(history)
self.assertEqual(retry.RETRY, result)
# Test on_failure retries exhausted, should REVERT
history = ['boom', 'bang', 'pow']
result = retry_ctrlr.on_failure(history)
self.assertEqual(retry.REVERT, result)
# Test revert - should not raise
retry_ctrlr.revert(history)
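
The on_failure() contract checked above is a simple counting rule: retry
while the recorded failures are fewer than the configured attempts, then ask
taskflow to revert. Reduced to its decision logic (a sketch under that
assumption, not the Octavia class):

    import time

    from taskflow import retry


    def on_failure_decision(attempts, history, interval=1):
        # Retry (after a pause) while attempts remain; otherwise revert.
        if len(history) < attempts:
            time.sleep(interval)
            return retry.RETRY
        return retry.REVERT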

@ -71,7 +71,7 @@ _session_mock = mock.MagicMock()
@mock.patch('octavia.db.repositories.ListenerRepository.get',
return_value=_listener_mock)
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
-@mock.patch('octavia.controller.worker.v1.tasks.amphora_driver_tasks.LOG')
+@mock.patch('octavia.controller.worker.v2.tasks.amphora_driver_tasks.LOG')
@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID)
@mock.patch('stevedore.driver.DriverManager.driver')
class TestAmphoraDriverTasks(base.TestCase):

@ -1820,7 +1820,7 @@ class TestControllerWorker(base.TestCase):
flow_utils.get_failover_amphora_flow,
mock_amphora.to_dict(), 1, store=expected_stored_params)
-    @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.'
+    @mock.patch('octavia.controller.worker.v2.flows.amphora_flows.'
'AmphoraFlows.get_failover_amphora_flow')
def test_failover_amp_missing_amp(self,
mock_get_amp_failover,
@ -1893,7 +1893,7 @@ class TestControllerWorker(base.TestCase):
cw.failover_amphora,
AMP_ID, reraise=True)
-    @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.'
+    @mock.patch('octavia.controller.worker.v2.flows.amphora_flows.'
'AmphoraFlows.get_failover_amphora_flow')
def test_failover_amp_no_lb(self,
mock_get_failover_amp_flow,

@ -0,0 +1,10 @@
---
upgrade:
- |
    The *amphorav1* provider was removed. Users who have kept using it
    should switch to the default *amphora* provider, which is an alias
    for the *amphorav2* provider.
deprecations:
- |
    The deprecated *amphorav1* provider was removed. The default
    *amphora* provider remains an alias for the *amphorav2* provider.
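
A quick post-upgrade sanity check is to confirm that the amphorav1 entry
point is gone and that the amphora alias resolves to the v2 driver, for
example (hypothetical check, assuming Python 3.10+ and an installed octavia
with the amphora alias entry point):

    from importlib.metadata import entry_points

    providers = {ep.name: ep.value
                 for ep in entry_points(group='octavia.api.drivers')}
    assert 'amphorav1' not in providers
    assert providers['amphora'].startswith(
        'octavia.api.drivers.amphora_driver.v2.')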

@ -60,7 +60,6 @@ octavia.api.drivers =
amphorav2 = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver
# octavia is an alias for backward compatibility
octavia = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver
-    amphorav1 = octavia.api.drivers.amphora_driver.v1.driver:AmphoraProviderDriver
octavia.amphora.drivers =
amphora_noop_driver = octavia.amphorae.drivers.noop_driver.driver:NoopAmphoraLoadBalancerDriver
amphora_haproxy_rest_driver = octavia.amphorae.drivers.haproxy.rest_api_driver:HaproxyAmphoraLoadBalancerDriver
@ -96,7 +95,7 @@ octavia.cert_manager =
octavia.barbican_auth =
barbican_acl_auth = octavia.certificates.common.auth.barbican_acl:BarbicanACLAuth
octavia.plugins =
-    hot_plug_plugin = octavia.controller.worker.v1.controller_worker:ControllerWorker
+    hot_plug_plugin = octavia.controller.worker.v2.controller_worker:ControllerWorker
octavia.worker.jobboard_driver =
redis_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:RedisTaskFlowDriver
zookeeper_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:ZookeeperTaskFlowDriver
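
These entry points are resolved at runtime through stevedore; for instance,
the worker plugin declared above can be loaded like this (illustrative,
using only names shown in this hunk):

    from stevedore import driver as stevedore_driver

    worker_class = stevedore_driver.DriverManager(
        namespace='octavia.plugins',
        name='hot_plug_plugin',
        invoke_on_load=False,
    ).driver
    # worker_class is now the v2 ControllerWorker class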

@ -13,7 +13,6 @@ test_files=$(find ${test_path} -iname 'test_*.py')
ignore_regexes=(
"^amphorae/drivers/haproxy/test_rest_api_driver_0_5.py$"
"^amphorae/drivers/haproxy/test_rest_api_driver_1_0.py$"
"^controller/worker/v1/tasks/test_database_tasks_quota.py$"
"^controller/worker/v2/tasks/test_database_tasks_quota.py$"
)

@ -1,32 +0,0 @@
# List of TaskFlow flows that should be documented
# Some flows are used by other flows, so just list the primary flows here
# Format:
# module class flow
octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_create_amphora_flow
octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_failover_amphora_flow
octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_failover_LB_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_listener_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_all_listeners_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_delete_listener_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_update_listener_flow
octavia.controller.worker.v1.flows.pool_flows PoolFlows get_create_pool_flow
octavia.controller.worker.v1.flows.pool_flows PoolFlows get_delete_pool_flow
octavia.controller.worker.v1.flows.pool_flows PoolFlows get_update_pool_flow
octavia.controller.worker.v1.flows.member_flows MemberFlows get_create_member_flow
octavia.controller.worker.v1.flows.member_flows MemberFlows get_delete_member_flow
octavia.controller.worker.v1.flows.member_flows MemberFlows get_update_member_flow
octavia.controller.worker.v1.flows.member_flows MemberFlows get_batch_update_members_flow
octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_create_health_monitor_flow
octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_delete_health_monitor_flow
octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_update_health_monitor_flow
octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_create_l7policy_flow
octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_delete_l7policy_flow
octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_update_l7policy_flow
octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_create_l7rule_flow
octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_delete_l7rule_flow
octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_update_l7rule_flow
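
Each non-comment line in this list is a whitespace-separated
"module class flow" triple; a minimal parser for the format would look like
this (hypothetical helper, not the actual tools/create_flow_docs.py):

    def parse_flow_list(path):
        """Yield (module, class_name, flow_method) triples from a flow list."""
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                module, class_name, flow_method = line.split()
                yield module, class_name, flow_method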