Add new policy action: vdu_autoheal

This patch adds the following functionality:

- New policy action ``vdu_autoheal`` for recovering failed VDUs
  as reported by the respective monitoring driver configured in
  the monitoring policy of the VNFD template.

- Added unit and functional tests.

- Added the oslo.versionedobjects library to implement the HealVnfRequest
  object.

Note: The implementation of the vdu_autoheal policy action supports the
HealVnfRequest interface as described in the ETSI standard [1].

[1]: https://www.etsi.org/deliver/etsi_gs/NFV-SOL/001_099/003/02.05.01_60/gs_NFV-SOL003v020501p.pdf

Implements: blueprint vdu-auto-healing
Change-Id: If62acbdac41c92842de0ae3b7dedcda9fd1f86e6
bhagyashris 2018-07-13 17:46:26 +05:30
parent 4b9bcfeeef
commit 5f1e48ff46
43 changed files with 1412 additions and 169 deletions
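
Before the per-file diffs, a minimal sketch of the flow this commit wires up, using stand-in stub classes rather than the real Tacker plumbing (the stubs, the wrapper function, and the 'CP1' dependent resource are illustrative assumptions; the real versioned objects are added in tacker/objects/heal_vnf_request.py below):

# Illustrative stubs only; the real objects live in
# tacker/objects/heal_vnf_request.py (added below).
class HealVnfAdditionalParams(object):
    def __init__(self, parameter, cause):
        self.parameter = parameter  # failed resource name, e.g. 'VDU1'
        self.cause = cause          # list of human-readable reasons


class HealVnfRequest(object):
    def __init__(self, cause, additional_params):
        self.cause = cause
        self.additional_params = additional_params


def on_monitor_failure(plugin, context, vnf_dict, vdu_name):
    # The monitor fires the 'vdu_autoheal' policy action with the name of
    # the VDU whose health check failed; the action collects that VDU and
    # its dependent resources (a CP port is assumed here) and asks the
    # plugin to heal them.
    resources = [vdu_name, 'CP1']
    params = [HealVnfAdditionalParams(
                  parameter=res,
                  cause=["Unable to reach while monitoring resource: "
                         "'%s'" % res])
              for res in resources]
    request = HealVnfRequest(cause='VNF monitoring fails.',
                             additional_params=params)
    plugin.heal_vnf(context, vnf_dict['id'], request)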


@ -83,7 +83,13 @@ The available actions that a monitor driver can call when a particular event
occurs.
#. respawn
   In case of an OpenStack VIM, when monitoring of any VDU fails, Tacker
   deletes the entire VNF and creates a new one.
#. log
#. vdu_autoheal
   In case of an OpenStack VIM, when monitoring of any VDU fails, Tacker
   deletes only that specific VDU resource and creates a new one along with
   its dependent resources, such as CPs.
How to write TOSCA template to monitor VNF entities
----------------------------------------------------
@ -134,3 +140,9 @@ Example Template
max_foo_reached: scale_up
min_foo_reached: scale_down
vdu3:
monitoring_policy:
ping:
actions:
failure: vdu_autoheal


@ -84,6 +84,7 @@ oslo.serialization==2.18.0
oslo.service==1.24.0
oslo.upgradecheck==0.1.0
oslo.utils==3.33.0
oslo.versionedobjects==1.33.3
oslotest==3.2.0
packaging==17.1
paramiko==2.0.0


@ -0,0 +1,11 @@
---
features:
- |
    Added a new monitoring policy action ``vdu_autoheal`` to bring back a
    failed VDU. If a VNF contains one or more VDUs whose monitoring policy
    action is set to ``vdu_autoheal`` and any one of those VDUs becomes
    unreachable, Tacker deletes the resources of that particular VDU and
    re-creates them.
    The ``vdu_autoheal`` monitoring policy action is implemented only for
    the OpenStack infra driver.


@ -34,6 +34,7 @@ oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
oslo.upgradecheck>=0.1.0 # Apache-2.0
oslo.utils>=3.33.0 # Apache-2.0
oslo.versionedobjects>=1.33.3 # Apache-2.0
openstackdocstheme>=1.18.1 # Apache-2.0
python-neutronclient>=6.7.0 # Apache-2.0
python-novaclient>=9.1.0 # Apache-2.0


@ -73,6 +73,7 @@ tacker.tacker.policy.actions =
respawn = tacker.vnfm.policy_actions.respawn.respawn:VNFActionRespawn
log = tacker.vnfm.policy_actions.log.log:VNFActionLog
log_and_kill = tacker.vnfm.policy_actions.log.log:VNFActionLogAndKill
vdu_autoheal = tacker.vnfm.policy_actions.vdu_autoheal.vdu_autoheal:VNFActionVduAutoheal
oslo.config.opts =
tacker.common.config = tacker.common.config:config_opts
tacker.wsgi = tacker.wsgi:config_opts


@ -11,7 +11,9 @@
# under the License.
from tacker.conductor import conductor_server
from tacker import objects
def main():
objects.register_all()
conductor_server.main()


@ -25,6 +25,7 @@ import oslo_i18n
from oslo_service import service as common_service
from tacker.common import config
from tacker import objects
from tacker import service
@ -34,6 +35,7 @@ oslo_i18n.install("tacker")
def main():
# the configuration will be read into the cfg.CONF global data structure
config.init(sys.argv[1:])
objects.register_all()
if not cfg.CONF.config_file:
sys.exit(_("ERROR: Unable to find configuration file via the default"
" search paths (~/.tacker/, ~/, /etc/tacker/, /etc/) and"


@ -28,6 +28,7 @@ from tacker.db.common_services import common_services_db
from tacker.db.nfvo import nfvo_db
from tacker.extensions import nfvo
from tacker import manager
from tacker import objects
from tacker.plugins.common import constants
from tacker import service as tacker_service
from tacker import version
@ -80,6 +81,7 @@ def init(args, **kwargs):
def main(manager='tacker.conductor.conductor_server.Conductor'):
init(sys.argv[1:])
objects.register_all()
logging.setup(cfg.CONF, "tacker")
oslo_messaging.set_transport_defaults(control_exchange='tacker')
logging.setup(cfg.CONF, "tacker")


@ -40,11 +40,12 @@ from tacker import manager
from tacker.plugins.common import constants
LOG = logging.getLogger(__name__)
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE,
constants.PENDING_HEAL)
_ACTIVE_UPDATE_ERROR_DEAD = (
constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
constants.PENDING_SCALE_IN, constants.PENDING_SCALE_OUT, constants.ERROR,
constants.PENDING_DELETE, constants.DEAD)
constants.PENDING_DELETE, constants.DEAD, constants.PENDING_HEAL)
CREATE_STATES = (constants.PENDING_CREATE, constants.DEAD)
@ -498,7 +499,8 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
"is not permited. Please contact your "
"Administrator.")
raise vnfm.VNFDeleteFailed(reason=error_reason)
if vnf_db.status == constants.PENDING_UPDATE:
        if vnf_db.status in [constants.PENDING_UPDATE,
                             constants.PENDING_HEAL]:
raise vnfm.VNFInUse(vnf_id=vnf_id)
return True
@ -522,28 +524,30 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
tstamp=timeutils.utcnow())
return updated_vnf_dict
def _update_vnf_pre(self, context, vnf_id):
def _update_vnf_pre(self, context, vnf_id, new_status):
with context.session.begin(subtransactions=True):
vnf_db = self._update_vnf_status_db(
context, vnf_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE)
context, vnf_id, _ACTIVE_UPDATE, new_status)
updated_vnf_dict = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=updated_vnf_dict['status'],
evt_type=constants.RES_EVT_UPDATE,
tstamp=timeutils.utcnow())
if new_status in constants.VNF_STATUS_TO_EVT_TYPES:
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=updated_vnf_dict['status'],
evt_type=constants.VNF_STATUS_TO_EVT_TYPES[new_status],
tstamp=timeutils.utcnow())
return updated_vnf_dict
def _update_vnf_post(self, context, vnf_id, new_status,
new_vnf_dict):
new_vnf_dict, vnf_status, evt_type):
updated_time_stamp = timeutils.utcnow()
with context.session.begin(subtransactions=True):
(self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status == constants.PENDING_UPDATE).
filter(VNF.status == vnf_status).
update({'status': new_status,
'updated_at': updated_time_stamp}))
'updated_at': updated_time_stamp,
'mgmt_url': new_vnf_dict['mgmt_url']}))
dev_attrs = new_vnf_dict.get('attributes', {})
(context.session.query(VNFAttribute).
@ -559,7 +563,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_status,
evt_type=constants.RES_EVT_UPDATE,
evt_type=evt_type,
tstamp=updated_time_stamp)
def _delete_vnf_pre(self, context, vnf_id):
@ -635,7 +639,8 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
# reference implementation. needs to be overrided by subclass
def update_vnf(self, context, vnf_id, vnf):
vnf_dict = self._update_vnf_pre(context, vnf_id)
new_status = constants.PENDING_UPDATE
vnf_dict = self._update_vnf_pre(context, vnf_id, new_status)
# start actual update of hosting vnf
# waiting for completion of update should be done backgroundly
# by another thread if it takes a while


@ -63,6 +63,10 @@ class VNFCreateFailed(exceptions.TackerException):
message = _('creating VNF based on %(vnfd_id)s failed')
class VNFUpdateWaitFailed(exceptions.TackerException):
message = _('%(reason)s')
class VNFCreateWaitFailed(exceptions.TackerException):
message = _('%(reason)s')
@ -79,6 +83,10 @@ class VNFDeleteFailed(exceptions.TackerException):
message = _('%(reason)s')
class VNFHealFailed(exceptions.TackerException):
message = _('VNF %(vnf_id)s failed to heal')
class VNFDNotFound(exceptions.NotFound):
message = _('VNFD %(vnfd_id)s could not be found')


@ -0,0 +1,27 @@
# Copyright 2018 NTT DATA.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(bhagyashris): You may scratch your head as you see code that imports
# this module and then accesses attributes for objects such as Instance,
# etc, yet you do not see these attributes in here. Never fear, there is
# a little bit of magic. When objects are registered, an attribute is set
# on this module automatically, pointing to the newest/latest version of
# the object.
def register_all():
# NOTE(bhagyashris): You must make sure your object gets imported in this
# function in order for it to be registered by services that may
# need to receive it via RPC.
__import__('tacker.objects.heal_vnf_request')

tacker/objects/base.py (new file, 48 lines)

@ -0,0 +1,48 @@
# Copyright 2018 NTT DATA.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
from tacker import objects
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_obj_' + name
class TackerObjectRegistry(ovoo_base.VersionedObjectRegistry):
notification_classes = []
def registration_hook(self, cls, index):
# NOTE(bhagyashris): This is called when an object is registered,
# and is responsible for maintaining tacker.objects.$OBJECT
# as the highest-versioned implementation of a given object.
version = versionutils.convert_version_to_tuple(cls.VERSION)
if not hasattr(objects, cls.obj_name()):
setattr(objects, cls.obj_name(), cls)
else:
cur_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
if version >= cur_version:
setattr(objects, cls.obj_name(), cls)
class TackerObject(ovoo_base.VersionedObject):
# NOTE(bhagyashris): OBJ_PROJECT_NAMESPACE needs to be set so that nova,
# tacker, and other objects can exist on the same bus and be distinguished
# from one another.
OBJ_SERIAL_NAMESPACE = 'tacker_object'
OBJ_PROJECT_NAMESPACE = 'tacker'
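
A quick illustration of what the registry hook above provides: once register_all() (added earlier in this commit) has run, the newest version of each registered object is reachable as an attribute of tacker.objects. A sketch, assuming a checkout with this patch applied:

from tacker import objects

objects.register_all()
# registration_hook() has set tacker.objects.HealVnfRequest to the
# highest-versioned registered implementation of that object.
request_cls = objects.HealVnfRequest
print(request_cls.VERSION)  # '1.0', the initial version added here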

tacker/objects/fields.py (new file, 22 lines)

@ -0,0 +1,22 @@
# Copyright 2018 NTT Data.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
# Import fields from oslo.versionedobjects
StringField = fields.StringField
ListOfObjectsField = fields.ListOfObjectsField
ListOfStringsField = fields.ListOfStringsField


@ -0,0 +1,42 @@
# Copyright 2018 NTT Data.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.objects import base
from tacker.objects import fields
@base.TackerObjectRegistry.register
class HealVnfAdditionalParams(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'parameter': fields.StringField(),
'cause': fields.ListOfStringsField()
}
@base.TackerObjectRegistry.register
class HealVnfRequest(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'cause': fields.StringField(),
'additional_params': fields.ListOfObjectsField(
'HealVnfAdditionalParams')
}
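
Constructing these objects mirrors what the unit tests later in this commit do; a small usage sketch:

from tacker.objects import heal_vnf_request

additional_param = heal_vnf_request.HealVnfAdditionalParams(
    parameter='VDU1',
    cause=["Unable to reach while monitoring resource: 'VDU1'"])
request = heal_vnf_request.HealVnfRequest(
    cause='VNF monitoring fails.',
    additional_params=[additional_param])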


@ -36,6 +36,7 @@ PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
PENDING_SCALE_IN = "PENDING_SCALE_IN"
PENDING_SCALE_OUT = "PENDING_SCALE_OUT"
PENDING_HEAL = "PENDING_HEAL"
DEAD = "DEAD"
ERROR = "ERROR"
@ -67,6 +68,14 @@ RES_EVT_MONITOR = "MONITOR"
RES_EVT_SCALE = "SCALE"
RES_EVT_NA_STATE = "Not Applicable"
RES_EVT_ONBOARDED = "OnBoarded"
RES_EVT_HEAL = "HEAL"
VNF_STATUS_TO_EVT_TYPES = {PENDING_CREATE: RES_EVT_CREATE,
PENDING_UPDATE: RES_EVT_UPDATE,
PENDING_DELETE: RES_EVT_DELETE,
PENDING_HEAL: RES_EVT_HEAL}
RES_EVT_CREATED_FLD = "created_at"
RES_EVT_DELETED_FLD = "deleted_at"


@ -22,3 +22,9 @@ SCALE_WINDOW_SLEEP_TIME = 120
NS_CREATE_TIMEOUT = 400
NS_DELETE_TIMEOUT = 300
NOVA_CLIENT_VERSION = 2
VDU_MARK_UNHEALTHY_TIMEOUT = 500
VDU_MARK_UNHEALTHY_SLEEP_TIME = 3
VDU_AUTOHEALING_TIMEOUT = 500
VDU_AUTOHEALING_SLEEP_TIME = 3
VNF_CIRROS_PENDING_HEAL_TIMEOUT = 300
PENDING_SLEEP_TIME = 3


@ -0,0 +1,88 @@
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
description: Demo example
metadata:
template_name: sample-tosca-vnfd
topology_template:
node_templates:
VDU1:
type: tosca.nodes.nfv.VDU.Tacker
capabilities:
nfv_compute:
properties:
disk_size: 1 GB
mem_size: 512 MB
num_cpus: 1
properties:
image: cirros-0.4.0-x86_64-disk
mgmt_driver: noop
availability_zone: nova
monitoring_policy:
name: ping
parameters:
monitoring_delay: 45
count: 3
interval: 1
timeout: 2
actions:
failure: vdu_autoheal
CP1:
type: tosca.nodes.nfv.CP.Tacker
properties:
management: true
anti_spoofing_protection: false
requirements:
- virtualLink:
node: VL1
- virtualBinding:
node: VDU1
VDU2:
type: tosca.nodes.nfv.VDU.Tacker
capabilities:
nfv_compute:
properties:
disk_size: 1 GB
mem_size: 512 MB
num_cpus: 1
properties:
image: cirros-0.4.0-x86_64-disk
mgmt_driver: noop
availability_zone: nova
monitoring_policy:
name: ping
parameters:
monitoring_delay: 45
count: 3
interval: 1
timeout: 2
actions:
failure: vdu_autoheal
user_data_format: RAW
user_data: |
#!/bin/sh
echo "my hostname is `hostname`" > /tmp/hostname
df -h > /home/cirros/diskinfo
sleep 90
sudo ifdown eth0
CP2:
type: tosca.nodes.nfv.CP.Tacker
properties:
management: true
anti_spoofing_protection: false
requirements:
- virtualLink:
node: VL1
- virtualBinding:
node: VDU2
VL1:
type: tosca.nodes.nfv.VL
properties:
network_name: net_mgmt
vendor: Tacker


@ -0,0 +1,55 @@
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
description: Demo example
metadata:
template_name: sample-tosca-vnfd
topology_template:
node_templates:
VDU1:
type: tosca.nodes.nfv.VDU.Tacker
capabilities:
nfv_compute:
properties:
disk_size: 1 GB
mem_size: 512 MB
num_cpus: 1
properties:
image: cirros-0.4.0-x86_64-disk
mgmt_driver: noop
availability_zone: nova
monitoring_policy:
name: ping
parameters:
monitoring_delay: 45
count: 3
interval: 1
timeout: 2
actions:
failure: vdu_autoheal
user_data_format: RAW
user_data: |
#!/bin/sh
echo "my hostname is `hostname`" > /tmp/hostname
df -h > /home/cirros/diskinfo
sleep 90
sudo ifdown eth0
CP1:
type: tosca.nodes.nfv.CP.Tacker
properties:
management: true
anti_spoofing_protection: false
requirements:
- virtualLink:
node: VL1
- virtualBinding:
node: VDU1
VL1:
type: tosca.nodes.nfv.VL
properties:
network_name: net_mgmt
vendor: Tacker


@ -119,6 +119,9 @@ class BaseTackerTest(base.BaseTestCase):
auth_ses = session.Session(auth=auth, verify=verify)
return glance_client.Client(session=auth_ses)
def get_vdu_resource(self, stack_id, res_name):
return self.h_client.resources.get(stack_id, res_name)
def wait_until_vnf_status(self, vnf_id, target_status, timeout,
sleep_interval):
start_time = int(time.time())
@ -140,6 +143,17 @@ class BaseTackerTest(base.BaseTestCase):
self.wait_until_vnf_status(vnf_id, 'ACTIVE', timeout,
sleep_interval)
def verify_vnf_update(self, vnf_id):
self.wait_until_vnf_status(vnf_id, 'ACTIVE',
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
self.wait_until_vnf_status(vnf_id, 'PENDING_HEAL',
constants.VNF_CIRROS_PENDING_HEAL_TIMEOUT,
constants.PENDING_SLEEP_TIME)
self.wait_until_vnf_status(vnf_id, 'ACTIVE',
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
def wait_until_vnf_delete(self, vnf_id, timeout):
start_time = int(time.time())
while True:


@ -20,7 +20,7 @@ from tacker.tests.utils import read_file
class VnfTestPingMonitor(base.BaseTackerTest):
def _test_vnf_with_monitoring(self, vnfd_file, vnf_name):
def _vnfd_and_vnf_create(self, vnfd_file, vnf_name):
data = dict()
data['tosca'] = read_file(vnfd_file)
toscal = data['tosca']
@ -36,6 +36,15 @@ class VnfTestPingMonitor(base.BaseTackerTest):
vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
vnf_instance = self.client.create_vnf(body=vnf_arg)
# Delete vnfd_instance
self.addCleanup(self.client.delete_vnfd, vnfd_id)
return vnfd_instance, vnf_instance
def _test_vnf_with_monitoring(self, vnfd_file, vnf_name):
vnfd_instance, vnf_instance = self._vnfd_and_vnf_create(vnfd_file,
vnf_name)
# Verify vnf goes from ACTIVE->DEAD->ACTIVE states
self.verify_vnf_restart(vnfd_instance, vnf_instance)
@ -51,8 +60,6 @@ class VnfTestPingMonitor(base.BaseTackerTest):
vnf_state_list = [evt_constants.ACTIVE, evt_constants.DEAD]
self.verify_vnf_monitor_events(vnf_id, vnf_state_list)
# Delete vnfd_instance
self.addCleanup(self.client.delete_vnfd, vnfd_id)
self.addCleanup(self.wait_until_vnf_delete, vnf_id,
constants.VNF_CIRROS_DELETE_TIMEOUT)
@ -65,3 +72,39 @@ class VnfTestPingMonitor(base.BaseTackerTest):
self._test_vnf_with_monitoring(
'sample-tosca-vnfd-multi-vdu-monitoring.yaml',
'ping monitor multi vdu vnf with tosca template')
def _test_vnf_with_monitoring_vdu_autoheal_action(
self, vnfd_file, vnf_name):
vnfd_instance, vnf_instance = self._vnfd_and_vnf_create(vnfd_file,
vnf_name)
vnf_id = vnf_instance['vnf']['id']
self.verify_vnf_update(vnf_id)
# Delete vnf_instance with vnf_id
try:
self.client.delete_vnf(vnf_id)
except Exception:
assert False, ("Failed to delete vnf %s after the monitor test" %
vnf_id)
self.addCleanup(self.wait_until_vnf_delete, vnf_id,
constants.VNF_CIRROS_DELETE_TIMEOUT)
params = {'resource_id': vnf_id,
'resource_state': 'PENDING_UPDATE',
'event_type': evt_constants.RES_EVT_MONITOR}
vnf_events = self.client.list_vnf_events(**params)
        # Check that the vdu_autoheal action emitted at least 4 monitoring
        # events.
        self.assertGreaterEqual(len(vnf_events['vnf_events']), 4)
def test_vnf_monitoring_with_vdu_autoheal_action_for_multi_vdu(self):
self._test_vnf_with_monitoring_vdu_autoheal_action(
'sample-tosca-vnfd-multi-vdu-monitoring-vdu-autoheal.yaml',
'ping multi vdu monitor having vdu_autoheal failure action '
'with tosca template')
def test_vnf_monitoring_with_vdu_autoheal_action_for_single_vdu(self):
self._test_vnf_with_monitoring_vdu_autoheal_action(
'sample-tosca-vnfd-single-vdu-monitoring-vdu-autoheal.yaml',
'ping vdu monitor having vdu_autoheal failure action '
'with tosca template')



@ -0,0 +1,61 @@
# Copyright 2018 NTT DATA.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tacker.objects import base
from tacker.objects import fields
from tacker.tests.unit import base as test_base
class TestString(test_base.TestCase):
def setUp(self):
super(TestString, self).setUp()
self.field = fields.StringField()
self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')]
if six.PY2:
self.coerce_good_values.append((int(1), '1'))
self.coerce_bad_values = [None]
def test_stringify(self):
self.assertEqual("'123'", self.field.stringify(123))
class TestListOfStrings(test_base.TestCase):
def setUp(self):
super(TestListOfStrings, self).setUp()
self.field = fields.ListOfStringsField()
def test_list_of_string(self):
self.assertEqual("['abc']", self.field.stringify(['abc']))
class TestListOfObjects(test_base.TestCase):
def test_list_of_obj(self):
@base.TackerObjectRegistry.register_if(False)
class MyObjElement(base.TackerObject):
fields = {'foo': fields.StringField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
@base.TackerObjectRegistry.register_if(False)
class MyList(base.TackerObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement('a'), MyObjElement('b')]
self.assertEqual(['a', 'b'], [x.foo for x in mylist.objects])


@ -20,12 +20,63 @@ import os
import yaml
from tacker import context
from tacker.db.common_services import common_services_db_plugin
from tacker.extensions import vnfm
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.vnfm.infra_drivers.openstack import openstack
vnf_dict = {
'attributes': {
'heat_template': {
'outputs': {
'mgmt_ip-VDU1': {
'value': {
'get_attr': [
'CP1', 'fixed_ips', 0, 'ip_address']
}
}
},
'description': 'Demo example\n',
'parameters': {},
'resources': {
'VDU1': {
'type': 'OS::Nova::Server',
'properties': {
'user_data_format': 'SOFTWARE_CONFIG',
'availability_zone': 'nova',
'image': 'cirros-0.4.0-x86_64-disk',
'config_drive': False,
'flavor': {'get_resource': 'VDU1_flavor'},
'networks': [{'port': {'get_resource': 'CP1'}}]
}
},
'CP1': {
'type': 'OS::Neutron::Port',
'properties': {
'port_security_enabled': False,
'network': 'net_mgmt'
}
},
'VDU1_flavor': {
'type': 'OS::Nova::Flavor',
'properties': {'vcpus': 1, 'disk': 1, 'ram': 512}
}
}
}
},
'status': 'ACTIVE',
'vnfd_id': '576acf48-b9df-491d-a57c-342de660ec78',
'tenant_id': '13d2ca8de70d48b2a2e0dbac2c327c0b',
'vim_id': '3f41faa7-5630-47d2-9d4a-1216953c8887',
'instance_id': 'd1121d3c-368b-4ac2-b39d-835aa3e4ccd8',
'placement_attr': {'vim_name': 'openstack-vim'},
'id': 'a27fc58e-66ae-4031-bba4-efede318c60b',
'name': 'vnf_create_1'
}
class FakeHeatClient(mock.Mock):
class Stack(mock.Mock):
@ -59,6 +110,11 @@ class TestOpenStack(base.TestCase):
self.context = context.get_admin_context()
self.infra_driver = openstack.OpenStack()
self._mock_heat_client()
mock.patch('tacker.db.common_services.common_services_db_plugin.'
'CommonServicesPluginDb.create_event'
).start()
self._cos_db_plugin = \
common_services_db_plugin.CommonServicesPluginDb()
self.addCleanup(mock.patch.stopall)
def _mock_heat_client(self):
@ -181,6 +237,25 @@ class TestOpenStack(base.TestCase):
'config'])
self.assertEqual(expected_vnf_update, vnf_obj)
@mock.patch(
'tacker.vnfm.infra_drivers.openstack.vdu.Vdu')
def test_heal_vdu(self, mock_vdu):
self.infra_driver.heal_vdu(None, self.context, vnf_dict,
mock.ANY)
mock_vdu.assert_called_once_with(self.context, vnf_dict,
mock.ANY)
@mock.patch(
'tacker.vnfm.infra_drivers.openstack.vdu.Vdu')
@mock.patch('tacker.vnfm.infra_drivers.openstack.openstack.LOG')
def test_heal_vdu_failed(self, mock_log, mock_vdu):
mock_vdu.side_effect = Exception
self.assertRaises(vnfm.VNFHealFailed, self.infra_driver.heal_vdu,
None, self.context, vnf_dict,
mock.ANY)
mock_log.error.assert_called_with(
"VNF '%s' failed to heal", vnf_dict['id'])
def _get_expected_fields_tosca(self, template):
return {'stack_name':
'test_openwrt_eb84260e-5ff7-4332-b032-50a14d6c1123',


@ -0,0 +1,162 @@
# Copyright 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tacker import context
from tacker.db.common_services import common_services_db_plugin
from tacker.objects import heal_vnf_request
from tacker.plugins.common import constants
from tacker.tests.unit import base
from tacker.vnfm.infra_drivers.openstack import vdu
vnf_dict = {
'attributes': {
'heat_template': {
'outputs': {
'mgmt_ip-VDU1': {
'value': {
'get_attr': [
'CP1', 'fixed_ips', 0, 'ip_address']
}
}
},
'description': 'Demo example\n',
'parameters': {},
'resources': {
'VDU1': {
'type': 'OS::Nova::Server',
'properties': {
'user_data_format': 'SOFTWARE_CONFIG',
'availability_zone': 'nova',
'image': 'cirros-0.4.0-x86_64-disk',
'config_drive': False,
'flavor': {'get_resource': 'VDU1_flavor'},
'networks': [{'port': {'get_resource': 'CP1'}}]
}
},
'CP1': {
'type': 'OS::Neutron::Port',
'properties': {
'port_security_enabled': False,
'network': 'net_mgmt'
}
},
'VDU1_flavor': {
'type': 'OS::Nova::Flavor',
'properties': {'vcpus': 1, 'disk': 1, 'ram': 512}
}
}
}
},
'status': 'ACTIVE',
'vnfd_id': '576acf48-b9df-491d-a57c-342de660ec78',
'tenant_id': '13d2ca8de70d48b2a2e0dbac2c327c0b',
'vim_id': '3f41faa7-5630-47d2-9d4a-1216953c8887',
'instance_id': 'd1121d3c-368b-4ac2-b39d-835aa3e4ccd8',
'placement_attr': {'vim_name': 'openstack-vim'},
'id': 'a27fc58e-66ae-4031-bba4-efede318c60b',
'name': 'vnf_create_1'
}
class FakeHeatClient(mock.Mock):
class Stack(mock.Mock):
stack_status = 'CREATE_COMPLETE'
outputs = [{u'output_value': u'192.168.120.31', u'description':
u'management ip address', u'output_key': u'mgmt_ip-vdu1'}]
def create(self, *args, **kwargs):
return {'stack': {'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}}
def get(self, id):
return self.Stack()
def update(self, stack_id, **kwargs):
return self.Stack()
def resource_mark_unhealthy(self, stack_id, resource_name,
mark_unhealthy, resource_status_reason):
return self.Stack()
class TestVDU(base.TestCase):
def setUp(self):
super(TestVDU, self).setUp()
self.context = context.get_admin_context()
self._mock_heat_client()
mock.patch('tacker.vnfm.vim_client.VimClient.get_vim').start()
self.additional_paramas_obj = heal_vnf_request.HealVnfAdditionalParams(
parameter='VDU1',
cause=["Unable to reach while monitoring resource: 'VDU1'"])
self.heal_request_data_obj = heal_vnf_request.HealVnfRequest(
cause='VNF monitoring fails.',
additional_params=[self.additional_paramas_obj])
self.heal_vdu = vdu.Vdu(self.context, vnf_dict,
self.heal_request_data_obj)
mock.patch('tacker.db.common_services.common_services_db_plugin.'
'CommonServicesPluginDb.create_event'
).start()
self._cos_db_plugin = \
common_services_db_plugin.CommonServicesPluginDb()
self.addCleanup(mock.patch.stopall)
def _mock_heat_client(self):
self.heat_client = mock.Mock(wraps=FakeHeatClient())
fake_heat_client = mock.Mock()
fake_heat_client.return_value = self.heat_client
self._mock(
'tacker.vnfm.infra_drivers.openstack.heat_client.HeatClient',
fake_heat_client)
@mock.patch('tacker.vnfm.vim_client.VimClient.get_vim')
def test_heal_vdu(self, mock_get_vim):
mock_get_vim.return_value = mock.MagicMock()
self.heal_vdu.heal_vdu()
self.heat_client.update.assert_called_once_with(
stack_id=vnf_dict['instance_id'], existing=True)
self._cos_db_plugin.create_event.assert_called_with(
self.context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF, res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_HEAL, tstamp=mock.ANY,
details=("HealVnfRequest invoked to update the stack '%s'" %
vnf_dict['instance_id']))
@mock.patch('tacker.vnfm.vim_client.VimClient.get_vim')
def test_resource_mark_unhealthy(self, mock_get_vim):
mock_get_vim.return_value = mock.MagicMock()
self.heal_vdu._resource_mark_unhealthy()
self.heat_client.resource_mark_unhealthy.assert_called_once_with(
stack_id=vnf_dict['instance_id'],
resource_name=self.additional_paramas_obj.parameter,
mark_unhealthy=True,
resource_status_reason=self.additional_paramas_obj.cause)
self._cos_db_plugin.create_event.assert_called_with(
self.context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF, res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_HEAL, tstamp=mock.ANY,
details="HealVnfRequest invoked to mark resource 'VDU1' "
"to unhealthy.")


@ -0,0 +1,147 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from datetime import datetime
import mock
from oslo_utils import uuidutils
from tacker import context
from tacker.db.nfvo import nfvo_db
from tacker.objects import heal_vnf_request
from tacker.tests.unit.db import base as db_base
from tacker.vnfm import plugin
from tacker.vnfm.policy_actions.vdu_autoheal import vdu_autoheal
vnf_dict = {
'id': uuidutils.generate_uuid(),
'mgmt_url': '{"VDU1": "a.b.c.d"}',
'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
'instance_id': 'a737497c-761c-11e5-89c3-9cb6541d805d',
'attributes': {
'heat_template': {
'resources': {
'VDU1': {
'properties': {
'networks': [{'port': {'get_resource': 'CP1'}}]}
}
}
}
}
}
class FakeDriverManager(mock.Mock):
def invoke(self, *args, **kwargs):
if 'create' in args:
return uuidutils.generate_uuid()
if 'get_resource_info' in args:
return {'resources': {'name': 'dummy_vnf',
'type': 'dummy',
'id': uuidutils.generate_uuid()}}
class FakeVNFMonitor(mock.Mock):
pass
class TestVNFActionVduAutoheal(db_base.SqlTestCase):
def setUp(self):
super(TestVNFActionVduAutoheal, self).setUp()
self.context = context.get_admin_context()
self._mock_device_manager()
self._mock_vnf_monitor()
self._insert_dummy_vim()
self.vnfm_plugin = plugin.VNFMPlugin()
self.vdu_autoheal = vdu_autoheal.VNFActionVduAutoheal()
self.addCleanup(mock.patch.stopall)
def _mock_device_manager(self):
self._device_manager = mock.Mock(wraps=FakeDriverManager())
self._device_manager.__contains__ = mock.Mock(
return_value=True)
fake_device_manager = mock.Mock()
fake_device_manager.return_value = self._device_manager
self._mock(
'tacker.common.driver_manager.DriverManager', fake_device_manager)
def _mock_vnf_monitor(self):
self._vnf_monitor = mock.Mock(wraps=FakeVNFMonitor())
fake_vnf_monitor = mock.Mock()
fake_vnf_monitor.return_value = self._vnf_monitor
self._mock(
'tacker.vnfm.monitor.VNFMonitor', fake_vnf_monitor)
def _insert_dummy_vim(self):
session = self.context.session
vim_db = nfvo_db.Vim(
id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
name='fake_vim',
description='fake_vim_description',
type='test_vim',
status='Active',
deleted_at=datetime.min,
placement_attr={'regions': ['RegionOne']})
vim_auth_db = nfvo_db.VimAuth(
vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
password='encrypted_pw',
auth_url='http://localhost:5000',
vim_project={'name': 'test_project'},
auth_cred={'username': 'test_user', 'user_domain_id': 'default',
'project_domain_id': 'default'})
session.add(vim_db)
session.add(vim_auth_db)
session.flush()
@mock.patch('tacker.vnfm.plugin.VNFMPlugin.heal_vnf')
@mock.patch('yaml.safe_load')
@mock.patch('tacker.objects.HealVnfRequest')
def test_vdu_autoheal_execute_action(self, mock_heal_vnf_request,
mock_safe_load,
mock_heal_vnf):
        # yaml.safe_load is mocked because this test passes
        # vnf_dict['attributes']['heat_template'] as a dict; feeding that
        # to the real yaml.safe_load would fail with "'dict' object has no
        # attribute 'read'", whereas in actual execution the value is a
        # YAML string.
mock_safe_load.return_value = vnf_dict['attributes']['heat_template']
resource_list = ['VDU1', 'CP1']
additional_params = []
for resource in resource_list:
additional_paramas_obj = heal_vnf_request.HealVnfAdditionalParams(
parameter=resource,
cause=["Unable to reach while monitoring resource: '%s'" %
resource])
additional_params.append(additional_paramas_obj)
heal_request_data_obj = heal_vnf_request.HealVnfRequest(
cause='VNF monitoring fails.',
additional_params=additional_params)
mock_heal_vnf_request.return_value = heal_request_data_obj
self.vdu_autoheal.execute_action(self.vnfm_plugin, self.context,
vnf_dict, args={'vdu_name': 'VDU1'})
mock_heal_vnf.assert_called_once_with(self.context, vnf_dict['id'],
heal_request_data_obj)
@mock.patch('tacker.vnfm.policy_actions.vdu_autoheal.'
'vdu_autoheal.LOG')
def test_vdu_autoheal_action_with_no_vdu_name(self, mock_log):
expected_error_msg = ("VDU resource of vnf '%s' is not present for "
"autoheal." % vnf_dict['id'])
self.vdu_autoheal.execute_action(self.vnfm_plugin, self.context,
vnf_dict, args={})
mock_log.error.assert_called_with(expected_error_msg)
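
The policy action module itself (tacker/vnfm/policy_actions/vdu_autoheal/vdu_autoheal.py) is not reproduced in this view. Based on the tests above, here is a sketch of roughly what its execute_action must do; the base class mirrors the other policy actions in this tree, and the heat-template traversal for dependent ports is an assumption, not the committed code:

import yaml

from oslo_log import log as logging

from tacker.objects import heal_vnf_request
from tacker.vnfm.policy_actions import abstract_action

LOG = logging.getLogger(__name__)


class VNFActionVduAutoheal(abstract_action.AbstractPolicyAction):
    def get_type(self):
        return 'vdu_autoheal'

    def get_name(self):
        return 'vdu_autoheal'

    def get_description(self):
        return 'Tacker VNF vdu_autoheal policy'

    def execute_action(self, plugin, context, vnf_dict, args):
        vdu_name = args.get('vdu_name')
        if vdu_name is None:
            LOG.error("VDU resource of vnf '%s' is not present for "
                      "autoheal." % vnf_dict['id'])
            return

        # Collect the failed VDU plus its dependent resources (the CP
        # ports wired to it in the heat template).
        heat_template = yaml.safe_load(
            vnf_dict['attributes']['heat_template'])
        resources = [vdu_name]
        vdu_properties = heat_template['resources'][vdu_name]['properties']
        for network in vdu_properties.get('networks', []):
            port = network.get('port', {})
            if 'get_resource' in port:
                resources.append(port['get_resource'])

        additional_params = []
        for resource in resources:
            additional_params.append(
                heal_vnf_request.HealVnfAdditionalParams(
                    parameter=resource,
                    cause=["Unable to reach while monitoring resource: "
                           "'%s'" % resource]))
        heal_request = heal_vnf_request.HealVnfRequest(
            cause='VNF monitoring fails.',
            additional_params=additional_params)
        plugin.heal_vnf(context, vnf_dict['id'], heal_request)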


@ -50,6 +50,33 @@ MOCK_VNF = {
}
MOCK_VNF_DEVICE_FOR_VDU_AUTOHEAL = {
'id': MOCK_VNF_ID,
'management_ip_addresses': {
'vdu1': 'a.b.c.d'
},
'monitoring_policy': {
'vdus': {
'vdu1': {
'ping': {
'actions': {
'failure': 'vdu_autoheal'
},
'monitoring_params': {
'count': 1,
'monitoring_delay': 0,
'interval': 0,
'timeout': 2
}
}
}
}
},
'boot_at': timeutils.utcnow(),
'action_cb': mock.MagicMock()
}
class TestVNFMonitor(testtools.TestCase):
def setUp(self):
@ -112,7 +139,7 @@ class TestVNFMonitor(testtools.TestCase):
@mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
def test_run_monitor(self, mock_monitor_run):
test_hosting_vnf = MOCK_VNF
test_hosting_vnf['vnf'] = {}
test_hosting_vnf['vnf'] = {'status': 'ACTIVE'}
test_boot_wait = 30
mock_kwargs = {
'count': 1,
@ -125,6 +152,55 @@ class TestVNFMonitor(testtools.TestCase):
self.mock_monitor_manager.invoke = mock.MagicMock()
test_vnfmonitor._monitor_manager = self.mock_monitor_manager
test_vnfmonitor.run_monitor(test_hosting_vnf)
self.mock_monitor_manager\
.invoke.assert_called_once_with('ping', 'monitor_call', vnf={},
self.mock_monitor_manager \
.invoke.assert_called_once_with('ping', 'monitor_call',
vnf={'status': 'ACTIVE'},
kwargs=mock_kwargs)
@mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
@mock.patch('tacker.vnfm.monitor.VNFMonitor.monitor_call')
def test_vdu_autoheal_action(self, mock_monitor_call, mock_monitor_run):
test_hosting_vnf = MOCK_VNF_DEVICE_FOR_VDU_AUTOHEAL
test_boot_wait = 30
test_device_dict = {
'status': 'ACTIVE',
'id': MOCK_VNF_ID,
'mgmt_url': '{"vdu1": "a.b.c.d"}',
'attributes': {
'monitoring_policy': json.dumps(
MOCK_VNF_DEVICE_FOR_VDU_AUTOHEAL['monitoring_policy'])
}
}
test_hosting_vnf['vnf'] = test_device_dict
mock_monitor_call.return_value = 'failure'
test_vnfmonitor = monitor.VNFMonitor(test_boot_wait)
test_vnfmonitor._monitor_manager = self.mock_monitor_manager
test_vnfmonitor.run_monitor(test_hosting_vnf)
test_hosting_vnf['action_cb'].assert_called_once_with(
'vdu_autoheal', vdu_name='vdu1')
@mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
def test_update_hosting_vnf(self, mock_monitor_run):
test_boot_wait = 30
test_vnfmonitor = monitor.VNFMonitor(test_boot_wait)
vnf_dict = {
'id': MOCK_VNF_ID,
'mgmt_url': '{"vdu1": "a.b.c.d"}',
'management_ip_addresses': 'a.b.c.d',
'vnf': {
'id': MOCK_VNF_ID,
'mgmt_url': '{"vdu1": "a.b.c.d"}',
'attributes': {
'monitoring_policy': json.dumps(
MOCK_VNF['monitoring_policy'])
},
'status': 'ACTIVE',
}
}
test_vnfmonitor.add_hosting_vnf(vnf_dict)
vnf_dict['status'] = 'PENDING_HEAL'
test_vnfmonitor.update_hosting_vnf(vnf_dict)
test_device_status = test_vnfmonitor._hosting_vnfs[MOCK_VNF_ID][
'vnf']['status']
self.assertEqual('PENDING_HEAL', test_device_status)


@ -27,6 +27,7 @@ from tacker.db.nfvo import nfvo_db
from tacker.db.nfvo import ns_db
from tacker.db.vnfm import vnfm_db
from tacker.extensions import vnfm
from tacker.objects import heal_vnf_request
from tacker.plugins.common import constants
from tacker.tests.unit.db import base as db_base
from tacker.tests.unit.db import utils
@ -622,3 +623,32 @@ class TestVNFMPlugin(db_base.SqlTestCase):
self.context,
uuidutils.generate_uuid(),
policy_type='invalid_policy_type')
@mock.patch('tacker.vnfm.monitor.VNFMonitor.update_hosting_vnf')
def test_heal_vnf_vdu(self, mock_update_hosting_vnf):
self._insert_dummy_vnf_template()
dummy_device_obj = self._insert_dummy_vnf()
additional_params_obj = heal_vnf_request.HealVnfAdditionalParams(
parameter='VDU1',
cause=["Unable to reach while monitoring resource: 'VDU1'"])
heal_request_data_obj = heal_vnf_request.HealVnfRequest(
cause='VNF monitoring fails.',
additional_params=[additional_params_obj])
result = self.vnfm_plugin.heal_vnf(self.context,
dummy_device_obj['id'],
heal_request_data_obj)
self.assertIsNotNone(result)
self.assertEqual(dummy_device_obj['id'], result['id'])
self.assertIn('instance_id', result)
self.assertIn('status', result)
self.assertIn('attributes', result)
self.assertIn('mgmt_url', result)
self.assertIn('updated_at', result)
self._vnf_manager.invoke.assert_called_with(
'test_vim', 'heal_vdu', plugin=self.vnfm_plugin,
context=self.context, vnf_dict=mock.ANY,
heal_request_data_obj=heal_request_data_obj)
self._pool.spawn_n.assert_called_once_with(
self.vnfm_plugin._update_vnf_wait, self.context, mock.ANY,
mock.ANY, 'test_vim', vnf_heal=True)


@ -53,7 +53,7 @@ class VnfAbstractDriver(extensions.PluginInterface):
pass
@abc.abstractmethod
def update_wait(self, plugin, context, vnf_id):
def update_wait(self, plugin, context, vnf_dict):
pass
@abc.abstractmethod
@ -69,3 +69,7 @@ class VnfAbstractDriver(extensions.PluginInterface):
region_name=None):
'''Fetches optional details of a VNF'''
pass
@abc.abstractmethod
def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
pass


@ -545,3 +545,6 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
if file_descriptor is not None:
file_path = vim_auth.pop('ca_cert_file')
self.kubernetes.close_tmp_file(file_descriptor, file_path)
def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
pass


@ -73,3 +73,6 @@ class VnfNoop(abstract_driver.VnfAbstractDriver):
def get_resource_info(self, plugin, context, vnf_info, auth_attr,
region_name=None):
return {'noop': {'id': uuidutils.generate_uuid(), 'type': 'noop'}}
def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
pass


@ -0,0 +1,23 @@
# Copyright 2018 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# openstack infra constants
STACK_CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS"
STACK_CREATE_COMPLETE = "CREATE_COMPLETE"
STACK_UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS"
STACK_UPDATE_COMPLETE = "UPDATE_COMPLETE"
STACK_DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
STACK_DELETE_COMPLETE = "DELETE_COMPLETE"


@ -53,6 +53,9 @@ class HeatClient(object):
def get(self, stack_id):
return self.stacks.get(stack_id)
def update(self, stack_id, **kwargs):
return self.stacks.update(stack_id, **kwargs)
def resource_attr_support(self, resource_name, property_name):
resource = self.resource_types.get(resource_name)
return property_name in resource['attributes']
@ -72,3 +75,9 @@ class HeatClient(object):
def resource_metadata(self, stack_id, rsc_name):
return self.heat.resources.metadata(stack_id, rsc_name)
def resource_mark_unhealthy(self, stack_id, resource_name,
mark_unhealthy, resource_status_reason):
return self.heat.resources.mark_unhealthy(stack_id, resource_name,
mark_unhealthy,
resource_status_reason)


@ -16,7 +16,6 @@
import time
from heatclient import exc as heatException
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
@ -26,8 +25,10 @@ from tacker.common import log
from tacker.common import utils
from tacker.extensions import vnfm
from tacker.vnfm.infra_drivers import abstract_driver
from tacker.vnfm.infra_drivers.openstack import constants as infra_cnst
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.infra_drivers.openstack import translate_template
from tacker.vnfm.infra_drivers.openstack import vdu
from tacker.vnfm.infra_drivers import scale_driver
@ -128,11 +129,45 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
'region_name', None)
heatclient = hc.HeatClient(auth_attr, region_name)
stack, status, stack_retries = self._wait_until_stack_ready(
vnf_id, auth_attr, infra_cnst.STACK_CREATE_IN_PROGRESS,
region_name=region_name)
if stack_retries == 0 and status != infra_cnst.STACK_CREATE_COMPLETE:
error_reason = _("Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.STACK_RETRIES *
self.STACK_RETRY_WAIT),
stack=vnf_id)
LOG.warning("VNF Creation failed: %(reason)s",
{'reason': error_reason})
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
elif stack_retries != 0 and status != infra_cnst.STACK_CREATE_COMPLETE:
error_reason = stack.stack_status_reason
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
# scaling enabled
if vnf_dict['attributes'].get('scaling_group_names'):
group_names = jsonutils.loads(
vnf_dict['attributes'].get('scaling_group_names')).values()
mgmt_ips = self._find_mgmt_ips_from_groups(heatclient,
vnf_id,
group_names)
else:
mgmt_ips = self._find_mgmt_ips(stack.outputs)
if mgmt_ips:
vnf_dict['mgmt_url'] = jsonutils.dumps(mgmt_ips)
def _wait_until_stack_ready(self, vnf_id, auth_attr, wait_status,
region_name=None):
heatclient = hc.HeatClient(auth_attr, region_name)
stack = heatclient.get(vnf_id)
status = stack.stack_status
stack_retries = self.STACK_RETRIES
error_reason = None
while status == 'CREATE_IN_PROGRESS' and stack_retries > 0:
while status == wait_status and stack_retries > 0:
time.sleep(self.STACK_RETRY_WAIT)
try:
stack = heatclient.get(vnf_id)
@ -146,45 +181,19 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
status = stack.stack_status
LOG.debug('status: %s', status)
stack_retries = stack_retries - 1
LOG.debug('stack status: %(stack)s %(status)s',
{'stack': str(stack), 'status': status})
if stack_retries == 0 and status != 'CREATE_COMPLETE':
error_reason = _("Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.STACK_RETRIES *
self.STACK_RETRY_WAIT),
stack=vnf_id)
LOG.warning("VNF Creation failed: %(reason)s",
{'reason': error_reason})
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
elif stack_retries != 0 and status != 'CREATE_COMPLETE':
error_reason = stack.stack_status_reason
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
return stack, status, stack_retries
def _find_mgmt_ips(outputs):
LOG.debug('outputs %s', outputs)
mgmt_ips = dict((output['output_key'][len(OUTPUT_PREFIX):],
output['output_value'])
for output in outputs
if output.get('output_key',
'').startswith(OUTPUT_PREFIX))
return mgmt_ips
# scaling enabled
if vnf_dict['attributes'].get('scaling_group_names'):
group_names = jsonutils.loads(
vnf_dict['attributes'].get('scaling_group_names')).values()
mgmt_ips = self._find_mgmt_ips_from_groups(heatclient,
vnf_id,
group_names)
else:
mgmt_ips = _find_mgmt_ips(stack.outputs)
if mgmt_ips:
vnf_dict['mgmt_url'] = jsonutils.dumps(mgmt_ips)
def _find_mgmt_ips(self, outputs):
LOG.debug('outputs %s', outputs)
mgmt_ips = dict((output['output_key'][len(OUTPUT_PREFIX):],
output['output_value'])
for output in outputs
if output.get('output_key',
'').startswith(OUTPUT_PREFIX))
return mgmt_ips
@log.log
def update(self, plugin, context, vnf_id, vnf_dict, vnf,
@ -219,11 +228,32 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
vnf_dict.setdefault('attributes', {})['config'] = new_yaml
@log.log
def update_wait(self, plugin, context, vnf_id, auth_attr,
def update_wait(self, plugin, context, vnf_dict, auth_attr,
region_name=None):
# do nothing but checking if the stack exists at the moment
heatclient = hc.HeatClient(auth_attr, region_name)
heatclient.get(vnf_id)
stack, status, stack_retries = self._wait_until_stack_ready(
vnf_dict['instance_id'], auth_attr,
infra_cnst.STACK_UPDATE_IN_PROGRESS,
region_name=region_name)
if stack_retries == 0 and status != infra_cnst.STACK_UPDATE_COMPLETE:
error_reason = _("Resource updation is not completed within"
" {wait} seconds as updation of stack {stack}"
" is not completed").format(
wait=(self.STACK_RETRIES * self.STACK_RETRY_WAIT),
stack=vnf_dict['instance_id'])
LOG.error("VNF Updation failed: %(reason)s",
{'reason': error_reason})
raise vnfm.VNFUpdateWaitFailed(reason=error_reason)
elif stack_retries != 0 and (status !=
infra_cnst.STACK_UPDATE_COMPLETE):
error_reason = stack.stack_status_reason
raise vnfm.VNFUpdateWaitFailed(reason=error_reason)
mgmt_ips = self._find_mgmt_ips(stack.outputs)
if mgmt_ips:
vnf_dict['mgmt_url'] = jsonutils.dumps(mgmt_ips)
@log.log
def delete(self, plugin, context, vnf_id, auth_attr, region_name=None):
@ -233,29 +263,11 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
@log.log
def delete_wait(self, plugin, context, vnf_id, auth_attr,
region_name=None):
heatclient = hc.HeatClient(auth_attr, region_name)
stack, status, stack_retries = self._wait_until_stack_ready(
vnf_id, auth_attr, infra_cnst.STACK_DELETE_IN_PROGRESS,
region_name=region_name)
stack = heatclient.get(vnf_id)
status = stack.stack_status
error_reason = None
stack_retries = self.STACK_RETRIES
while (status == 'DELETE_IN_PROGRESS' and stack_retries > 0):
time.sleep(self.STACK_RETRY_WAIT)
try:
stack = heatclient.get(vnf_id)
except heatException.HTTPNotFound:
return
except Exception:
LOG.warning("VNF Instance cleanup may not have "
"happened because Heat API request failed "
"while waiting for the stack %(stack)s to be "
"deleted", {'stack': vnf_id})
# Just like create wait, ignore the exception to
# avoid temporary connection error.
status = stack.stack_status
stack_retries = stack_retries - 1
if stack_retries == 0 and status != 'DELETE_COMPLETE':
if stack_retries == 0 and status != infra_cnst.STACK_DELETE_COMPLETE:
error_reason = _("Resource cleanup for vnf is"
" not completed within {wait} seconds as "
"deletion of Stack {stack} is "
@ -264,7 +276,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
LOG.warning(error_reason)
raise vnfm.VNFDeleteWaitFailed(reason=error_reason)
if stack_retries != 0 and status != 'DELETE_COMPLETE':
if stack_retries != 0 and status != infra_cnst.STACK_DELETE_COMPLETE:
error_reason = _("VNF {vnf_id} deletion is not completed. "
"{stack_status}").format(vnf_id=vnf_id,
stack_status=status)
@ -401,3 +413,11 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
# Raise exception when Heat API service is not available
except Exception:
raise vnfm.InfraDriverUnreachable(service="Heat API service")
def heal_vdu(self, plugin, context, vnf_dict, heal_request_data_obj):
try:
heal_vdu = vdu.Vdu(context, vnf_dict, heal_request_data_obj)
heal_vdu.heal_vdu()
except Exception:
LOG.error("VNF '%s' failed to heal", vnf_dict['id'])
raise vnfm.VNFHealFailed(vnf_id=vnf_dict['id'])


@ -0,0 +1,91 @@
# Copyright 2018 NTT DATA
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from tacker.plugins.common import constants
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm import utils as vnfm_utils
from tacker.vnfm import vim_client
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Vdu(object):
def __init__(self, context, vnf_dict, heal_request_data_obj):
super(Vdu, self).__init__()
self.context = context
self.vnf_dict = vnf_dict
self.heal_request_data_obj = heal_request_data_obj
vim_id = self.vnf_dict['vim_id']
vim_res = vim_client.VimClient().get_vim(context, vim_id)
placement_attr = vnf_dict.get('placement_attr', {})
auth_attr = vim_res['vim_auth']
region_name = placement_attr.get('region_name', None)
self.heat_client = hc.HeatClient(auth_attr=auth_attr,
region_name=region_name)
def _get_resource_status(self, stack_id, rsc_name):
# Get the status of VDU resource from heat
vdu_resource = self.heat_client.resource_get(stack_id=stack_id,
rsc_name=rsc_name)
return vdu_resource.resource_status
def _resource_mark_unhealthy(self):
"""Mark the resource unhealthy using heat."""
additional_params = self.heal_request_data_obj.additional_params
for additional_param in additional_params:
resource_name = additional_param.parameter
res_status = self._get_resource_status(
self.vnf_dict['instance_id'], resource_name)
if res_status != 'CHECK_FAILED':
self.heat_client.resource_mark_unhealthy(
stack_id=self.vnf_dict['instance_id'],
resource_name=resource_name, mark_unhealthy=True,
resource_status_reason=additional_param.cause)
LOG.debug("Heat stack '%s' resource '%s' marked as "
"unhealthy", self.vnf_dict['instance_id'],
resource_name)
evt_details = (("HealVnfRequest invoked to mark resource "
"'%s' to unhealthy.") % resource_name)
vnfm_utils.log_events(self.context, self.vnf_dict,
constants.RES_EVT_HEAL,
evt_details)
else:
LOG.debug("Heat stack '%s' resource '%s' already mark "
"unhealthy.", self.vnf_dict['instance_id'],
resource_name)
def heal_vdu(self):
"""Update stack using heat.
This will re-create the resource which are mark as unhealthy.
"""
# Mark all the resources as unhealthy
self._resource_mark_unhealthy()
self.heat_client.update(stack_id=self.vnf_dict['instance_id'],
existing=True)
LOG.debug("Heat stack '%s' update initiated to revive "
"unhealthy resources.", self.vnf_dict['instance_id'])
evt_details = (("HealVnfRequest invoked to update the stack "
"'%s'") % self.vnf_dict['instance_id'])
vnfm_utils.log_events(self.context, self.vnf_dict,
constants.RES_EVT_HEAL, evt_details)
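
Driving the helper end to end looks much like the TestVDU unit test earlier in this commit; a minimal sketch (the wrapper function is hypothetical):

from tacker.objects import heal_vnf_request
from tacker.vnfm.infra_drivers.openstack import vdu


def heal_failed_vdu(context, vnf_dict, vdu_name):
    # vnf_dict must carry 'vim_id', 'instance_id' and 'placement_attr',
    # as in the unit tests above.
    request = heal_vnf_request.HealVnfRequest(
        cause='VNF monitoring fails.',
        additional_params=[heal_vnf_request.HealVnfAdditionalParams(
            parameter=vdu_name,
            cause=["Unable to reach while monitoring resource: '%s'"
                   % vdu_name])])
    helper = vdu.Vdu(context, vnf_dict, request)
    # Marks the listed resources unhealthy, then triggers a stack update
    # with existing=True so Heat rebuilds only those resources.
    helper.heal_vdu()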


@ -15,6 +15,7 @@
# under the License.
import ast
import copy
import inspect
import threading
import time
@ -26,8 +27,8 @@ from oslo_utils import timeutils
from tacker.common import driver_manager
from tacker import context as t_context
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
from tacker.vnfm import utils as vnfm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -46,16 +47,6 @@ def config_opts():
('tacker', VNFAppMonitor.OPTS)]
def _log_monitor_events(context, vnf_dict, evt_details):
_cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
_cos_db_plg.create_event(context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_MONITOR,
tstamp=timeutils.utcnow(),
details=evt_details)
class VNFMonitor(object):
"""VNF Monitor."""
@ -96,8 +87,12 @@ class VNFMonitor(object):
with self._lock:
for hosting_vnf in VNFMonitor._hosting_vnfs.values():
if hosting_vnf.get('dead', False):
LOG.debug('monitor skips dead vnf %s', hosting_vnf)
if hosting_vnf.get('dead', False) or (
hosting_vnf['vnf']['status'] ==
constants.PENDING_HEAL):
LOG.debug(
'monitor skips for DEAD/PENDING_HEAL vnf %s',
hosting_vnf)
continue
try:
self.run_monitor(hosting_vnf)
@ -130,8 +125,9 @@ class VNFMonitor(object):
mon_policy_dict = attrib_dict['monitoring_policy']
evt_details = (("VNF added for monitoring. "
"mon_policy_dict = %s,") % (mon_policy_dict))
_log_monitor_events(t_context.get_admin_context(), new_vnf['vnf'],
evt_details)
vnfm_utils.log_events(t_context.get_admin_context(),
new_vnf['vnf'],
constants.RES_EVT_MONITOR, evt_details)
def delete_hosting_vnf(self, vnf_id):
LOG.debug('deleting vnf_id %(vnf_id)s', {'vnf_id': vnf_id})
@ -142,6 +138,22 @@ class VNFMonitor(object):
{'vnf_id': vnf_id,
'ips': hosting_vnf['management_ip_addresses']})
def update_hosting_vnf(self, updated_vnf_dict, evt_details=None):
with self._lock:
vnf_to_update = VNFMonitor._hosting_vnfs.get(
updated_vnf_dict.get('id'))
if vnf_to_update:
updated_vnf = copy.deepcopy(updated_vnf_dict)
vnf_to_update['vnf'] = updated_vnf
vnf_to_update['management_ip_addresses'] = jsonutils.loads(
updated_vnf_dict['mgmt_url'])
if evt_details is not None:
vnfm_utils.log_events(t_context.get_admin_context(),
vnf_to_update['vnf'],
constants.RES_EVT_HEAL,
evt_details=evt_details)
def run_monitor(self, hosting_vnf):
mgmt_ips = hosting_vnf['management_ip_addresses']
vdupolicies = hosting_vnf['monitoring_policy']['vdus']
@ -150,7 +162,8 @@ class VNFMonitor(object):
'monitoring_delay', self.boot_wait)
for vdu in vdupolicies.keys():
if hosting_vnf.get('dead'):
if hosting_vnf.get('dead') or (
hosting_vnf['vnf']['status']) == constants.PENDING_HEAL:
return
policy = vdupolicies[vdu]
@ -165,8 +178,7 @@ class VNFMonitor(object):
continue
actions = policy[driver].get('actions', {})
if 'mgmt_ip' not in params:
params['mgmt_ip'] = mgmt_ips[vdu]
params['mgmt_ip'] = mgmt_ips[vdu]
driver_return = self.monitor_call(driver,
hosting_vnf['vnf'],
@ -176,7 +188,7 @@ class VNFMonitor(object):
if driver_return in actions:
action = actions[driver_return]
hosting_vnf['action_cb'](action)
hosting_vnf['action_cb'](action, vdu_name=vdu)
def mark_dead(self, vnf_id):
VNFMonitor._hosting_vnfs[vnf_id]['dead'] = True
@ -270,9 +282,9 @@ class VNFAlarmMonitor(object):
# TODO(Tung Doan) trigger_dict.get('actions') needs to be used
policy_action = trigger_dict.get('action')
if len(policy_action) == 0:
_log_monitor_events(t_context.get_admin_context(),
vnf,
"Alarm not set: policy action missing")
vnfm_utils.log_events(t_context.get_admin_context(), vnf,
constants.RES_EVT_MONITOR,
"Alarm not set: policy action missing")
return
# Other backend policies with the construct (policy, action)
# ex: (SP1, in), (SP1, out)
@ -303,9 +315,8 @@ class VNFAlarmMonitor(object):
alarm_url[trigger_name] =\
self.call_alarm_url(driver, vnf, params)
details = "Alarm URL set successfully: %s" % alarm_url
_log_monitor_events(t_context.get_admin_context(),
vnf,
details)
vnfm_utils.log_events(t_context.get_admin_context(), vnf,
constants.RES_EVT_MONITOR, details)
return alarm_url
def process_alarm_for_vnf(self, vnf, trigger):


@ -124,7 +124,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
OPTS_POLICY_ACTION = [
cfg.ListOpt(
'policy_action', default=['autoscaling', 'respawn',
'log', 'log_and_kill'],
'vdu_autoheal', 'log', 'log_and_kill'],
help=_('Hosting vnf drivers tacker plugin will use')),
]
cfg.CONF.register_opts(OPTS_POLICY_ACTION, 'tacker')
@ -239,11 +239,11 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
dev_attrs = vnf_dict['attributes']
mgmt_url = vnf_dict['mgmt_url']
if 'monitoring_policy' in dev_attrs and mgmt_url:
def action_cb(action):
def action_cb(action, **kwargs):
LOG.debug('policy action: %s', action)
self._vnf_action.invoke(
action, 'execute_action', plugin=self, context=context,
vnf_dict=hosting_vnf['vnf'], args={})
vnf_dict=hosting_vnf['vnf'], args=kwargs)
hosting_vnf = self._vnf_monitor.to_hosting_vnf(
vnf_dict, action_cb)
@@ -435,8 +435,8 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
self._create_vnf_wait(context, vnf_dict, vim_auth, infra_driver)
return vnf_dict
def _update_vnf_wait(self, context, vnf_dict, vim_auth, driver_name):
instance_id = self._instance_id(vnf_dict)
def _update_vnf_wait(self, context, vnf_dict, vim_auth, driver_name,
vnf_heal=False):
kwargs = {
mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_UPDATE_VNF,
mgmt_constants.KEY_KWARGS: {'vnf': vnf_dict},
@@ -448,9 +448,15 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
try:
self._vnf_manager.invoke(
driver_name, 'update_wait', plugin=self,
context=context, vnf_id=instance_id, auth_attr=vim_auth,
context=context, vnf_dict=vnf_dict, auth_attr=vim_auth,
region_name=region_name)
self.mgmt_call(context, vnf_dict, kwargs)
except vnfm.VNFUpdateWaitFailed as e:
with excutils.save_and_reraise_exception():
new_status = constants.ERROR
self._vnf_monitor.delete_hosting_vnf(vnf_dict['id'])
self.set_vnf_error_status_reason(context, vnf_dict['id'],
six.text_type(e))
except exceptions.MgmtDriverException as e:
LOG.error('VNF configuration failed')
new_status = constants.ERROR
@@ -460,8 +466,21 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
vnf_dict['status'] = new_status
self.mgmt_update_post(context, vnf_dict)
self._update_vnf_post(context, vnf_dict['id'],
new_status, vnf_dict)
if vnf_heal:
# Update vnf status to 'ACTIVE' so that monitoring can be resumed.
evt_details = ("Ends the heal vnf request for VNF '%s'" %
vnf_dict['id'])
self._vnf_monitor.update_hosting_vnf(vnf_dict, evt_details)
# _update_vnf_post() method updates vnf_status and mgmt_url
self._update_vnf_post(context, vnf_dict['id'],
new_status, vnf_dict,
constants.PENDING_HEAL,
constants.RES_EVT_HEAL)
else:
self._update_vnf_post(context, vnf_dict['id'], new_status,
vnf_dict, constants.PENDING_UPDATE,
constants.RES_EVT_UPDATE)
def update_vnf(self, context, vnf_id, vnf):
vnf_attributes = vnf['vnf']['attributes']
@@ -474,7 +493,8 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
vnf_attributes['config'] = yaml.safe_dump(config)
else:
self._report_deprecated_yaml_str()
vnf_dict = self._update_vnf_pre(context, vnf_id)
vnf_dict = self._update_vnf_pre(context, vnf_id,
constants.PENDING_UPDATE)
driver_name, vim_auth = self._get_infra_driver(context, vnf_dict)
instance_id = self._instance_id(vnf_dict)
@@ -494,12 +514,48 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
self.mgmt_update_post(context, vnf_dict)
self._update_vnf_post(context, vnf_id,
constants.ERROR,
vnf_dict)
vnf_dict, constants.PENDING_UPDATE,
constants.RES_EVT_UPDATE)
self.spawn_n(self._update_vnf_wait, context, vnf_dict, vim_auth,
driver_name)
return vnf_dict
def heal_vnf(self, context, vnf_id, heal_request_data_obj):
vnf_dict = self._update_vnf_pre(context, vnf_id,
constants.PENDING_HEAL)
driver_name, vim_auth = self._get_infra_driver(context, vnf_dict)
# Update vnf status to 'PENDING_HEAL' so that monitoring can
# be paused.
evt_details = ("Starts heal vnf request for VNF '%s'. "
"Reason to Heal VNF: '%s'" % (vnf_dict['id'],
heal_request_data_obj.cause))
self._vnf_monitor.update_hosting_vnf(vnf_dict, evt_details)
try:
self.mgmt_update_pre(context, vnf_dict)
self._vnf_manager.invoke(
driver_name, 'heal_vdu', plugin=self,
context=context, vnf_dict=vnf_dict,
heal_request_data_obj=heal_request_data_obj)
except vnfm.VNFHealFailed as e:
with excutils.save_and_reraise_exception():
vnf_dict['status'] = constants.ERROR
self._vnf_monitor.delete_hosting_vnf(vnf_id)
self.set_vnf_error_status_reason(context,
vnf_dict['id'],
six.text_type(e))
self.mgmt_update_post(context, vnf_dict)
self._update_vnf_post(context, vnf_id,
constants.ERROR,
vnf_dict, constants.PENDING_HEAL,
constants.RES_EVT_HEAL)
self.spawn_n(self._update_vnf_wait, context, vnf_dict, vim_auth,
driver_name, vnf_heal=True)
return vnf_dict
def _delete_vnf_wait(self, context, vnf_dict, auth_attr, driver_name):
instance_id = self._instance_id(vnf_dict)
e = None
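
Taken together, heal_vnf() and _update_vnf_wait(vnf_heal=True) form a small state machine: ACTIVE -> PENDING_HEAL -> (driver heal_vdu plus update_wait) -> ACTIVE, with monitoring paused in between. The infra driver contract implied by the invoke() calls above can be sketched as follows; the method names and arguments are read off the diff, while the bodies are purely illustrative (the real OpenStack driver rebuilds the Heat resources):

    class ExampleInfraDriver(object):
        """Hedged sketch of the driver surface heal_vnf() relies on."""

        def heal_vdu(self, plugin, context, vnf_dict,
                     heal_request_data_obj):
            # Re-create only the resources named in the request, e.g.
            # ['VDU1', 'CP1'], rather than respawning the whole stack.
            for param in heal_request_data_obj.additional_params:
                self._recreate_resource(vnf_dict, param.parameter)

        def update_wait(self, plugin, context, vnf_dict, auth_attr,
                        region_name=None):
            # Poll until the re-created resources settle; the plugin then
            # flips the VNF back to ACTIVE and resumes monitoring.
            pass

        def _recreate_resource(self, vnf_dict, resource_name):
            pass  # placeholder; a real driver talks to Heat here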

tacker/vnfm/policy_actions/autoscaling/autoscaling.py

@@ -13,25 +13,14 @@
#
from oslo_log import log as logging
from oslo_utils import timeutils
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
from tacker.vnfm.policy_actions import abstract_action
from tacker.vnfm import utils as vnfm_utils
LOG = logging.getLogger(__name__)
def _log_monitor_events(context, vnf_dict, evt_details):
_cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
_cos_db_plg.create_event(context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_MONITOR,
tstamp=timeutils.utcnow(),
details=evt_details)
class VNFActionAutoscaling(abstract_action.AbstractPolicyAction):
def get_type(self):
return 'autoscaling'
@@ -44,7 +33,7 @@ class VNFActionAutoscaling(abstract_action.AbstractPolicyAction):
def execute_action(self, plugin, context, vnf_dict, args):
vnf_id = vnf_dict['id']
_log_monitor_events(context,
vnf_dict,
"ActionAutoscalingHeat invoked")
vnfm_utils.log_events(context, vnf_dict,
constants.RES_EVT_MONITOR,
"ActionAutoscalingHeat invoked")
plugin.create_vnf_scale(context, vnf_id, args)
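
The same mechanical refactor repeats in each policy action below: the copy-pasted module-level _log_monitor_events() helper is deleted in favour of the shared vnfm_utils.log_events(), which takes the event type as an explicit argument. A minimal sketch of the new call shape (context and vnf_dict stand for whatever the policy action already holds):

    from tacker.plugins.common import constants
    from tacker.vnfm import utils as vnfm_utils

    def record_invocation(context, vnf_dict):
        # Equivalent of the deleted helper; only the event type
        # (RES_EVT_MONITOR here) must now be passed by the caller.
        vnfm_utils.log_events(context, vnf_dict,
                              constants.RES_EVT_MONITOR,
                              "ActionAutoscalingHeat invoked")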

tacker/vnfm/policy_actions/log/log.py

@@ -13,25 +13,14 @@
#
from oslo_log import log as logging
from oslo_utils import timeutils
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
from tacker.vnfm.policy_actions import abstract_action
from tacker.vnfm import utils as vnfm_utils
LOG = logging.getLogger(__name__)
def _log_monitor_events(context, vnf_dict, evt_details):
_cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
_cos_db_plg.create_event(context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_MONITOR,
tstamp=timeutils.utcnow(),
details=evt_details)
class VNFActionLog(abstract_action.AbstractPolicyAction):
def get_type(self):
return 'log'
@@ -45,9 +34,9 @@ class VNFActionLog(abstract_action.AbstractPolicyAction):
def execute_action(self, plugin, context, vnf_dict, args):
vnf_id = vnf_dict['id']
LOG.error('vnf %s dead', vnf_id)
_log_monitor_events(context,
vnf_dict,
"ActionLogOnly invoked")
vnfm_utils.log_events(context, vnf_dict,
constants.RES_EVT_MONITOR,
"ActionLogOnly invoked")
class VNFActionLogAndKill(abstract_action.AbstractPolicyAction):
@@ -61,9 +50,9 @@ class VNFActionLogAndKill(abstract_action.AbstractPolicyAction):
return 'Tacker VNF log_and_kill policy'
def execute_action(self, plugin, context, vnf_dict, args):
_log_monitor_events(context,
vnf_dict,
"ActionLogAndKill invoked")
vnfm_utils.log_events(context, vnf_dict,
constants.RES_EVT_MONITOR,
"ActionLogAndKill invoked")
vnf_id = vnf_dict['id']
if plugin._mark_vnf_dead(vnf_dict['id']):
if vnf_dict['attributes'].get('monitoring_policy'):

tacker/vnfm/policy_actions/respawn/respawn.py

@@ -13,27 +13,16 @@
#
from oslo_log import log as logging
from oslo_utils import timeutils
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.policy_actions import abstract_action
from tacker.vnfm import utils as vnfm_utils
from tacker.vnfm import vim_client
LOG = logging.getLogger(__name__)
def _log_monitor_events(context, vnf_dict, evt_details):
_cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
_cos_db_plg.create_event(context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_MONITOR,
tstamp=timeutils.utcnow(),
details=evt_details)
class VNFActionRespawn(abstract_action.AbstractPolicyAction):
def get_type(self):
return 'respawn'
@@ -71,7 +60,9 @@ class VNFActionRespawn(abstract_action.AbstractPolicyAction):
heatclient.delete(vnf_dict['instance_id'])
LOG.debug("Heat stack %s delete initiated",
vnf_dict['instance_id'])
_log_monitor_events(context, vnf_dict, "ActionRespawnHeat invoked")
vnfm_utils.log_events(context, vnf_dict,
constants.RES_EVT_MONITOR,
"ActionRespawnHeat invoked")
def _respawn_vnf():
update_vnf_dict = plugin.create_vnf_sync(context, vnf_dict)

tacker/vnfm/policy_actions/vdu_autoheal/vdu_autoheal.py (new file)

@@ -0,0 +1,70 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
import yaml
from tacker import objects
from tacker.vnfm.policy_actions import abstract_action
LOG = logging.getLogger(__name__)
class VNFActionVduAutoheal(abstract_action.AbstractPolicyAction):
def get_type(self):
return 'vdu_autoheal'
def get_name(self):
return 'vdu_autoheal'
def get_description(self):
return 'Tacker VNF vdu_autoheal policy'
def execute_action(self, plugin, context, vnf_dict, args):
vdu_name = args.get('vdu_name')
if vdu_name is None:
LOG.error("VDU resource of vnf '%s' is not present for "
"autoheal." % vnf_dict['id'])
return
def _get_vdu_resources():
"""Get all the resources linked to the VDU.
Returns: resource list, e.g. ['VDU1', 'CP1']
"""
resource_list = [vdu_name]
heat_template = yaml.safe_load(vnf_dict['attributes'].get(
'heat_template'))
vdu_resources = heat_template['resources'].get(vdu_name)
cp_resources = vdu_resources['properties'].get('networks')
for resource in cp_resources:
resource_list.append(resource['port'].get('get_resource'))
return resource_list
resource_list = _get_vdu_resources()
additional_params = []
for resource in resource_list:
additional_paramas_obj = objects.HealVnfAdditionalParams(
parameter=resource,
cause=["Unable to reach while monitoring resource: '%s'" %
resource])
additional_params.append(additional_paramas_obj)
heal_request_data_obj = objects.HealVnfRequest(
cause=("Failed to monitor VDU resource '%s'" % vdu_name),
additional_params=additional_params)
plugin.heal_vnf(context, vnf_dict['id'], heal_request_data_obj)
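
For reference, a toy example of the template walk _get_vdu_resources() performs; the Heat fragment is illustrative, not a real Tacker-generated template, but its shape (a server resource whose networks reference ports via get_resource) matches what the code above expects:

    import yaml

    HEAT_TEMPLATE = """
    resources:
      VDU1:
        type: OS::Nova::Server
        properties:
          networks:
            - port: {get_resource: CP1}
      CP1:
        type: OS::Neutron::Port
    """

    def vdu_resources(vdu_name, heat_template_str):
        # Collect the VDU itself plus every CP its ports reference,
        # exactly as _get_vdu_resources() does above.
        resource_list = [vdu_name]
        template = yaml.safe_load(heat_template_str)
        vdu = template['resources'][vdu_name]
        for net in vdu['properties'].get('networks', []):
            resource_list.append(net['port']['get_resource'])
        return resource_list

    print(vdu_resources('VDU1', HEAT_TEMPLATE))  # ['VDU1', 'CP1']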

tacker/vnfm/utils.py (new file)

@@ -0,0 +1,34 @@
# Copyright 2018, OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.
"""Utilities and helper functions."""
from oslo_utils import timeutils
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
def log_events(context, vnf_dict, evt_type, evt_details):
_cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
_cos_db_plg.create_event(context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=vnf_dict['status'],
evt_type=evt_type,
tstamp=timeutils.utcnow(),
details=evt_details)
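
Finally, a usage sketch for the consolidated helper; the import alias follows the callers above, and the vnf dict needs only the 'id' and 'status' keys that log_events() actually reads:

    from tacker import context as t_context
    from tacker.plugins.common import constants
    from tacker.vnfm import utils as vnfm_utils

    def record_heal_event(vnf_dict, details):
        # Persists one RES_EVT_HEAL event row for the given VNF.
        vnfm_utils.log_events(t_context.get_admin_context(), vnf_dict,
                              constants.RES_EVT_HEAL, details)

    record_heal_event({'id': 'vnf-uuid', 'status': 'PENDING_HEAL'},
                      "Heal VNF request started for VNF 'vnf-uuid'.")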