From 6f5e1d7338633b0940bb90197397ee1959cdfaf6 Mon Sep 17 00:00:00 2001 From: Tung Doan Date: Mon, 7 May 2018 13:25:56 +0200 Subject: [PATCH] Update APMEC code --- CONTRIBUTING.rst | 16 + HACKING.rst | 19 + LICENSE | 201 +++ README.md | 35 + README.rst | 41 + TESTING.rst | 130 ++ apmec/__init__.py | 23 + apmec/_i18n.py | 24 + apmec/agent/__init__.py | 0 apmec/agent/linux/__init__.py | 0 apmec/agent/linux/utils.py | 130 ++ apmec/alarm_receiver.py | 94 + apmec/api/__init__.py | 0 apmec/api/api_common.py | 411 +++++ apmec/api/extensions.py | 622 +++++++ apmec/api/v1/__init__.py | 0 apmec/api/v1/attributes.py | 613 +++++++ apmec/api/v1/base.py | 600 +++++++ apmec/api/v1/resource.py | 114 ++ apmec/api/v1/resource_helper.py | 83 + apmec/api/v1/router.py | 60 + apmec/api/versions.py | 59 + apmec/api/views/__init__.py | 0 apmec/api/views/versions.py | 58 + apmec/auth.py | 75 + apmec/catalogs/__init__.py | 0 apmec/catalogs/tosca/__init__.py | 0 apmec/catalogs/tosca/lib/apmec_defs.yaml | 192 ++ apmec/catalogs/tosca/lib/apmec_mec_defs.yaml | 274 +++ apmec/catalogs/tosca/utils.py | 687 +++++++ apmec/cmd/__init__.py | 28 + apmec/cmd/eventlet/__init__.py | 17 + apmec/cmd/eventlet/apmec_server.py | 52 + apmec/cmd/eventlet/conductor.py | 17 + apmec/common/__init__.py | 0 apmec/common/clients.py | 53 + apmec/common/cmd_executer.py | 106 ++ apmec/common/config.py | 141 ++ apmec/common/constants.py | 47 + apmec/common/driver_manager.py | 76 + apmec/common/eventlet_utils.py | 26 + apmec/common/exceptions.py | 285 +++ apmec/common/log.py | 36 + apmec/common/rpc.py | 338 ++++ apmec/common/test_lib.py | 42 + apmec/common/topics.py | 15 + apmec/common/utils.py | 226 +++ apmec/conductor/__init__.py | 0 apmec/conductor/conductor_server.py | 91 + apmec/conductor/conductorrpc/__init__.py | 0 .../conductor/conductorrpc/vim_monitor_rpc.py | 30 + apmec/context.py | 141 ++ apmec/db/__init__.py | 0 apmec/db/api.py | 45 + apmec/db/common_services/__init__.py | 0 
.../db/common_services/common_services_db.py | 31 + .../common_services_db_plugin.py | 88 + apmec/db/db_base.py | 217 +++ apmec/db/mem/__init__.py | 0 apmec/db/mem/mem_db.py | 683 +++++++ apmec/db/meo/__init__.py | 0 apmec/db/meo/meo_db.py | 57 + apmec/db/meo/meo_db_plugin.py | 208 +++ apmec/db/meo/mes_db.py | 384 ++++ apmec/db/migration/README | 87 + apmec/db/migration/__init__.py | 95 + apmec/db/migration/alembic.ini | 52 + .../migration/alembic_migrations/__init__.py | 0 apmec/db/migration/alembic_migrations/env.py | 84 + .../alembic_migrations/script.py.mako | 36 + ...000632983ada_add_template_source_column.py | 33 + ...ce1c18_create_of_network_service_tables.py | 73 + ...0ad3bbce1c19_increase_vim_password_size.py | 35 + .../0ae5b1ce3024_unique_constraint_name.py | 58 + .../versions/12a57080b277_add_service_db.py | 98 + .../versions/12a57080b278_alter_devices.py | 42 + .../13c0e0661015_add_descrition_to_vnf.py | 35 + .../1c6b0d82afcd_servicevm_framework.py | 72 + .../22f5385a3d3f_add_status_to_vims.py | 35 + .../versions/22f5385a3d4f_remove_proxydb.py | 35 + .../versions/22f5385a3d50_rename_devicedb.py | 52 + ...5f211c7_alter_value_in_deviceattributes.py | 34 + .../2774a42c7163_remove_service_related.py | 37 + .../versions/2ff0a0e360f1_audit_support.py | 37 + ...299_change_vim_shared_property_to_false.py | 36 + ...e64ba129_set_mandatory_columns_not_null.py | 37 + .../4c31092895b8_remove_service_instance.py | 30 + .../4ee19c8a6d0a_audit_support_events.py | 45 + .../versions/507122918800_adds_vnffg.py | 145 ++ .../versions/5246a6bd410f_multisite_vim.py | 60 + .../5958429bcb3c_modify_datatype_of_value.py | 34 + ...6b35c7_make_vnfd_vnf_vim_name_mandatory.py | 41 + .../6e56d4474b2a_blob_to_json_text.py | 55 + .../versions/81ffa86020d_rpc_proxy.py | 54 + ...8f7145914cb0_remove_infra_driver_column.py | 32 + .../941b5a6fff9e_enable_soft_delete.py | 39 + .../alembic_migrations/versions/HEAD | 1 + .../alembic_migrations/versions/README | 5 + 
...acf941e54075_add_error_reason_to_device.py | 34 + ...bb8654_set_status_type_tenant_id_length.py | 54 + ...8ed37c_unique_constraint_on_name_and_id.py | 40 + .../c7cde2f45f82_set_description_to_text.py | 41 + .../d4f265e8eb9d_add_default_to_vim.py | 37 + ...af1_add_unique_constraint_on_deleted_at.py | 47 + .../e8918cda6433_add_attributes_to_vnffg.py | 34 + ..._source_status_to_vnffgtemplate_and_nsd.py | 45 + ...7_add_default_onboarded_template_source.py | 36 + ...b0f6b4_set_default_value_for_deleted_at.py | 46 + .../versions/f958f58e5daa_uuid_consistency.py | 66 + apmec/db/migration/cli.py | 177 ++ apmec/db/migration/models/__init__.py | 0 apmec/db/migration/models/head.py | 28 + apmec/db/migration/purge_tables.py | 121 ++ apmec/db/model_base.py | 51 + apmec/db/models_v1.py | 43 + apmec/db/sqlalchemyutils.py | 103 ++ apmec/db/types.py | 47 + apmec/extensions/__init__.py | 0 apmec/extensions/common_services.py | 153 ++ apmec/extensions/mem.py | 572 ++++++ apmec/extensions/meo.py | 526 ++++++ apmec/extensions/meo_plugins/__init__.py | 0 apmec/extensions/meo_plugins/edge_service.py | 61 + apmec/hacking/__init__.py | 0 apmec/hacking/checks.py | 50 + apmec/keymgr/__init__.py | 35 + apmec/keymgr/barbican_key_manager.py | 251 +++ apmec/keymgr/exception.py | 43 + apmec/keymgr/key_manager.py | 87 + apmec/locale/es/LC_MESSAGES/apmec.po | 1259 +++++++++++++ apmec/manager.py | 191 ++ apmec/mem/__init__.py | 0 apmec/mem/infra_drivers/__init__.py | 0 apmec/mem/infra_drivers/abstract_driver.py | 71 + apmec/mem/infra_drivers/noop.py | 75 + apmec/mem/infra_drivers/openstack/__init__.py | 0 .../infra_drivers/openstack/heat_client.py | 74 + .../mem/infra_drivers/openstack/openstack.py | 397 ++++ .../openstack/translate_template.py | 342 ++++ apmec/mem/infra_drivers/scale_driver.py | 42 + apmec/mem/keystone.py | 86 + apmec/mem/mgmt_drivers/__init__.py | 0 apmec/mem/mgmt_drivers/abstract_driver.py | 81 + apmec/mem/mgmt_drivers/constants.py | 24 + apmec/mem/mgmt_drivers/noop.py | 41 + 
apmec/mem/mgmt_drivers/openwrt/__init__.py | 0 apmec/mem/mgmt_drivers/openwrt/openwrt.py | 109 ++ apmec/mem/monitor.py | 286 +++ apmec/mem/monitor_drivers/__init__.py | 0 apmec/mem/monitor_drivers/abstract_driver.py | 82 + .../monitor_drivers/ceilometer/__init__.py | 0 .../monitor_drivers/ceilometer/ceilometer.py | 92 + .../mem/monitor_drivers/http_ping/__init__.py | 0 .../monitor_drivers/http_ping/http_ping.py | 80 + apmec/mem/monitor_drivers/ping/__init__.py | 0 apmec/mem/monitor_drivers/ping/ping.py | 81 + apmec/mem/monitor_drivers/token.py | 37 + apmec/mem/plugin.py | 868 +++++++++ apmec/mem/policy_actions/__init__.py | 0 apmec/mem/policy_actions/abstract_action.py | 38 + .../policy_actions/autoscaling/__init__.py | 0 .../policy_actions/autoscaling/autoscaling.py | 50 + apmec/mem/policy_actions/log/__init__.py | 0 apmec/mem/policy_actions/log/log.py | 72 + apmec/mem/policy_actions/respawn/__init__.py | 0 apmec/mem/policy_actions/respawn/respawn.py | 95 + apmec/mem/vim_client.py | 115 ++ apmec/meo/__init__.py | 0 apmec/meo/drivers/__init__.py | 0 apmec/meo/drivers/vim/__init__.py | 0 apmec/meo/drivers/vim/abstract_vim_driver.py | 95 + apmec/meo/drivers/vim/openstack_driver.py | 385 ++++ apmec/meo/drivers/workflow/__init__.py | 0 .../drivers/workflow/workflow_generator.py | 157 ++ apmec/meo/meo_plugin.py | 660 +++++++ apmec/meo/workflows/__init__.py | 0 apmec/meo/workflows/vim_monitor/__init__.py | 15 + .../vim_monitor/vim_monitor_utils.py | 89 + .../workflows/vim_monitor/vim_ping_action.py | 107 ++ .../vim_monitor/workflow_generator.py | 59 + apmec/mistral/__init__.py | 0 apmec/mistral/actionrpc/__init__.py | 0 apmec/mistral/actionrpc/kill_action.py | 27 + apmec/mistral/mistral_client.py | 27 + apmec/mistral/workflow_generator.py | 36 + apmec/nfv/__init__.py | 0 apmec/nfv/tacker_client.py | 104 ++ apmec/plugins/__init__.py | 0 apmec/plugins/common/__init__.py | 0 apmec/plugins/common/constants.py | 74 + apmec/plugins/common/utils.py | 67 + 
apmec/plugins/common_services/__init__.py | 0 .../common_services/common_services_plugin.py | 44 + apmec/policy.py | 413 +++++ ...d-mgmt-driver-in-api-954fe28b1294a2d6.yaml | 3 + apmec/service.py | 248 +++ apmec/services/__init__.py | 0 apmec/services/service_base.py | 48 + apmec/tests/__init__.py | 0 apmec/tests/base.py | 205 +++ apmec/tests/constants.py | 24 + apmec/tests/contrib/README | 3 + apmec/tests/contrib/post_test_hook.sh | 84 + apmec/tests/contrib/post_test_hook_lib.sh | 73 + apmec/tests/etc/api-paste.ini.test | 8 + apmec/tests/etc/apmec.conf.test | 24 + apmec/tests/etc/rootwrap.d/apmec.test.filters | 12 + apmec/tests/etc/samples/install_vnfc.sh | 2 + apmec/tests/etc/samples/local-vim.yaml | 7 + .../samples/sample-tosca-alarm-respawn.yaml | 57 + .../etc/samples/sample-tosca-alarm-scale.yaml | 82 + .../etc/samples/sample-tosca-mea-values.yaml | 10 + .../sample-tosca-mead-block-storage.yaml | 59 + .../etc/samples/sample-tosca-mead-flavor.yaml | 68 + .../etc/samples/sample-tosca-mead-image.yaml | 71 + .../sample-tosca-mead-large-template.yaml | 137 ++ .../samples/sample-tosca-mead-monitor.yaml | 52 + ...ample-tosca-mead-multi-vdu-monitoring.yaml | 175 ++ .../samples/sample-tosca-mead-multi-vdu.yaml | 152 ++ .../samples/sample-tosca-mead-no-monitor.yaml | 35 + .../etc/samples/sample-tosca-mead-param.yaml | 101 ++ .../samples/sample-tosca-mead-static-ip.yaml | 72 + .../tests/etc/samples/sample-tosca-mead.yaml | 82 + .../etc/samples/sample-tosca-scale-all.yaml | 50 + ...sample_tosca_assign_floatingip_to_vdu.yaml | 44 + .../tests/etc/samples/sample_tosca_meac.yaml | 42 + apmec/tests/etc/samples/test-ns-nsd.yaml | 37 + apmec/tests/etc/samples/test-ns-vnfd1.yaml | 98 + apmec/tests/etc/samples/test-ns-vnfd2.yaml | 68 + apmec/tests/etc/samples/test-nsd-vnfd1.yaml | 98 + apmec/tests/etc/samples/test-nsd-vnfd2.yaml | 68 + apmec/tests/etc/samples/test-nsd.yaml | 37 + apmec/tests/fake_notifier.py | 52 + apmec/tests/functional/__init__.py | 0 
apmec/tests/functional/base.py | 222 +++ apmec/tests/functional/clients.py | 52 + apmec/tests/functional/keystone.py | 55 + apmec/tests/functional/mem/__init__.py | 0 apmec/tests/functional/mem/test_mea.py | 106 ++ .../functional/mem/test_mea_monitoring.py | 67 + apmec/tests/functional/mem/test_mem_param.py | 129 ++ apmec/tests/functional/mem/test_tosca_mea.py | 282 +++ .../functional/mem/test_tosca_mea_alarm.py | 151 ++ .../mem/test_tosca_mea_block_storage.py | 134 ++ .../mem/test_tosca_mea_floatingip.py | 98 + .../mem/test_tosca_mea_multiple_vdu.py | 90 + .../functional/mem/test_tosca_mea_scale.py | 107 ++ apmec/tests/functional/mem/test_tosca_meac.py | 115 ++ apmec/tests/functional/mem/test_tosca_mead.py | 65 + apmec/tests/functional/meo/__init__.py | 0 apmec/tests/functional/meo/test_meo.py | 178 ++ apmec/tests/functional/meo/test_vim.py | 191 ++ apmec/tests/post_mortem_debug.py | 104 ++ apmec/tests/tools.py | 44 + apmec/tests/unit/__init__.py | 24 + apmec/tests/unit/_test_rootwrap_exec.py | 82 + apmec/tests/unit/base.py | 33 + apmec/tests/unit/database_stubs.py | 184 ++ apmec/tests/unit/db/__init__.py | 0 apmec/tests/unit/db/base.py | 51 + apmec/tests/unit/db/utils.py | 202 +++ apmec/tests/unit/extension_stubs.py | 77 + apmec/tests/unit/extensions/__init__.py | 0 .../unit/extensions/extendedattribute.py | 54 + .../unit/extensions/extensionattribute.py | 102 ++ apmec/tests/unit/extensions/foxinsocks.py | 109 ++ apmec/tests/unit/extensions/v2attributes.py | 48 + apmec/tests/unit/mem/__init__.py | 0 .../tests/unit/mem/infra_drivers/__init__.py | 0 .../mem/infra_drivers/openstack/__init__.py | 0 .../openstack/data/config_data.yaml | 12 + .../data/hot_alarm_scale_custom.yaml | 26 + .../openstack/data/hot_flavor.yaml | 32 + .../data/hot_flavor_and_capabilities.yaml | 26 + .../openstack/data/hot_flavor_defaults.yaml | 32 + .../openstack/data/hot_flavor_no_units.yaml | 33 + .../data/hot_image_after_processed_image.yaml | 16 + .../hot_image_before_processed_image.yaml 
| 9 + .../openstack/data/hot_openwrt.yaml | 26 + .../openstack/data/hot_openwrt_ipparams.yaml | 41 + .../openstack/data/hot_openwrt_params.yaml | 35 + .../openstack/data/hot_scale_custom.yaml | 25 + .../openstack/data/hot_scale_main.yaml | 30 + .../data/hot_tosca_alarm_metadata.yaml | 41 + .../data/hot_tosca_alarm_respawn.yaml | 42 + .../openstack/data/hot_tosca_alarm_scale.yaml | 53 + .../data/hot_tosca_allowed_address_pairs.yaml | 88 + .../data/hot_tosca_flavor_all_numa_count.yaml | 22 + .../data/hot_tosca_flavor_all_numa_nodes.yaml | 22 + .../hot_tosca_flavor_cpu_allocations.yaml | 22 + .../data/hot_tosca_flavor_huge_pages.yaml | 22 + .../data/hot_tosca_flavor_numa_nodes.yaml | 22 + .../hot_tosca_flavor_numa_nodes_count.yaml | 22 + .../data/hot_tosca_generic_vnfd_params.yaml | 45 + .../openstack/data/hot_tosca_image.yaml | 34 + .../openstack/data/hot_tosca_mac_ip.yaml | 37 + .../openstack/data/hot_tosca_mgmt_sriov.yaml | 28 + .../data/hot_tosca_monitoring_multi_vdu.yaml | 43 + .../openstack/data/hot_tosca_openwrt.yaml | 25 + .../data/hot_tosca_openwrt_kilo.yaml | 25 + .../data/hot_tosca_openwrt_userdata.yaml | 29 + .../data/hot_tosca_security_groups.yaml | 38 + .../openstack/data/hot_tosca_sriov.yaml | 28 + .../openstack/data/hot_tosca_vnfc.yaml | 36 + .../openstack/data/hot_tosca_vnic_normal.yaml | 28 + .../test_tosca_allowed_address_pairs.yaml | 105 ++ .../openstack/data/test_tosca_flavor.yaml | 44 + .../test_tosca_flavor_and_capabilities.yaml | 44 + .../data/test_tosca_flavor_defaults.yaml | 41 + .../data/test_tosca_flavor_no_units.yaml | 43 + .../openstack/data/test_tosca_image.yaml | 42 + .../openstack/data/test_tosca_mac_ip.yaml | 45 + .../openstack/data/test_tosca_meac.yaml | 39 + .../test_tosca_mead_alarm_multi_actions.yaml | 57 + .../data/test_tosca_mead_alarm_respawn.yaml | 57 + .../data/test_tosca_mead_alarm_scale.yaml | 67 + .../openstack/data/test_tosca_openwrt.yaml | 44 + .../data/test_tosca_openwrt_userdata.yaml | 49 + 
.../data/test_tosca_security_groups.yaml | 46 + .../openstack/data/tosca_alarm_metadata.yaml | 60 + .../openstack/data/tosca_alarm_respawn.yaml | 58 + .../openstack/data/tosca_alarm_scale.yaml | 78 + .../openstack/data/tosca_block_storage.yaml | 57 + .../data/tosca_flavor_all_numa_count.yaml | 36 + .../data/tosca_flavor_all_numa_nodes.yaml | 44 + .../data/tosca_flavor_cpu_allocations.yaml | 34 + .../data/tosca_flavor_huge_pages.yaml | 29 + .../data/tosca_flavor_numa_nodes.yaml | 37 + .../data/tosca_flavor_numa_nodes_count.yaml | 38 + .../data/tosca_generic_mead_params.yaml | 86 + .../openstack/data/tosca_mgmt_sriov.yaml | 57 + .../data/tosca_monitoring_multi_vdu.yaml | 74 + .../openstack/data/tosca_nsd_template.yaml | 38 + .../openstack/data/tosca_scale.yaml | 45 + .../openstack/data/tosca_sriov.yaml | 59 + .../openstack/data/tosca_vnic_port.yaml | 58 + .../openstack/data/update_config_data.yaml | 11 + .../infra_drivers/openstack/test_openstack.py | 463 +++++ .../openstack/test_openstack_driver.py | 41 + .../unit/mem/monitor_drivers/__init__.py | 0 .../mem/monitor_drivers/http_ping/__init__.py | 0 .../http_ping/test_http_ping.py | 56 + .../unit/mem/monitor_drivers/ping/__init__.py | 0 .../mem/monitor_drivers/ping/test_ping.py | 61 + apmec/tests/unit/mem/test_monitor.py | 130 ++ apmec/tests/unit/mem/test_plugin.py | 474 +++++ apmec/tests/unit/mem/test_vim_client.py | 39 + apmec/tests/unit/mem/tosca/__init__.py | 0 apmec/tests/unit/mem/tosca/test_utils.py | 268 +++ apmec/tests/unit/meo/__init__.py | 0 apmec/tests/unit/meo/drivers/__init__.py | 0 apmec/tests/unit/meo/drivers/vim/__init__.py | 0 .../meo/drivers/vim/test_openstack_driver.py | 257 +++ .../tests/unit/meo/drivers/vnffg/__init__.py | 0 .../meo/drivers/vnffg/sfc_drivers/__init__.py | 0 .../sfc_drivers/networking-sfc/__init__.py | 0 .../sfc_drivers/networking-sfc/test_n_sfc.py | 241 +++ .../unit/meo/drivers/workflow/__init__.py | 0 .../workflow/test_workflow_generator.py | 172 ++ 
apmec/tests/unit/meo/test_nfvo_plugin.py | 355 ++++ apmec/tests/unit/test_alarm_receiver.py | 60 + apmec/tests/unit/test_api_api_common.py | 94 + apmec/tests/unit/test_api_v2.py | 1449 +++++++++++++++ apmec/tests/unit/test_api_v2_extension.py | 114 ++ apmec/tests/unit/test_api_v2_resource.py | 318 ++++ apmec/tests/unit/test_attributes.py | 801 +++++++++ apmec/tests/unit/test_auth.py | 100 ++ apmec/tests/unit/test_common_log.py | 80 + .../tests/unit/test_common_services_plugin.py | 160 ++ apmec/tests/unit/test_common_utils.py | 38 + apmec/tests/unit/test_config.py | 47 + apmec/tests/unit/test_db_migration.py | 157 ++ apmec/tests/unit/test_db_purge_delete.py | 81 + .../unit/test_extension_extended_attribute.py | 117 ++ apmec/tests/unit/test_extensions.py | 546 ++++++ apmec/tests/unit/test_policy.py | 557 ++++++ apmec/tests/unit/test_post_mortem_debug.py | 99 + apmec/tests/unit/test_tacker_context.py | 141 ++ .../test_tosca_templates_under_samples.py | 91 + apmec/tests/unit/test_wsgi.py | 752 ++++++++ apmec/tests/unit/testlib_api.py | 82 + apmec/tests/utils.py | 22 + apmec/version.py | 17 + apmec/wsgi.py | 997 +++++++++++ babel.cfg | 2 + devstack/README.rst | 1 + devstack/lib/apmec | 482 +++++ devstack/local.conf.example | 62 + devstack/local.conf.standalone | 27 + devstack/local.sh.mysql_fix | 46 + devstack/plugin.sh | 69 + devstack/settings | 51 + devstack/vim_config.yaml | 6 + doc/Makefile | 96 + doc/source/_extra/.htaccess | 23 + doc/source/conf.py | 91 + doc/source/contributor/api/api_extensions.rst | 18 + doc/source/contributor/api/api_layer.rst | 60 + doc/source/contributor/api/mano_api.rst | 315 ++++ doc/source/contributor/dashboards.rst | 9 + doc/source/contributor/dev-process.rst | 63 + .../contributor/development.environment.rst | 48 + .../encrypt_vim_auth_with_barbican.rst | 147 ++ doc/source/contributor/event_logging.rst | 217 +++ doc/source/contributor/monitor-api.rst | 136 ++ .../contributor/policy_actions_framework.rst | 91 + 
doc/source/contributor/tacker_conductor.rst | 70 + .../contributor/tacker_functional_test.rst | 126 ++ .../contributor/tacker_vim_monitoring.rst | 124 ++ .../contributor/vnfd_template_description.rst | 632 +++++++ .../vnfd_template_parameterization.rst | 277 +++ doc/source/index.rst | 139 ++ doc/source/install/deploy_openwrt.rst | 179 ++ doc/source/install/devstack.rst | 65 + doc/source/install/getting_started.rst | 133 ++ doc/source/install/kolla.rst | 197 ++ doc/source/install/manual_installation.rst | 311 ++++ doc/source/install/openstack_nodes.png | Bin 0 -> 81038 bytes doc/source/install/openstack_role.png | Bin 0 -> 149316 bytes .../install/openstack_vim_installation.rst | 260 +++ .../reference/block_storage_usage_guide.rst | 125 ++ .../mistral_workflows_usage_guide.rst | 504 ++++++ .../user/alarm_monitoring_usage_guide.rst | 260 +++ ...hanced_placement_awareness_usage_guide.rst | 155 ++ doc/source/user/mea_component_usage_guide.rst | 58 + doc/source/user/mem_usage_guide.rst | 134 ++ doc/source/user/mesd_usage_guide.rst | 256 +++ doc/source/user/multisite_vim_usage_guide.rst | 154 ++ doc/source/user/scale_usage_guide.rst | 186 ++ etc/apmec/README.txt | 9 + etc/apmec/api-paste.ini | 33 + etc/apmec/policy.json | 10 + etc/apmec/rootwrap.conf | 34 + etc/apmec/rootwrap.d/apmec.filters | 10 + etc/config-generator.conf | 27 + etc/init.d/apmec-server | 68 + requirements.txt | 43 + samples/mistral/workflows/create_mea.yaml | 53 + samples/mistral/workflows/create_mead.yaml | 22 + samples/mistral/workflows/delete_mea.yaml | 16 + samples/mistral/workflows/delete_mead.yaml | 16 + .../mistral/workflows/input/create_mea.json | 11 + .../mistral/workflows/input/create_mead.json | 10 + .../mistral/workflows/input/delete_mea.json | 3 + .../mistral/workflows/input/delete_mead.json | 3 + .../evaluation/sample-tosca-mead1.yaml | 88 + .../evaluation/sample-tosca-mead2.yaml | 80 + .../evaluation/sample2-tosca-10app-apmec.yaml | 355 ++++ .../sample2-tosca-10app-tacker.yaml | 355 ++++ 
.../evaluation/sample2-tosca-15app-apmec.yaml | 524 ++++++ .../sample2-tosca-15app-tacker.yaml | 644 +++++++ .../evaluation/sample2-tosca-20app-apmec.yaml | 689 +++++++ .../sample2-tosca-20app-tacker.yaml | 689 +++++++ .../sample2-tosca-50app-tacker.yaml | 1595 +++++++++++++++++ .../evaluation/sample2-tosca-7app-apmec.yaml | 259 +++ .../evaluation/sample2-tosca-7app-tacker.yaml | 260 +++ .../evaluation/sample2-tosca-nsd-mec.yaml | 19 + .../evaluation/sample2-tosca-nsd.yaml | 23 + .../evaluation/sample2-tosca-vnfd-3app.yaml | 89 + .../evaluation/sample2-tosca-vnfd1.yaml | 55 + .../evaluation/sample2-tosca-vnfd2.yaml | 56 + .../evaluation/sample2-tosca-vnfd3.yaml | 57 + .../samples2-tosca-50app-apmec.yaml | 1594 ++++++++++++++++ .../evaluation/test_simple_mesd.yaml | 15 + .../tosca-templates/mead/test_tosca_meac.yaml | 40 + .../test_tosca_meac_multiple_servers.yaml | 63 + .../mead/tosca-config-openwrt-vrouter.yaml | 46 + .../tosca-config-openwrt-with-firewall.yaml | 97 + .../mead/tosca-mead-alarm-multi-actions.yaml | 57 + .../mead/tosca-mead-alarm-respawn.yaml | 57 + .../mead/tosca-mead-alarm-scale.yaml | 106 ++ .../mead/tosca-mead-block-attach.yaml | 57 + .../mead/tosca-mead-cpu-dedicate.yaml | 38 + .../mead/tosca-mead-hello-world.yaml | 76 + .../mead/tosca-mead-http-monitor.yaml | 84 + .../mead/tosca-mead-hugepages.yaml | 37 + .../mead/tosca-mead-image.yaml | 79 + .../mead/tosca-mead-keyname.yaml | 43 + .../mead/tosca-mead-mac-ip.yaml | 45 + .../mead/tosca-mead-monitor-multi-vdu.yaml | 193 ++ .../mead/tosca-mead-monitor.yaml | 85 + .../mead/tosca-mead-multi-vdu.yaml | 164 ++ .../mead/tosca-mead-network.yaml | 78 + .../mead/tosca-mead-nova-flavor.yaml | 71 + .../mead/tosca-mead-numacount.yaml | 37 + .../mead/tosca-mead-numadefine.yaml | 47 + .../mead/tosca-mead-openwrt.yaml | 84 + .../mead/tosca-mead-param-values.yaml | 10 + .../mead/tosca-mead-scale.yaml | 65 + .../mead/tosca-mead-secgroups.yaml | 45 + .../mead/tosca-mead-sriov.yaml | 48 + 
.../mead/tosca-mead-userdata.yaml | 47 + .../mead/tosca-mead-vcpu-topology.yaml | 40 + .../mead/tosca-mead-vdu-name.yaml | 84 + .../tosca-templates/mead/tosca-mead-vip.yaml | 94 + .../mead/tosca-mead-with-params.yaml | 101 ++ ...assign_fip_to_vdu_floating_ip_address.yaml | 45 + ...ad_assign_fip_to_vdu_floating_network.yaml | 44 + .../mesd/sample-tosca-mead1.yaml | 94 + .../mesd/sample-tosca-mead2.yaml | 89 + .../mesd/test_simple_mesd.yaml | 20 + samples/tosca-templates/nfv/vnfd1.yaml | 41 + samples/tosca-templates/nfv/vnffgd1.yaml | 30 + samples/vim/vim_config.yaml | 6 + setup.cfg | 120 ++ setup.py | 29 + test-requirements.txt | 25 + tools/check_i18n.py | 154 ++ tools/check_i18n_test_case.txt | 67 + tools/clean.sh | 5 + tools/generate_config_file_sample.sh | 26 + tools/i18n_cfg.py | 109 ++ tools/install_venv.py | 73 + tools/install_venv_common.py | 171 ++ tools/meac/build_image.sh | 26 + tools/ostestr_compat_shim.sh | 8 + tools/prepare_functional_test.sh | 20 + tools/test-setup.sh | 57 + tools/with_venv.sh | 20 + tox.ini | 102 ++ 520 files changed, 55216 insertions(+) create mode 100644 CONTRIBUTING.rst create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 README.md create mode 100644 README.rst create mode 100644 TESTING.rst create mode 100644 apmec/__init__.py create mode 100644 apmec/_i18n.py create mode 100644 apmec/agent/__init__.py create mode 100644 apmec/agent/linux/__init__.py create mode 100644 apmec/agent/linux/utils.py create mode 100644 apmec/alarm_receiver.py create mode 100644 apmec/api/__init__.py create mode 100644 apmec/api/api_common.py create mode 100644 apmec/api/extensions.py create mode 100644 apmec/api/v1/__init__.py create mode 100644 apmec/api/v1/attributes.py create mode 100644 apmec/api/v1/base.py create mode 100644 apmec/api/v1/resource.py create mode 100644 apmec/api/v1/resource_helper.py create mode 100644 apmec/api/v1/router.py create mode 100644 apmec/api/versions.py create mode 100644 
apmec/api/views/__init__.py create mode 100644 apmec/api/views/versions.py create mode 100644 apmec/auth.py create mode 100644 apmec/catalogs/__init__.py create mode 100644 apmec/catalogs/tosca/__init__.py create mode 100644 apmec/catalogs/tosca/lib/apmec_defs.yaml create mode 100644 apmec/catalogs/tosca/lib/apmec_mec_defs.yaml create mode 100644 apmec/catalogs/tosca/utils.py create mode 100644 apmec/cmd/__init__.py create mode 100644 apmec/cmd/eventlet/__init__.py create mode 100644 apmec/cmd/eventlet/apmec_server.py create mode 100644 apmec/cmd/eventlet/conductor.py create mode 100644 apmec/common/__init__.py create mode 100644 apmec/common/clients.py create mode 100644 apmec/common/cmd_executer.py create mode 100644 apmec/common/config.py create mode 100644 apmec/common/constants.py create mode 100644 apmec/common/driver_manager.py create mode 100644 apmec/common/eventlet_utils.py create mode 100644 apmec/common/exceptions.py create mode 100644 apmec/common/log.py create mode 100644 apmec/common/rpc.py create mode 100644 apmec/common/test_lib.py create mode 100644 apmec/common/topics.py create mode 100644 apmec/common/utils.py create mode 100644 apmec/conductor/__init__.py create mode 100644 apmec/conductor/conductor_server.py create mode 100644 apmec/conductor/conductorrpc/__init__.py create mode 100644 apmec/conductor/conductorrpc/vim_monitor_rpc.py create mode 100644 apmec/context.py create mode 100644 apmec/db/__init__.py create mode 100644 apmec/db/api.py create mode 100644 apmec/db/common_services/__init__.py create mode 100644 apmec/db/common_services/common_services_db.py create mode 100644 apmec/db/common_services/common_services_db_plugin.py create mode 100644 apmec/db/db_base.py create mode 100644 apmec/db/mem/__init__.py create mode 100644 apmec/db/mem/mem_db.py create mode 100644 apmec/db/meo/__init__.py create mode 100644 apmec/db/meo/meo_db.py create mode 100644 apmec/db/meo/meo_db_plugin.py create mode 100644 apmec/db/meo/mes_db.py create mode 
100644 apmec/db/migration/README create mode 100644 apmec/db/migration/__init__.py create mode 100644 apmec/db/migration/alembic.ini create mode 100644 apmec/db/migration/alembic_migrations/__init__.py create mode 100644 apmec/db/migration/alembic_migrations/env.py create mode 100644 apmec/db/migration/alembic_migrations/script.py.mako create mode 100644 apmec/db/migration/alembic_migrations/versions/000632983ada_add_template_source_column.py create mode 100644 apmec/db/migration/alembic_migrations/versions/0ad3bbce1c18_create_of_network_service_tables.py create mode 100644 apmec/db/migration/alembic_migrations/versions/0ad3bbce1c19_increase_vim_password_size.py create mode 100644 apmec/db/migration/alembic_migrations/versions/0ae5b1ce3024_unique_constraint_name.py create mode 100644 apmec/db/migration/alembic_migrations/versions/12a57080b277_add_service_db.py create mode 100644 apmec/db/migration/alembic_migrations/versions/12a57080b278_alter_devices.py create mode 100644 apmec/db/migration/alembic_migrations/versions/13c0e0661015_add_descrition_to_vnf.py create mode 100644 apmec/db/migration/alembic_migrations/versions/1c6b0d82afcd_servicevm_framework.py create mode 100644 apmec/db/migration/alembic_migrations/versions/22f5385a3d3f_add_status_to_vims.py create mode 100644 apmec/db/migration/alembic_migrations/versions/22f5385a3d4f_remove_proxydb.py create mode 100644 apmec/db/migration/alembic_migrations/versions/22f5385a3d50_rename_devicedb.py create mode 100644 apmec/db/migration/alembic_migrations/versions/24bec5f211c7_alter_value_in_deviceattributes.py create mode 100644 apmec/db/migration/alembic_migrations/versions/2774a42c7163_remove_service_related.py create mode 100644 apmec/db/migration/alembic_migrations/versions/2ff0a0e360f1_audit_support.py create mode 100644 apmec/db/migration/alembic_migrations/versions/31acbaeb8299_change_vim_shared_property_to_false.py create mode 100644 
apmec/db/migration/alembic_migrations/versions/354de64ba129_set_mandatory_columns_not_null.py create mode 100644 apmec/db/migration/alembic_migrations/versions/4c31092895b8_remove_service_instance.py create mode 100644 apmec/db/migration/alembic_migrations/versions/4ee19c8a6d0a_audit_support_events.py create mode 100644 apmec/db/migration/alembic_migrations/versions/507122918800_adds_vnffg.py create mode 100644 apmec/db/migration/alembic_migrations/versions/5246a6bd410f_multisite_vim.py create mode 100644 apmec/db/migration/alembic_migrations/versions/5958429bcb3c_modify_datatype_of_value.py create mode 100644 apmec/db/migration/alembic_migrations/versions/5f88e86b35c7_make_vnfd_vnf_vim_name_mandatory.py create mode 100644 apmec/db/migration/alembic_migrations/versions/6e56d4474b2a_blob_to_json_text.py create mode 100644 apmec/db/migration/alembic_migrations/versions/81ffa86020d_rpc_proxy.py create mode 100644 apmec/db/migration/alembic_migrations/versions/8f7145914cb0_remove_infra_driver_column.py create mode 100644 apmec/db/migration/alembic_migrations/versions/941b5a6fff9e_enable_soft_delete.py create mode 100644 apmec/db/migration/alembic_migrations/versions/HEAD create mode 100644 apmec/db/migration/alembic_migrations/versions/README create mode 100644 apmec/db/migration/alembic_migrations/versions/acf941e54075_add_error_reason_to_device.py create mode 100644 apmec/db/migration/alembic_migrations/versions/b07673bb8654_set_status_type_tenant_id_length.py create mode 100644 apmec/db/migration/alembic_migrations/versions/c256228ed37c_unique_constraint_on_name_and_id.py create mode 100644 apmec/db/migration/alembic_migrations/versions/c7cde2f45f82_set_description_to_text.py create mode 100644 apmec/db/migration/alembic_migrations/versions/d4f265e8eb9d_add_default_to_vim.py create mode 100644 apmec/db/migration/alembic_migrations/versions/e7993093baf1_add_unique_constraint_on_deleted_at.py create mode 100644 
apmec/db/migration/alembic_migrations/versions/e8918cda6433_add_attributes_to_vnffg.py create mode 100644 apmec/db/migration/alembic_migrations/versions/e9a1e47fb0b5_add_template_source_status_to_vnffgtemplate_and_nsd.py create mode 100644 apmec/db/migration/alembic_migrations/versions/ef14f8026327_add_default_onboarded_template_source.py create mode 100644 apmec/db/migration/alembic_migrations/versions/f5c1c3b0f6b4_set_default_value_for_deleted_at.py create mode 100644 apmec/db/migration/alembic_migrations/versions/f958f58e5daa_uuid_consistency.py create mode 100644 apmec/db/migration/cli.py create mode 100644 apmec/db/migration/models/__init__.py create mode 100644 apmec/db/migration/models/head.py create mode 100644 apmec/db/migration/purge_tables.py create mode 100644 apmec/db/model_base.py create mode 100644 apmec/db/models_v1.py create mode 100644 apmec/db/sqlalchemyutils.py create mode 100644 apmec/db/types.py create mode 100644 apmec/extensions/__init__.py create mode 100644 apmec/extensions/common_services.py create mode 100644 apmec/extensions/mem.py create mode 100644 apmec/extensions/meo.py create mode 100644 apmec/extensions/meo_plugins/__init__.py create mode 100644 apmec/extensions/meo_plugins/edge_service.py create mode 100644 apmec/hacking/__init__.py create mode 100644 apmec/hacking/checks.py create mode 100644 apmec/keymgr/__init__.py create mode 100644 apmec/keymgr/barbican_key_manager.py create mode 100644 apmec/keymgr/exception.py create mode 100644 apmec/keymgr/key_manager.py create mode 100644 apmec/locale/es/LC_MESSAGES/apmec.po create mode 100644 apmec/manager.py create mode 100644 apmec/mem/__init__.py create mode 100644 apmec/mem/infra_drivers/__init__.py create mode 100644 apmec/mem/infra_drivers/abstract_driver.py create mode 100644 apmec/mem/infra_drivers/noop.py create mode 100644 apmec/mem/infra_drivers/openstack/__init__.py create mode 100644 apmec/mem/infra_drivers/openstack/heat_client.py create mode 100644 
apmec/mem/infra_drivers/openstack/openstack.py create mode 100644 apmec/mem/infra_drivers/openstack/translate_template.py create mode 100644 apmec/mem/infra_drivers/scale_driver.py create mode 100644 apmec/mem/keystone.py create mode 100644 apmec/mem/mgmt_drivers/__init__.py create mode 100644 apmec/mem/mgmt_drivers/abstract_driver.py create mode 100644 apmec/mem/mgmt_drivers/constants.py create mode 100644 apmec/mem/mgmt_drivers/noop.py create mode 100644 apmec/mem/mgmt_drivers/openwrt/__init__.py create mode 100644 apmec/mem/mgmt_drivers/openwrt/openwrt.py create mode 100644 apmec/mem/monitor.py create mode 100644 apmec/mem/monitor_drivers/__init__.py create mode 100644 apmec/mem/monitor_drivers/abstract_driver.py create mode 100644 apmec/mem/monitor_drivers/ceilometer/__init__.py create mode 100644 apmec/mem/monitor_drivers/ceilometer/ceilometer.py create mode 100644 apmec/mem/monitor_drivers/http_ping/__init__.py create mode 100644 apmec/mem/monitor_drivers/http_ping/http_ping.py create mode 100644 apmec/mem/monitor_drivers/ping/__init__.py create mode 100644 apmec/mem/monitor_drivers/ping/ping.py create mode 100644 apmec/mem/monitor_drivers/token.py create mode 100644 apmec/mem/plugin.py create mode 100644 apmec/mem/policy_actions/__init__.py create mode 100644 apmec/mem/policy_actions/abstract_action.py create mode 100644 apmec/mem/policy_actions/autoscaling/__init__.py create mode 100644 apmec/mem/policy_actions/autoscaling/autoscaling.py create mode 100644 apmec/mem/policy_actions/log/__init__.py create mode 100644 apmec/mem/policy_actions/log/log.py create mode 100644 apmec/mem/policy_actions/respawn/__init__.py create mode 100644 apmec/mem/policy_actions/respawn/respawn.py create mode 100644 apmec/mem/vim_client.py create mode 100644 apmec/meo/__init__.py create mode 100644 apmec/meo/drivers/__init__.py create mode 100644 apmec/meo/drivers/vim/__init__.py create mode 100644 apmec/meo/drivers/vim/abstract_vim_driver.py create mode 100644 
apmec/meo/drivers/vim/openstack_driver.py create mode 100644 apmec/meo/drivers/workflow/__init__.py create mode 100644 apmec/meo/drivers/workflow/workflow_generator.py create mode 100644 apmec/meo/meo_plugin.py create mode 100644 apmec/meo/workflows/__init__.py create mode 100644 apmec/meo/workflows/vim_monitor/__init__.py create mode 100644 apmec/meo/workflows/vim_monitor/vim_monitor_utils.py create mode 100644 apmec/meo/workflows/vim_monitor/vim_ping_action.py create mode 100644 apmec/meo/workflows/vim_monitor/workflow_generator.py create mode 100644 apmec/mistral/__init__.py create mode 100644 apmec/mistral/actionrpc/__init__.py create mode 100644 apmec/mistral/actionrpc/kill_action.py create mode 100644 apmec/mistral/mistral_client.py create mode 100644 apmec/mistral/workflow_generator.py create mode 100644 apmec/nfv/__init__.py create mode 100644 apmec/nfv/tacker_client.py create mode 100644 apmec/plugins/__init__.py create mode 100644 apmec/plugins/common/__init__.py create mode 100644 apmec/plugins/common/constants.py create mode 100644 apmec/plugins/common/utils.py create mode 100644 apmec/plugins/common_services/__init__.py create mode 100644 apmec/plugins/common_services/common_services_plugin.py create mode 100644 apmec/policy.py create mode 100644 apmec/releasenotes/notes/remove-passing-infra-and-mgmt-driver-in-api-954fe28b1294a2d6.yaml create mode 100644 apmec/service.py create mode 100644 apmec/services/__init__.py create mode 100644 apmec/services/service_base.py create mode 100644 apmec/tests/__init__.py create mode 100644 apmec/tests/base.py create mode 100644 apmec/tests/constants.py create mode 100644 apmec/tests/contrib/README create mode 100755 apmec/tests/contrib/post_test_hook.sh create mode 100644 apmec/tests/contrib/post_test_hook_lib.sh create mode 100644 apmec/tests/etc/api-paste.ini.test create mode 100644 apmec/tests/etc/apmec.conf.test create mode 100644 apmec/tests/etc/rootwrap.d/apmec.test.filters create mode 100644 
apmec/tests/etc/samples/install_vnfc.sh create mode 100644 apmec/tests/etc/samples/local-vim.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-alarm-respawn.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-alarm-scale.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mea-values.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-block-storage.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-flavor.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-image.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-large-template.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-monitor.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-multi-vdu-monitoring.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-multi-vdu.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-no-monitor.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-param.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead-static-ip.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-mead.yaml create mode 100644 apmec/tests/etc/samples/sample-tosca-scale-all.yaml create mode 100644 apmec/tests/etc/samples/sample_tosca_assign_floatingip_to_vdu.yaml create mode 100644 apmec/tests/etc/samples/sample_tosca_meac.yaml create mode 100644 apmec/tests/etc/samples/test-ns-nsd.yaml create mode 100644 apmec/tests/etc/samples/test-ns-vnfd1.yaml create mode 100644 apmec/tests/etc/samples/test-ns-vnfd2.yaml create mode 100644 apmec/tests/etc/samples/test-nsd-vnfd1.yaml create mode 100644 apmec/tests/etc/samples/test-nsd-vnfd2.yaml create mode 100644 apmec/tests/etc/samples/test-nsd.yaml create mode 100644 apmec/tests/fake_notifier.py create mode 100644 apmec/tests/functional/__init__.py create mode 100644 apmec/tests/functional/base.py create mode 100644 apmec/tests/functional/clients.py create mode 100644 
apmec/tests/functional/keystone.py create mode 100644 apmec/tests/functional/mem/__init__.py create mode 100644 apmec/tests/functional/mem/test_mea.py create mode 100644 apmec/tests/functional/mem/test_mea_monitoring.py create mode 100644 apmec/tests/functional/mem/test_mem_param.py create mode 100644 apmec/tests/functional/mem/test_tosca_mea.py create mode 100644 apmec/tests/functional/mem/test_tosca_mea_alarm.py create mode 100644 apmec/tests/functional/mem/test_tosca_mea_block_storage.py create mode 100644 apmec/tests/functional/mem/test_tosca_mea_floatingip.py create mode 100644 apmec/tests/functional/mem/test_tosca_mea_multiple_vdu.py create mode 100644 apmec/tests/functional/mem/test_tosca_mea_scale.py create mode 100644 apmec/tests/functional/mem/test_tosca_meac.py create mode 100644 apmec/tests/functional/mem/test_tosca_mead.py create mode 100644 apmec/tests/functional/meo/__init__.py create mode 100644 apmec/tests/functional/meo/test_meo.py create mode 100644 apmec/tests/functional/meo/test_vim.py create mode 100644 apmec/tests/post_mortem_debug.py create mode 100644 apmec/tests/tools.py create mode 100644 apmec/tests/unit/__init__.py create mode 100644 apmec/tests/unit/_test_rootwrap_exec.py create mode 100644 apmec/tests/unit/base.py create mode 100644 apmec/tests/unit/database_stubs.py create mode 100644 apmec/tests/unit/db/__init__.py create mode 100644 apmec/tests/unit/db/base.py create mode 100644 apmec/tests/unit/db/utils.py create mode 100644 apmec/tests/unit/extension_stubs.py create mode 100644 apmec/tests/unit/extensions/__init__.py create mode 100644 apmec/tests/unit/extensions/extendedattribute.py create mode 100644 apmec/tests/unit/extensions/extensionattribute.py create mode 100644 apmec/tests/unit/extensions/foxinsocks.py create mode 100644 apmec/tests/unit/extensions/v2attributes.py create mode 100644 apmec/tests/unit/mem/__init__.py create mode 100644 apmec/tests/unit/mem/infra_drivers/__init__.py create mode 100644 
apmec/tests/unit/mem/infra_drivers/openstack/__init__.py create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/config_data.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_alarm_scale_custom.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_and_capabilities.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_defaults.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_no_units.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_image_after_processed_image.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_image_before_processed_image.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt_ipparams.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt_params.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_scale_custom.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_scale_main.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_metadata.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_respawn.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_scale.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_allowed_address_pairs.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_count.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_nodes.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_cpu_allocations.yaml 
create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_huge_pages.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes_count.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_generic_vnfd_params.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_image.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_mac_ip.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_mgmt_sriov.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_monitoring_multi_vdu.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt_kilo.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt_userdata.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_security_groups.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_sriov.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_vnfc.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_vnic_normal.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_allowed_address_pairs.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_and_capabilities.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_defaults.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_no_units.yaml create mode 100644 
apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_image.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mac_ip.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_meac.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_multi_actions.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_respawn.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_scale.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_openwrt.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_openwrt_userdata.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_security_groups.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_metadata.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_respawn.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_scale.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_block_storage.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_all_numa_count.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_all_numa_nodes.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_cpu_allocations.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_huge_pages.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_numa_nodes.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_numa_nodes_count.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_generic_mead_params.yaml create mode 100644 
apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_mgmt_sriov.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_monitoring_multi_vdu.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_nsd_template.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_scale.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_sriov.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_vnic_port.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/data/update_config_data.yaml create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/test_openstack.py create mode 100644 apmec/tests/unit/mem/infra_drivers/openstack/test_openstack_driver.py create mode 100644 apmec/tests/unit/mem/monitor_drivers/__init__.py create mode 100644 apmec/tests/unit/mem/monitor_drivers/http_ping/__init__.py create mode 100644 apmec/tests/unit/mem/monitor_drivers/http_ping/test_http_ping.py create mode 100644 apmec/tests/unit/mem/monitor_drivers/ping/__init__.py create mode 100644 apmec/tests/unit/mem/monitor_drivers/ping/test_ping.py create mode 100644 apmec/tests/unit/mem/test_monitor.py create mode 100644 apmec/tests/unit/mem/test_plugin.py create mode 100644 apmec/tests/unit/mem/test_vim_client.py create mode 100644 apmec/tests/unit/mem/tosca/__init__.py create mode 100644 apmec/tests/unit/mem/tosca/test_utils.py create mode 100644 apmec/tests/unit/meo/__init__.py create mode 100644 apmec/tests/unit/meo/drivers/__init__.py create mode 100644 apmec/tests/unit/meo/drivers/vim/__init__.py create mode 100644 apmec/tests/unit/meo/drivers/vim/test_openstack_driver.py create mode 100644 apmec/tests/unit/meo/drivers/vnffg/__init__.py create mode 100644 apmec/tests/unit/meo/drivers/vnffg/sfc_drivers/__init__.py create mode 100644 apmec/tests/unit/meo/drivers/vnffg/sfc_drivers/networking-sfc/__init__.py create mode 100644 
apmec/tests/unit/meo/drivers/vnffg/sfc_drivers/networking-sfc/test_n_sfc.py create mode 100644 apmec/tests/unit/meo/drivers/workflow/__init__.py create mode 100644 apmec/tests/unit/meo/drivers/workflow/test_workflow_generator.py create mode 100644 apmec/tests/unit/meo/test_nfvo_plugin.py create mode 100644 apmec/tests/unit/test_alarm_receiver.py create mode 100644 apmec/tests/unit/test_api_api_common.py create mode 100644 apmec/tests/unit/test_api_v2.py create mode 100644 apmec/tests/unit/test_api_v2_extension.py create mode 100644 apmec/tests/unit/test_api_v2_resource.py create mode 100644 apmec/tests/unit/test_attributes.py create mode 100644 apmec/tests/unit/test_auth.py create mode 100644 apmec/tests/unit/test_common_log.py create mode 100644 apmec/tests/unit/test_common_services_plugin.py create mode 100644 apmec/tests/unit/test_common_utils.py create mode 100644 apmec/tests/unit/test_config.py create mode 100644 apmec/tests/unit/test_db_migration.py create mode 100644 apmec/tests/unit/test_db_purge_delete.py create mode 100644 apmec/tests/unit/test_extension_extended_attribute.py create mode 100644 apmec/tests/unit/test_extensions.py create mode 100644 apmec/tests/unit/test_policy.py create mode 100644 apmec/tests/unit/test_post_mortem_debug.py create mode 100644 apmec/tests/unit/test_tacker_context.py create mode 100644 apmec/tests/unit/test_tosca_templates_under_samples.py create mode 100644 apmec/tests/unit/test_wsgi.py create mode 100644 apmec/tests/unit/testlib_api.py create mode 100644 apmec/tests/utils.py create mode 100644 apmec/version.py create mode 100644 apmec/wsgi.py create mode 100644 babel.cfg create mode 100644 devstack/README.rst create mode 100644 devstack/lib/apmec create mode 100644 devstack/local.conf.example create mode 100644 devstack/local.conf.standalone create mode 100755 devstack/local.sh.mysql_fix create mode 100644 devstack/plugin.sh create mode 100644 devstack/settings create mode 100644 devstack/vim_config.yaml create mode 
100644 doc/Makefile create mode 100644 doc/source/_extra/.htaccess create mode 100644 doc/source/conf.py create mode 100644 doc/source/contributor/api/api_extensions.rst create mode 100644 doc/source/contributor/api/api_layer.rst create mode 100644 doc/source/contributor/api/mano_api.rst create mode 100644 doc/source/contributor/dashboards.rst create mode 100644 doc/source/contributor/dev-process.rst create mode 100644 doc/source/contributor/development.environment.rst create mode 100644 doc/source/contributor/encrypt_vim_auth_with_barbican.rst create mode 100644 doc/source/contributor/event_logging.rst create mode 100644 doc/source/contributor/monitor-api.rst create mode 100644 doc/source/contributor/policy_actions_framework.rst create mode 100644 doc/source/contributor/tacker_conductor.rst create mode 100644 doc/source/contributor/tacker_functional_test.rst create mode 100644 doc/source/contributor/tacker_vim_monitoring.rst create mode 100644 doc/source/contributor/vnfd_template_description.rst create mode 100644 doc/source/contributor/vnfd_template_parameterization.rst create mode 100644 doc/source/index.rst create mode 100644 doc/source/install/deploy_openwrt.rst create mode 100644 doc/source/install/devstack.rst create mode 100644 doc/source/install/getting_started.rst create mode 100644 doc/source/install/kolla.rst create mode 100644 doc/source/install/manual_installation.rst create mode 100644 doc/source/install/openstack_nodes.png create mode 100644 doc/source/install/openstack_role.png create mode 100644 doc/source/install/openstack_vim_installation.rst create mode 100644 doc/source/reference/block_storage_usage_guide.rst create mode 100644 doc/source/reference/mistral_workflows_usage_guide.rst create mode 100644 doc/source/user/alarm_monitoring_usage_guide.rst create mode 100644 doc/source/user/enhanced_placement_awareness_usage_guide.rst create mode 100644 doc/source/user/mea_component_usage_guide.rst create mode 100644 
doc/source/user/mem_usage_guide.rst create mode 100644 doc/source/user/mesd_usage_guide.rst create mode 100644 doc/source/user/multisite_vim_usage_guide.rst create mode 100644 doc/source/user/scale_usage_guide.rst create mode 100644 etc/apmec/README.txt create mode 100644 etc/apmec/api-paste.ini create mode 100644 etc/apmec/policy.json create mode 100644 etc/apmec/rootwrap.conf create mode 100644 etc/apmec/rootwrap.d/apmec.filters create mode 100644 etc/config-generator.conf create mode 100644 etc/init.d/apmec-server create mode 100644 requirements.txt create mode 100644 samples/mistral/workflows/create_mea.yaml create mode 100644 samples/mistral/workflows/create_mead.yaml create mode 100644 samples/mistral/workflows/delete_mea.yaml create mode 100644 samples/mistral/workflows/delete_mead.yaml create mode 100644 samples/mistral/workflows/input/create_mea.json create mode 100644 samples/mistral/workflows/input/create_mead.json create mode 100644 samples/mistral/workflows/input/delete_mea.json create mode 100644 samples/mistral/workflows/input/delete_mead.json create mode 100644 samples/tosca-templates/evaluation/sample-tosca-mead1.yaml create mode 100644 samples/tosca-templates/evaluation/sample-tosca-mead2.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-10app-apmec.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-10app-tacker.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-15app-apmec.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-15app-tacker.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-20app-apmec.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-20app-tacker.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-50app-tacker.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-7app-apmec.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-7app-tacker.yaml 
create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-nsd-mec.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-nsd.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-vnfd-3app.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-vnfd1.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-vnfd2.yaml create mode 100644 samples/tosca-templates/evaluation/sample2-tosca-vnfd3.yaml create mode 100644 samples/tosca-templates/evaluation/samples2-tosca-50app-apmec.yaml create mode 100644 samples/tosca-templates/evaluation/test_simple_mesd.yaml create mode 100644 samples/tosca-templates/mead/test_tosca_meac.yaml create mode 100644 samples/tosca-templates/mead/test_tosca_meac_multiple_servers.yaml create mode 100644 samples/tosca-templates/mead/tosca-config-openwrt-vrouter.yaml create mode 100644 samples/tosca-templates/mead/tosca-config-openwrt-with-firewall.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-alarm-multi-actions.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-alarm-respawn.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-alarm-scale.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-block-attach.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-cpu-dedicate.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-hello-world.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-http-monitor.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-hugepages.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-image.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-keyname.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-mac-ip.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-monitor-multi-vdu.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-monitor.yaml create mode 100644 
samples/tosca-templates/mead/tosca-mead-multi-vdu.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-network.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-nova-flavor.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-numacount.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-numadefine.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-openwrt.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-param-values.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-scale.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-secgroups.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-sriov.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-userdata.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-vcpu-topology.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-vdu-name.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-vip.yaml create mode 100644 samples/tosca-templates/mead/tosca-mead-with-params.yaml create mode 100644 samples/tosca-templates/mead/tosca_mead_assign_fip_to_vdu_floating_ip_address.yaml create mode 100644 samples/tosca-templates/mead/tosca_mead_assign_fip_to_vdu_floating_network.yaml create mode 100644 samples/tosca-templates/mesd/sample-tosca-mead1.yaml create mode 100644 samples/tosca-templates/mesd/sample-tosca-mead2.yaml create mode 100644 samples/tosca-templates/mesd/test_simple_mesd.yaml create mode 100644 samples/tosca-templates/nfv/vnfd1.yaml create mode 100644 samples/tosca-templates/nfv/vnffgd1.yaml create mode 100644 samples/vim/vim_config.yaml create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 test-requirements.txt create mode 100644 tools/check_i18n.py create mode 100644 tools/check_i18n_test_case.txt create mode 100755 tools/clean.sh create mode 100755 tools/generate_config_file_sample.sh create mode 100644 tools/i18n_cfg.py create mode 100644 
tools/install_venv.py create mode 100644 tools/install_venv_common.py create mode 100755 tools/meac/build_image.sh create mode 100755 tools/ostestr_compat_shim.sh create mode 100755 tools/prepare_functional_test.sh create mode 100755 tools/test-setup.sh create mode 100755 tools/with_venv.sh create mode 100644 tox.ini diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000..8e20be3 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,16 @@ +If you would like to contribute to the development of OpenStack, +you must follow the steps in this page: + + https://docs.openstack.org/infra/manual/developers.html + +Once those steps have been completed, changes to OpenStack +should be submitted for review via the Gerrit tool, following +the workflow documented at: + + https://docs.openstack.org/infra/manual/developers.html#development-workflow + +Pull requests submitted through GitHub will be ignored. + +Bugs should be filed on Launchpad, not GitHub: + + https://bugs.launchpad.net/apmec diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 0000000..f26f0d4 --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,19 @@ +Apmec Style Commandments +========================= + +- Step 1: Read the OpenStack Style Commandments + https://docs.openstack.org/hacking/latest/ +- Step 2: Read on + +Apmec Specific Commandments +---------------------------- + +- [N320] Validate that LOG messages, except debug ones, have translations + +Creating Unit Tests +------------------- +For every new feature, unit tests should be created that both test and +(implicitly) document the usage of said feature. If submitting a patch for a +bug that had no unit test, a new passing unit test should be added. If a +submitted bug fix does have a unit test, be sure to add a new one that fails +without the patch and passes with the patch. 
diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..7228656
--- /dev/null
+++ b/README.md
@@ -0,0 +1,35 @@
+# apmec
+This project aims at building an automated platform for Mobile Edge Cloud (MEC) based on OpenStack
+
+
+The objective of APMEC is to:
+
+- manage the lifecycle of MEC applications including "create/update/delete"
+
+- monitor MEC applications
+
+- scale in/out MEC applications
+
+- provide advanced features like live migration, state management, and fast data processing
+
+- tightly integrate with OpenStack projects like Apmec (MEC Orchestrator)
+
+
+This project is still under active development; please take note of the copyright and license terms before reusing the code.
+
+
+
+**Taxonomy**:
+
+
+MEP: Mobile Edge Platform
+
+MEM: Mobile Edge Manager
+
+MEO: Mobile Edge Orchestrator
+
+MEA: Mobile Edge Application
+
+MEAD: MEA Descriptor
+
+Author: Tung Doan
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..8cc437f
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,41 @@
+Welcome!
+========
+
+APMEC is an OpenStack based MEC Orchestrator service with a built-in general
+purpose MEC Manager to deploy and operate MEC applications (MEAs)
+on an OpenStack based MEC Platform. It is based on the ETSI MEC Architectural
+Framework and provides a full functional stack to orchestrate MEC applications.
+
+Installation:
+=============
+
+Installation instructions:
+https://wiki.openstack.org/wiki/apmec/Installation
+**Add wiki comments here**
+
+APMEC code base now supports OpenStack master and pike releases. Please
+follow the instructions in the above wiki for a successful installation of
+the corresponding release.
+
+Code:
+=====
+
+APMEC code is available in the following repositories:
+
+* **APMEC server:** https://git.openstack.org/cgit/openstack/apmec
+* **APMEC Python client:** https://git.openstack.org/cgit/openstack/python-apmecclient
+* **APMEC Horizon UI:** https://git.openstack.org/cgit/openstack/apmec-gui
+
+Bugs:
+=====
+
+Please report bugs at: https://bugs.launchpad.net/apmec
+
+External Resources:
+===================
+
+MEC Wiki:
+https://wiki.openstack.org/wiki/apmec
+
+For help on usage and hacking of APMEC, please send mail to
+openstack-dev@lists.openstack.org with the [APMEC] tag.
diff --git a/TESTING.rst b/TESTING.rst
new file mode 100644
index 0000000..8016111
--- /dev/null
+++ b/TESTING.rst
@@ -0,0 +1,130 @@
+Testing Apmec
+==============
+
+Overview
+--------
+
+The unit tests are meant to cover as much code as possible and should
+be executed without the service running. They are designed to test
+the various pieces of the apmec tree to make sure any new changes
+don't break existing functionality.
+
+The functional tests are intended to validate actual system
+interaction. Mocks should be used sparingly, if at all. Care
+should be taken to ensure that existing system resources are not
+modified and that resources created in tests are properly cleaned
+up.
+
+Development process
+-------------------
+
+It is expected that any new changes that are proposed for merge
+come with tests for that feature or code area. Ideally any bug
+fixes that are submitted also have tests to prove that they stay
+fixed! In addition, before proposing for merge, all of the
+current tests should be passing.
+
+Running unit tests
+------------------
+
+There are two mechanisms for running tests: tox and nose. Before
+submitting a patch for review you should always ensure all tests pass;
+a tox run is triggered by the jenkins gate executed on gerrit for
+each patch pushed for review.
+
+With these mechanisms you can either run the tests in the standard
+environment or create a virtual environment to run them in.
+ +By default after running all of the tests, any pep8 errors +found in the tree will be reported. + +Note that the tests can use a database, see ``tools/tests-setup.sh`` +on how the databases are set up in the OpenStack CI environment. + +With `nose` +~~~~~~~~~~~ + +You can use `nose`_ to run individual tests, as well as use for debugging +portions of your code:: + + source .venv/bin/activate + pip install nose + nosetests + +There are disadvantages to running Nose - the tests are run sequentially, so +race condition bugs will not be triggered, and the full test suite will +take significantly longer than tox & testr. The upside is that testr has +some rough edges when it comes to diagnosing errors and failures, and there is +no easy way to set a breakpoint in the Apmec code, and enter an +interactive debugging session while using testr. + +.. _nose: https://nose.readthedocs.org/en/latest/index.html + +With `tox` +~~~~~~~~~~ + +Apmec, like other OpenStack projects, uses `tox`_ for managing the virtual +environments for running test cases. It uses `Testr`_ for managing the running +of the test cases. + +Tox handles the creation of a series of `virtualenvs`_ that target specific +versions of Python (2.7, 3.5, etc). + +Testr handles the parallel execution of series of test cases as well as +the tracking of long-running tests and other things. + +Running unit tests is as easy as executing this in the root directory of the +Apmec source code:: + + tox + +For more information on the standard Tox-based test infrastructure used by +OpenStack and how to do some common test/debugging procedures with Testr, +see this wiki page: + + https://wiki.openstack.org/wiki/Testr + +.. _Testr: https://wiki.openstack.org/wiki/Testr +.. _tox: http://tox.readthedocs.org/en/latest/ +.. 
_virtualenvs: https://pypi.python.org/pypi/virtualenv + + +Running individual tests +~~~~~~~~~~~~~~~~~~~~~~~~ + +For running individual test modules or cases, you just need to pass +the dot-separated path to the module you want as an argument to it. + +For executing a specific test case, specify the name of the test case +class separating it from the module path with a colon. + +For example, the following would run only the TestMemPlugin tests from +apmec/tests/unit/vm/test_plugin.py:: + + $ ./tox apmec.tests.unit.vm.test_plugin:TestMemPlugin + +Debugging +--------- + +It's possible to debug tests in a tox environment:: + + $ tox -e venv -- python -m testtools.run [test module path] + +Tox-created virtual environments (venv's) can also be activated +after a tox run and reused for debugging:: + + $ tox -e venv + $ . .tox/venv/bin/activate + $ python -m testtools.run [test module path] + +Tox packages and installs the apmec source tree in a given venv +on every invocation, but if modifications need to be made between +invocation (e.g. adding more pdb statements), it is recommended +that the source tree be installed in the venv in editable mode:: + + # run this only after activating the venv + $ pip install --editable . + +Editable mode ensures that changes made to the source tree are +automatically reflected in the venv, and that such changes are not +overwritten during the next tox run. diff --git a/apmec/__init__.py b/apmec/__init__.py new file mode 100644 index 0000000..5e33bac --- /dev/null +++ b/apmec/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import gettext + +import six + +if six.PY2: + gettext.install('apmec', unicode=1) +else: + gettext.install('apmec') diff --git a/apmec/_i18n.py b/apmec/_i18n.py new file mode 100644 index 0000000..c90233f --- /dev/null +++ b/apmec/_i18n.py @@ -0,0 +1,24 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import oslo_i18n + +_translators = oslo_i18n.TranslatorFactory(domain='apmec') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + + +def enable_lazy(enable=True): + return oslo_i18n.enable_lazy(enable) diff --git a/apmec/agent/__init__.py b/apmec/agent/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/agent/linux/__init__.py b/apmec/agent/linux/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/agent/linux/utils.py b/apmec/agent/linux/utils.py new file mode 100644 index 0000000..15c1823 --- /dev/null +++ b/apmec/agent/linux/utils.py @@ -0,0 +1,130 @@ +# Copyright 2012 Locaweb. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fcntl +import os +import shlex +import socket +import struct +import tempfile + +from eventlet.green import subprocess +from eventlet import greenthread +from oslo_log import log as logging +from oslo_utils import excutils + +from apmec.common import utils + + +LOG = logging.getLogger(__name__) + + +def create_process(cmd, root_helper=None, addl_env=None, + debuglog=True): + """Create a process object for the given command. + + The return value will be a tuple of the process object and the + list of command arguments used to create it. 
+ """ + if root_helper: + cmd = shlex.split(root_helper) + cmd + cmd = map(str, cmd) + + if debuglog: + LOG.debug("Running command: %s", cmd) + env = os.environ.copy() + if addl_env: + env.update(addl_env) + + obj = utils.subprocess_popen(cmd, shell=False, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + + return obj, cmd + + +def execute(cmd, root_helper=None, process_input=None, addl_env=None, + check_exit_code=True, return_stderr=False, debuglog=True): + # Note(gongysh) not use log_levels in config file because + # some other codes that are not in a loop probably need the debug log + try: + obj, cmd = create_process(cmd, root_helper=root_helper, + addl_env=addl_env, debuglog=debuglog) + _stdout, _stderr = (process_input and + obj.communicate(process_input) or + obj.communicate()) + obj.stdin.close() + m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n" + "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode, + 'stdout': _stdout, 'stderr': _stderr} + if obj.returncode: + LOG.error(m) + if check_exit_code: + raise RuntimeError(m) + elif debuglog: + LOG.debug(m) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) + + return return_stderr and (_stdout, _stderr) or _stdout + + +def get_interface_mac(interface): + DEVICE_NAME_LEN = 15 + MAC_START = 18 + MAC_END = 24 + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + info = fcntl.ioctl(s.fileno(), 0x8927, + struct.pack('256s', interface[:DEVICE_NAME_LEN])) + return ''.join(['%02x:' % ord(char) + for char in info[MAC_START:MAC_END]])[:-1] + + +def replace_file(file_name, data): + """Replaces the contents of file_name with data in a safe manner. + + First write to a temp file and then rename. Since POSIX renames are + atomic, the file is unlikely to be corrupted by competing writes. 
+ + We create the tempfile on the same device to ensure that it can be renamed. + """ + + base_dir = os.path.dirname(os.path.abspath(file_name)) + tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False) + tmp_file.write(data) + tmp_file.close() + os.chmod(tmp_file.name, 0o644) + os.rename(tmp_file.name, file_name) + + +def find_child_pids(pid): + """Retrieve a list of the pids of child processes of the given pid.""" + + try: + raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid=']) + except RuntimeError as e: + # Unexpected errors are the responsibility of the caller + with excutils.save_and_reraise_exception() as ctxt: + # Exception has already been logged by execute + no_children_found = 'Exit code: 1' in str(e) + if no_children_found: + ctxt.reraise = False + return [] + return [x.strip() for x in raw_pids.split('\n') if x.strip()] diff --git a/apmec/alarm_receiver.py b/apmec/alarm_receiver.py new file mode 100644 index 0000000..8fdea8d --- /dev/null +++ b/apmec/alarm_receiver.py @@ -0,0 +1,94 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from oslo_config import cfg +from oslo_log import log as logging +from oslo_serialization import jsonutils +from six.moves.urllib import parse +from apmec.mem.monitor_drivers.token import Token +from apmec import wsgi +# check alarm url with db --> move to plugin + + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('username', default='admin', + help=_('User name for alarm monitoring')), + cfg.StrOpt('password', default='devstack', + help=_('password for alarm monitoring')), + cfg.StrOpt('project_name', default='admin', + help=_('project name for alarm monitoring')), +] + +cfg.CONF.register_opts(OPTS, 'alarm_auth') + + +def config_opts(): + return [('alarm_auth', OPTS)] + + +class AlarmReceiver(wsgi.Middleware): + def process_request(self, req): + LOG.debug('Process request: %s', req) + if req.method != 'POST': + return + url = req.url + if not self.handle_url(url): + return + prefix, info, params = self.handle_url(req.url) + auth = cfg.CONF.keystone_authtoken + token = Token(username=cfg.CONF.alarm_auth.username, + password=cfg.CONF.alarm_auth.password, + project_name=cfg.CONF.alarm_auth.project_name, + auth_url=auth.auth_url + '/v3', + user_domain_name='default', + project_domain_name='default') + + token_identity = token.create_token() + req.headers['X_AUTH_TOKEN'] = token_identity + # Change the body request + if req.body: + body_dict = dict() + body_dict['trigger'] = {} + body_dict['trigger'].setdefault('params', {}) + # Update params in the body request + body_info = jsonutils.loads(req.body) + body_dict['trigger']['params']['data'] = body_info + body_dict['trigger']['params']['credential'] = info[6] + # Update policy and action + body_dict['trigger']['policy_name'] = info[4] + body_dict['trigger']['action_name'] = info[5] + req.body = jsonutils.dumps(body_dict) + LOG.debug('Body alarm: %s', req.body) + # Need to change url because of mandatory + req.environ['PATH_INFO'] = prefix + 'triggers' + req.environ['QUERY_STRING'] = '' + LOG.debug('alarm 
url in receiver: %s', req.url) + + def handle_url(self, url): + # alarm_url = 'http://host:port/v1.0/meas/mea-uuid/mon-policy-name/action-name/8ef785' # noqa + parts = parse.urlparse(url) + p = parts.path.split('/') + if len(p) != 7: + return None + + if any((p[0] != '', p[2] != 'meas')): + return None + # decode action name: respawn%25log + p[5] = parse.unquote(p[5]) + qs = parse.parse_qs(parts.query) + params = dict((k, v[0]) for k, v in qs.items()) + prefix_url = '/%(collec)s/%(mea_uuid)s/' % {'collec': p[2], + 'mea_uuid': p[3]} + return prefix_url, p, params diff --git a/apmec/api/__init__.py b/apmec/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/api/api_common.py b/apmec/api/api_common.py new file mode 100644 index 0000000..d5f30ac --- /dev/null +++ b/apmec/api/api_common.py @@ -0,0 +1,411 @@ +# Copyright 2011 Citrix System. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +from oslo_config import cfg +import oslo_i18n +from oslo_log import log as logging +from oslo_policy import policy as oslo_policy +from six import iteritems +from six.moves.urllib import parse as urllib_parse +from webob import exc + +from apmec.common import constants +from apmec.common import exceptions +from apmec import wsgi + +LOG = logging.getLogger(__name__) + + +def get_filters(request, attr_info, skips=None): + """Extracts the filters from the request string. 
+ + Returns a dict of lists for the filters: + check=a&check=b&name=Bob& + becomes: + {'check': [u'a', u'b'], 'name': [u'Bob']} + """ + res = {} + skips = skips or [] + for key, values in iteritems(request.GET.dict_of_lists()): + if key in skips: + continue + values = [v for v in values if v] + key_attr_info = attr_info.get(key, {}) + if 'convert_list_to' in key_attr_info: + values = key_attr_info['convert_list_to'](values) + elif 'convert_to' in key_attr_info: + convert_to = key_attr_info['convert_to'] + values = [convert_to(v) for v in values] + if values: + res[key] = values + return res + + +def get_previous_link(request, items, id_key): + params = request.GET.copy() + params.pop('marker', None) + if items: + marker = items[0][id_key] + params['marker'] = marker + params['page_reverse'] = True + return "%s?%s" % (request.path_url, urllib_parse.urlencode(params)) + + +def get_next_link(request, items, id_key): + params = request.GET.copy() + params.pop('marker', None) + if items: + marker = items[-1][id_key] + params['marker'] = marker + params.pop('page_reverse', None) + return "%s?%s" % (request.path_url, urllib_parse.urlencode(params)) + + +def get_limit_and_marker(request): + """Return marker, limit tuple from request. + + :param request: `wsgi.Request` possibly containing 'marker' and 'limit' + GET variables. 'marker' is the id of the last element + the client has seen, and 'limit' is the maximum number + of items to return. If limit == 0, it means we needn't + pagination, then return None. 
+ """ + max_limit = _get_pagination_max_limit() + limit = _get_limit_param(request, max_limit) + if max_limit > 0: + limit = min(max_limit, limit) or max_limit + if not limit: + return None, None + marker = request.GET.get('marker', None) + return limit, marker + + +def _get_pagination_max_limit(): + max_limit = -1 + if (cfg.CONF.pagination_max_limit.lower() != + constants.PAGINATION_INFINITE): + try: + max_limit = int(cfg.CONF.pagination_max_limit) + if max_limit == 0: + raise ValueError() + except ValueError: + LOG.warning("Invalid value for pagination_max_limit: %s. It " + "should be an integer greater to 0", + cfg.CONF.pagination_max_limit) + return max_limit + + +def _get_limit_param(request, max_limit): + """Extract integer limit from request or fail.""" + try: + limit = int(request.GET.get('limit', 0)) + if limit >= 0: + return limit + except ValueError: + pass + msg = _("Limit must be an integer 0 or greater and not '%d'") + raise exceptions.BadRequest(resource='limit', msg=msg) + + +def list_args(request, arg): + """Extracts the list of arg from request.""" + return [v for v in request.GET.getall(arg) if v] + + +def get_sorts(request, attr_info): + """Extract sort_key and sort_dir from request. 
+ + Return as: [(key1, value1), (key2, value2)] + """ + sort_keys = list_args(request, "sort_key") + sort_dirs = list_args(request, "sort_dir") + if len(sort_keys) != len(sort_dirs): + msg = _("The number of sort_keys and sort_dirs must be same") + raise exc.HTTPBadRequest(explanation=msg) + valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC] + absent_keys = [x for x in sort_keys if x not in attr_info] + if absent_keys: + msg = _("%s is invalid attribute for sort_keys") % absent_keys + raise exc.HTTPBadRequest(explanation=msg) + invalid_dirs = [x for x in sort_dirs if x not in valid_dirs] + if invalid_dirs: + msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, " + "valid value is '%(asc)s' and '%(desc)s'") % + {'invalid_dirs': invalid_dirs, + 'asc': constants.SORT_DIRECTION_ASC, + 'desc': constants.SORT_DIRECTION_DESC}) + raise exc.HTTPBadRequest(explanation=msg) + return zip(sort_keys, + [x == constants.SORT_DIRECTION_ASC for x in sort_dirs]) + + +def get_page_reverse(request): + data = request.GET.get('page_reverse', 'False') + return data.lower() == "true" + + +def get_pagination_links(request, items, limit, + marker, page_reverse, key="id"): + key = key if key else 'id' + links = [] + if not limit: + return links + if not (len(items) < limit and not page_reverse): + links.append({"rel": "next", + "href": get_next_link(request, items, + key)}) + if not (len(items) < limit and page_reverse): + links.append({"rel": "previous", + "href": get_previous_link(request, items, + key)}) + return links + + +class PaginationHelper(object): + + def __init__(self, request, primary_key='id'): + self.request = request + self.primary_key = primary_key + + def update_fields(self, original_fields, fields_to_add): + pass + + def update_args(self, args): + pass + + def paginate(self, items): + return items + + def get_links(self, items): + return {} + + +class PaginationEmulatedHelper(PaginationHelper): + + def __init__(self, request, primary_key='id'): 
+ super(PaginationEmulatedHelper, self).__init__(request, primary_key) + self.limit, self.marker = get_limit_and_marker(request) + self.page_reverse = get_page_reverse(request) + + def update_fields(self, original_fields, fields_to_add): + if not original_fields: + return + if self.primary_key not in original_fields: + original_fields.append(self.primary_key) + fields_to_add.append(self.primary_key) + + def paginate(self, items): + if not self.limit: + return items + i = -1 + if self.marker: + for item in items: + i = i + 1 + if item[self.primary_key] == self.marker: + break + if self.page_reverse: + return items[i - self.limit:i] + return items[i + 1:i + self.limit + 1] + + def get_links(self, items): + return get_pagination_links( + self.request, items, self.limit, self.marker, + self.page_reverse, self.primary_key) + + +class PaginationNativeHelper(PaginationEmulatedHelper): + + def update_args(self, args): + if self.primary_key not in dict(args.get('sorts', [])).keys(): + args.setdefault('sorts', []).append((self.primary_key, True)) + args.update({'limit': self.limit, 'marker': self.marker, + 'page_reverse': self.page_reverse}) + + def paginate(self, items): + return items + + +class NoPaginationHelper(PaginationHelper): + pass + + +class SortingHelper(object): + + def __init__(self, request, attr_info): + pass + + def update_args(self, args): + pass + + def update_fields(self, original_fields, fields_to_add): + pass + + def sort(self, items): + return items + + +class SortingEmulatedHelper(SortingHelper): + + def __init__(self, request, attr_info): + super(SortingEmulatedHelper, self).__init__(request, attr_info) + self.sort_dict = get_sorts(request, attr_info) + + def update_fields(self, original_fields, fields_to_add): + if not original_fields: + return + for key in dict(self.sort_dict).keys(): + if key not in original_fields: + original_fields.append(key) + fields_to_add.append(key) + + def sort(self, items): + def cmp_func(obj1, obj2): + for key, direction 
in self.sort_dict: + ret = cmp(obj1[key], obj2[key]) + if ret: + return ret * (1 if direction else -1) + return 0 + return sorted(items, cmp=cmp_func) + + +class SortingNativeHelper(SortingHelper): + + def __init__(self, request, attr_info): + super(SortingNativeHelper, self).__init__(request, attr_info) + self.sort_dict = get_sorts(request, attr_info) + + def update_args(self, args): + args['sorts'] = self.sort_dict + + +class NoSortingHelper(SortingHelper): + pass + + +class ApmecController(object): + """Base controller class for Apmec API.""" + # _resource_name will be redefined in sub concrete controller + _resource_name = None + + def __init__(self, plugin): + self._plugin = plugin + super(ApmecController, self).__init__() + + def _prepare_request_body(self, body, params): + """Verifies required parameters are in request body. + + Sets default value for missing optional parameters. + Body argument must be the deserialized body. + """ + try: + if body is None: + # Initialize empty resource for setting default value + body = {self._resource_name: {}} + data = body[self._resource_name] + except KeyError: + # raise if _resource_name is not in req body. + raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") % + self._resource_name) + for param in params: + param_name = param['param-name'] + param_value = data.get(param_name) + # If the parameter wasn't found and it was required, return 400 + if param_value is None and param['required']: + msg = (_("Failed to parse request. 
" + "Parameter '%s' not specified") % param_name) + LOG.error(msg) + raise exc.HTTPBadRequest(msg) + data[param_name] = param_value or param.get('default-value') + return body + + +def convert_exception_to_http_exc(e, faults, language): + serializer = wsgi.JSONDictSerializer() + e = translate(e, language) + body = serializer.serialize( + {'ApmecError': get_exception_data(e)}) + kwargs = {'body': body, 'content_type': 'application/json'} + if isinstance(e, exc.HTTPException): + # already an HTTP error, just update with content type and body + e.body = body + e.content_type = kwargs['content_type'] + return e + if isinstance(e, (exceptions.ApmecException, netaddr.AddrFormatError, + oslo_policy.PolicyNotAuthorized)): + for fault in faults: + if isinstance(e, fault): + mapped_exc = faults[fault] + break + else: + mapped_exc = exc.HTTPInternalServerError + return mapped_exc(**kwargs) + if isinstance(e, NotImplementedError): + # NOTE(armando-migliaccio): from a client standpoint + # it makes sense to receive these errors, because + # extensions may or may not be implemented by + # the underlying plugin. So if something goes south, + # because a plugin does not implement a feature, + # returning 500 is definitely confusing. + kwargs['body'] = serializer.serialize( + {'NotImplementedError': get_exception_data(e)}) + return exc.HTTPNotImplemented(**kwargs) + # NOTE(jkoelker) Everything else is 500 + # Do not expose details of 500 error to clients. + msg = _('Request Failed: internal server error while ' + 'processing your request.') + msg = translate(msg, language) + kwargs['body'] = serializer.serialize( + {'ApmecError': get_exception_data(exc.HTTPInternalServerError(msg))}) + return exc.HTTPInternalServerError(**kwargs) + + +def get_exception_data(e): + """Extract the information about an exception. + + Apmec client for the v1 API expects exceptions to have 'type', 'message' + and 'detail' attributes.This information is extracted and converted into a + dictionary. 
+ + :param e: the exception to be reraised + :returns: a structured dict with the exception data + """ + err_data = {'type': e.__class__.__name__, + 'message': e, 'detail': ''} + return err_data + + +def translate(translatable, locale): + """Translates the object to the given locale. + + If the object is an exception its translatable elements are translated + in place, if the object is a translatable string it is translated and + returned. Otherwise, the object is returned as-is. + + :param translatable: the object to be translated + :param locale: the locale to translate to + :returns: the translated object, or the object as-is if it + was not translated + """ + localize = oslo_i18n.translate + if isinstance(translatable, exceptions.ApmecException): + translatable.msg = localize(translatable.msg, locale) + elif isinstance(translatable, exc.HTTPError): + translatable.detail = localize(translatable.detail, locale) + elif isinstance(translatable, Exception): + translatable.message = localize(translatable, locale) + else: + return localize(translatable, locale) + return translatable diff --git a/apmec/api/extensions.py b/apmec/api/extensions.py new file mode 100644 index 0000000..f9d4b63 --- /dev/null +++ b/apmec/api/extensions.py @@ -0,0 +1,622 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc +import imp +import os + +from oslo_config import cfg +from oslo_log import log as logging +import routes +import six +import webob.dec +import webob.exc + +from apmec.common import exceptions +import apmec.extensions +from apmec import policy +from apmec import wsgi + + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class PluginInterface(object): + + @classmethod + def __subclasshook__(cls, klass): + """Checking plugin class. + + The __subclasshook__ method is a class method + that will be called every time a class is tested + using issubclass(klass, PluginInterface). + In that case, it will check that every method + marked with the abstractmethod decorator is + provided by the plugin class. + """ + + if not cls.__abstractmethods__: + return NotImplemented + + for method in cls.__abstractmethods__: + if any(method in base.__dict__ for base in klass.__mro__): + continue + return NotImplemented + return True + + +class ExtensionDescriptor(object): + """Base class that defines the contract for extensions. + + Note that you don't have to derive from this class to have a valid + extension; it is purely a convenience. + """ + + def get_name(self): + """The name of the extension. + + e.g. 'Fox In Socks' + """ + raise NotImplementedError() + + def get_alias(self): + """The alias for the extension. + + e.g. 'FOXNSOX' + """ + raise NotImplementedError() + + def get_description(self): + """Friendly description for the extension. + + e.g. 'The Fox In Socks Extension' + """ + raise NotImplementedError() + + def get_namespace(self): + """The XML namespace for the extension. + + e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0' + """ + raise NotImplementedError() + + def get_updated(self): + """The timestamp when the extension was last updated. + + e.g. 
'2011-01-22T13:25:27-06:00' + """ + # NOTE(justinsb): Not sure of the purpose of this is, vs the XML NS + raise NotImplementedError() + + def get_resources(self): + """List of extensions.ResourceExtension extension objects. + + Resources define new nouns, and are accessible through URLs. + """ + resources = [] + return resources + + def get_actions(self): + """List of extensions.ActionExtension extension objects. + + Actions are verbs callable from the API. + """ + actions = [] + return actions + + def get_request_extensions(self): + """List of extensions.RequestException extension objects. + + Request extensions are used to handle custom request data. + """ + request_exts = [] + return request_exts + + def get_extended_resources(self, version): + """Retrieve extended resources or attributes for core resources. + + Extended attributes are implemented by a core plugin similarly + to the attributes defined in the core, and can appear in + request and response messages. Their names are scoped with the + extension's prefix. The core API version is passed to this + function, which must return a + map[][][] + specifying the extended resource attribute properties required + by that API version. + + Extension can add resources and their attr definitions too. + The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP. + """ + return {} + + def get_plugin_interface(self): + """Returns an abstract class which defines contract for the plugin. + + The abstract class should inherit from extesnions.PluginInterface, + Methods in this abstract class should be decorated as abstractmethod + """ + return None + + def update_attributes_map(self, extended_attributes, + extension_attrs_map=None): + """Update attributes map for this extension. + + This is default method for extending an extension's attributes map. + An extension can use this method and supplying its own resource + attribute map in extension_attrs_map argument to extend all its + attributes that needs to be extended. 
+ + If an extension does not implement update_attributes_map, the method + does nothing and just return. + """ + if not extension_attrs_map: + return + + for resource, attrs in extension_attrs_map.items(): + extended_attrs = extended_attributes.get(resource) + if extended_attrs: + attrs.update(extended_attrs) + + def get_alias_namespace_compatibility_map(self): + """Returns mappings between extension aliases and XML namespaces. + + The mappings are XML namespaces that should, for backward compatibility + reasons, be added to the XML serialization of extended attributes. + This allows an established extended attribute to be provided by + another extension than the original one while keeping its old alias + in the name. + :return: A dictionary of extension_aliases and namespace strings. + """ + return {} + + +class ActionExtensionController(wsgi.Controller): + + def __init__(self, application): + self.application = application + self.action_handlers = {} + + def add_action(self, action_name, handler): + self.action_handlers[action_name] = handler + + def action(self, request, id): + input_dict = self._deserialize(request.body, + request.get_content_type()) + for action_name, handler in (self.action_handlers).items(): + if action_name in input_dict: + return handler(input_dict, request, id) + # no action handler found (bump to downstream application) + response = self.application + return response + + +class RequestExtensionController(wsgi.Controller): + + def __init__(self, application): + self.application = application + self.handlers = [] + + def add_handler(self, handler): + self.handlers.append(handler) + + def process(self, request, *args, **kwargs): + res = request.get_response(self.application) + # currently request handlers are un-ordered + for handler in self.handlers: + response = handler(request, res) + return response + + +class ExtensionController(wsgi.Controller): + + def __init__(self, extension_manager): + self.extension_manager = extension_manager + 
+ def _translate(self, ext): + ext_data = {} + ext_data['name'] = ext.get_name() + ext_data['alias'] = ext.get_alias() + ext_data['description'] = ext.get_description() + ext_data['namespace'] = ext.get_namespace() + ext_data['updated'] = ext.get_updated() + ext_data['links'] = [] # TODO(dprince): implement extension links + return ext_data + + def index(self, request): + extensions = [] + for _alias, ext in (self.extension_manager.extensions).items(): + extensions.append(self._translate(ext)) + return dict(extensions=extensions) + + def show(self, request, id): + # NOTE(dprince): the extensions alias is used as the 'id' for show + ext = self.extension_manager.extensions.get(id) + if not ext: + raise webob.exc.HTTPNotFound( + _("Extension with alias %s does not exist") % id) + return dict(extension=self._translate(ext)) + + def delete(self, request, id): + msg = _('Resource not found.') + raise webob.exc.HTTPNotFound(msg) + + def create(self, request): + msg = _('Resource not found.') + raise webob.exc.HTTPNotFound(msg) + + +class ExtensionMiddleware(wsgi.Middleware): + """Extensions middleware for WSGI.""" + + def __init__(self, application, + ext_mgr=None): + self.ext_mgr = (ext_mgr + or ExtensionManager(get_extensions_path())) + mapper = routes.Mapper() + + # extended resources + for resource in self.ext_mgr.get_resources(): + path_prefix = resource.path_prefix + if resource.parent: + path_prefix = (resource.path_prefix + + "/%s/{%s_id}" % + (resource.parent["collection_name"], + resource.parent["member_name"])) + + LOG.debug('Extended resource: %s', resource.collection) + for action, method in (resource.collection_actions).items(): + conditions = dict(method=[method]) + path = "/%s/%s" % (resource.collection, action) + with mapper.submapper(controller=resource.controller, + action=action, + path_prefix=path_prefix, + conditions=conditions) as submap: + submap.connect(path) + submap.connect("%s.:(format)" % path) + + mapper.resource(resource.collection, 
resource.collection, + controller=resource.controller, + member=resource.member_actions, + parent_resource=resource.parent, + path_prefix=path_prefix) + + # extended actions + action_controllers = self._action_ext_controllers(application, + self.ext_mgr, mapper) + for action in self.ext_mgr.get_actions(): + LOG.debug('Extended action: %s', action.action_name) + controller = action_controllers[action.collection] + controller.add_action(action.action_name, action.handler) + + # extended requests + req_controllers = self._request_ext_controllers(application, + self.ext_mgr, mapper) + for request_ext in self.ext_mgr.get_request_extensions(): + LOG.debug('Extended request: %s', request_ext.key) + controller = req_controllers[request_ext.key] + controller.add_handler(request_ext.handler) + + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + mapper) + super(ExtensionMiddleware, self).__init__(application) + + @classmethod + def factory(cls, global_config, **local_config): + """Paste factory.""" + def _factory(app): + return cls(app, global_config, **local_config) + return _factory + + def _action_ext_controllers(self, application, ext_mgr, mapper): + """Return a dict of ActionExtensionController-s by collection.""" + action_controllers = {} + for action in ext_mgr.get_actions(): + if action.collection not in action_controllers.keys(): + controller = ActionExtensionController(application) + mapper.connect("/%s/:(id)/action.:(format)" % + action.collection, + action='action', + controller=controller, + conditions=dict(method=['POST'])) + mapper.connect("/%s/:(id)/action" % action.collection, + action='action', + controller=controller, + conditions=dict(method=['POST'])) + action_controllers[action.collection] = controller + + return action_controllers + + def _request_ext_controllers(self, application, ext_mgr, mapper): + """Returns a dict of RequestExtensionController-s by collection.""" + request_ext_controllers = {} + for req_ext in 
ext_mgr.get_request_extensions(): + if req_ext.key not in request_ext_controllers.keys(): + controller = RequestExtensionController(application) + mapper.connect(req_ext.url_route + '.:(format)', + action='process', + controller=controller, + conditions=req_ext.conditions) + + mapper.connect(req_ext.url_route, + action='process', + controller=controller, + conditions=req_ext.conditions) + request_ext_controllers[req_ext.key] = controller + + return request_ext_controllers + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """Route the incoming request with router.""" + req.environ['extended.app'] = self.application + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=wsgi.Request) + def _dispatch(req): + """Dispatch the request. + + Returns the routed WSGI app's response or defers to the extended + application. + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + return req.environ['extended.app'] + app = match['controller'] + return app + + +def extension_middleware_factory(global_config, **local_config): + """Paste factory.""" + def _factory(app): + ext_mgr = ExtensionManager.get_instance() + return ExtensionMiddleware(app, ext_mgr=ext_mgr) + return _factory + + +class ExtensionManager(object): + """Load extensions from the configured extension path. + + See tests/unit/extensions/foxinsocks.py for an + example extension implementation. 
+ """ + + _instance = None + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls(get_extensions_path()) + return cls._instance + + def __init__(self, path): + LOG.info('Initializing extension manager.') + self.path = path + self.extensions = {} + self._load_all_extensions() + policy.reset() + + def get_resources(self): + """Returns a list of ResourceExtension objects.""" + resources = [] + resources.append(ResourceExtension('extensions', + ExtensionController(self))) + for ext in self.extensions.values(): + try: + resources.extend(ext.get_resources()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have resource + # extensions + pass + return resources + + def get_actions(self): + """Returns a list of ActionExtension objects.""" + actions = [] + for ext in self.extensions.values(): + try: + actions.extend(ext.get_actions()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have action + # extensions + pass + return actions + + def get_request_extensions(self): + """Returns a list of RequestExtension objects.""" + request_exts = [] + for ext in self.extensions.values(): + try: + request_exts.extend(ext.get_request_extensions()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have request + # extensions + pass + return request_exts + + def extend_resources(self, version, attr_map): + """Extend resources with additional resources or attributes. + + :param attr_map: the existing mapping from resource name to + attrs definition. + + After this function, we will extend the attr_map if an extension + wants to extend this map. 
+ """ + update_exts = [] + processed_exts = set() + exts_to_process = self.extensions.copy() + # Iterate until there are unprocessed extensions or if no progress + # is made in a whole iteration + while exts_to_process: + processed_ext_count = len(processed_exts) + for ext_name, ext in exts_to_process.items(): + if not hasattr(ext, 'get_extended_resources'): + del exts_to_process[ext_name] + continue + if hasattr(ext, 'update_attributes_map'): + update_exts.append(ext) + if hasattr(ext, 'get_required_extensions'): + # Process extension only if all required extensions + # have been processed already + required_exts_set = set(ext.get_required_extensions()) + if required_exts_set - processed_exts: + continue + try: + extended_attrs = ext.get_extended_resources(version) + for resource, resource_attrs in extended_attrs.items(): + if attr_map.get(resource): + attr_map[resource].update(resource_attrs) + else: + attr_map[resource] = resource_attrs + except AttributeError: + LOG.exception("Error fetching extended attributes for " + "extension '%s'", ext.get_name()) + processed_exts.add(ext_name) + del exts_to_process[ext_name] + if len(processed_exts) == processed_ext_count: + # Exit loop as no progress was made + break + if exts_to_process: + # NOTE(salv-orlando): Consider whether this error should be fatal + LOG.error("It was impossible to process the following " + "extensions: %s because of missing requirements.", + ','.join(exts_to_process.keys())) + + # Extending extensions' attributes map. 
+ for ext in update_exts: + ext.update_attributes_map(attr_map) + + def _check_extension(self, extension): + """Checks for required methods in extension objects.""" + try: + LOG.debug('Ext name: %s', extension.get_name()) + LOG.debug('Ext alias: %s', extension.get_alias()) + LOG.debug('Ext description: %s', extension.get_description()) + LOG.debug('Ext namespace: %s', extension.get_namespace()) + LOG.debug('Ext updated: %s', extension.get_updated()) + except AttributeError as ex: + LOG.exception("Exception loading extension: %s", ex) + return False + return True + + def _load_all_extensions(self): + """Load extensions from the configured path. + + The extension name is constructed from the module_name. If your + extension module is named widgets.py, the extension class within that + module should be 'Widgets'. + + See tests/unit/extensions/foxinsocks.py for an example extension + implementation. + """ + for path in self.path.split(':'): + if os.path.exists(path): + self._load_all_extensions_from_path(path) + else: + LOG.error("Extension path '%s' doesn't exist!", path) + + def _load_all_extensions_from_path(self, path): + # Sorting the extension list makes the order in which they + # are loaded predictable across a cluster of load-balanced + # Apmec Servers + for f in sorted(os.listdir(path)): + try: + LOG.debug('Loading extension file: %s', f) + mod_name, file_ext = os.path.splitext(os.path.split(f)[-1]) + ext_path = os.path.join(path, f) + if file_ext.lower() == '.py' and not mod_name.startswith('_'): + mod = imp.load_source(mod_name, ext_path) + ext_name = mod_name[0].upper() + mod_name[1:] + new_ext_class = getattr(mod, ext_name, None) + if not new_ext_class: + LOG.warning('Did not find expected name ' + '"%(ext_name)s" in %(file)s', + {'ext_name': ext_name, + 'file': ext_path}) + continue + new_ext = new_ext_class() + self.add_extension(new_ext) + except Exception as exception: + LOG.warning("Extension file %(f)s wasn't loaded due to " + "%(exception)s", + 
{'f': f, 'exception': exception}) + + def add_extension(self, ext): + # Do nothing if the extension doesn't check out + if not self._check_extension(ext): + return + + alias = ext.get_alias() + LOG.info('Loaded extension: %s', alias) + + if alias in self.extensions: + raise exceptions.DuplicatedExtension(alias=alias) + self.extensions[alias] = ext + + +class RequestExtension(object): + """Extend requests and responses of core Apmec OpenStack API controllers. + + Provide a way to add data to responses and handle custom request data + that is sent to core Apmec OpenStack API controllers. + """ + + def __init__(self, method, url_route, handler): + self.url_route = url_route + self.handler = handler + self.conditions = dict(method=[method]) + self.key = "%s-%s" % (method, url_route) + + +class ActionExtension(object): + """Add custom actions to core Apmec OpenStack API controllers.""" + + def __init__(self, collection, action_name, handler): + self.collection = collection + self.action_name = action_name + self.handler = handler + + +class ResourceExtension(object): + """Add top level resources to the OpenStack API in Apmec.""" + + def __init__(self, collection, controller, parent=None, path_prefix="", + collection_actions={}, member_actions={}, attr_map={}): + self.collection = collection + self.controller = controller + self.parent = parent + self.collection_actions = collection_actions + self.member_actions = member_actions + self.path_prefix = path_prefix + self.attr_map = attr_map + + +# Returns the extension paths from a config entry and the __path__ +# of apmec.extensions +def get_extensions_path(): + paths = ':'.join(apmec.extensions.__path__) + if cfg.CONF.api_extensions_path: + paths = ':'.join([cfg.CONF.api_extensions_path, paths]) + + return paths + + +def append_api_extensions_path(paths): + paths = [cfg.CONF.api_extensions_path] + paths + cfg.CONF.set_override('api_extensions_path', + ':'.join([p for p in paths if p])) diff --git 
a/apmec/api/v1/__init__.py b/apmec/api/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/api/v1/attributes.py b/apmec/api/v1/attributes.py new file mode 100644 index 0000000..a5fcb05 --- /dev/null +++ b/apmec/api/v1/attributes.py @@ -0,0 +1,613 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re + +import netaddr +from oslo_log import log as logging +from oslo_utils import uuidutils +import six + +from apmec.common import exceptions as n_exc + + +LOG = logging.getLogger(__name__) + +ATTRIBUTES_TO_UPDATE = 'attributes_to_update' +ATTR_NOT_SPECIFIED = object() +# Defining a constant to avoid repeating string literal in several modules +SHARED = 'shared' + +# Used by range check to indicate no limit for a bound. +UNLIMITED = None + + +def _verify_dict_keys(expected_keys, target_dict, strict=True): + """Allows to verify keys in a dictionary. + + :param expected_keys: A list of keys expected to be present. + :param target_dict: The dictionary which should be verified. + :param strict: Specifies whether additional keys are allowed to be present. + :return: True, if keys in the dictionary correspond to the specification. + """ + if not isinstance(target_dict, dict): + msg = (_("Invalid input. 
'%(target_dict)s' must be a dictionary " + "with keys: %(expected_keys)s") % + {'target_dict': target_dict, 'expected_keys': expected_keys}) + return msg + + expected_keys = set(expected_keys) + provided_keys = set(target_dict.keys()) + + predicate = expected_keys.__eq__ if strict else expected_keys.issubset + + if not predicate(provided_keys): + msg = (_("Validation of dictionary's keys failed." + "Expected keys: %(expected_keys)s " + "Provided keys: %(provided_keys)s") % + {'expected_keys': expected_keys, + 'provided_keys': provided_keys}) + return msg + + +def is_attr_set(attribute): + return not (attribute is None or attribute is ATTR_NOT_SPECIFIED) + + +def _validate_values(data, valid_values=None): + if data not in valid_values: + msg = (_("'%(data)s' is not in %(valid_values)s") % + {'data': data, 'valid_values': valid_values}) + LOG.debug(msg) + return msg + + +def _validate_not_empty_string_or_none(data, max_len=None): + if data is not None: + return _validate_not_empty_string(data, max_len=max_len) + + +def _validate_not_empty_string(data, max_len=None): + msg = _validate_string(data, max_len=max_len) + if msg: + return msg + if not data.strip(): + return _("'%s' Blank strings are not permitted") % data + + +def _validate_string_or_none(data, max_len=None): + if data is not None: + return _validate_string(data, max_len=max_len) + + +def _validate_string(data, max_len=None): + if not isinstance(data, six.string_types): + msg = _("'%s' is not a valid string") % data + LOG.debug(msg) + return msg + + if max_len is not None and len(data) > max_len: + msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") % + {'data': data, 'max_len': max_len}) + LOG.debug(msg) + return msg + + +def _validate_boolean(data, valid_values=None): + try: + convert_to_boolean(data) + except n_exc.InvalidInput: + msg = _("'%s' is not a valid boolean value") % data + LOG.debug(msg) + return msg + + +def _validate_range(data, valid_values=None): + """Check that integer value is 
within a range provided. + + Test is inclusive. Allows either limit to be ignored, to allow + checking ranges where only the lower or upper limit matter. + It is expected that the limits provided are valid integers or + the value None. + """ + + min_value = valid_values[0] + max_value = valid_values[1] + try: + data = int(data) + except (ValueError, TypeError): + msg = _("'%s' is not an integer") % data + LOG.debug(msg) + return msg + if min_value is not UNLIMITED and data < min_value: + msg = _("'%(data)s' is too small - must be at least " + "'%(limit)d'") % {'data': data, 'limit': min_value} + LOG.debug(msg) + return msg + if max_value is not UNLIMITED and data > max_value: + msg = _("'%(data)s' is too large - must be no larger than " + "'%(limit)d'") % {'data': data, 'limit': max_value} + LOG.debug(msg) + return msg + + +def _validate_no_whitespace(data): + """Validates that input has no whitespace.""" + if len(data.split()) > 1: + msg = _("'%s' contains whitespace") % data + LOG.debug(msg) + raise n_exc.InvalidInput(error_message=msg) + return data + + +def _validate_mac_address(data, valid_values=None): + valid_mac = False + try: + valid_mac = netaddr.valid_mac(_validate_no_whitespace(data)) + except Exception: + pass + finally: + # TODO(arosen): The code in this file should be refactored + # so it catches the correct exceptions. _validate_no_whitespace + # raises AttributeError if data is None. 
+ if valid_mac is False: + msg = _("'%s' is not a valid MAC address") % data + LOG.debug(msg) + return msg + + +def _validate_mac_address_or_none(data, valid_values=None): + if data is None: + return + return _validate_mac_address(data, valid_values) + + +def _validate_ip_address(data, valid_values=None): + try: + netaddr.IPAddress(_validate_no_whitespace(data)) + except Exception: + msg = _("'%s' is not a valid IP address") % data + LOG.debug(msg) + return msg + + +def _validate_ip_pools(data, valid_values=None): + """Validate that start and end IP addresses are present. + + In addition to this the IP addresses will also be validated + """ + if not isinstance(data, list): + msg = _("Invalid data format for IP pool: '%s'") % data + LOG.debug(msg) + return msg + + expected_keys = ['start', 'end'] + for ip_pool in data: + msg = _verify_dict_keys(expected_keys, ip_pool) + if msg: + LOG.debug(msg) + return msg + for k in expected_keys: + msg = _validate_ip_address(ip_pool[k]) + if msg: + LOG.debug(msg) + return msg + + +def _validate_fixed_ips(data, valid_values=None): + if not isinstance(data, list): + msg = _("Invalid data format for fixed IP: '%s'") % data + LOG.debug(msg) + return msg + + ips = [] + for fixed_ip in data: + if not isinstance(fixed_ip, dict): + msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip + LOG.debug(msg) + return msg + if 'ip_address' in fixed_ip: + # Ensure that duplicate entries are not set - just checking IP + # suffices. Duplicate subnet_id's are legitimate. 
+ fixed_ip_address = fixed_ip['ip_address'] + if fixed_ip_address in ips: + msg = _("Duplicate IP address '%s'") % fixed_ip_address + else: + msg = _validate_ip_address(fixed_ip_address) + if msg: + LOG.debug(msg) + return msg + ips.append(fixed_ip_address) + if 'subnet_id' in fixed_ip: + msg = _validate_uuid(fixed_ip['subnet_id']) + if msg: + LOG.debug(msg) + return msg + + +def _validate_nameservers(data, valid_values=None): + if not hasattr(data, '__iter__'): + msg = _("Invalid data format for nameserver: '%s'") % data + LOG.debug(msg) + return msg + + ips = [] + for ip in data: + msg = _validate_ip_address(ip) + if msg: + # This may be a hostname + msg = _validate_regex(ip, HOSTNAME_PATTERN) + if msg: + msg = _("'%s' is not a valid nameserver") % ip + LOG.debug(msg) + return msg + if ip in ips: + msg = _("Duplicate nameserver '%s'") % ip + LOG.debug(msg) + return msg + ips.append(ip) + + +def _validate_hostroutes(data, valid_values=None): + if not isinstance(data, list): + msg = _("Invalid data format for hostroute: '%s'") % data + LOG.debug(msg) + return msg + + expected_keys = ['destination', 'nexthop'] + hostroutes = [] + for hostroute in data: + msg = _verify_dict_keys(expected_keys, hostroute) + if msg: + LOG.debug(msg) + return msg + msg = _validate_subnet(hostroute['destination']) + if msg: + LOG.debug(msg) + return msg + msg = _validate_ip_address(hostroute['nexthop']) + if msg: + LOG.debug(msg) + return msg + if hostroute in hostroutes: + msg = _("Duplicate hostroute '%s'") % hostroute + LOG.debug(msg) + return msg + hostroutes.append(hostroute) + + +def _validate_ip_address_or_none(data, valid_values=None): + if data is None: + return None + return _validate_ip_address(data, valid_values) + + +def _validate_subnet(data, valid_values=None): + msg = None + try: + net = netaddr.IPNetwork(_validate_no_whitespace(data)) + if '/' not in data: + msg = _("'%(data)s' isn't a recognized IP subnet cidr," + " '%(cidr)s' is recommended") % {"data": data, + "cidr": 
net.cidr} + else: + return + except Exception: + msg = _("'%s' is not a valid IP subnet") % data + if msg: + LOG.debug(msg) + return msg + + +def _validate_subnet_list(data, valid_values=None): + if not isinstance(data, list): + msg = _("'%s' is not a list") % data + LOG.debug(msg) + return msg + + if len(set(data)) != len(data): + msg = _("Duplicate items in the list: '%s'") % ', '.join(data) + LOG.debug(msg) + return msg + + for item in data: + msg = _validate_subnet(item) + if msg: + return msg + + +def _validate_subnet_or_none(data, valid_values=None): + if data is None: + return + return _validate_subnet(data, valid_values) + + +def _validate_regex(data, valid_values=None): + try: + if re.match(valid_values, data): + return + except TypeError: + pass + + msg = _("'%s' is not a valid input") % data + LOG.debug(msg) + return msg + + +def _validate_regex_or_none(data, valid_values=None): + if data is None: + return + return _validate_regex(data, valid_values) + + +def _validate_uuid(data, valid_values=None): + if not uuidutils.is_uuid_like(data): + msg = _("'%s' is not a valid UUID") % data + LOG.debug(msg) + return msg + + +def _validate_uuid_or_none(data, valid_values=None): + if data is not None: + return _validate_uuid(data) + + +def _validate_uuid_list(data, valid_values=None): + if not isinstance(data, list): + msg = _("'%s' is not a list") % data + LOG.debug(msg) + return msg + + for item in data: + msg = _validate_uuid(item) + if msg: + LOG.debug(msg) + return msg + + if len(set(data)) != len(data): + msg = _("Duplicate items in the list: '%s'") % ', '.join(data) + LOG.debug(msg) + return msg + + +def _validate_dict_item(key, key_validator, data): + # Find conversion function, if any, and apply it + conv_func = key_validator.get('convert_to') + if conv_func: + data[key] = conv_func(data.get(key)) + # Find validator function + # TODO(salv-orlando): Structure of dict attributes should be improved + # to avoid iterating over items + val_func = val_params = 
None + for (k, v) in (key_validator).items(): + if k.startswith('type:'): + # ask forgiveness, not permission + try: + val_func = validators[k] + except KeyError: + return _("Validator '%s' does not exist.") % k + val_params = v + break + # Process validation + if val_func: + return val_func(data.get(key), val_params) + + +def _validate_dict(data, key_specs=None): + if not isinstance(data, dict): + msg = _("'%s' is not a dictionary") % data + LOG.debug(msg) + return msg + # Do not perform any further validation, if no constraints are supplied + if not key_specs: + return + + # Check whether all required keys are present + required_keys = [key for key, spec in (key_specs).items() + if spec.get('required')] + + if required_keys: + msg = _verify_dict_keys(required_keys, data, False) + if msg: + LOG.debug(msg) + return msg + + # Perform validation and conversion of all values + # according to the specifications. + for key, key_validator in [(k, v) for k, v in (key_specs).items() + if k in data]: + msg = _validate_dict_item(key, key_validator, data) + if msg: + LOG.debug(msg) + return msg + + +def _validate_dict_or_none(data, key_specs=None): + if data is not None: + return _validate_dict(data, key_specs) + + +def _validate_dict_or_empty(data, key_specs=None): + if data != {}: + return _validate_dict(data, key_specs) + + +def _validate_dict_or_nodata(data, key_specs=None): + if data: + return _validate_dict(data, key_specs) + + +def _validate_non_negative(data, valid_values=None): + try: + data = int(data) + except (ValueError, TypeError): + msg = _("'%s' is not an integer") % data + LOG.debug(msg) + return msg + + if data < 0: + msg = _("'%s' should be non-negative") % data + LOG.debug(msg) + return msg + + +def convert_to_boolean(data): + if isinstance(data, six.string_types): + val = data.lower() + if val == "true" or val == "1": + return True + if val == "false" or val == "0": + return False + elif isinstance(data, bool): + return data + elif isinstance(data, int): 
def convert_to_int(data):
    """Coerce *data* to an int.

    :raises n_exc.InvalidInput: if *data* cannot be converted.
    """
    try:
        return int(data)
    except (ValueError, TypeError):
        raise n_exc.InvalidInput(
            error_message=_("'%s' is not an integer") % data)


def convert_kvp_str_to_list(data):
    """Convert a value of the form 'key=value' to ['key', 'value'].

    :raises n_exc.InvalidInput: if any of the strings are malformed
        (e.g. do not contain a key).
    """
    pieces = [part.strip() for part in data.split('=', 1)]
    if len(pieces) == 2 and pieces[0]:
        return pieces
    raise n_exc.InvalidInput(
        error_message=_("'%s' is not of the form =[value]") % data)


def convert_kvp_list_to_dict(kvp_list):
    """Convert a list of 'key=value' strings to a dict of key -> value list.

    :raises n_exc.InvalidInput: if any of the strings are malformed
        (e.g. do not contain a key) or if any of the keys appear more
        than once.
    """
    if kvp_list == ['True']:
        # No values were provided (i.e. '--flag-name').
        return {}
    collected = {}
    for entry in kvp_list:
        key, value = convert_kvp_str_to_list(entry)
        # A set deduplicates repeated key=value pairs.
        collected.setdefault(key, set()).add(value)
    return {key: list(values) for key, values in collected.items()}


def convert_none_to_empty_list(value):
    """Return [] for None, otherwise *value* unchanged."""
    if value is None:
        return []
    return value


def convert_none_to_empty_dict(value):
    """Return {} for None, otherwise *value* unchanged."""
    if value is None:
        return {}
    return value


def convert_to_list(data):
    """Wrap *data* in a list: None -> [], iterables (except strings) are
    materialized, everything else becomes a single-element list."""
    if data is None:
        return []
    if hasattr(data, '__iter__') and not isinstance(data, six.string_types):
        return list(data)
    return [data]
+ """ + plural_mappings = {} + for plural in resource_map: + singular = special_mappings.get(plural, plural[:-1]) + plural_mappings[plural] = singular + return plural_mappings + + +def build_resource_info(plural_mappings, resource_map, which_service, + action_map=None, + translate_name=False, allow_bulk=False): + """Build resources for advanced services. + + Takes the resource information, and singular/plural mappings, and creates + API resource objects for advanced services extensions. Will optionally + translate underscores to dashes in resource names, register the resource, + and accept action information for resources. + + :param plural_mappings: mappings between singular and plural forms + :param resource_map: attribute map for the WSGI resources to create + :param which_service: The name of the service for which the WSGI resources + are being created. This name will be used to pass + the appropriate plugin to the WSGI resource. + It can be set to None or "CORE"to create WSGI + resources for the core plugin + :param action_map: custom resource actions + :param translate_name: replaces underscores with dashes + :param allow_bulk: True if bulk create are allowed + """ + resources = [] + if not which_service: + which_service = constants.CORE + action_map = action_map or {} + plugin = manager.ApmecManager.get_service_plugins()[which_service] + for collection_name in resource_map: + resource_name = plural_mappings[collection_name] + params = resource_map.get(collection_name, {}) + if translate_name: + collection_name = collection_name.replace('_', '-') + member_actions = action_map.get(resource_name, {}) + controller = base.create_resource( + collection_name, resource_name, plugin, params, + member_actions=member_actions, + allow_bulk=allow_bulk, + allow_pagination=cfg.CONF.allow_pagination, + allow_sorting=cfg.CONF.allow_sorting) + resource = extensions.ResourceExtension( + collection_name, + controller, + path_prefix=constants.COMMON_PREFIXES[which_service], + 
member_actions=member_actions, + attr_map=params) + resources.append(resource) + return resources diff --git a/apmec/api/v1/router.py b/apmec/api/v1/router.py new file mode 100644 index 0000000..6fa14f8 --- /dev/null +++ b/apmec/api/v1/router.py @@ -0,0 +1,60 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import routes as routes_mapper +import six.moves.urllib.parse as urlparse +import webob +import webob.dec +import webob.exc + +from apmec.api import extensions +from apmec.api.v1 import attributes +from apmec import wsgi + + +class Index(wsgi.Application): + def __init__(self, resources): + self.resources = resources + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + metadata = {} + + layout = [] + for name, collection in (self.resources).items(): + href = urlparse.urljoin(req.path_url, collection) + resource = {'name': name, + 'collection': collection, + 'links': [{'rel': 'self', + 'href': href}]} + layout.append(resource) + response = dict(resources=layout) + content_type = req.best_match_content_type() + body = wsgi.Serializer(metadata=metadata).serialize(response, + content_type) + return webob.Response(body=body, content_type=content_type) + + +class APIRouter(wsgi.Router): + + @classmethod + def factory(cls, global_config, **local_config): + return cls(**local_config) + + def __init__(self, **local_config): + mapper = routes_mapper.Mapper() + ext_mgr = 
extensions.ExtensionManager.get_instance() + ext_mgr.extend_resources("1.0", attributes.RESOURCE_ATTRIBUTE_MAP) + super(APIRouter, self).__init__(mapper) diff --git a/apmec/api/versions.py b/apmec/api/versions.py new file mode 100644 index 0000000..543b3f4 --- /dev/null +++ b/apmec/api/versions.py @@ -0,0 +1,59 @@ +# Copyright 2011 Citrix Systems. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob.dec + +import oslo_i18n + +from apmec.api.views import versions as versions_view +from apmec import wsgi + + +class Versions(object): + + @classmethod + def factory(cls, global_config, **local_config): + return cls() + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """Respond to a request for all Apmec API versions.""" + version_objs = [ + { + "id": "v1.0", + "status": "CURRENT", + }, + ] + + if req.path != '/': + language = req.best_match_language() + msg = _('Unknown API version specified') + msg = oslo_i18n.translate(msg, language) + return webob.exc.HTTPNotFound(explanation=msg) + + builder = versions_view.get_view_builder(req) + versions = [builder.build(version) for version in version_objs] + response = dict(versions=versions) + metadata = {} + + content_type = req.best_match_content_type() + body = (wsgi.Serializer(metadata=metadata). 
+ serialize(response, content_type)) + + response = webob.Response() + response.content_type = content_type + response.body = body + + return response diff --git a/apmec/api/views/__init__.py b/apmec/api/views/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/api/views/versions.py b/apmec/api/views/versions.py new file mode 100644 index 0000000..d097bb7 --- /dev/null +++ b/apmec/api/views/versions.py @@ -0,0 +1,58 @@ +# Copyright 2010-2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(object): + + def __init__(self, base_url): + """Object initialization. 
+ + :param base_url: url of the root wsgi application + """ + self.base_url = base_url + + def build(self, version_data): + """Generic method used to generate a version entity.""" + version = { + "id": version_data["id"], + "status": version_data["status"], + "links": self._build_links(version_data), + } + + return version + + def _build_links(self, version_data): + """Generate a container of links that refer to the provided version.""" + href = self.generate_href(version_data["id"]) + + links = [ + { + "rel": "self", + "href": href, + }, + ] + + return links + + def generate_href(self, version_number): + """Create an url that refers to a specific version_number.""" + return os.path.join(self.base_url, version_number) diff --git a/apmec/auth.py b/apmec/auth.py new file mode 100644 index 0000000..6e2db1b --- /dev/null +++ b/apmec/auth.py @@ -0,0 +1,75 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg +from oslo_log import log as logging +from oslo_middleware import request_id +import webob.dec +import webob.exc + +from apmec import context +from apmec import wsgi + +LOG = logging.getLogger(__name__) + + +class ApmecKeystoneContext(wsgi.Middleware): + """Make a request context from keystone headers.""" + + @webob.dec.wsgify + def __call__(self, req): + # Determine the user ID + user_id = req.headers.get('X_USER_ID') + if not user_id: + LOG.debug("X_USER_ID is not found in request") + return webob.exc.HTTPUnauthorized() + + # Determine the tenant + tenant_id = req.headers.get('X_PROJECT_ID') + + # Suck out the roles + roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')] + + # Human-friendly names + tenant_name = req.headers.get('X_PROJECT_NAME') + user_name = req.headers.get('X_USER_NAME') + + # Use request_id if already set + req_id = req.environ.get(request_id.ENV_REQUEST_ID) + + # Get the auth token + auth_token = req.headers.get('X_AUTH_TOKEN', + req.headers.get('X_STORAGE_TOKEN')) + + # Create a context with the authentication data + ctx = context.Context(user_id, tenant_id, roles=roles, + user_name=user_name, tenant_name=tenant_name, + request_id=req_id, auth_token=auth_token) + + # Inject the context... 
+ req.environ['apmec.context'] = ctx + + return self.application + + +def pipeline_factory(loader, global_conf, **local_conf): + """Create a paste pipeline based on the 'auth_strategy' config option.""" + pipeline = local_conf[cfg.CONF.auth_strategy] + pipeline = pipeline.split() + filters = [loader.get_filter(n) for n in pipeline[:-1]] + app = loader.get_app(pipeline[-1]) + filters.reverse() + for f in filters: + app = f(app) + return app diff --git a/apmec/catalogs/__init__.py b/apmec/catalogs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/catalogs/tosca/__init__.py b/apmec/catalogs/tosca/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/catalogs/tosca/lib/apmec_defs.yaml b/apmec/catalogs/tosca/lib/apmec_defs.yaml new file mode 100644 index 0000000..1f0a9d0 --- /dev/null +++ b/apmec/catalogs/tosca/lib/apmec_defs.yaml @@ -0,0 +1,192 @@ +data_types: + tosca.datatypes.apmec.ActionMap: + properties: + trigger: + type: string + required: true + action: + type: string + required: true + params: + type: map + entry_schema: + type: string + required: false + + tosca.datatypes.apmec.MonitoringParams: + properties: + monitoring_delay: + type: int + required: false + count: + type: int + required: false + interval: + type: int + required: false + timeout: + type: int + required: false + retry: + type: int + required: false + port: + type: int + required: false + + tosca.datatypes.apmec.MonitoringType: + properties: + name: + type: string + required: true + actions: + type: map + required: true + parameters: + type: tosca.datatypes.apmec.MonitoringParams + required: false + + tosca.datatypes.compute_properties: + properties: + num_cpus: + type: integer + required: false + mem_size: + type: string + required: false + disk_size: + type: string + required: false + mem_page_size: + type: string + required: false + numa_node_count: + type: integer + constraints: + - greater_or_equal: 2 + required: false + numa_nodes: + type: 
map + required: false + cpu_allocation: + type: map + required: false + + tosca.datatypes.apmec.VirtualIP: + properties: + ip_address: + type: string + required: true + description: The virtual IP address allowed to be paired with. + mac_address: + type: string + required: false + description: The mac address allowed to be paired with specific virtual IP. + +policy_types: + tosca.policies.apmec.Placement: + derived_from: tosca.policies.Root + + tosca.policies.apmec.Failure: + derived_from: tosca.policies.Root + action: + type: string + + tosca.policies.apmec.Failure.Respawn: + derived_from: tosca.policies.apmec.Failure + action: respawn + + tosca.policies.apmec.Failure.Terminate: + derived_from: tosca.policies.apmec.Failure + action: log_and_kill + + tosca.policies.apmec.Failure.Log: + derived_from: tosca.policies.apmec.Failure + action: log + + tosca.policies.apmec.Monitoring: + derived_from: tosca.policies.Root + properties: + name: + type: string + required: true + parameters: + type: map + entry_schema: + type: string + required: false + actions: + type: map + entry_schema: + type: string + required: true + + tosca.policies.apmec.Monitoring.NoOp: + derived_from: tosca.policies.apmec.Monitoring + properties: + name: noop + + tosca.policies.apmec.Monitoring.Ping: + derived_from: tosca.policies.apmec.Monitoring + properties: + name: ping + + tosca.policies.apmec.Monitoring.HttpPing: + derived_from: tosca.policies.apmec.Monitoring.Ping + properties: + name: http-ping + + tosca.policies.apmec.Alarming: + derived_from: tosca.policies.Monitoring + triggers: + resize_compute: + event_type: + type: map + entry_schema: + type: string + required: true + metrics: + type: string + required: true + condition: + type: map + entry_schema: + type: string + required: false + action: + type: map + entry_schema: + type: string + required: true + + tosca.policies.apmec.Scaling: + derived_from: tosca.policies.Scaling + description: Defines policy for scaling the given targets. 
+ properties: + increment: + type: integer + required: true + description: Number of nodes to add or remove during the scale out/in. + targets: + type: list + entry_schema: + type: string + required: true + description: List of Scaling nodes. + min_instances: + type: integer + required: true + description: Minimum number of instances to scale in. + max_instances: + type: integer + required: true + description: Maximum number of instances to scale out. + default_instances: + type: integer + required: true + description: Initial number of instances. + cooldown: + type: integer + required: false + default: 120 + description: Wait time (in seconds) between consecutive scaling operations. During the cooldown period, scaling action will be ignored diff --git a/apmec/catalogs/tosca/lib/apmec_mec_defs.yaml b/apmec/catalogs/tosca/lib/apmec_mec_defs.yaml new file mode 100644 index 0000000..efc4c84 --- /dev/null +++ b/apmec/catalogs/tosca/lib/apmec_mec_defs.yaml @@ -0,0 +1,274 @@ +data_types: + tosca.mec.datatypes.pathType: + properties: + forwarder: + type: string + required: true + capability: + type: string + required: true + + tosca.mec.datatypes.aclType: + properties: + eth_type: + type: string + required: false + eth_src: + type: string + required: false + eth_dst: + type: string + required: false + vlan_id: + type: integer + constraints: + - in_range: [ 1, 4094 ] + required: false + vlan_pcp: + type: integer + constraints: + - in_range: [ 0, 7 ] + required: false + mpls_label: + type: integer + constraints: + - in_range: [ 16, 1048575] + required: false + mpls_tc: + type: integer + constraints: + - in_range: [ 0, 7 ] + required: false + ip_dscp: + type: integer + constraints: + - in_range: [ 0, 63 ] + required: false + ip_ecn: + type: integer + constraints: + - in_range: [ 0, 3 ] + required: false + ip_src_prefix: + type: string + required: false + ip_dst_prefix: + type: string + required: false + ip_proto: + type: integer + constraints: + - in_range: [ 1, 254 ] + 
required: false + destination_port_range: + type: string + required: false + source_port_range: + type: string + required: false + network_src_port_id: + type: string + required: false + network_dst_port_id: + type: string + required: false + network_id: + type: string + required: false + network_name: + type: string + required: false + tenant_id: + type: string + required: false + icmpv4_type: + type: integer + constraints: + - in_range: [ 0, 254 ] + required: false + icmpv4_code: + type: integer + constraints: + - in_range: [ 0, 15 ] + required: false + arp_op: + type: integer + constraints: + - in_range: [ 1, 25 ] + required: false + arp_spa: + type: string + required: false + arp_tpa: + type: string + required: false + arp_sha: + type: string + required: false + arp_tha: + type: string + required: false + ipv6_src: + type: string + required: false + ipv6_dst: + type: string + required: false + ipv6_flabel: + type: integer + constraints: + - in_range: [ 0, 1048575] + required: false + icmpv6_type: + type: integer + constraints: + - in_range: [ 0, 255] + required: false + icmpv6_code: + type: integer + constraints: + - in_range: [ 0, 7] + required: false + ipv6_nd_target: + type: string + required: false + ipv6_nd_sll: + type: string + required: false + ipv6_nd_tll: + type: string + required: false + + tosca.mec.datatypes.policyType: + properties: + type: + type: string + required: false + constraints: + - valid_values: [ ACL ] + criteria: + type: list + required: true + entry_schema: + type: tosca.mec.datatypes.aclType + +node_types: + tosca.nodes.mec.VDU.Apmec: + derived_from: tosca.nodes.mec.VDU + capabilities: + mec_compute: + type: tosca.datatypes.compute_properties + properties: + name: + type: string + required: false + image: +# type: tosca.artifacts.Deployment.Image.VM + type: string + required: false + flavor: + type: string + required: false + availability_zone: + type: string + required: false + metadata: + type: map + entry_schema: + type: string + 
required: false + config_drive: + type: boolean + default: false + required: false + + placement_policy: +# type: tosca.policies.apmec.Placement + type: string + required: false + + monitoring_policy: +# type: tosca.policies.apmec.Monitoring +# type: tosca.datatypes.apmec.MonitoringType + type: map + required: false + + config: + type: string + required: false + + mgmt_driver: + type: string + default: noop + required: false + + service_type: + type: string + required: false + + user_data: + type: string + required: false + + user_data_format: + type: string + required: false + + key_name: + type: string + required: false + + tosca.nodes.mec.CP.Apmec: + derived_from: tosca.nodes.mec.CP + properties: + mac_address: + type: string + required: false + name: + type: string + required: false + management: + type: boolean + required: false + anti_spoofing_protection: + type: boolean + required: false + allowed_address_pairs: + type: list + entry_schema: + type: tosca.datatypes.apmec.VirtualIP + required: false + security_groups: + type: list + required: false + type: + type: string + required: false + constraints: + - valid_values: [ sriov, vnic ] + + tosca.nodes.mec.MEAC.Apmec: + derived_from: tosca.nodes.SoftwareComponent + requirements: + - host: + node: tosca.nodes.mec.VDU.Apmec + relationship: tosca.relationships.HostedOn + + tosca.nodes.BlockStorage.Apmec: + derived_from: tosca.nodes.BlockStorage + properties: + image: + type: string + required: false + + tosca.nodes.BlockStorageAttachment: + derived_from: tosca.nodes.Root + properties: + location: + type: string + required: true + requirements: + - virtualBinding: + node: tosca.nodes.mec.VDU.Apmec + - virtualAttachment: + node: tosca.nodes.BlockStorage.Apmec diff --git a/apmec/catalogs/tosca/utils.py b/apmec/catalogs/tosca/utils.py new file mode 100644 index 0000000..9f75fec --- /dev/null +++ b/apmec/catalogs/tosca/utils.py @@ -0,0 +1,687 @@ +# Copyright 2016 - Nokia +# Licensed under the Apache License, Version 
2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import os +import re +import sys +import yaml + +from oslo_log import log as logging +from toscaparser import properties +from toscaparser.utils import yamlparser + +from apmec.common import log +from apmec.common import utils +from apmec.extensions import mem + +from collections import OrderedDict + +FAILURE = 'tosca.policies.apmec.Failure' +LOG = logging.getLogger(__name__) +MONITORING = 'tosca.policies.Monitoring' +SCALING = 'tosca.policies.Scaling' +PLACEMENT = 'tosca.policies.apmec.Placement' +APMECCP = 'tosca.nodes.mec.CP.Apmec' +APMECVDU = 'tosca.nodes.mec.VDU.Apmec' +BLOCKSTORAGE = 'tosca.nodes.BlockStorage.Apmec' +BLOCKSTORAGE_ATTACHMENT = 'tosca.nodes.BlockStorageAttachment' +TOSCA_BINDS_TO = 'tosca.relationships.network.BindsTo' +VDU = 'tosca.nodes.mec.VDU' +IMAGE = 'tosca.artifacts.Deployment.Image.VM' +HEAT_SOFTWARE_CONFIG = 'OS::Heat::SoftwareConfig' +OS_RESOURCES = { + 'flavor': 'get_flavor_dict', + 'image': 'get_image_dict' +} + +FLAVOR_PROPS = { + "num_cpus": ("vcpus", 1, None), + "disk_size": ("disk", 1, "GB"), + "mem_size": ("ram", 512, "MB") +} + +CPU_PROP_MAP = (('hw:cpu_policy', 'cpu_affinity'), + ('hw:cpu_threads_policy', 'thread_allocation'), + ('hw:cpu_sockets', 'socket_count'), + ('hw:cpu_threads', 'thread_count'), + ('hw:cpu_cores', 'core_count')) + +CPU_PROP_KEY_SET = {'cpu_affinity', 'thread_allocation', 'socket_count', + 'thread_count', 'core_count'} + +FLAVOR_EXTRA_SPECS_LIST = ('cpu_allocation', + 'mem_page_size', + 
'numa_node_count', + 'numa_nodes') + +delpropmap = {APMECVDU: ('mgmt_driver', 'config', 'service_type', + 'placement_policy', 'monitoring_policy', + 'metadata', 'failure_policy'), + APMECCP: ('management',)} + +convert_prop = {APMECCP: {'anti_spoofing_protection': + 'port_security_enabled', + 'type': + 'binding:vnic_type'}} + +convert_prop_values = {APMECCP: {'type': {'sriov': 'direct', + 'vnic': 'normal'}}} + +deletenodes = (MONITORING, FAILURE, PLACEMENT) + +HEAT_RESOURCE_MAP = { + "flavor": "OS::Nova::Flavor", + "image": "OS::Glance::Image" +} + +SCALE_GROUP_RESOURCE = "OS::Heat::AutoScalingGroup" +SCALE_POLICY_RESOURCE = "OS::Heat::ScalingPolicy" + + +@log.log +def updateimports(template): + path = os.path.dirname(os.path.abspath(__file__)) + '/lib/' + defsfile = path + 'apmec_defs.yaml' + + if 'imports' in template: + template['imports'].append(defsfile) + else: + template['imports'] = [defsfile] + + if 'mec' in template['tosca_definitions_version']: + mecfile = path + 'apmec_mec_defs.yaml' + + template['imports'].append(mecfile) + + LOG.debug(path) + + +@log.log +def check_for_substitution_mappings(template, params): + sm_dict = params.get('substitution_mappings', {}) + requirements = sm_dict.get('requirements') + node_tpl = template['topology_template']['node_templates'] + req_dict_tpl = template['topology_template']['substitution_mappings'].get( + 'requirements') + # Check if substitution_mappings and requirements are empty in params but + # not in template. 
If True raise exception + if (not sm_dict or not requirements) and req_dict_tpl: + raise mem.InvalidParamsForSM() + # Check if requirements are present for SM in template, if True then return + elif (not sm_dict or not requirements) and not req_dict_tpl: + return + del params['substitution_mappings'] + for req_name, req_val in (req_dict_tpl).items(): + if req_name not in requirements: + raise mem.SMRequirementMissing(requirement=req_name) + if not isinstance(req_val, list): + raise mem.InvalidSubstitutionMapping(requirement=req_name) + try: + node_name = req_val[0] + node_req = req_val[1] + + node_tpl[node_name]['requirements'].append({ + node_req: { + 'node': requirements[req_name] + } + }) + node_tpl[requirements[req_name]] = \ + sm_dict[requirements[req_name]] + except Exception: + raise mem.InvalidSubstitutionMapping(requirement=req_name) + + +@log.log +def get_vdu_monitoring(template): + monitoring_dict = dict() + policy_dict = dict() + policy_dict['vdus'] = collections.OrderedDict() + for nt in template.nodetemplates: + if nt.type_definition.is_derived_from(APMECVDU): + mon_policy = nt.get_property_value('monitoring_policy') or 'noop' + if mon_policy != 'noop': + if 'parameters' in mon_policy: + mon_policy['monitoring_params'] = mon_policy['parameters'] + policy_dict['vdus'][nt.name] = {} + policy_dict['vdus'][nt.name][mon_policy['name']] = mon_policy + if policy_dict.get('vdus'): + monitoring_dict = policy_dict + return monitoring_dict + + +@log.log +def get_vdu_metadata(template): + metadata = dict() + metadata.setdefault('vdus', {}) + for nt in template.nodetemplates: + if nt.type_definition.is_derived_from(APMECVDU): + metadata_dict = nt.get_property_value('metadata') or None + if metadata_dict: + metadata['vdus'][nt.name] = {} + metadata['vdus'][nt.name].update(metadata_dict) + return metadata + + +@log.log +def pre_process_alarm_resources(mea, template, vdu_metadata): + alarm_resources = dict() + matching_metadata = dict() + alarm_actions = dict() + for 
policy in template.policies: + if (policy.type_definition.is_derived_from(MONITORING)): + matching_metadata =\ + _process_matching_metadata(vdu_metadata, policy) + alarm_actions = _process_alarm_actions(mea, policy) + alarm_resources['matching_metadata'] = matching_metadata + alarm_resources['alarm_actions'] = alarm_actions + return alarm_resources + + +def _process_matching_metadata(metadata, policy): + matching_mtdata = dict() + triggers = policy.entity_tpl['triggers'] + for trigger_name, trigger_dict in triggers.items(): + if not (trigger_dict.get('metadata') and metadata): + raise mem.MetadataNotMatched() + is_matched = False + for vdu_name, metadata_dict in metadata['vdus'].items(): + if trigger_dict['metadata'] ==\ + metadata_dict['metering.mea']: + is_matched = True + if not is_matched: + raise mem.MetadataNotMatched() + matching_mtdata[trigger_name] = dict() + matching_mtdata[trigger_name]['metadata.user_metadata.mea'] =\ + trigger_dict['metadata'] + return matching_mtdata + + +def _process_alarm_actions(mea, policy): + # process alarm url here + triggers = policy.entity_tpl['triggers'] + alarm_actions = dict() + for trigger_name, trigger_dict in triggers.items(): + alarm_url = mea['attributes'].get(trigger_name) + if alarm_url: + alarm_url = str(alarm_url) + LOG.debug('Alarm url in heat %s', alarm_url) + alarm_actions[trigger_name] = dict() + alarm_actions[trigger_name]['alarm_actions'] = [alarm_url] + return alarm_actions + + +def get_volumes(template): + volume_dict = dict() + node_tpl = template['topology_template']['node_templates'] + for node_name in list(node_tpl.keys()): + node_value = node_tpl[node_name] + if node_value['type'] != BLOCKSTORAGE: + continue + volume_dict[node_name] = dict() + block_properties = node_value.get('properties', {}) + for prop_name, prop_value in block_properties.items(): + if prop_name == 'size': + prop_value = \ + re.compile('(\d+)\s*(\w+)').match(prop_value).groups()[0] + volume_dict[node_name][prop_name] = prop_value + 
del node_tpl[node_name] + return volume_dict + + +@log.log +def get_vol_attachments(template): + vol_attach_dict = dict() + node_tpl = template['topology_template']['node_templates'] + valid_properties = { + 'location': 'mountpoint' + } + for node_name in list(node_tpl.keys()): + node_value = node_tpl[node_name] + if node_value['type'] != BLOCKSTORAGE_ATTACHMENT: + continue + vol_attach_dict[node_name] = dict() + vol_attach_properties = node_value.get('properties', {}) + # parse properties + for prop_name, prop_value in vol_attach_properties.items(): + if prop_name in valid_properties: + vol_attach_dict[node_name][valid_properties[prop_name]] = \ + prop_value + # parse requirements to get mapping of cinder volume <-> Nova instance + for req in node_value.get('requirements', {}): + if 'virtualBinding' in req: + vol_attach_dict[node_name]['instance_uuid'] = \ + {'get_resource': req['virtualBinding']['node']} + elif 'virtualAttachment' in req: + vol_attach_dict[node_name]['volume_id'] = \ + {'get_resource': req['virtualAttachment']['node']} + del node_tpl[node_name] + return vol_attach_dict + + +@log.log +def get_block_storage_details(template): + block_storage_details = dict() + block_storage_details['volumes'] = get_volumes(template) + block_storage_details['volume_attachments'] = get_vol_attachments(template) + return block_storage_details + + +@log.log +def get_mgmt_ports(tosca): + mgmt_ports = {} + for nt in tosca.nodetemplates: + if nt.type_definition.is_derived_from(APMECCP): + mgmt = nt.get_property_value('management') or None + if mgmt: + vdu = None + for rel, node in nt.relationships.items(): + if rel.is_derived_from(TOSCA_BINDS_TO): + vdu = node.name + break + + if vdu is not None: + name = 'mgmt_ip-%s' % vdu + mgmt_ports[name] = nt.name + LOG.debug('mgmt_ports: %s', mgmt_ports) + return mgmt_ports + + +@log.log +def add_resources_tpl(heat_dict, hot_res_tpl): + for res, res_dict in (hot_res_tpl).items(): + for vdu, vdu_dict in (res_dict).items(): + res_name 
= vdu + "_" + res + heat_dict["resources"][res_name] = { + "type": HEAT_RESOURCE_MAP[res], + "properties": {} + } + + for prop, val in (vdu_dict).items(): + heat_dict["resources"][res_name]["properties"][prop] = val + if heat_dict["resources"].get(vdu): + heat_dict["resources"][vdu]["properties"][res] = { + "get_resource": res_name + } + + +@log.log +def convert_unsupported_res_prop(heat_dict, unsupported_res_prop): + res_dict = heat_dict['resources'] + + for res, attr in (res_dict).items(): + res_type = attr['type'] + if res_type in unsupported_res_prop: + prop_dict = attr['properties'] + unsupported_prop_dict = unsupported_res_prop[res_type] + unsupported_prop = set(prop_dict.keys()) & set( + unsupported_prop_dict.keys()) + for prop in unsupported_prop: + # some properties are just punted to 'value_specs' + # property if they are incompatible + new_prop = unsupported_prop_dict[prop] + if new_prop == 'value_specs': + prop_dict.setdefault(new_prop, {})[ + prop] = prop_dict.pop(prop) + else: + prop_dict[new_prop] = prop_dict.pop(prop) + + +@log.log +def represent_odict(dump, tag, mapping, flow_style=None): + value = [] + node = yaml.MappingNode(tag, value, flow_style=flow_style) + if dump.alias_key is not None: + dump.represented_objects[dump.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + for item_key, item_value in mapping: + node_key = dump.represent_data(item_key) + node_value = dump.represent_data(item_value) + if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, yaml.ScalarNode) + and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if dump.default_flow_style is not None: + node.flow_style = dump.default_flow_style + else: + node.flow_style = best_style + return node + + +@log.log +def post_process_heat_template(heat_tpl, mgmt_ports, metadata, + alarm_resources, res_tpl, + 
vol_res={}, unsupported_res_prop=None): + # + # TODO(bobh) - remove when heat-translator can support literal strings. + # + def fix_user_data(user_data_string): + user_data_string = re.sub('user_data: #', 'user_data: |\n #', + user_data_string, re.MULTILINE) + return re.sub('\n\n', '\n', user_data_string, re.MULTILINE) + + heat_tpl = fix_user_data(heat_tpl) + # + # End temporary workaround for heat-translator + # + heat_dict = yamlparser.simple_ordered_parse(heat_tpl) + for outputname, portname in mgmt_ports.items(): + ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']} + output = {outputname: {'value': ipval}} + if 'outputs' in heat_dict: + heat_dict['outputs'].update(output) + else: + heat_dict['outputs'] = output + LOG.debug('Added output for %s', outputname) + if metadata: + for vdu_name, metadata_dict in metadata['vdus'].items(): + if heat_dict['resources'].get(vdu_name): + heat_dict['resources'][vdu_name]['properties']['metadata'] =\ + metadata_dict + matching_metadata = alarm_resources.get('matching_metadata') + alarm_actions = alarm_resources.get('alarm_actions') + if matching_metadata: + for trigger_name, matching_metadata_dict in matching_metadata.items(): + if heat_dict['resources'].get(trigger_name): + matching_mtdata = dict() + matching_mtdata['matching_metadata'] =\ + matching_metadata[trigger_name] + heat_dict['resources'][trigger_name]['properties'].\ + update(matching_mtdata) + if alarm_actions: + for trigger_name, alarm_actions_dict in alarm_actions.items(): + if heat_dict['resources'].get(trigger_name): + heat_dict['resources'][trigger_name]['properties']. 
\ + update(alarm_actions_dict) + + add_resources_tpl(heat_dict, res_tpl) + for res in heat_dict["resources"].values(): + if not res['type'] == HEAT_SOFTWARE_CONFIG: + continue + config = res["properties"]["config"] + if 'get_file' in config: + res["properties"]["config"] = open(config["get_file"]).read() + + if vol_res.get('volumes'): + add_volume_resources(heat_dict, vol_res) + if unsupported_res_prop: + convert_unsupported_res_prop(heat_dict, unsupported_res_prop) + + yaml.SafeDumper.add_representer(OrderedDict, + lambda dumper, value: represent_odict(dumper, + u'tag:yaml.org,2002:map', value)) + + return yaml.safe_dump(heat_dict) + + +@log.log +def add_volume_resources(heat_dict, vol_res): + # Add cinder volumes + for res_name, cinder_vol in vol_res['volumes'].items(): + heat_dict['resources'][res_name] = { + 'type': 'OS::Cinder::Volume', + 'properties': {} + } + for prop_name, prop_val in cinder_vol.items(): + heat_dict['resources'][res_name]['properties'][prop_name] = \ + prop_val + # Add cinder volume attachments + for res_name, cinder_vol in vol_res['volume_attachments'].items(): + heat_dict['resources'][res_name] = { + 'type': 'OS::Cinder::VolumeAttachment', + 'properties': {} + } + for prop_name, prop_val in cinder_vol.items(): + heat_dict['resources'][res_name]['properties'][prop_name] = \ + prop_val + + +@log.log +def post_process_template(template): + for nt in template.nodetemplates: + if (nt.type_definition.is_derived_from(MONITORING) or + nt.type_definition.is_derived_from(FAILURE) or + nt.type_definition.is_derived_from(PLACEMENT)): + template.nodetemplates.remove(nt) + continue + + if nt.type in delpropmap.keys(): + for prop in delpropmap[nt.type]: + for p in nt.get_properties_objects(): + if prop == p.name: + nt.get_properties_objects().remove(p) + + # change the property value first before the property key + if nt.type in convert_prop_values: + for prop in convert_prop_values[nt.type].keys(): + for p in nt.get_properties_objects(): + if (prop == 
p.name and + p.value in + convert_prop_values[nt.type][prop].keys()): + v = convert_prop_values[nt.type][prop][p.value] + p.value = v + + if nt.type in convert_prop: + for prop in convert_prop[nt.type].keys(): + for p in nt.get_properties_objects(): + if prop == p.name: + schema_dict = {'type': p.type} + v = nt.get_property_value(p.name) + newprop = properties.Property( + convert_prop[nt.type][prop], v, schema_dict) + nt.get_properties_objects().append(newprop) + nt.get_properties_objects().remove(p) + + +@log.log +def get_mgmt_driver(template): + mgmt_driver = None + for nt in template.nodetemplates: + if nt.type_definition.is_derived_from(APMECVDU): + if (mgmt_driver and nt.get_property_value('mgmt_driver') != + mgmt_driver): + raise mem.MultipleMGMTDriversSpecified() + else: + mgmt_driver = nt.get_property_value('mgmt_driver') + + return mgmt_driver + + +def findvdus(template): + vdus = [] + for nt in template.nodetemplates: + if nt.type_definition.is_derived_from(APMECVDU): + vdus.append(nt) + return vdus + + +def get_flavor_dict(template, flavor_extra_input=None): + flavor_dict = {} + vdus = findvdus(template) + for nt in vdus: + flavor_tmp = nt.get_properties().get('flavor') + if flavor_tmp: + continue + if nt.get_capabilities().get("mec_compute"): + flavor_dict[nt.name] = {} + properties = nt.get_capabilities()["mec_compute"].get_properties() + for prop, (hot_prop, default, unit) in \ + (FLAVOR_PROPS).items(): + hot_prop_val = (properties[prop].value + if properties.get(prop, None) else None) + if unit and hot_prop_val: + hot_prop_val = \ + utils.change_memory_unit(hot_prop_val, unit) + flavor_dict[nt.name][hot_prop] = \ + hot_prop_val if hot_prop_val else default + if any(p in properties for p in FLAVOR_EXTRA_SPECS_LIST): + flavor_dict[nt.name]['extra_specs'] = {} + es_dict = flavor_dict[nt.name]['extra_specs'] + populate_flavor_extra_specs(es_dict, properties, + flavor_extra_input) + return flavor_dict + + +def populate_flavor_extra_specs(es_dict, 
properties, flavor_extra_input): + if 'mem_page_size' in properties: + mval = properties['mem_page_size'].value + if str(mval).isdigit(): + mval = mval * 1024 + elif mval not in ('small', 'large', 'any'): + raise mem.HugePageSizeInvalidInput( + error_msg_details=(mval + ":Invalid Input")) + es_dict['hw:mem_page_size'] = mval + if 'numa_nodes' in properties and 'numa_node_count' in properties: + LOG.warning('Both numa_nodes and numa_node_count have been' + 'specified; numa_node definitions will be ignored and' + 'numa_node_count will be applied') + if 'numa_node_count' in properties: + es_dict['hw:numa_nodes'] = \ + properties['numa_node_count'].value + if 'numa_nodes' in properties and 'numa_node_count' not in properties: + nodes_dict = dict(properties['numa_nodes'].value) + dval = list(nodes_dict.values()) + ncount = 0 + for ndict in dval: + invalid_input = set(ndict.keys()) - {'id', 'vcpus', 'mem_size'} + if invalid_input: + raise mem.NumaNodesInvalidKeys( + error_msg_details=(', '.join(invalid_input)), + valid_keys="id, vcpus and mem_size") + if 'id' in ndict and 'vcpus' in ndict: + vk = "hw:numa_cpus." + str(ndict['id']) + vval = ",".join([str(x) for x in ndict['vcpus']]) + es_dict[vk] = vval + if 'id' in ndict and 'mem_size' in ndict: + mk = "hw:numa_mem." 
+ str(ndict['id']) + es_dict[mk] = ndict['mem_size'] + ncount += 1 + es_dict['hw:numa_nodes'] = ncount + if 'cpu_allocation' in properties: + cpu_dict = dict(properties['cpu_allocation'].value) + invalid_input = set(cpu_dict.keys()) - CPU_PROP_KEY_SET + if invalid_input: + raise mem.CpuAllocationInvalidKeys( + error_msg_details=(', '.join(invalid_input)), + valid_keys=(', '.join(CPU_PROP_KEY_SET))) + for(k, v) in CPU_PROP_MAP: + if v in cpu_dict: + es_dict[k] = cpu_dict[v] + if flavor_extra_input: + es_dict.update(flavor_extra_input) + + +def get_image_dict(template): + image_dict = {} + vdus = findvdus(template) + for vdu in vdus: + if not vdu.entity_tpl.get("artifacts"): + continue + artifacts = vdu.entity_tpl["artifacts"] + for name, artifact in (artifacts).items(): + if ('type' in artifact.keys() and + artifact["type"] == IMAGE): + if 'file' not in artifact.keys(): + raise mem.FilePathMissing() + image_dict[vdu.name] = { + "location": artifact["file"], + "container_format": "bare", + "disk_format": "raw", + "name": name + } + return image_dict + + +def get_resources_dict(template, flavor_extra_input=None): + res_dict = dict() + for res, method in (OS_RESOURCES).items(): + res_method = getattr(sys.modules[__name__], method) + if res is 'flavor': + res_dict[res] = res_method(template, flavor_extra_input) + else: + res_dict[res] = res_method(template) + return res_dict + + +@log.log +def get_scaling_policy(template): + scaling_policy_names = list() + for policy in template.policies: + if (policy.type_definition.is_derived_from(SCALING)): + scaling_policy_names.append(policy.name) + return scaling_policy_names + + +@log.log +def get_scaling_group_dict(ht_template, scaling_policy_names): + scaling_group_dict = dict() + scaling_group_names = list() + heat_dict = yamlparser.simple_ordered_parse(ht_template) + for resource_name, resource_dict in heat_dict['resources'].items(): + if resource_dict['type'] == SCALE_GROUP_RESOURCE: + 
scaling_group_names.append(resource_name) + if scaling_group_names: + scaling_group_dict[scaling_policy_names[0]] = scaling_group_names[0] + return scaling_group_dict + + +def get_nested_resources_name(template): + for policy in template.policies: + if (policy.type_definition.is_derived_from(SCALING)): + nested_resource_name = policy.name + '_res.yaml' + return nested_resource_name + + +def update_nested_scaling_resources(nested_resources, mgmt_ports, metadata, + res_tpl, unsupported_res_prop=None): + nested_tpl = dict() + if nested_resources: + nested_resource_name, nested_resources_yaml =\ + list(nested_resources.items())[0] + nested_resources_dict =\ + yamlparser.simple_ordered_parse(nested_resources_yaml) + if metadata: + for vdu_name, metadata_dict in metadata['vdus'].items(): + nested_resources_dict['resources'][vdu_name]['properties']['metadata'] = \ + metadata_dict + add_resources_tpl(nested_resources_dict, res_tpl) + for res in nested_resources_dict["resources"].values(): + if not res['type'] == HEAT_SOFTWARE_CONFIG: + continue + config = res["properties"]["config"] + if 'get_file' in config: + res["properties"]["config"] = open(config["get_file"]).read() + + if unsupported_res_prop: + convert_unsupported_res_prop(nested_resources_dict, + unsupported_res_prop) + + for outputname, portname in mgmt_ports.items(): + ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']} + output = {outputname: {'value': ipval}} + if 'outputs' in nested_resources_dict: + nested_resources_dict['outputs'].update(output) + else: + nested_resources_dict['outputs'] = output + LOG.debug(_('Added output for %s'), outputname) + yaml.SafeDumper.add_representer( + OrderedDict, lambda dumper, value: represent_odict( + dumper, u'tag:yaml.org,2002:map', value)) + nested_tpl[nested_resource_name] =\ + yaml.safe_dump(nested_resources_dict) + return nested_tpl diff --git a/apmec/cmd/__init__.py b/apmec/cmd/__init__.py new file mode 100644 index 0000000..7e16e4c --- /dev/null +++ 
b/apmec/cmd/__init__.py @@ -0,0 +1,28 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging as sys_logging + +from oslo_reports import guru_meditation_report as gmr + +from apmec import version + +# During the call to gmr.TextGuruMeditation.setup_autorun(), Guru Meditation +# Report tries to start logging. Set a handler here to accommodate this. +logger = sys_logging.getLogger(None) +if not logger.handlers: + logger.addHandler(sys_logging.StreamHandler()) + +_version_string = version.version_info.release_string() +gmr.TextGuruMeditation.setup_autorun(version=_version_string) diff --git a/apmec/cmd/eventlet/__init__.py b/apmec/cmd/eventlet/__init__.py new file mode 100644 index 0000000..84086c5 --- /dev/null +++ b/apmec/cmd/eventlet/__init__.py @@ -0,0 +1,17 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from apmec.common import eventlet_utils + +eventlet_utils.monkey_patch() diff --git a/apmec/cmd/eventlet/apmec_server.py b/apmec/cmd/eventlet/apmec_server.py new file mode 100644 index 0000000..4091a36 --- /dev/null +++ b/apmec/cmd/eventlet/apmec_server.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# If ../apmec/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)tosca_lib/python... 
+ +import sys + +from oslo_config import cfg +import oslo_i18n +from oslo_service import service as common_service + +from apmec import _i18n +_i18n.enable_lazy() +from apmec.common import config +from apmec import service + + +oslo_i18n.install("apmec") + + +def main(): + # the configuration will be read into the cfg.CONF global data structure + config.init(sys.argv[1:]) + if not cfg.CONF.config_file: + sys.exit(_("ERROR: Unable to find configuration file via the default" + " search paths (~/.apmec/, ~/, /etc/apmec/, /etc/) and" + " the '--config-file' option!")) + + try: + apmec_api = service.serve_wsgi(service.ApmecApiService) + launcher = common_service.launch(cfg.CONF, apmec_api, + workers=cfg.CONF.api_workers or None) + launcher.wait() + except KeyboardInterrupt: + pass + except RuntimeError as e: + sys.exit(_("ERROR: %s") % e) diff --git a/apmec/cmd/eventlet/conductor.py b/apmec/cmd/eventlet/conductor.py new file mode 100644 index 0000000..3acc887 --- /dev/null +++ b/apmec/cmd/eventlet/conductor.py @@ -0,0 +1,17 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from apmec.conductor import conductor_server + + +def main(): + conductor_server.main() diff --git a/apmec/common/__init__.py b/apmec/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/common/clients.py b/apmec/common/clients.py new file mode 100644 index 0000000..2dcfac1 --- /dev/null +++ b/apmec/common/clients.py @@ -0,0 +1,53 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from heatclient import client as heatclient +from apmec.mem import keystone + + +class OpenstackClients(object): + + def __init__(self, auth_attr, region_name=None): + super(OpenstackClients, self).__init__() + self.keystone_plugin = keystone.Keystone() + self.heat_client = None + self.mistral_client = None + self.keystone_client = None + self.region_name = region_name + self.auth_attr = auth_attr + + def _keystone_client(self): + version = self.auth_attr['auth_url'].rpartition('/')[2] + return self.keystone_plugin.initialize_client(version, + **self.auth_attr) + + def _heat_client(self): + endpoint = self.keystone_session.get_endpoint( + service_type='orchestration', region_name=self.region_name) + return heatclient.Client('1', endpoint=endpoint, + session=self.keystone_session) + + @property + def keystone_session(self): + return self.keystone.session + + @property + def keystone(self): + if not self.keystone_client: + self.keystone_client = self._keystone_client() + return self.keystone_client + + @property + def heat(self): + if not self.heat_client: + 
self.heat_client = self._heat_client() + return self.heat_client diff --git a/apmec/common/cmd_executer.py b/apmec/common/cmd_executer.py new file mode 100644 index 0000000..1a1cb56 --- /dev/null +++ b/apmec/common/cmd_executer.py @@ -0,0 +1,106 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +import paramiko + +from apmec.common import exceptions + +LOG = logging.getLogger(__name__) + + +class CommandResult(object): + """Result class contains command, stdout, stderror and return code.""" + def __init__(self, cmd, stdout, stderr, return_code): + self.__cmd = cmd + self.__stdout = stdout + self.__stderr = stderr + self.__return_code = return_code + + def get_command(self): + return self.__cmd + + def get_stdout(self): + return self.__stdout + + def get_stderr(self): + return self.__stderr + + def get_return_code(self): + return self.__return_code + + def __str__(self): + return "cmd: %s, stdout: %s, stderr: %s, return code: %s" \ + % (self.__cmd, self.__stdout, self.__stderr, self.__return_code) + + def __repr__(self): + return "cmd: %s, stdout: %s, stderr: %s, return code: %s" \ + % (self.__cmd, self.__stdout, self.__stderr, self.__return_code) + + +class RemoteCommandExecutor(object): + """Class to execute a command on remote location""" + def __init__(self, user, password, host, timeout=10): + self.__user = user + self.__password = password + self.__host = host + self.__paramiko_conn = None + self.__ssh = None + self.__timeout = 
timeout + self.__connect() + + def __connect(self): + try: + self.__ssh = paramiko.SSHClient() + self.__ssh.set_missing_host_key_policy(paramiko.WarningPolicy()) + self.__ssh.connect(self.__host, username=self.__user, + password=self.__password, timeout=self.__timeout) + LOG.info("Connected to %s", self.__host) + except paramiko.AuthenticationException: + LOG.error("Authentication failed when connecting to %s", + self.__host) + raise exceptions.NotAuthorized + except paramiko.SSHException: + LOG.error("Could not connect to %s. Giving up", self.__host) + raise + + def close_session(self): + self.__ssh.close() + LOG.debug("Connection close") + + def execute_command(self, cmd, input_data=None): + try: + stdin, stdout, stderr = self.__ssh.exec_command(cmd) + if input_data: + stdin.write(input_data) + LOG.debug("Input data written successfully") + stdin.flush() + LOG.debug("Input data flushed") + stdin.channel.shutdown_write() + + # NOTE (dkushwaha): There might be a case, when server can take + # too long time to write data in stdout buffer or sometimes hang + # itself, in that case readlines() will stuck for long/infinite + # time. To handle such cases, timeout logic should be introduce + # here. + cmd_out = stdout.readlines() + cmd_err = stderr.readlines() + return_code = stdout.channel.recv_exit_status() + except paramiko.SSHException: + LOG.error("Command execution failed at %s. Giving up", self.__host) + raise + result = CommandResult(cmd, cmd_out, cmd_err, return_code) + LOG.debug("Remote command execution result: %s", result) + return result + + def __del__(self): + self.close_session() diff --git a/apmec/common/config.py b/apmec/common/config.py new file mode 100644 index 0000000..b856522 --- /dev/null +++ b/apmec/common/config.py @@ -0,0 +1,141 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Routines for configuring Apmec +""" + +import os + +from oslo_config import cfg +from oslo_db import options as db_options +from oslo_log import log as logging +import oslo_messaging +from paste import deploy + +from apmec.common import utils +from apmec import version + + +LOG = logging.getLogger(__name__) + +core_opts = [ + cfg.HostAddressOpt('bind_host', default='0.0.0.0', + help=_("The host IP to bind to")), + cfg.IntOpt('bind_port', default=9896, + help=_("The port to bind to")), + cfg.StrOpt('api_paste_config', default="api-paste.ini", + help=_("The API paste config file to use")), + cfg.StrOpt('api_extensions_path', default="", + help=_("The path for API extensions")), + cfg.ListOpt('service_plugins', default=['meo', 'mem'], + help=_("The service plugins Apmec will use")), + cfg.StrOpt('policy_file', default="policy.json", + help=_("The policy file to use")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.BoolOpt('allow_bulk', default=True, + help=_("Allow the usage of the bulk API")), + cfg.BoolOpt('allow_pagination', default=False, + help=_("Allow the usage of the pagination")), + cfg.BoolOpt('allow_sorting', default=False, + help=_("Allow the usage of the sorting")), + cfg.StrOpt('pagination_max_limit', default="-1", + help=_("The maximum number of items returned " + "in a single response, value was 'infinite' " + "or negative integer means no limit")), + cfg.HostAddressOpt('host', default=utils.get_hostname(), + help=_("The hostname Apmec is running on")), +] + +core_cli_opts = [ + 
cfg.StrOpt('state_path', + default='/var/lib/apmec', + help=_("Where to store Apmec state files. " + "This directory must be writable by " + "the agent.")), +] + +logging.register_options(cfg.CONF) +# Register the configuration options +cfg.CONF.register_opts(core_opts) +cfg.CONF.register_cli_opts(core_cli_opts) + + +def config_opts(): + return [(None, core_opts), (None, core_cli_opts)] + +# Ensure that the control exchange is set correctly +oslo_messaging.set_transport_defaults(control_exchange='apmec') + + +def set_db_defaults(): + # Update the default QueuePool parameters. These can be tweaked by the + # conf variables - max_pool_size, max_overflow and pool_timeout + db_options.set_defaults( + cfg.CONF, + connection='sqlite://', + max_pool_size=10, + max_overflow=20, pool_timeout=10) + +set_db_defaults() + + +def init(args, **kwargs): + cfg.CONF(args=args, project='apmec', + version='%%prog %s' % version.version_info.release_string(), + **kwargs) + + # FIXME(ihrachys): if import is put in global, circular import + # failure occurs + from apmec.common import rpc as n_rpc + n_rpc.init(cfg.CONF) + + +def setup_logging(conf): + """Sets up the logging options for a log with supplied name. + + :param conf: a cfg.ConfOpts object + """ + product_name = "apmec" + logging.setup(conf, product_name) + LOG.info("Logging enabled!") + + +def load_paste_app(app_name): + """Builds and returns a WSGI app from a paste config file. 
+ + :param app_name: Name of the application to load + :raises ConfigFilesNotFoundError: when config file cannot be located + :raises RuntimeError: when application cannot be loaded from config file + """ + + config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config) + if not config_path: + raise cfg.ConfigFilesNotFoundError( + config_files=[cfg.CONF.api_paste_config]) + config_path = os.path.abspath(config_path) + LOG.info("Config paste file: %s", config_path) + + try: + app = deploy.loadapp("config:%s" % config_path, name=app_name) + except (LookupError, ImportError): + msg = (_("Unable to load %(app_name)s from " + "configuration file %(config_path)s.") % + {'app_name': app_name, + 'config_path': config_path}) + LOG.exception(msg) + raise RuntimeError(msg) + return app diff --git a/apmec/common/constants.py b/apmec/common/constants.py new file mode 100644 index 0000000..31ddd4e --- /dev/null +++ b/apmec/common/constants.py @@ -0,0 +1,47 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# TODO(salv-orlando): Verify if a single set of operational +# status constants is achievable + +TYPE_BOOL = "bool" +TYPE_INT = "int" +TYPE_LONG = "long" +TYPE_FLOAT = "float" +TYPE_LIST = "list" +TYPE_DICT = "dict" + +PAGINATION_INFINITE = 'infinite' + +SORT_DIRECTION_ASC = 'asc' +SORT_DIRECTION_DESC = 'desc' + +# attribute name for nova boot +ATTR_NAME_IMAGE = 'image' +ATTR_NAME_FLAVOR = 'flavor' +ATTR_NAME_META = 'meta' +ATTR_NAME_FILES = "files" +ATTR_NAME_RESERVEATION_ID = 'reservation_id' +ATTR_NAME_SECURITY_GROUPS = 'security_groups' +ATTR_NAME_USER_DATA = 'user_data' +ATTR_NAME_KEY_NAME = 'key_name' +ATTR_NAME_AVAILABILITY_ZONE = 'availability_zone' +ATTR_NAME_BLOCK_DEVICE_MAPPING = 'block_device_mapping' +ATTR_NAME_BLOCK_DEVICE_MAPPING_V2 = 'block_device_mapping_v2' +ATTR_NAME_NICS = 'nics' +ATTR_NAME_NIC = 'nic' +ATTR_NAME_SCHEDULER_HINTS = 'sheculer_hints' +ATTR_NAME_CONFIG_DRIVE = 'config_drive' +ATTR_NAME_DISK_CONFIG = 'disk_config' diff --git a/apmec/common/driver_manager.py b/apmec/common/driver_manager.py new file mode 100644 index 0000000..f7c3eef --- /dev/null +++ b/apmec/common/driver_manager.py @@ -0,0 +1,76 @@ +# Copyright 2013, 2014 Intel Corporation. +# All Rights Reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from oslo_log import log as logging + +import stevedore.named + +LOG = logging.getLogger(__name__) + + +class DriverManager(object): + def __init__(self, namespace, driver_list, **kwargs): + super(DriverManager, self).__init__() + manager = stevedore.named.NamedExtensionManager( + namespace, driver_list, invoke_on_load=True, **kwargs) + + drivers = {} + for ext in manager: + type_ = ext.obj.get_type() + if type_ in drivers: + msg = _("driver '%(new_driver)s' ignored because " + "driver '%(old_driver)s' is already " + "registered for driver '%(type)s'") % { + 'new_driver': ext.name, + 'old_driver': drivers[type].name, + 'type': type_} + LOG.error(msg) + raise SystemExit(msg) + drivers[type_] = ext + + self._drivers = dict((type_, ext.obj) + for (type_, ext) in drivers.items()) + LOG.info("Registered drivers from %(namespace)s: %(keys)s", + {'namespace': namespace, 'keys': self._drivers.keys()}) + + @staticmethod + def _driver_name(driver): + return driver.__module__ + '.' + driver.__class__.__name__ + + def register(self, type_, driver): + if type_ in self._drivers: + new_driver = self._driver_name(driver) + old_driver = self._driver_name(self._drivers[type_]) + msg = _("can't load driver '%(new_driver)s' because " + "driver '%(old_driver)s' is already " + "registered for driver '%(type)s'") % { + 'new_driver': new_driver, + 'old_driver': old_driver, + 'type': type_} + LOG.error(msg) + raise SystemExit(msg) + self._drivers[type_] = driver + + def invoke(self, type_, method_name, **kwargs): + driver = self._drivers[type_] + return getattr(driver, method_name)(**kwargs) + + def __getitem__(self, type_): + return self._drivers[type_] + + def __contains__(self, type_): + return type_ in self._drivers diff --git a/apmec/common/eventlet_utils.py b/apmec/common/eventlet_utils.py new file mode 100644 index 0000000..c1c5179 --- /dev/null +++ b/apmec/common/eventlet_utils.py @@ -0,0 +1,26 @@ +# copyright (c) 2015 Cloudbase Solutions. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import eventlet +from oslo_utils import importutils + + +def monkey_patch(): + eventlet.monkey_patch() + if os.name != 'nt': + p_c_e = importutils.import_module('pyroute2.config.eventlet') + p_c_e.eventlet_config() diff --git a/apmec/common/exceptions.py b/apmec/common/exceptions.py new file mode 100644 index 0000000..698dd5c --- /dev/null +++ b/apmec/common/exceptions.py @@ -0,0 +1,285 @@ +# Copyright 2011 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Apmec base exception handling. +""" + +from oslo_utils import excutils +import six + +from apmec._i18n import _ + + +class ApmecException(Exception): + """Base Apmec Exception. + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
+ """ + message = _("An unknown exception occurred.") + + def __init__(self, **kwargs): + try: + super(ApmecException, self).__init__(self.message % kwargs) + self.msg = self.message % kwargs + except Exception: + with excutils.save_and_reraise_exception() as ctxt: + if not self.use_fatal_exceptions(): + ctxt.reraise = False + # at least get the core message out if something happened + super(ApmecException, self).__init__(self.message) + + if six.PY2: + def __unicode__(self): + return unicode(self.msg) + + def __str__(self): + return self.msg + + def use_fatal_exceptions(self): + """Is the instance using fatal exceptions. + + :returns: Always returns False. + """ + return False + + +class BadRequest(ApmecException): + message = _('Bad %(resource)s request: %(msg)s') + + +class NotFound(ApmecException): + pass + + +class Conflict(ApmecException): + pass + + +class NotAuthorized(ApmecException): + message = _("Not authorized.") + + +class ServiceUnavailable(ApmecException): + message = _("The service is unavailable") + + +class AdminRequired(NotAuthorized): + message = _("User does not have admin privileges: %(reason)s") + + +class PolicyNotAuthorized(NotAuthorized): + message = _("Policy doesn't allow %(action)s to be performed.") + + +class NetworkNotFound(NotFound): + message = _("Network %(net_id)s could not be found") + + +class PolicyFileNotFound(NotFound): + message = _("Policy configuration policy.json could not be found") + + +class PolicyInitError(ApmecException): + message = _("Failed to init policy %(policy)s because %(reason)s") + + +class PolicyCheckError(ApmecException): + message = _("Failed to check policy %(policy)s because %(reason)s") + + +class StateInvalid(BadRequest): + message = _("Unsupported port state: %(port_state)s") + + +class InUse(ApmecException): + message = _("The resource is inuse") + + +class ResourceExhausted(ServiceUnavailable): + pass + + +class MalformedRequestBody(BadRequest): + message = _("Malformed request body: 
%(reason)s") + + +class Invalid(ApmecException): + def __init__(self, message=None): + self.message = message + super(Invalid, self).__init__() + + +class InvalidInput(BadRequest): + message = _("Invalid input for operation: %(error_message)s.") + + +class InvalidAllocationPool(BadRequest): + message = _("The allocation pool %(pool)s is not valid.") + + +class OverlappingAllocationPools(Conflict): + message = _("Found overlapping allocation pools:" + "%(pool_1)s %(pool_2)s for subnet %(subnet_cidr)s.") + + +class OutOfBoundsAllocationPool(BadRequest): + message = _("The allocation pool %(pool)s spans " + "beyond the subnet cidr %(subnet_cidr)s.") + + +class MacAddressGenerationFailure(ServiceUnavailable): + message = _("Unable to generate unique mac on network %(net_id)s.") + + +class IpAddressGenerationFailure(Conflict): + message = _("No more IP addresses available on network %(net_id)s.") + + +class BridgeDoesNotExist(ApmecException): + message = _("Bridge %(bridge)s does not exist.") + + +class PreexistingDeviceFailure(ApmecException): + message = _("Creation failed. %(dev_name)s already exists.") + + +class SudoRequired(ApmecException): + message = _("Sudo privilege is required to run this command.") + + +class QuotaResourceUnknown(NotFound): + message = _("Unknown quota resources %(unknown)s.") + + +class OverQuota(Conflict): + message = _("Quota exceeded for resources: %(overs)s") + + +class QuotaMissingTenant(BadRequest): + message = _("Tenant-id was missing from Quota request") + + +class InvalidQuotaValue(Conflict): + message = _("Change would make usage less than 0 for the following " + "resources: %(unders)s") + + +class InvalidSharedSetting(Conflict): + message = _("Unable to reconfigure sharing settings for network " + "%(network)s. 
Multiple tenants are using it") + + +class InvalidExtensionEnv(BadRequest): + message = _("Invalid extension environment: %(reason)s") + + +class ExtensionsNotFound(NotFound): + message = _("Extensions not found: %(extensions)s") + + +class InvalidContentType(ApmecException): + message = _("Invalid content type %(content_type)s") + + +class ExternalIpAddressExhausted(BadRequest): + message = _("Unable to find any IP address on external " + "network %(net_id)s.") + + +class TooManyExternalNetworks(ApmecException): + message = _("More than one external network exists") + + +class InvalidConfigurationOption(ApmecException): + message = _("An invalid value was provided for %(opt_name)s: " + "%(opt_value)s") + + +class GatewayConflictWithAllocationPools(InUse): + message = _("Gateway ip %(ip_address)s conflicts with " + "allocation pool %(pool)s") + + +class GatewayIpInUse(InUse): + message = _("Current gateway ip %(ip_address)s already in use " + "by port %(port_id)s. Unable to update.") + + +class NetworkVlanRangeError(ApmecException): + message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'") + + def __init__(self, **kwargs): + # Convert vlan_range tuple to 'start:end' format for display + if isinstance(kwargs['vlan_range'], tuple): + kwargs['vlan_range'] = "%d:%d" % kwargs['vlan_range'] + super(NetworkVlanRangeError, self).__init__(**kwargs) + + +class NetworkVxlanPortRangeError(ApmecException): + message = _("Invalid network VXLAN port range: '%(vxlan_range)s'") + + +class VxlanNetworkUnsupported(ApmecException): + message = _("VXLAN Network unsupported.") + + +class DuplicatedExtension(ApmecException): + message = _("Found duplicate extension: %(alias)s") + + +class DeviceIDNotOwnedByTenant(Conflict): + message = _("The following device_id %(device_id)s is not owned by your " + "tenant or matches another tenants router.") + + +class InvalidCIDR(BadRequest): + message = _("Invalid CIDR %(input)s given as IP prefix") + + +class 
def log(method):
    """Decorator helping to log method calls.

    Emits a DEBUG record with the fully qualified class name, the method
    name and the (password-masked) positional and keyword arguments each
    time the decorated method is invoked, then delegates to it.

    :param method: bound-method-style callable to wrap (``args[0]`` is
        expected to be the instance).
    :returns: the wrapping callable.
    """
    import functools

    # functools.wraps preserves the wrapped method's __name__, __doc__
    # and other metadata, which the original wrapper silently discarded.
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        instance = args[0]
        data = {"class_name": (instance.__class__.__module__ + '.'
                               + instance.__class__.__name__),
                "method_name": method.__name__,
                # mask_password scrubs credentials from the logged args
                "args": strutils.mask_password(args[1:]),
                "kwargs": strutils.mask_password(kwargs)}
        LOG.debug('%(class_name)s method %(method_name)s'
                  ' called with arguments %(args)s %(kwargs)s', data)
        return method(*args, **kwargs)
    return wrapper
This +# will prevent get_connection from creating connections to the AMQP server +RPC_DISABLED = False + + +def init_action_rpc(conf): + global TRANSPORT + TRANSPORT = oslo_messaging.get_transport(conf) + + +def init(conf): + global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER + exmods = get_allowed_exmods() + TRANSPORT = oslo_messaging.get_transport(conf, + allowed_remote_exmods=exmods) + NOTIFICATION_TRANSPORT = oslo_messaging.get_notification_transport( + conf, allowed_remote_exmods=exmods) + serializer = RequestContextSerializer() + NOTIFIER = oslo_messaging.Notifier(NOTIFICATION_TRANSPORT, + serializer=serializer) + + +def cleanup(): + global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER + assert TRANSPORT is not None + assert NOTIFICATION_TRANSPORT is not None + assert NOTIFIER is not None + TRANSPORT.cleanup() + NOTIFICATION_TRANSPORT.cleanup() + _ContextWrapper.reset_timeouts() + TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None + + +def add_extra_exmods(*args): + EXTRA_EXMODS.extend(args) + + +def clear_extra_exmods(): + del EXTRA_EXMODS[:] + + +def get_allowed_exmods(): + return ALLOWED_EXMODS + EXTRA_EXMODS + + +def _get_default_method_timeout(): + return TRANSPORT.conf.rpc_response_timeout + + +def _get_default_method_timeouts(): + return collections.defaultdict(_get_default_method_timeout) + + +class _ContextWrapper(object): + """Wraps oslo messaging contexts to set the timeout for calls. + + This intercepts RPC calls and sets the timeout value to the globally + adapting value for each method. An oslo messaging timeout results in + a doubling of the timeout value for the method on which it timed out. + There currently is no logic to reduce the timeout since busy Apmec + servers are more frequently the cause of timeouts rather than lost + messages. 
+ """ + _METHOD_TIMEOUTS = _get_default_method_timeouts() + _max_timeout = None + + @classmethod + def reset_timeouts(cls): + # restore the original default timeout factory + cls._METHOD_TIMEOUTS = _get_default_method_timeouts() + cls._max_timeout = None + + @classmethod + def get_max_timeout(cls): + return cls._max_timeout or _get_default_method_timeout() * 10 + + @classmethod + def set_max_timeout(cls, max_timeout): + if max_timeout < cls.get_max_timeout(): + cls._METHOD_TIMEOUTS = collections.defaultdict( + lambda: max_timeout, **{ + k: min(v, max_timeout) + for k, v in cls._METHOD_TIMEOUTS.items() + }) + cls._max_timeout = max_timeout + + def __init__(self, original_context): + self._original_context = original_context + + def __getattr__(self, name): + return getattr(self._original_context, name) + + def call(self, ctxt, method, **kwargs): + # two methods with the same name in different namespaces should + # be tracked independently + if self._original_context.target.namespace: + scoped_method = '%s.%s' % (self._original_context.target.namespace, + method) + else: + scoped_method = method + # set the timeout from the global method timeout tracker for this + # method + self._original_context.timeout = self._METHOD_TIMEOUTS[scoped_method] + try: + return self._original_context.call(ctxt, method, **kwargs) + except oslo_messaging.MessagingTimeout: + with excutils.save_and_reraise_exception(): + wait = random.uniform( + 0, + min(self._METHOD_TIMEOUTS[scoped_method], + TRANSPORT.conf.rpc_response_timeout) + ) + LOG.error("Timeout in RPC method %(method)s. Waiting for " + "%(wait)s seconds before next attempt. 
If the " + "server is not down, consider increasing the " + "rpc_response_timeout option as message " + "server(s) may be overloaded and unable to " + "respond quickly enough.", + {'wait': int(round(wait)), 'method': scoped_method}) + new_timeout = min( + self._original_context.timeout * 2, self.get_max_timeout()) + if new_timeout > self._METHOD_TIMEOUTS[scoped_method]: + LOG.warning("Increasing timeout for %(method)s calls " + "to %(new)s seconds. Restart the client to " + "restore it to the default value.", + {'method': scoped_method, 'new': new_timeout}) + self._METHOD_TIMEOUTS[scoped_method] = new_timeout + time.sleep(wait) + + +class BackingOffClient(oslo_messaging.RPCClient): + """An oslo messaging RPC Client that implements a timeout backoff. + + This has all of the same interfaces as oslo_messaging.RPCClient but + if the timeout parameter is not specified, the _ContextWrapper returned + will track when call timeout exceptions occur and exponentially increase + the timeout for the given call method. 
class RequestContextSerializer(om_serializer.Serializer):
    """Convert an RPC common context into an apmec Context.

    Optionally wraps a base serializer; entity (de)serialization is
    delegated to it when provided, while the request context itself is
    converted here.
    """

    def __init__(self, base=None):
        super(RequestContextSerializer, self).__init__()
        # Optional inner serializer used for entity payloads.
        self._base = base

    def serialize_entity(self, ctxt, entity):
        # Without a base serializer, pass the entity through untouched.
        if not self._base:
            return entity
        return self._base.serialize_entity(ctxt, entity)

    def deserialize_entity(self, ctxt, entity):
        if not self._base:
            return entity
        return self._base.deserialize_entity(ctxt, entity)

    def serialize_context(self, ctxt):
        # Flatten the request context to a plain dict for the wire.
        _context = ctxt.to_dict()
        return _context

    def deserialize_context(self, ctxt):
        # Rebuild a full apmec Context from the wire-format dict.
        rpc_ctxt_dict = ctxt.copy()
        return context.Context.from_dict(rpc_ctxt_dict)
def create_connection():
    # NOTE(salv-orlando): This is a clever interpretation of the factory
    # design pattern aimed at preventing plugins from initializing RPC
    # servers upon initialization when they are running in the REST over
    # HTTP API server. The educated reader will perfectly be able to see
    # that this is a fairly dirty hack to avoid having to change the
    # initialization process of every plugin.
    if RPC_DISABLED:
        return VoidConnection()
    return Connection()
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +# describes parameters used by different unit/functional tests +# a plugin-specific testing mechanism should import this dictionary +# and override the values in it if needed (e.g., run_tests.py in +# apmec/plugins/openvswitch/ ) +test_config = {} diff --git a/apmec/common/topics.py b/apmec/common/topics.py new file mode 100644 index 0000000..ae5e49c --- /dev/null +++ b/apmec/common/topics.py @@ -0,0 +1,15 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +TOPIC_ACTION_KILL = 'KILL_ACTION' +TOPIC_CONDUCTOR = 'APMEC_CONDUCTOR' diff --git a/apmec/common/utils.py b/apmec/common/utils.py new file mode 100644 index 0000000..86e8807 --- /dev/null +++ b/apmec/common/utils.py @@ -0,0 +1,226 @@ +# Copyright 2011, VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Borrowed from nova code base, more utilities will be added/borrowed as and +# when needed. + +"""Utilities and helper functions.""" + +import logging as std_logging +import os +import random +import signal +import socket +import string +import sys + +from eventlet.green import subprocess +import netaddr +from oslo_concurrency import lockutils +from oslo_config import cfg +from oslo_log import log as logging +from oslo_log import versionutils +from oslo_utils import importutils +from stevedore import driver + +from apmec._i18n import _ +from apmec.common import constants as q_const + + +TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +LOG = logging.getLogger(__name__) +SYNCHRONIZED_PREFIX = 'apmec-' +MEM_UNITS = { + "MB": { + "MB": { + "op": "*", + "val": "1" + }, + "GB": { + "op": "/", + "val": "1024" + } + }, + "GB": { + "MB": { + "op": "*", + "val": "1024" + }, + "GB": { + "op": "*", + "val": "1" + } + } +} +CONF = cfg.CONF +synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX) + + +def find_config_file(options, config_file): + """Return the first config file found. 
+ + We search for the paste config file in the following order: + * If --config-file option is used, use that + * Search for the configuration files via common cfg directories + :retval Full path to config file, or None if no config file found + """ + fix_path = lambda p: os.path.abspath(os.path.expanduser(p)) + if options.get('config_file'): + if os.path.exists(options['config_file']): + return fix_path(options['config_file']) + + dir_to_common = os.path.dirname(os.path.abspath(__file__)) + root = os.path.join(dir_to_common, '..', '..', '..', '..') + # Handle standard directory search for the config file + config_file_dirs = [fix_path(os.path.join(os.getcwd(), 'etc')), + fix_path(os.path.join('~', '.apmec-venv', 'etc', + 'apmec')), + fix_path('~'), + os.path.join(cfg.CONF.state_path, 'etc'), + os.path.join(cfg.CONF.state_path, 'etc', 'apmec'), + fix_path(os.path.join('~', '.local', + 'etc', 'apmec')), + '/usr/etc/apmec', + '/usr/local/etc/apmec', + '/etc/apmec/', + '/etc'] + + if 'plugin' in options: + config_file_dirs = [ + os.path.join(x, 'apmec', 'plugins', options['plugin']) + for x in config_file_dirs + ] + + if os.path.exists(os.path.join(root, 'plugins')): + plugins = [fix_path(os.path.join(root, 'plugins', p, 'etc')) + for p in os.listdir(os.path.join(root, 'plugins'))] + plugins = [p for p in plugins if os.path.isdir(p)] + config_file_dirs.extend(plugins) + + for cfg_dir in config_file_dirs: + cfg_file = os.path.join(cfg_dir, config_file) + if os.path.exists(cfg_file): + return cfg_file + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. 
def change_memory_unit(mem, to):
    """Convert the memory value *mem* to the unit named by *to*.

    If no unit is present in *mem* (a bare number or digit string) it is
    treated as megabytes. Conversion factors come from the module-level
    ``MEM_UNITS`` table.

    :param mem: amount of memory, e.g. ``512``, ``"512"``, ``"1 GB"``
        (unit suffix is case-insensitive).
    :param to: target unit, ``"MB"`` or ``"GB"``.
    :returns: the converted amount — an int when the table multiplies,
        a float when it divides (true division); ``None`` when no known
        unit is found in *mem*, mirroring the original fall-through.
    """
    mem = str(mem) + " MB" if str(mem).isdigit() else mem.upper()
    for unit in MEM_UNITS:
        mem_arr = mem.split(unit)
        if len(mem_arr) < 2:
            continue
        # The original built a Python expression string and passed it to
        # eval(); compute the conversion directly instead so input can
        # never be executed as code.
        quantity = mem_arr[0]
        try:
            amount = int(quantity)
        except ValueError:
            amount = float(quantity)
        factor = int(MEM_UNITS[unit][to]["val"])
        if MEM_UNITS[unit][to]["op"] == "*":
            return amount * factor
        return amount / factor
a/apmec/conductor/conductor_server.py b/apmec/conductor/conductor_server.py new file mode 100644 index 0000000..790f1e0 --- /dev/null +++ b/apmec/conductor/conductor_server.py @@ -0,0 +1,91 @@ +# Copyright 2017 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging +from oslo_service import service +from oslo_utils import timeutils +from sqlalchemy.orm import exc as orm_exc + +from apmec.common import topics +from apmec import context as t_context +from apmec.db.common_services import common_services_db +from apmec.db.meo import meo_db +from apmec.extensions import meo +from apmec import manager +from apmec.plugins.common import constants +from apmec import service as apmec_service +from apmec import version + + +LOG = logging.getLogger(__name__) + + +class Conductor(manager.Manager): + def __init__(self, host, conf=None): + if conf: + self.conf = conf + else: + self.conf = cfg.CONF + super(Conductor, self).__init__(host=self.conf.host) + + def update_vim(self, context, vim_id, status): + t_admin_context = t_context.get_admin_context() + update_time = timeutils.utcnow() + with t_admin_context.session.begin(subtransactions=True): + try: + query = t_admin_context.session.query(meo_db.Vim) + query.filter( + meo_db.Vim.id == vim_id).update( + {'status': status, + 'updated_at': update_time}) + except orm_exc.NoResultFound: + raise 
def main(manager='apmec.conductor.conductor_server.Conductor'):
    """Entry point for the apmec-conductor service binary.

    Parses CLI/config, configures logging and the RPC transport, then
    launches the conductor service and blocks until it exits.

    :param manager: dotted path of the manager class to run.
    """
    init(sys.argv[1:])
    # NOTE: the original called logging.setup() twice in a row; once is
    # sufficient.
    logging.setup(cfg.CONF, "apmec")
    oslo_messaging.set_transport_defaults(control_exchange='apmec')
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)
    server = apmec_service.Service.create(
        binary='apmec-conductor',
        topic=topics.TOPIC_CONDUCTOR,
        manager=manager)
    service.launch(cfg.CONF, server).wait()
import oslo_messaging

from apmec.common import topics


class VIMUpdateRPC(object):
    """Client-side RPC stub for pushing VIM status updates to the conductor.

    Messages are sent on the conductor topic of the ``apmec`` exchange;
    fanout is disabled, so a single conductor consumes each update.
    """

    target = oslo_messaging.Target(
        exchange='apmec',
        topic=topics.TOPIC_CONDUCTOR,
        fanout=False,
        version='1.0')

    def update_vim(self, context, **kwargs):
        # Stub only: the conductor-side endpoint implements the behavior.
        pass


# --- apmec/context.py -------------------------------------------------------

"""Context: context for security/db session."""

import copy
import datetime

from oslo_context import context as oslo_context
from oslo_db.sqlalchemy import enginefacade

from apmec.db import api as db_api
from apmec import policy


class ContextBase(oslo_context.RequestContext):
    """Security context and request information.

    Represents the user taking a given action within the system.

    """

    def __init__(self, user_id, tenant_id, is_admin=None, roles=None,
                 timestamp=None, request_id=None, tenant_name=None,
                 user_name=None, overwrite=True, auth_token=None,
                 **kwargs):
        """Object initialization.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        super(ContextBase, self).__init__(auth_token=auth_token,
                                          user=user_id, tenant=tenant_id,
                                          is_admin=is_admin,
                                          request_id=request_id,
                                          overwrite=overwrite,
                                          roles=roles)
        self.user_name = user_name
        self.tenant_name = tenant_name

        # Default the timestamp to "now" when the caller did not supply one.
        if not timestamp:
            timestamp = datetime.datetime.utcnow()
        self.timestamp = timestamp
        # When admin-ness was not stated explicitly, derive it from policy.
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)

    @property
    def project_id(self):
        # 'project' is the modern keystone term for 'tenant'.
        return self.tenant

    @property
    def tenant_id(self):
        return self.tenant

    @tenant_id.setter
    def tenant_id(self, tenant_id):
        self.tenant = tenant_id

    @property
    def user_id(self):
        return self.user

    @user_id.setter
    def user_id(self, user_id):
        self.user = user_id

    def to_dict(self):
        """Serialize; tenant_* values are duplicated under project_* keys."""
        context = super(ContextBase, self).to_dict()
        context.update({
            'user_id': self.user_id,
            'tenant_id': self.tenant_id,
            'project_id': self.project_id,
            'timestamp': str(self.timestamp),
            'tenant_name': self.tenant_name,
            'project_name': self.tenant_name,
            'user_name': self.user_name,
        })
        return context

    @classmethod
    def from_dict(cls, values):
        # __init__ swallows unknown keys via **kwargs, so older RPC
        # payloads with extra fields still deserialize.
        return cls(**values)

    def elevated(self):
        """Return a version of this context with admin flag set."""
        context = copy.copy(self)
        context.is_admin = True

        if 'admin' not in [x.lower() for x in context.roles]:
            context.roles = context.roles + ["admin"]

        return context


@enginefacade.transaction_context_provider
class ContextBaseWithSession(ContextBase):
    # The decorator attaches enginefacade's session/connection descriptors
    # to this class; no extra behavior of its own.
    pass


class Context(ContextBaseWithSession):
    def __init__(self, *args, **kwargs):
        super(Context, self).__init__(*args, **kwargs)
        # Lazily-created fallback session (see the `session` property).
        self._session = None

    @property
    def session(self):
        # TODO(akamyshnikova): checking for session attribute won't be needed
        # when reader and writer will be used
        if hasattr(super(Context, self), 'session'):
            # An enginefacade-managed session is active; prefer it.
            return super(Context, self).session
        if self._session is None:
            self._session = db_api.get_session()
        return self._session


def get_admin_context():
    """Return an admin Context capable of opening a DB session."""
    return Context(user_id=None,
                   tenant_id=None,
                   is_admin=True,
                   overwrite=False)


def get_admin_context_without_session():
    """Return an admin ContextBase with no DB session support."""
    return ContextBase(user_id=None,
                       tenant_id=None,
                       is_admin=True)
from oslo_config import cfg
from oslo_db.sqlalchemy import enginefacade


context_manager = enginefacade.transaction_context()

_FACADE = None


def _create_facade_lazily():
    """Build the legacy engine facade on first use and cache it.

    Configuration is deferred to the first call so that
    ``cfg.CONF.database`` is fully populated before the engine is created.
    """
    global _FACADE
    if _FACADE is None:
        context_manager.configure(sqlite_fk=True, **cfg.CONF.database)
        # NOTE: reaches into enginefacade internals (_factory) to obtain
        # the legacy facade; kept for compatibility with oslo.db usage here.
        _FACADE = context_manager._factory.get_legacy_facade()
    return _FACADE


def get_engine():
    """Helper method to grab engine."""
    return _create_facade_lazily().get_engine()


def get_session(autocommit=True, expire_on_commit=False):
    """Helper method to grab session."""
    return _create_facade_lazily().get_session(
        autocommit=autocommit, expire_on_commit=expire_on_commit)
import sqlalchemy as sa

from apmec.db import model_base
from apmec.db import types


class Event(model_base.BASE):
    """Audit-trail row recording one lifecycle event of an apmec resource."""

    # NOTE(review): no explicit __tablename__ — presumably derived by
    # model_base.BASE; confirm it matches the migration scripts.
    id = sa.Column(sa.Integer, primary_key=True, nullable=False,
                   autoincrement=True)
    # UUID of the affected resource (not a foreign key — the resource may
    # live in any of several tables).
    resource_id = sa.Column(types.Uuid, nullable=False)
    resource_state = sa.Column(sa.String(64), nullable=False)
    resource_type = sa.Column(sa.String(64), nullable=False)
    timestamp = sa.Column(sa.DateTime, nullable=False)
    event_type = sa.Column(sa.String(64), nullable=False)
    # Optional free-form JSON payload with extra detail.
    event_details = sa.Column(types.Json)
from sqlalchemy.orm import exc as orm_exc

from oslo_log import log as logging

from apmec.common import log
from apmec.db.common_services import common_services_db
from apmec.db import db_base
from apmec.extensions import common_services
from apmec import manager


LOG = logging.getLogger(__name__)

# Columns copied verbatim from the Event model into API response dicts.
EVENT_ATTRIBUTES = ('id', 'resource_id', 'resource_type', 'resource_state',
                    'timestamp', 'event_type', 'event_details')


class CommonServicesPluginDb(common_services.CommonServicesPluginBase,
                             db_base.CommonDbMixin):
    """DB-backed implementation of the common-services (audit events) API."""

    def __init__(self):
        super(CommonServicesPluginDb, self).__init__()

    @property
    def _core_plugin(self):
        return manager.ApmecManager.get_plugin()

    def _make_event_dict(self, event_db, fields=None):
        """Convert an Event row to a dict, optionally projected to fields."""
        res = dict((key, event_db[key]) for key in EVENT_ATTRIBUTES)
        return self._fields(res, fields)

    def _fields(self, resource, fields):
        # Shadows CommonDbMixin._fields with identical behavior.
        if fields:
            return dict(((key, item) for key, item in resource.items()
                         if key in fields))
        return resource

    @log.log
    def create_event(self, context, res_id, res_type, res_state, evt_type,
                     tstamp, details=""):
        """Persist one audit event.

        Raises EventCreationFailureException on any DB error (the broad
        except is deliberate: event writing must surface a typed error).
        """
        try:
            with context.session.begin(subtransactions=True):
                event_db = common_services_db.Event(
                    resource_id=res_id,
                    resource_type=res_type,
                    resource_state=res_state,
                    event_details=details,
                    event_type=evt_type,
                    timestamp=tstamp)
                context.session.add(event_db)
        except Exception as e:
            LOG.exception("create event error: %s", str(e))
            raise common_services.EventCreationFailureException(
                error_str=str(e))
        return self._make_event_dict(event_db)

    @log.log
    def get_event(self, context, event_id, fields=None):
        """Return one event by id; raises EventNotFoundException if absent."""
        try:
            events_db = self._get_by_id(context,
                                        common_services_db.Event, event_id)
        except orm_exc.NoResultFound:
            raise common_services.EventNotFoundException(evt_id=event_id)
        return self._make_event_dict(events_db, fields)

    @log.log
    def get_events(self, context, filters=None, fields=None, sorts=None,
                   limit=None, marker_obj=None, page_reverse=False):
        """List events with optional filtering, sorting and pagination."""
        return self._get_collection(context, common_services_db.Event,
                                    self._make_event_dict,
                                    filters, fields, sorts, limit,
                                    marker_obj, page_reverse)
class CommonDbMixin(object):
    """Common methods used in core and service plugins.

    Provides tenant-scoped model queries, registered query hooks,
    collection pagination helpers and soft-delete filtering.
    """

    # model class -> {hook name -> {'query', 'filter', 'result_filters'}}.
    # Populated via register_model_query_hook(); lets extensions augment
    # the base query built in _model_query().
    _model_query_hooks = {}

    # resource type -> callables (or method names) that extend API
    # response dicts for that resource.
    # TODO(salvatore-orlando): Avoid using class-level variables
    _dict_extend_functions = {}

    @classmethod
    def register_model_query_hook(cls, model, name, query_hook, filter_hook,
                                  result_filters=None):
        """Register a hook to be invoked when a query is executed.

        ``query_hook`` receives the query being built and returns a
        transformed query expression; ``filter_hook`` receives the filter
        expression being built and returns a transformed one;
        ``result_filters`` (optional) post-filters collection results.
        """
        cls._model_query_hooks.setdefault(model, {})[name] = {
            'query': query_hook,
            'filter': filter_hook,
            'result_filters': result_filters,
        }

    @property
    def safe_reference(self):
        """Return a weakref proxy to this instance.

        Avoids keeping the plugin alive merely because a hook table or
        cache holds a reference to it.
        """
        return weakref.proxy(self)

    def _model_query(self, context, model):
        """Build the base query for *model*: tenancy scope, hooks, soft delete."""
        query = context.session.query(model)
        # Non-admin queries are scoped to their own tenant, unless the
        # model supports shared rows.
        predicate = None
        if not context.is_admin and hasattr(model, 'tenant_id'):
            predicate = (model.tenant_id == context.tenant_id)
            if hasattr(model, 'shared'):
                predicate = predicate | (model.shared == sql.true())

        # Run hooks registered by mixins/plugins; string hooks are looked
        # up as method names on self.
        for _name, hooks in self._model_query_hooks.get(model, {}).items():
            q_hook = hooks.get('query')
            if isinstance(q_hook, six.string_types):
                q_hook = getattr(self, q_hook, None)
            if q_hook:
                query = q_hook(context, model, query)

            f_hook = hooks.get('filter')
            if isinstance(f_hook, six.string_types):
                f_hook = getattr(self, f_hook, None)
            if f_hook:
                predicate = f_hook(context, model, predicate)

        # Compare against None explicitly: SQLAlchemy expressions raise
        # when evaluated for truthiness.
        if predicate is not None:
            query = query.filter(predicate)

        # Hide soft-deleted rows (deleted_at is datetime.min while live).
        if hasattr(model, 'deleted_at'):
            query = query.filter_by(deleted_at=datetime.min)

        return query

    def _fields(self, resource, fields):
        """Project *resource* onto *fields*; no-op when fields is falsy."""
        if not fields:
            return resource
        return {key: value for key, value in resource.items()
                if key in fields}

    def _get_tenant_id_for_create(self, context, resource):
        """Pick the tenant for a create; only admins may act cross-tenant."""
        if context.is_admin and 'tenant_id' in resource:
            return resource['tenant_id']
        if ('tenant_id' in resource and
                resource['tenant_id'] != context.tenant_id):
            # NOTE(review): `_` (i18n) is not imported in this module —
            # verify apmec._i18n installs it before this path is hit.
            reason = _('Cannot create resource for another tenant')
            raise n_exc.AdminRequired(reason=reason)
        return context.tenant_id

    def _get_by_id(self, context, model, id):
        """Return the single row of *model* with the given id.

        Propagates sqlalchemy's NoResultFound when absent.
        """
        return self._model_query(context, model).filter(model.id == id).one()

    def _apply_filters_to_query(self, query, model, filters):
        """Apply column equality filters plus registered result filters."""
        if filters:
            for key, value in filters.items():
                column = getattr(model, key, None)
                if column:
                    query = query.filter(column.in_(value))
            for _name, hooks in self._model_query_hooks.get(
                    model, {}).items():
                result_filter = hooks.get('result_filters', None)
                if isinstance(result_filter, six.string_types):
                    result_filter = getattr(self, result_filter, None)
                if result_filter:
                    query = result_filter(query, filters)
        return query

    def _apply_dict_extend_functions(self, resource_type,
                                     response, db_object):
        """Run registered dict-extend callables for *resource_type*."""
        for func in self._dict_extend_functions.get(resource_type, []):
            args = (response, db_object)
            if isinstance(func, six.string_types):
                func = getattr(self, func, None)
            else:
                # Unbound function registered directly: pass self explicitly.
                args = (self,) + args
            if func:
                func(*args)

    def _get_collection_query(self, context, model, filters=None,
                              sorts=None, limit=None, marker_obj=None,
                              page_reverse=False):
        """Build a filtered, paginated query over *model*."""
        collection = self._apply_filters_to_query(
            self._model_query(context, model), model, filters)
        if limit and page_reverse and sorts:
            # Flip sort direction so the page is taken from the far end;
            # _get_collection un-reverses the items afterwards.
            sorts = [(field, not asc) for field, asc in sorts]
        return sqlalchemyutils.paginate_query(collection, model, limit,
                                              sorts, marker_obj=marker_obj)

    def _get_collection(self, context, model, dict_func, filters=None,
                        fields=None, sorts=None, limit=None, marker_obj=None,
                        page_reverse=False):
        """Return a list of dicts for the matching rows of *model*."""
        query = self._get_collection_query(context, model, filters=filters,
                                           sorts=sorts, limit=limit,
                                           marker_obj=marker_obj,
                                           page_reverse=page_reverse)
        items = [dict_func(entry, fields) for entry in query]
        if limit and page_reverse:
            items.reverse()
        return items

    def _get_collection_count(self, context, model, filters=None):
        """Count rows of *model* matching *filters*."""
        return self._get_collection_query(context, model, filters).count()

    def _get_marker_obj(self, context, resource, limit, marker):
        """Resolve a pagination marker via the plugin's _get_<resource>()."""
        if limit and marker:
            return getattr(self, '_get_%s' % resource)(context, marker)
        return None

    def _filter_non_model_columns(self, data, model):
        """Drop keys of *data* that are not columns of *model*."""
        column_names = set(column.name
                           for column in model.__table__.columns)
        return {key: value for key, value in data.items()
                if key in column_names}

    def _get_by_name(self, context, model, name):
        """Return the row of *model* named *name*, or None when absent.

        NOTE(review): unlike _get_by_id this swallows NoResultFound and
        returns None (after logging); callers expecting the exception to
        propagate must check for None instead.
        """
        try:
            return self._model_query(context, model).filter(
                model.name == name).one()
        except orm_exc.NoResultFound:
            LOG.info("No result found for %(name)s in %(model)s table",
                     {'name': name, 'model': model})
from datetime import datetime

from oslo_db.exception import DBDuplicateEntry
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import schema

from apmec.api.v1 import attributes
from apmec.common import exceptions
from apmec import context as t_context
from apmec.db.common_services import common_services_db_plugin
from apmec.db import db_base
from apmec.db import model_base
from apmec.db import models_v1
from apmec.db import types
from apmec.extensions import mem
from apmec import manager
from apmec.plugins.common import constants

LOG = logging.getLogger(__name__)

# Status sets guarding the state transitions implemented below.
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
_ACTIVE_UPDATE_ERROR_DEAD = (
    constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
    constants.ERROR, constants.DEAD)
CREATE_STATES = (constants.PENDING_CREATE, constants.DEAD)


###########################################################################
# db tables

class MEAD(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
           models_v1.Audit):
    """Represents MEAD to create MEA."""

    __tablename__ = 'mead'
    # Descriptive name
    name = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.Text)

    # service type that this service vm provides.
    # At first phase, this includes only single service
    # In future, single service VM may accommodate multiple services.
    service_types = orm.relationship('ServiceType', backref='mead')

    # driver to communicate with service management
    mgmt_driver = sa.Column(sa.String(255))

    # (key, value) pair to spin up
    attributes = orm.relationship('MEADAttribute',
                                  backref='mead')

    # mead template source - inline or onboarded
    template_source = sa.Column(sa.String(255), server_default='onboarded')

    __table_args__ = (
        schema.UniqueConstraint(
            "tenant_id",
            "name",
            "deleted_at",
            name="uniq_mead0tenant_id0name0deleted_at"),
    )


class ServiceType(model_base.BASE, models_v1.HasId, models_v1.HasTenant):
    """Represents service type which hosting mea provides.

    Since a mea may provide many services, This is one-to-many
    relationship.
    """
    mead_id = sa.Column(types.Uuid, sa.ForeignKey('mead.id'),
                        nullable=False)
    service_type = sa.Column(sa.String(64), nullable=False)


class MEADAttribute(model_base.BASE, models_v1.HasId):
    """Represents attributes necessary for spinning up VM in (key, value) pair

    key value pair is adopted for being agnostic to actuall manager of VMs.
    The interpretation is up to actual driver of hosting mea.
    """

    __tablename__ = 'mead_attribute'
    mead_id = sa.Column(types.Uuid, sa.ForeignKey('mead.id'),
                        nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    value = sa.Column(sa.TEXT(65535), nullable=True)


class MEA(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
          models_v1.Audit):
    """Represents meas that hosts services.

    Here the term, 'VM', is intentionally avoided because it can be
    VM or other container.
    """

    __tablename__ = 'mea'
    mead_id = sa.Column(types.Uuid, sa.ForeignKey('mead.id'))
    mead = orm.relationship('MEAD')

    name = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.Text, nullable=True)

    # sufficient information to uniquely identify hosting mea.
    # In case of openstack manager, it's UUID of heat stack.
    instance_id = sa.Column(sa.String(64), nullable=True)

    # For a management tool to talk to manage this hosting mea.
    # opaque string.
    # e.g. (driver, mgmt_url) = (ssh, ip address), ...
    mgmt_url = sa.Column(sa.String(255), nullable=True)
    attributes = orm.relationship("MEAAttribute", backref="mea")

    status = sa.Column(sa.String(64), nullable=False)
    vim_id = sa.Column(types.Uuid, sa.ForeignKey('vims.id'), nullable=False)
    placement_attr = sa.Column(types.Json, nullable=True)
    vim = orm.relationship('Vim')
    error_reason = sa.Column(sa.Text, nullable=True)

    __table_args__ = (
        schema.UniqueConstraint(
            "tenant_id",
            "name",
            "deleted_at",
            name="uniq_mea0tenant_id0name0deleted_at"),
    )


class MEAAttribute(model_base.BASE, models_v1.HasId):
    """Represents kwargs necessary for spinning up VM in (key, value) pair.

    key value pair is adopted for being agnostic to actuall manager of VMs.
    The interpretation is up to actual driver of hosting mea.
    """

    __tablename__ = 'mea_attribute'
    mea_id = sa.Column(types.Uuid, sa.ForeignKey('mea.id'),
                       nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    # json encoded value. example
    # "nic": [{"net-id": }, {"port-id": }]
    value = sa.Column(sa.TEXT(65535), nullable=True)


class MEMPluginDb(mem.MEMPluginBase, db_base.CommonDbMixin):
    """DB layer for MEA descriptors (MEAD) and MEA instances."""

    @property
    def _core_plugin(self):
        return manager.ApmecManager.get_plugin()

    def subnet_id_to_network_id(self, context, subnet_id):
        """Resolve a subnet id to its parent network id via the core plugin."""
        subnet = self._core_plugin.get_subnet(context, subnet_id)
        return subnet['network_id']

    def __init__(self):
        super(MEMPluginDb, self).__init__()
        self._cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()

    def _get_resource(self, context, model, id):
        """Fetch by UUID or by name; translate misses to typed exceptions."""
        try:
            if uuidutils.is_uuid_like(id):
                return self._get_by_id(context, model, id)
            # NOTE(review): _get_by_name returns None (does not raise) on a
            # miss, so name lookups fall through with None — confirm callers
            # tolerate this.
            return self._get_by_name(context, model, id)
        except orm_exc.NoResultFound:
            if issubclass(model, MEAD):
                raise mem.MEADNotFound(mead_id=id)
            elif issubclass(model, ServiceType):
                raise mem.ServiceTypeNotFound(service_type_id=id)
            if issubclass(model, MEA):
                raise mem.MEANotFound(mea_id=id)
            else:
                raise

    def _make_attributes_dict(self, attributes_db):
        return dict((attr.key, attr.value) for attr in attributes_db)

    def _make_service_types_list(self, service_types):
        return [service_type.service_type
                for service_type in service_types]

    def _make_mead_dict(self, mead, fields=None):
        """Convert a MEAD row (plus children) to an API dict."""
        res = {
            'attributes': self._make_attributes_dict(mead['attributes']),
            'service_types': self._make_service_types_list(
                mead.service_types)
        }
        key_list = ('id', 'tenant_id', 'name', 'description',
                    'mgmt_driver', 'created_at', 'updated_at',
                    'template_source')
        res.update((key, mead[key]) for key in key_list)
        return self._fields(res, fields)

    def _make_dev_attrs_dict(self, dev_attrs_db):
        return dict((arg.key, arg.value) for arg in dev_attrs_db)

    def _make_mea_dict(self, mea_db, fields=None):
        """Convert a MEA row (with embedded MEAD) to an API dict."""
        LOG.debug('mea_db %s', mea_db)
        LOG.debug('mea_db attributes %s', mea_db.attributes)
        res = {
            'mead':
            self._make_mead_dict(mea_db.mead),
            'attributes': self._make_dev_attrs_dict(mea_db.attributes),
        }
        key_list = ('id', 'tenant_id', 'name', 'description', 'instance_id',
                    'vim_id', 'placement_attr', 'mead_id', 'status',
                    'mgmt_url', 'error_reason', 'created_at', 'updated_at')
        res.update((key, mea_db[key]) for key in key_list)
        return self._fields(res, fields)

    @staticmethod
    def _mgmt_driver_name(mea_dict):
        return mea_dict['mead']['mgmt_driver']

    @staticmethod
    def _instance_id(mea_dict):
        return mea_dict['instance_id']

    def create_mead(self, context, mead):
        """Create a MEAD plus its attribute and service-type children."""
        mead = mead['mead']
        LOG.debug('mead %s', mead)
        tenant_id = self._get_tenant_id_for_create(context, mead)
        service_types = mead.get('service_types')
        mgmt_driver = mead.get('mgmt_driver')
        template_source = mead.get("template_source")

        if (not attributes.is_attr_set(service_types)):
            LOG.debug('service types unspecified')
            raise mem.ServiceTypesNotSpecified()

        try:
            with context.session.begin(subtransactions=True):
                mead_id = uuidutils.generate_uuid()
                mead_db = MEAD(
                    id=mead_id,
                    tenant_id=tenant_id,
                    name=mead.get('name'),
                    description=mead.get('description'),
                    mgmt_driver=mgmt_driver,
                    template_source=template_source,
                    deleted_at=datetime.min)
                context.session.add(mead_db)
                for (key, value) in mead.get('attributes', {}).items():
                    attribute_db = MEADAttribute(
                        id=uuidutils.generate_uuid(),
                        mead_id=mead_id,
                        key=key,
                        value=value)
                    context.session.add(attribute_db)
                for service_type in (item['service_type']
                                     for item in mead['service_types']):
                    service_type_db = ServiceType(
                        id=uuidutils.generate_uuid(),
                        tenant_id=tenant_id,
                        mead_id=mead_id,
                        service_type=service_type)
                    context.session.add(service_type_db)
        except DBDuplicateEntry as e:
            raise exceptions.DuplicateEntity(
                _type="mead",
                entry=e.columns)
        LOG.debug('mead_db %(mead_db)s %(attributes)s ',
                  {'mead_db': mead_db,
                   'attributes': mead_db.attributes})
        mead_dict = self._make_mead_dict(mead_db)
        LOG.debug('mead_dict %s', mead_dict)
        self._cos_db_plg.create_event(
            context, res_id=mead_dict['id'],
            res_type=constants.RES_TYPE_MEAD,
            res_state=constants.RES_EVT_ONBOARDED,
            evt_type=constants.RES_EVT_CREATE,
            tstamp=mead_dict[constants.RES_EVT_CREATED_FLD])
        return mead_dict

    def update_mead(self, context, mead_id,
                    mead):
        """Update a MEAD in place and record an audit event."""
        with context.session.begin(subtransactions=True):
            mead_db = self._get_resource(context, MEAD,
                                         mead_id)
            mead_db.update(mead['mead'])
            mead_db.update({'updated_at': timeutils.utcnow()})
            mead_dict = self._make_mead_dict(mead_db)
            self._cos_db_plg.create_event(
                context, res_id=mead_dict['id'],
                res_type=constants.RES_TYPE_MEAD,
                res_state=constants.RES_EVT_NA_STATE,
                evt_type=constants.RES_EVT_UPDATE,
                tstamp=mead_dict[constants.RES_EVT_UPDATED_FLD])
        return mead_dict

    def delete_mead(self,
                    context,
                    mead_id,
                    soft_delete=True):
        """Soft- or hard-delete a MEAD; refuses when an active MEA uses it."""
        with context.session.begin(subtransactions=True):
            # TODO(yamahata): race. prevent from newly inserting hosting mea
            # that refers to this mead
            meas_db = context.session.query(MEA).filter_by(
                mead_id=mead_id).first()
            if meas_db is not None and meas_db.deleted_at is None:
                raise mem.MEADInUse(mead_id=mead_id)
            mead_db = self._get_resource(context, MEAD,
                                        mead_id)
            if soft_delete:
                mead_db.update({'deleted_at': timeutils.utcnow()})
                self._cos_db_plg.create_event(
                    context, res_id=mead_db['id'],
                    res_type=constants.RES_TYPE_MEAD,
                    res_state=constants.RES_EVT_NA_STATE,
                    evt_type=constants.RES_EVT_DELETE,
                    tstamp=mead_db[constants.RES_EVT_DELETED_FLD])
            else:
                # Hard delete: remove children first, then the parent row.
                context.session.query(ServiceType).filter_by(
                    mead_id=mead_id).delete()
                context.session.query(MEADAttribute).filter_by(
                    mead_id=mead_id).delete()
                context.session.delete(mead_db)

    def get_mead(self, context, mead_id, fields=None):
        mead_db = self._get_resource(context, MEAD, mead_id)
        return self._make_mead_dict(mead_db)

    def get_meads(self, context, filters, fields=None):
        """List MEADs; 'template_source: all' disables source filtering."""
        if 'template_source' in filters and \
                filters['template_source'][0] == 'all':
            filters.pop('template_source')
        return self._get_collection(context, MEAD,
                                    self._make_mead_dict,
                                    filters=filters, fields=fields)

    def choose_mead(self, context, service_type,
                    required_attributes=None):
        """Pick the first MEAD providing *service_type* and all required keys."""
        required_attributes = required_attributes or []
        LOG.debug('required_attributes %s', required_attributes)
        with context.session.begin(subtransactions=True):
            query = (
                context.session.query(MEAD).
                filter(
                    sa.exists().
                    where(sa.and_(
                        MEAD.id == ServiceType.mead_id,
                        ServiceType.service_type == service_type))))
            for key in required_attributes:
                query = query.filter(
                    sa.exists().
                    where(sa.and_(
                        MEAD.id ==
                        MEADAttribute.mead_id,
                        MEADAttribute.key == key)))
            LOG.debug('statements %s', query)
            mead_db = query.first()
            if mead_db:
                return self._make_mead_dict(mead_db)

    def _mea_attribute_update_or_create(
            self, context, mea_id, key, value):
        """Upsert one (key, value) attribute row for a MEA."""
        arg = (self._model_query(context, MEAAttribute).
               filter(MEAAttribute.mea_id == mea_id).
               filter(MEAAttribute.key == key).first())
        if arg:
            arg.value = value
        else:
            arg = MEAAttribute(
                id=uuidutils.generate_uuid(), mea_id=mea_id,
                key=key, value=value)
            context.session.add(arg)

    # called internally, not by REST API
    def _create_mea_pre(self, context, mea):
        """Insert a MEA row in PENDING_CREATE before infra provisioning."""
        LOG.debug('mea %s', mea)
        tenant_id = self._get_tenant_id_for_create(context, mea)
        mead_id = mea['mead_id']
        name = mea.get('name')
        mea_id = uuidutils.generate_uuid()
        attributes = mea.get('attributes', {})
        vim_id = mea.get('vim_id')
        placement_attr = mea.get('placement_attr', {})
        try:
            with context.session.begin(subtransactions=True):
                mead_db = self._get_resource(context, MEAD,
                                             mead_id)
                mea_db = MEA(id=mea_id,
                             tenant_id=tenant_id,
                             name=name,
                             description=mead_db.description,
                             instance_id=None,
                             mead_id=mead_id,
                             vim_id=vim_id,
                             placement_attr=placement_attr,
                             status=constants.PENDING_CREATE,
                             error_reason=None,
                             deleted_at=datetime.min)
                context.session.add(mea_db)
                for key, value in attributes.items():
                    arg = MEAAttribute(
                        id=uuidutils.generate_uuid(), mea_id=mea_id,
                        key=key, value=value)
                    context.session.add(arg)
        except DBDuplicateEntry as e:
            raise exceptions.DuplicateEntity(
                _type="mea",
                entry=e.columns)
        evt_details = "MEA UUID assigned."
        self._cos_db_plg.create_event(
            context, res_id=mea_id,
            res_type=constants.RES_TYPE_MEA,
            res_state=constants.PENDING_CREATE,
            evt_type=constants.RES_EVT_CREATE,
            tstamp=mea_db[constants.RES_EVT_CREATED_FLD],
            details=evt_details)
        return self._make_mea_dict(mea_db)

    # called internally, not by REST API
    # intsance_id = None means error on creation
    def _create_mea_post(self, context, mea_id, instance_id,
                         mgmt_url, mea_dict):
        """Record infra results (instance id, mgmt URL, attributes) for a MEA."""
        LOG.debug('mea_dict %s', mea_dict)
        with context.session.begin(subtransactions=True):
            # NOTE(review): despite the name, `query` is the MEA row
            # returned by .one(); .update() here is the model's dict-style
            # update, not Query.update().
            query = (self._model_query(context, MEA).
                     filter(MEA.id == mea_id).
                     filter(MEA.status.in_(CREATE_STATES)).
                     one())
            query.update({'instance_id': instance_id, 'mgmt_url': mgmt_url})
            if instance_id is None or mea_dict['status'] == constants.ERROR:
                query.update({'status': constants.ERROR})

            for (key, value) in mea_dict['attributes'].items():
                # do not store decrypted vim auth in mea attr table
                if 'vim_auth' not in key:
                    self._mea_attribute_update_or_create(context, mea_id,
                                                         key, value)
        evt_details = ("Infra Instance ID created: %s and "
                       "Mgmt URL set: %s") % (instance_id, mgmt_url)
        self._cos_db_plg.create_event(
            context, res_id=mea_dict['id'],
            res_type=constants.RES_TYPE_MEA,
            res_state=mea_dict['status'],
            evt_type=constants.RES_EVT_CREATE,
            tstamp=timeutils.utcnow(), details=evt_details)

    def _create_mea_status(self, context, mea_id, new_status):
        """Finalize creation by moving the MEA out of CREATE_STATES."""
        with context.session.begin(subtransactions=True):
            query = (self._model_query(context, MEA).
                     filter(MEA.id == mea_id).
                     filter(MEA.status.in_(CREATE_STATES)).one())
            query.update({'status': new_status})
            self._cos_db_plg.create_event(
                context, res_id=mea_id,
                res_type=constants.RES_TYPE_MEA,
                res_state=new_status,
                evt_type=constants.RES_EVT_CREATE,
                tstamp=timeutils.utcnow(), details="MEA creation completed")

    def _get_mea_db(self, context, mea_id, current_statuses, new_status):
        """Lock a MEA row, check its status, and transition it to new_status."""
        try:
            # NOTE(review): with_lockmode('update') is the deprecated
            # spelling of with_for_update() — verify against the pinned
            # SQLAlchemy version.
            mea_db = (
                self._model_query(context, MEA).
                filter(MEA.id == mea_id).
                filter(MEA.status.in_(current_statuses)).
                with_lockmode('update').one())
        except orm_exc.NoResultFound:
            raise mem.MEANotFound(mea_id=mea_id)
        if mea_db.status == constants.PENDING_UPDATE:
            raise mem.MEAInUse(mea_id=mea_id)
        mea_db.update({'status': new_status})
        return mea_db

    def _update_mea_scaling_status(self,
                                   context,
                                   policy,
                                   previous_statuses,
                                   status,
                                   mgmt_url=None):
        """Transition a MEA's status during a scaling operation."""
        with context.session.begin(subtransactions=True):
            mea_db = self._get_mea_db(
                context, policy['mea']['id'], previous_statuses, status)
            if mgmt_url:
                mea_db.update({'mgmt_url': mgmt_url})
            updated_mea_dict = self._make_mea_dict(mea_db)
        self._cos_db_plg.create_event(
            context, res_id=updated_mea_dict['id'],
            res_type=constants.RES_TYPE_MEA,
            res_state=updated_mea_dict['status'],
            evt_type=constants.RES_EVT_SCALE,
            tstamp=timeutils.utcnow())
        return updated_mea_dict

    def _update_mea_pre(self, context, mea_id):
        """Move an ACTIVE MEA to PENDING_UPDATE before an update operation."""
        with context.session.begin(subtransactions=True):
            mea_db = self._get_mea_db(
                context, mea_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE)
            updated_mea_dict = self._make_mea_dict(mea_db)
        self._cos_db_plg.create_event(
            context, res_id=mea_id,
            res_type=constants.RES_TYPE_MEA,
            res_state=updated_mea_dict['status'],
            evt_type=constants.RES_EVT_UPDATE,
            tstamp=timeutils.utcnow())
        return updated_mea_dict

    def _update_mea_post(self, context, mea_id, new_status,
                         new_mea_dict):
        """Apply post-update status and attribute changes for a MEA."""
        updated_time_stamp = timeutils.utcnow()
        with context.session.begin(subtransactions=True):
            (self._model_query(context, MEA).
             filter(MEA.id == mea_id).
             filter(MEA.status == constants.PENDING_UPDATE).
             update({'status': new_status,
                     'updated_at': updated_time_stamp}))

            dev_attrs = new_mea_dict.get('attributes', {})
            # Drop attribute rows no longer present in the new dict.
            (context.session.query(MEAAttribute).
             filter(MEAAttribute.mea_id == mea_id).
             filter(~MEAAttribute.key.in_(dev_attrs.keys())).
             delete(synchronize_session='fetch'))

            for (key, value) in dev_attrs.items():
                if 'vim_auth' not in key:
                    self._mea_attribute_update_or_create(context, mea_id,
                                                         key, value)
        self._cos_db_plg.create_event(
            context, res_id=mea_id,
            res_type=constants.RES_TYPE_MEA,
            res_state=new_status,
            evt_type=constants.RES_EVT_UPDATE,
            tstamp=updated_time_stamp)

    def _delete_mea_pre(self, context, mea_id):
        """Move a MEA to PENDING_DELETE before infra teardown."""
        with context.session.begin(subtransactions=True):
            mea_db = self._get_mea_db(
                context, mea_id, _ACTIVE_UPDATE_ERROR_DEAD,
                constants.PENDING_DELETE)
            deleted_mea_db = self._make_mea_dict(mea_db)
        self._cos_db_plg.create_event(
            context, res_id=mea_id,
            res_type=constants.RES_TYPE_MEA,
            res_state=deleted_mea_db['status'],
            evt_type=constants.RES_EVT_DELETE,
            tstamp=timeutils.utcnow(), details="MEA delete initiated")
        return deleted_mea_db

    def _delete_mea_post(self, context, mea_dict, error, soft_delete=True):
        """Finalize deletion: mark ERROR, soft-delete, or hard-delete."""
        mea_id = mea_dict['id']
        with context.session.begin(subtransactions=True):
            query = (
                self._model_query(context, MEA).
                filter(MEA.id == mea_id).
                filter(MEA.status == constants.PENDING_DELETE))
            if error:
                query.update({'status': constants.ERROR})
                self._cos_db_plg.create_event(
                    context, res_id=mea_id,
                    res_type=constants.RES_TYPE_MEA,
                    res_state=constants.ERROR,
                    evt_type=constants.RES_EVT_DELETE,
                    tstamp=timeutils.utcnow(),
                    details="MEA Delete ERROR")
            else:
                if soft_delete:
                    deleted_time_stamp = timeutils.utcnow()
                    query.update({'deleted_at': deleted_time_stamp})
                    self._cos_db_plg.create_event(
                        context, res_id=mea_id,
                        res_type=constants.RES_TYPE_MEA,
                        res_state=constants.PENDING_DELETE,
                        evt_type=constants.RES_EVT_DELETE,
                        tstamp=deleted_time_stamp,
                        details="MEA Delete Complete")
                else:
                    (self._model_query(context, MEAAttribute).
                     filter(MEAAttribute.mea_id == mea_id).delete())
                    query.delete()

            # Delete corresponding mead
            if mea_dict['mead']['template_source'] == "inline":
                self.delete_mead(context, mea_dict["mead_id"])
+ filter(MEAAttribute.mea_id == mea_id).delete()) + query.delete() + + # Delete corresponding mead + if mea_dict['mead']['template_source'] == "inline": + self.delete_mead(context, mea_dict["mead_id"]) + + # reference implementation. needs to be overrided by subclass + def create_mea(self, context, mea): + mea_dict = self._create_mea_pre(context, mea) + # start actual creation of hosting mea. + # Waiting for completion of creation should be done backgroundly + # by another thread if it takes a while. + instance_id = uuidutils.generate_uuid() + mea_dict['instance_id'] = instance_id + self._create_mea_post(context, mea_dict['id'], instance_id, None, + mea_dict) + self._create_mea_status(context, mea_dict['id'], + constants.ACTIVE) + return mea_dict + + # reference implementation. needs to be overrided by subclass + def update_mea(self, context, mea_id, mea): + mea_dict = self._update_mea_pre(context, mea_id) + # start actual update of hosting mea + # waiting for completion of update should be done backgroundly + # by another thread if it takes a while + self._update_mea_post(context, mea_id, + constants.ACTIVE, + mea_dict) + return mea_dict + + # reference implementation. needs to be overrided by subclass + def delete_mea(self, context, mea_id, soft_delete=True): + mea_dict = self._delete_mea_pre(context, mea_id) + # start actual deletion of hosting mea. + # Waiting for completion of deletion should be done backgroundly + # by another thread if it takes a while. 
+ self._delete_mea_post(context, + mea_dict, + False, + soft_delete=soft_delete) + + def get_mea(self, context, mea_id, fields=None): + mea_db = self._get_resource(context, MEA, mea_id) + return self._make_mea_dict(mea_db, fields) + + def get_meas(self, context, filters=None, fields=None): + return self._get_collection(context, MEA, self._make_mea_dict, + filters=filters, fields=fields) + + def set_mea_error_status_reason(self, context, mea_id, new_reason): + with context.session.begin(subtransactions=True): + (self._model_query(context, MEA). + filter(MEA.id == mea_id). + update({'error_reason': new_reason})) + + def _mark_mea_status(self, mea_id, exclude_status, new_status): + context = t_context.get_admin_context() + with context.session.begin(subtransactions=True): + try: + mea_db = ( + self._model_query(context, MEA). + filter(MEA.id == mea_id). + filter(~MEA.status.in_(exclude_status)). + with_lockmode('update').one()) + except orm_exc.NoResultFound: + LOG.warning('no mea found %s', mea_id) + return False + + mea_db.update({'status': new_status}) + self._cos_db_plg.create_event( + context, res_id=mea_id, + res_type=constants.RES_TYPE_MEA, + res_state=new_status, + evt_type=constants.RES_EVT_MONITOR, + tstamp=timeutils.utcnow()) + return True + + def _mark_mea_error(self, mea_id): + return self._mark_mea_status( + mea_id, [constants.DEAD], constants.ERROR) + + def _mark_mea_dead(self, mea_id): + exclude_status = [ + constants.DOWN, + constants.PENDING_CREATE, + constants.PENDING_UPDATE, + constants.PENDING_DELETE, + constants.INACTIVE, + constants.ERROR] + return self._mark_mea_status( + mea_id, exclude_status, constants.DEAD) diff --git a/apmec/db/meo/__init__.py b/apmec/db/meo/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/db/meo/meo_db.py b/apmec/db/meo/meo_db.py new file mode 100644 index 0000000..4ccfc7b --- /dev/null +++ b/apmec/db/meo/meo_db.py @@ -0,0 +1,57 @@ +# Copyright 2016 Brocade Communications System, Inc. 
# Copyright 2016 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import schema
from sqlalchemy import sql

from apmec.db import model_base
from apmec.db import models_v1
from apmec.db import types


class Vim(model_base.BASE,
          models_v1.HasId,
          models_v1.HasTenant,
          models_v1.Audit):
    """A registered Virtual Infrastructure Manager (VIM).

    NOTE(review): no explicit __tablename__; the declarative base derives
    it ("vims" -- the VimAuth.vim_id foreign key below depends on that
    name).  Confirm against model_base.
    """

    type = sa.Column(sa.String(64), nullable=False)
    name = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.Text, nullable=True)
    # Driver-specific placement information, stored as an opaque JSON doc.
    placement_attr = sa.Column(types.Json, nullable=True)
    shared = sa.Column(sa.Boolean, default=False, server_default=sql.false(
        ), nullable=False)
    is_default = sa.Column(sa.Boolean, default=False, server_default=sql.false(
        ), nullable=False)
    # One-to-many to the credential rows; in practice a single VimAuth
    # row is read via vim_auth[0] by the plugin layer.
    vim_auth = orm.relationship('VimAuth')
    status = sa.Column(sa.String(255), nullable=False)

    __table_args__ = (
        # Name is unique per tenant among non-deleted rows (deleted_at
        # participates so soft-deleted names can be reused).
        schema.UniqueConstraint(
            "tenant_id",
            "name",
            "deleted_at",
            name="uniq_vim0tenant_id0name0deleted_at"),
    )


class VimAuth(model_base.BASE, models_v1.HasId):
    """Credentials for a VIM.

    The password is stored in its own column; auth_cred holds the rest of
    the credential document (the plugin layer keeps the password out of it).
    """

    vim_id = sa.Column(types.Uuid, sa.ForeignKey('vims.id'),
                       nullable=False)
    password = sa.Column(sa.String(255), nullable=False)
    auth_url = sa.Column(sa.String(255), nullable=False)
    vim_project = sa.Column(types.Json, nullable=False)
    auth_cred = sa.Column(types.Json, nullable=False)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime

from oslo_db.exception import DBDuplicateEntry
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import sql

from apmec.common import exceptions
from apmec.db.common_services import common_services_db_plugin
from apmec.db import db_base
from apmec.db.meo import meo_db
from apmec.db.mem import mem_db
from apmec.extensions import meo
from apmec import manager
from apmec.plugins.common import constants


# Columns copied verbatim from the Vim row into the API dict.
VIM_ATTRIBUTES = ('id', 'type', 'tenant_id', 'name', 'description',
                  'placement_attr', 'shared', 'is_default',
                  'created_at', 'updated_at', 'status')

VIM_AUTH_ATTRIBUTES = ('auth_url', 'vim_project', 'password', 'auth_cred')


class MeoPluginDb(meo.MEOPluginBase, db_base.CommonDbMixin):
    """DB layer implementing VIM registry CRUD for the MEO plugin."""

    def __init__(self):
        super(MeoPluginDb, self).__init__()
        self._cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()

    @property
    def _core_plugin(self):
        return manager.ApmecManager.get_plugin()

    def _make_vim_dict(self, vim_db, fields=None, mask_password=True):
        """Flatten a Vim row plus its VimAuth into an API dict.

        The stored password is folded back into auth_cred and masked
        unless *mask_password* is False.
        """
        res = dict((key, vim_db[key]) for key in VIM_ATTRIBUTES)
        vim_auth_db = vim_db.vim_auth
        res['auth_url'] = vim_auth_db[0].auth_url
        res['vim_project'] = vim_auth_db[0].vim_project
        # Bug fix: copy auth_cred before injecting the password.  The
        # original wrote the plaintext password straight into the JSON
        # dict loaded from the DB row, mutating the session-attached
        # attribute in place.
        auth_cred = dict(vim_auth_db[0].auth_cred)
        auth_cred['password'] = vim_auth_db[0].password
        if mask_password:
            auth_cred = strutils.mask_dict_password(auth_cred)
        res['auth_cred'] = auth_cred
        return self._fields(res, fields)

    def _fields(self, resource, fields):
        """Trim *resource* to the requested *fields* (no-op when falsy)."""
        if fields:
            return dict(((key, item) for key, item in resource.items()
                         if key in fields))
        return resource

    def _get_resource(self, context, model, id):
        """Fetch by id, translating NoResultFound into the VIM exception."""
        try:
            return self._get_by_id(context, model, id)
        except orm_exc.NoResultFound:
            if issubclass(model, meo_db.Vim):
                raise meo.VimNotFoundException(vim_id=id)
            else:
                raise

    def create_vim(self, context, vim):
        """Register a VIM and its credentials; emit a CREATE event.

        The password is popped out of auth_cred and persisted in its own
        column (note: this mutates the caller's dict by design).
        NOTE(review): vim.get('shared') is not persisted here -- the column
        falls back to its server default; confirm that is intended.
        """
        self._validate_default_vim(context, vim)
        vim_cred = vim['auth_cred']

        try:
            with context.session.begin(subtransactions=True):
                vim_db = meo_db.Vim(
                    id=vim.get('id'),
                    type=vim.get('type'),
                    tenant_id=vim.get('tenant_id'),
                    name=vim.get('name'),
                    description=vim.get('description'),
                    placement_attr=vim.get('placement_attr'),
                    is_default=vim.get('is_default'),
                    status=vim.get('status'),
                    # datetime.min is the "not deleted" sentinel.
                    deleted_at=datetime.min)
                context.session.add(vim_db)
                vim_auth_db = meo_db.VimAuth(
                    id=uuidutils.generate_uuid(),
                    vim_id=vim.get('id'),
                    password=vim_cred.pop('password'),
                    vim_project=vim.get('vim_project'),
                    auth_url=vim.get('auth_url'),
                    auth_cred=vim_cred)
                context.session.add(vim_auth_db)
        except DBDuplicateEntry as e:
            raise exceptions.DuplicateEntity(
                _type="vim",
                entry=e.columns)
        vim_dict = self._make_vim_dict(vim_db)
        self._cos_db_plg.create_event(
            context, res_id=vim_dict['id'],
            res_type=constants.RES_TYPE_VIM,
            res_state=vim_dict['status'],
            evt_type=constants.RES_EVT_CREATE,
            tstamp=vim_dict['created_at'])
        return vim_dict

    def delete_vim(self, context, vim_id, soft_delete=True):
        """Soft-delete (default) or hard-delete a VIM and its credentials."""
        with context.session.begin(subtransactions=True):
            vim_db = self._get_resource(context, meo_db.Vim, vim_id)
            if soft_delete:
                vim_db.update({'deleted_at': timeutils.utcnow()})
                self._cos_db_plg.create_event(
                    context, res_id=vim_db['id'],
                    res_type=constants.RES_TYPE_VIM,
                    res_state=vim_db['status'],
                    evt_type=constants.RES_EVT_DELETE,
                    tstamp=vim_db[constants.RES_EVT_DELETED_FLD])
            else:
                context.session.query(meo_db.VimAuth).filter_by(
                    vim_id=vim_id).delete()
                context.session.delete(vim_db)

    def is_vim_still_in_use(self, context, vim_id):
        """Raise VimInUseException if any MEA references this VIM.

        Returns the first referencing MEA row (None when unused).
        """
        with context.session.begin(subtransactions=True):
            meas_db = self._model_query(context, mem_db.MEA).filter_by(
                vim_id=vim_id).first()
            if meas_db is not None:
                raise meo.VimInUseException(vim_id=vim_id)
        return meas_db

    def get_vim(self, context, vim_id, fields=None, mask_password=True):
        """Return one VIM dict (password masked unless told otherwise)."""
        vim_db = self._get_resource(context, meo_db.Vim, vim_id)
        return self._make_vim_dict(vim_db, mask_password=mask_password)

    def get_vims(self, context, filters=None, fields=None):
        """Return all VIMs matching *filters*."""
        return self._get_collection(context, meo_db.Vim, self._make_vim_dict,
                                    filters=filters, fields=fields)

    def update_vim(self, context, vim_id, vim):
        """Update mutable VIM fields and replace its credentials."""
        self._validate_default_vim(context, vim, vim_id=vim_id)
        with context.session.begin(subtransactions=True):
            vim_cred = vim['auth_cred']
            vim_project = vim['vim_project']
            vim_db = self._get_resource(context, meo_db.Vim, vim_id)
            try:
                if 'name' in vim:
                    vim_db.update({'name': vim.get('name')})
                if 'description' in vim:
                    vim_db.update({'description': vim.get('description')})
                if 'is_default' in vim:
                    vim_db.update({'is_default': vim.get('is_default')})
                if 'placement_attr' in vim:
                    vim_db.update(
                        {'placement_attr': vim.get('placement_attr')})
                vim_auth_db = (self._model_query(
                    context, meo_db.VimAuth).filter(
                    meo_db.VimAuth.vim_id == vim_id).with_lockmode(
                    'update').one())
            except orm_exc.NoResultFound:
                raise meo.VimNotFoundException(vim_id=vim_id)
            # pop() runs after 'auth_cred' is bound to the same dict, so
            # the stored auth_cred JSON ends up without the password --
            # matching the separate password column.
            vim_auth_db.update({'auth_cred': vim_cred, 'password':
                                vim_cred.pop('password'), 'vim_project':
                                vim_project})
            vim_db.update({'updated_at': timeutils.utcnow()})
            self._cos_db_plg.create_event(
                context, res_id=vim_db['id'],
                res_type=constants.RES_TYPE_VIM,
                res_state=vim_db['status'],
                evt_type=constants.RES_EVT_UPDATE,
                tstamp=vim_db[constants.RES_EVT_UPDATED_FLD])

        return self.get_vim(context, vim_id)

    def update_vim_status(self, context, vim_id, status):
        """Set the VIM status under a row lock and return the fresh dict."""
        with context.session.begin(subtransactions=True):
            try:
                vim_db = (self._model_query(context, meo_db.Vim).filter(
                    meo_db.Vim.id == vim_id).with_lockmode('update').one())
            except orm_exc.NoResultFound:
                raise meo.VimNotFoundException(vim_id=vim_id)
            vim_db.update({'status': status,
                           'updated_at': timeutils.utcnow()})
        return self._make_vim_dict(vim_db)

    def _validate_default_vim(self, context, vim, vim_id=None):
        """Allow at most one default VIM (updating the current one is OK)."""
        if not vim.get('is_default'):
            return True
        try:
            vim_db = self._get_default_vim(context)
        except orm_exc.NoResultFound:
            return True
        if vim_id == vim_db.id:
            return True
        raise meo.VimDefaultDuplicateException(vim_id=vim_db.id)

    def _get_default_vim(self, context):
        query = self._model_query(context, meo_db.Vim)
        return query.filter(meo_db.Vim.is_default == sql.true()).one()

    def get_default_vim(self, context):
        """Return the default VIM dict with the password unmasked."""
        vim_db = self._get_default_vim(context)
        return self._make_vim_dict(vim_db, mask_password=False)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ast
from datetime import datetime

from oslo_db.exception import DBDuplicateEntry
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
from six import iteritems

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import schema

from apmec.common import exceptions
from apmec.db.common_services import common_services_db_plugin
from apmec.db import db_base
from apmec.db import model_base
from apmec.db import models_v1
from apmec.db import types
from apmec.extensions import meo
from apmec.extensions.meo_plugins import edge_service
from apmec.plugins.common import constants

LOG = logging.getLogger(__name__)
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
_ACTIVE_UPDATE_ERROR_DEAD = (
    constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
    constants.ERROR, constants.DEAD)
CREATE_STATES = (constants.PENDING_CREATE, constants.DEAD)


###########################################################################
# db tables

class MESD(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
           models_v1.Audit):
    """MES descriptor (template) from which MES instances are created."""

    __tablename__ = 'mesd'
    # Descriptive name
    name = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.Text)
    meads = sa.Column(types.Json, nullable=True)

    # MESD template source - 'onboarded' (default) or 'inline'
    template_source = sa.Column(sa.String(255), server_default='onboarded')

    # (key, value) pairs needed to spin up a MES
    attributes = orm.relationship('MESDAttribute',
                                  backref='mesd')

    __table_args__ = (
        schema.UniqueConstraint(
            "tenant_id",
            "name",
            name="uniq_mesd0tenant_id0name"),
    )


class MESDAttribute(model_base.BASE, models_v1.HasId):
    """Key/value attribute attached to a MESD, used at MES creation."""

    __tablename__ = 'mesd_attribute'
    mesd_id = sa.Column(types.Uuid, sa.ForeignKey('mesd.id'),
                        nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    value = sa.Column(sa.TEXT(65535), nullable=True)


class MES(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
          models_v1.Audit):
    """A deployed MEC edge service instance."""

    __tablename__ = 'mes'
    mesd_id = sa.Column(types.Uuid, sa.ForeignKey('mesd.id'))
    mesd = orm.relationship('MESD')

    name = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.Text, nullable=True)

    # Stringified dict of MEA ids launched for this service
    mea_ids = sa.Column(sa.TEXT(65535), nullable=True)

    # Stringified dict of mgmt urls of the launched MEAs
    mgmt_urls = sa.Column(sa.TEXT(65535), nullable=True)

    status = sa.Column(sa.String(64), nullable=False)
    vim_id = sa.Column(types.Uuid, sa.ForeignKey('vims.id'), nullable=False)
    error_reason = sa.Column(sa.Text, nullable=True)

    __table_args__ = (
        schema.UniqueConstraint(
            "tenant_id",
            "name",
            name="uniq_mes0tenant_id0name"),
    )


class MESPluginDb(edge_service.MESPluginBase, db_base.CommonDbMixin):
    """DB layer for MESD/MES CRUD used by the edge-service plugin."""

    def __init__(self):
        super(MESPluginDb, self).__init__()
        self._cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()

    def _get_resource(self, context, model, id):
        """Fetch by id, translating NoResultFound to the model's exception."""
        try:
            return self._get_by_id(context, model, id)
        except orm_exc.NoResultFound:
            if issubclass(model, MESD):
                raise edge_service.MESDNotFound(mesd_id=id)
            if issubclass(model, MES):
                raise edge_service.MESNotFound(mes_id=id)
            else:
                raise

    def _get_mes_db(self, context, mes_id, current_statuses, new_status):
        """Lock the MES row, check its status, and move it to *new_status*."""
        try:
            mes_db = (
                self._model_query(context, MES).
                filter(MES.id == mes_id).
                filter(MES.status.in_(current_statuses)).
                with_lockmode('update').one())
        except orm_exc.NoResultFound:
            raise edge_service.MESNotFound(mes_id=mes_id)
        mes_db.update({'status': new_status})
        return mes_db

    def _make_attributes_dict(self, attributes_db):
        return dict((attr.key, attr.value) for attr in attributes_db)

    def _make_mesd_dict(self, mesd, fields=None):
        res = {
            'attributes': self._make_attributes_dict(mesd['attributes']),
        }
        key_list = ('id', 'tenant_id', 'name', 'description',
                    'created_at', 'updated_at', 'meads', 'template_source')
        res.update((key, mesd[key]) for key in key_list)
        return self._fields(res, fields)

    def _make_dev_attrs_dict(self, dev_attrs_db):
        return dict((arg.key, arg.value) for arg in dev_attrs_db)

    def _make_mes_dict(self, mes_db, fields=None):
        LOG.debug('mes_db %s', mes_db)
        res = {}
        key_list = ('id', 'tenant_id', 'mesd_id', 'name', 'description',
                    'mea_ids', 'status', 'mgmt_urls', 'error_reason',
                    'vim_id', 'created_at', 'updated_at')
        res.update((key, mes_db[key]) for key in key_list)
        return self._fields(res, fields)

    def create_mesd(self, context, mesd):
        """Onboard a MESD and its attributes; emit a CREATE event."""
        meads = mesd['meads']
        mesd = mesd['mesd']
        LOG.debug('mesd %s', mesd)
        tenant_id = self._get_tenant_id_for_create(context, mesd)
        template_source = mesd.get('template_source')

        try:
            with context.session.begin(subtransactions=True):
                mesd_id = uuidutils.generate_uuid()
                mesd_db = MESD(
                    id=mesd_id,
                    tenant_id=tenant_id,
                    name=mesd.get('name'),
                    meads=meads,
                    description=mesd.get('description'),
                    # datetime.min is the "not deleted" sentinel.
                    deleted_at=datetime.min,
                    template_source=template_source)
                context.session.add(mesd_db)
                for (key, value) in mesd.get('attributes', {}).items():
                    attribute_db = MESDAttribute(
                        id=uuidutils.generate_uuid(),
                        mesd_id=mesd_id,
                        key=key,
                        value=value)
                    context.session.add(attribute_db)
        except DBDuplicateEntry as e:
            raise exceptions.DuplicateEntity(
                _type="mesd",
                entry=e.columns)
        LOG.debug('mesd_db %(mesd_db)s %(attributes)s ',
                  {'mesd_db': mesd_db,
                   'attributes': mesd_db.attributes})
        mesd_dict = self._make_mesd_dict(mesd_db)
        LOG.debug('mesd_dict %s', mesd_dict)
        self._cos_db_plg.create_event(
            context, res_id=mesd_dict['id'],
            res_type=constants.RES_TYPE_MESD,
            res_state=constants.RES_EVT_ONBOARDED,
            evt_type=constants.RES_EVT_CREATE,
            tstamp=mesd_dict[constants.RES_EVT_CREATED_FLD])
        return mesd_dict

    def delete_mesd(self,
                    context,
                    mesd_id,
                    soft_delete=True):
        """Delete a MESD unless a live MES still references it."""
        with context.session.begin(subtransactions=True):
            mess_db = context.session.query(MES).filter_by(
                mesd_id=mesd_id).first()
            # Bug fix: rows are created with deleted_at=datetime.min and
            # soft delete stamps a real timestamp; deleted_at is never
            # None, so the original "deleted_at is None" in-use check
            # could never raise.  Treat min/None both as "still live".
            if mess_db is not None and mess_db.deleted_at in (None,
                                                              datetime.min):
                raise meo.MESDInUse(mesd_id=mesd_id)

            mesd_db = self._get_resource(context, MESD,
                                         mesd_id)
            if soft_delete:
                mesd_db.update({'deleted_at': timeutils.utcnow()})
                self._cos_db_plg.create_event(
                    context, res_id=mesd_db['id'],
                    res_type=constants.RES_TYPE_MESD,
                    res_state=constants.RES_EVT_NA_STATE,
                    evt_type=constants.RES_EVT_DELETE,
                    tstamp=mesd_db[constants.RES_EVT_DELETED_FLD])
            else:
                context.session.query(MESDAttribute).filter_by(
                    mesd_id=mesd_id).delete()
                context.session.delete(mesd_db)

    def get_mesd(self, context, mesd_id, fields=None):
        mesd_db = self._get_resource(context, MESD, mesd_id)
        return self._make_mesd_dict(mesd_db)

    def get_mesds(self, context, filters=None, fields=None):
        # Robustness: filters now defaults to None like the sibling
        # get_* methods; 'template_source: all' means "no source filter".
        if filters and ('template_source' in filters) and \
                (filters['template_source'][0] == 'all'):
            filters.pop('template_source')
        return self._get_collection(context, MESD,
                                    self._make_mesd_dict,
                                    filters=filters, fields=fields)

    # reference implementation. needs to be overridden by subclass
    def create_mes(self, context, mes):
        """Create a MES row in PENDING_CREATE from its MESD."""
        LOG.debug('mes %s', mes)
        mes = mes['mes']
        tenant_id = self._get_tenant_id_for_create(context, mes)
        mesd_id = mes['mesd_id']
        vim_id = mes['vim_id']
        name = mes.get('name')
        mes_id = uuidutils.generate_uuid()
        try:
            with context.session.begin(subtransactions=True):
                mesd_db = self._get_resource(context, MESD,
                                             mesd_id)
                mes_db = MES(id=mes_id,
                             tenant_id=tenant_id,
                             name=name,
                             description=mesd_db.description,
                             mea_ids=None,
                             status=constants.PENDING_CREATE,
                             mgmt_urls=None,
                             mesd_id=mesd_id,
                             vim_id=vim_id,
                             error_reason=None,
                             deleted_at=datetime.min)
                context.session.add(mes_db)
        except DBDuplicateEntry as e:
            raise exceptions.DuplicateEntity(
                _type="mes",
                entry=e.columns)
        evt_details = "MES UUID assigned."
        # NOTE(review): RES_TYPE_mes is lower-case unlike RES_TYPE_MEA /
        # RES_TYPE_VIM -- confirm the constant name in
        # apmec.plugins.common.constants before "fixing" the case.
        self._cos_db_plg.create_event(
            context, res_id=mes_id,
            res_type=constants.RES_TYPE_mes,
            res_state=constants.PENDING_CREATE,
            evt_type=constants.RES_EVT_CREATE,
            tstamp=mes_db[constants.RES_EVT_CREATED_FLD],
            details=evt_details)
        return self._make_mes_dict(mes_db)

    def create_mes_post(self, context, mes_id, mistral_obj,
                        mead_dict, error_reason):
        """Record the outcome of the Mistral workflow that built the MES."""
        LOG.debug('mes ID %s', mes_id)
        output = ast.literal_eval(mistral_obj.output)
        mgmt_urls = dict()
        mea_ids = dict()
        if len(output) > 0:
            for mead_name, mead_val in iteritems(mead_dict):
                for instance in mead_val['instances']:
                    if 'mgmt_url_' + instance in output:
                        mgmt_urls[instance] = ast.literal_eval(
                            output['mgmt_url_' + instance].strip())
                        mea_ids[instance] = output['mea_id_' + instance]
        # Bug fix: decide emptiness BEFORE stringifying.  The original
        # converted first, and str({}) == "{}" is truthy, so the
        # "if not mea_ids" reset to None was dead code and "{}" got
        # persisted for workflows with no matching instances.
        mea_ids = str(mea_ids) if mea_ids else None
        mgmt_urls = str(mgmt_urls) if mgmt_urls else None
        status = constants.ACTIVE if mistral_obj.state == 'SUCCESS' \
            else constants.ERROR
        with context.session.begin(subtransactions=True):
            mes_db = self._get_resource(context, MES,
                                        mes_id)
            mes_db.update({'mea_ids': mea_ids})
            mes_db.update({'mgmt_urls': mgmt_urls})
            mes_db.update({'status': status})
            mes_db.update({'error_reason': error_reason})
            mes_db.update({'updated_at': timeutils.utcnow()})
            mes_dict = self._make_mes_dict(mes_db)
            self._cos_db_plg.create_event(
                context, res_id=mes_dict['id'],
                res_type=constants.RES_TYPE_mes,
                res_state=constants.RES_EVT_NA_STATE,
                evt_type=constants.RES_EVT_UPDATE,
                tstamp=mes_dict[constants.RES_EVT_UPDATED_FLD])
        return mes_dict

    # reference implementation. needs to be overridden by subclass
    def delete_mes(self, context, mes_id):
        """Mark a MES PENDING_DELETE and emit the delete-initiated event."""
        with context.session.begin(subtransactions=True):
            mes_db = self._get_mes_db(
                context, mes_id, _ACTIVE_UPDATE_ERROR_DEAD,
                constants.PENDING_DELETE)
            deleted_mes_db = self._make_mes_dict(mes_db)
        self._cos_db_plg.create_event(
            context, res_id=mes_id,
            res_type=constants.RES_TYPE_mes,
            res_state=deleted_mes_db['status'],
            evt_type=constants.RES_EVT_DELETE,
            tstamp=timeutils.utcnow(), details="MES delete initiated")
        return deleted_mes_db

    def delete_mes_post(self, context, mes_id, mistral_obj,
                        error_reason, soft_delete=True):
        """Finish MES deletion; cascade an inline MESD with the last MES."""
        mes = self.get_mes(context, mes_id)
        mesd_id = mes.get('mesd_id')
        with context.session.begin(subtransactions=True):
            query = (
                self._model_query(context, MES).
                filter(MES.id == mes_id).
                filter(MES.status == constants.PENDING_DELETE))
            if mistral_obj and mistral_obj.state == 'ERROR':
                query.update({'status': constants.ERROR})
                self._cos_db_plg.create_event(
                    context, res_id=mes_id,
                    res_type=constants.RES_TYPE_mes,
                    res_state=constants.ERROR,
                    evt_type=constants.RES_EVT_DELETE,
                    tstamp=timeutils.utcnow(),
                    details="MES Delete ERROR")
            else:
                if soft_delete:
                    deleted_time_stamp = timeutils.utcnow()
                    query.update({'deleted_at': deleted_time_stamp})
                    self._cos_db_plg.create_event(
                        context, res_id=mes_id,
                        res_type=constants.RES_TYPE_mes,
                        res_state=constants.PENDING_DELETE,
                        evt_type=constants.RES_EVT_DELETE,
                        tstamp=deleted_time_stamp,
                        details="mes Delete Complete")
                else:
                    query.delete()
            template_db = self._get_resource(context, MESD, mesd_id)
            if template_db.get('template_source') == 'inline':
                self.delete_mesd(context, mesd_id)

    def get_mes(self, context, mes_id, fields=None):
        mes_db = self._get_resource(context, MES, mes_id)
        return self._make_mes_dict(mes_db)

    def get_mess(self, context, filters=None, fields=None):
        return self._get_collection(context, MES,
                                    self._make_mes_dict,
                                    filters=filters, fields=fields)
+#
+# @author Mark McClain (DreamHost)

The migrations in the alembic/versions contain the changes needed to migrate
from older Apmec releases to newer versions. A migration occurs by executing
a script that details the changes needed to upgrade the database. The
migration scripts are ordered so that multiple scripts can run sequentially to
update the database. The scripts are executed by Apmec's migration wrapper
which uses the Alembic library to manage the migration. Apmec supports
migration from Folsom or later.


If you are a deployer or developer and want to migrate from Folsom to Grizzly
or later you must first add version tracking to the database:

$ apmec-db-manage --config-file /path/to/apmec.conf \
 --config-file /path/to/plugin/config.ini stamp folsom

You can then upgrade to the latest database version via:
$ apmec-db-manage --config-file /path/to/apmec.conf \
 --config-file /path/to/plugin/config.ini upgrade head

To check the current database version:
$ apmec-db-manage --config-file /path/to/apmec.conf \
 --config-file /path/to/plugin/config.ini current

To create a script to run the migration offline:
$ apmec-db-manage --config-file /path/to/apmec.conf \
 --config-file /path/to/plugin/config.ini upgrade head --sql

To run the offline migration between specific migration versions:
$ apmec-db-manage --config-file /path/to/apmec.conf \
--config-file /path/to/plugin/config.ini upgrade \
<start version>:<end version> --sql

Upgrade the database incrementally:
$ apmec-db-manage --config-file /path/to/apmec.conf \
--config-file /path/to/plugin/config.ini upgrade --delta <# of revs>


DEVELOPERS:
A database migration script is required when you submit a change to Apmec
that alters the database model definition. The migration script is a special
python file that includes code to upgrade the database to match the
changes in the model definition. Alembic will execute these scripts in order to
provide a linear migration path between revisions.
The apmec-db-manage command
can be used to generate a migration template for you to complete. The operations
in the template are those supported by the Alembic migration library.

$ apmec-db-manage --config-file /path/to/apmec.conf \
--config-file /path/to/plugin/config.ini revision \
-m "description of revision" \
--autogenerate

This generates a prepopulated template with the changes needed to match the
database state with the models. You should inspect the autogenerated template
to ensure that the proper models have been altered.

In rare circumstances, you may want to start with an empty migration template
and manually author the changes necessary for an upgrade. You can
create a blank file via:

$ apmec-db-manage --config-file /path/to/apmec.conf \
--config-file /path/to/plugin/config.ini revision \
-m "description of revision"

The migration timeline should remain linear so that there is a clear path when
upgrading. To verify that the timeline does not branch, you can run this command:
$ apmec-db-manage --config-file /path/to/apmec.conf \
--config-file /path/to/plugin/config.ini check_migration

If the migration path does branch, you can find the branch point via:
$ apmec-db-manage --config-file /path/to/apmec.conf \
--config-file /path/to/plugin/config.ini history diff --git a/apmec/db/migration/__init__.py b/apmec/db/migration/__init__.py new file mode 100644 index 0000000..4617f29 --- /dev/null +++ b/apmec/db/migration/__init__.py @@ -0,0 +1,95 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# --- apmec/db/migration/__init__.py ---
# NOTE(review): this chunk of the patch carries two files back to back;
# they are kept together here with explicit separators.  The head of
# alembic_migrations/env.py at the end of the chunk is truncated and is
# not reproduced.

from alembic import op
import contextlib
import sqlalchemy as sa
from sqlalchemy.engine import reflection


def alter_enum(table, column, enum_type, nullable):
    """Change *column* of *table* to *enum_type*.

    PostgreSQL cannot ALTER an enum column in place, so the column is
    rebuilt via a rename-copy-drop dance; other backends use a plain
    ALTER COLUMN.
    """
    bind = op.get_bind()
    engine = bind.engine
    if engine.name == 'postgresql':
        values = {'table': table,
                  'column': column,
                  'name': enum_type.name}
        op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
        enum_type.create(bind, checkfirst=False)
        op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
                   "old_%(column)s" % values)
        op.add_column(table, sa.Column(column, enum_type, nullable=nullable))
        op.execute("UPDATE %(table)s SET %(column)s = "
                   "old_%(column)s::text::%(name)s" % values)
        op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values)
        op.execute("DROP TYPE old_%(name)s" % values)
    else:
        op.alter_column(table, column, type_=enum_type,
                        existing_nullable=nullable)


def create_foreign_key_constraint(table_name, fk_constraints):
    """Recreate each reflected FK constraint dict on *table_name*."""
    for fk in fk_constraints:
        op.create_foreign_key(
            constraint_name=fk['name'],
            source_table=table_name,
            referent_table=fk['referred_table'],
            local_cols=fk['constrained_columns'],
            remote_cols=fk['referred_columns'],
            ondelete=fk['options'].get('ondelete')
        )


def drop_foreign_key_constraint(table_name, fk_constraints):
    """Drop each reflected FK constraint dict from *table_name*."""
    for fk in fk_constraints:
        op.drop_constraint(
            constraint_name=fk['name'],
            table_name=table_name,
            type_='foreignkey'
        )


@contextlib.contextmanager
def modify_foreign_keys_constraint(table_names):
    """Temporarily drop the FKs of *table_names* around the with-block.

    FKs are re-created in the finally clause even if the body raises.
    """
    inspector = reflection.Inspector.from_engine(op.get_bind())
    try:
        for table in table_names:
            fk_constraints = inspector.get_foreign_keys(table)
            drop_foreign_key_constraint(table, fk_constraints)
        yield
    finally:
        for table in table_names:
            fk_constraints = inspector.get_foreign_keys(table)
            create_foreign_key_constraint(table, fk_constraints)


def modify_foreign_keys_constraint_with_col_change(
        table_name, old_local_col, new_local_col, existing_type,
        nullable=False):
    """Rename an FK-bearing column, dropping/recreating the FK around it."""
    inspector = reflection.Inspector.from_engine(op.get_bind())
    fk_constraints = inspector.get_foreign_keys(table_name)
    for fk in fk_constraints:
        if old_local_col in fk['constrained_columns']:
            drop_foreign_key_constraint(table_name, [fk])
    op.alter_column(table_name, old_local_col,
                    new_column_name=new_local_col,
                    existing_type=existing_type,
                    nullable=nullable)
    fk_constraints = inspector.get_foreign_keys(table_name)
    for fk in fk_constraints:
        for i in range(len(fk['constrained_columns'])):
            if old_local_col == fk['constrained_columns'][i]:
                fk['constrained_columns'][i] = new_local_col
                create_foreign_key_constraint(table_name, [fk])
                break

# --- apmec/db/migration/alembic.ini ---
# A generic, single database configuration.
#
# [alembic]
# # path to migration scripts
# script_location = %(here)s/alembic_migrations
#
# # template used to generate migration files
# # file_template = %%(rev)s_%%(slug)s
#
# # set to 'true' to run the environment during
# # the 'revision' command, regardless of autogenerate
# # revision_environment = false
#
# # default to an empty string because the Apmec migration cli will
# # extract the correct value and set it programmatically before alembic is
# # fully invoked.
# sqlalchemy.url =
#
# # Logging configuration
# [loggers]
# keys = root,sqlalchemy,alembic
#
# [handlers]
# keys = console
#
# [formatters]
# keys = generic
#
# [logger_root]
# level = WARN
# handlers = console
# qualname =
#
# [logger_sqlalchemy]
# level = WARN
# handlers =
# qualname = sqlalchemy.engine
#
# [logger_alembic]
# level = INFO
# handlers =
# qualname = alembic
#
# [handler_console]
# class = StreamHandler
# args = (sys.stderr,)
# level = NOTSET
# formatter = generic
#
# [formatter_generic]
# format = %(levelname)-5.5s [%(name)s] %(message)s
# datefmt = %H:%M:%S
+logging_config.fileConfig(config.config_file_name) + +# set the target for 'autogenerate' support +target_metadata = model_base.BASE.metadata + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with either a URL + or an Engine. + + Calls to context.execute() here emit the given string to the + script output. + + """ + kwargs = dict() + if apmec_config.database.connection: + kwargs['url'] = apmec_config.database.connection + else: + kwargs['dialect_name'] = apmec_config.database.engine + context.configure(**kwargs) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + engine = create_engine( + apmec_config.database.connection, + poolclass=pool.NullPool) + + connection = engine.connect() + context.configure( + connection=connection, + target_metadata=target_metadata + ) + + try: + with context.begin_transaction(): + context.run_migrations() + finally: + connection.close() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/apmec/db/migration/alembic_migrations/script.py.mako b/apmec/db/migration/alembic_migrations/script.py.mako new file mode 100644 index 0000000..590d5ab --- /dev/null +++ b/apmec/db/migration/alembic_migrations/script.py.mako @@ -0,0 +1,36 @@ +# Copyright ${create_date.year} OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision} +Create Date: ${create_date} + +""" + +# revision identifiers, used by Alembic. +revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +from apmec.db import migration + + +def upgrade(active_plugins=None, options=None): + ${upgrades if upgrades else "pass"} diff --git a/apmec/db/migration/alembic_migrations/versions/000632983ada_add_template_source_column.py b/apmec/db/migration/alembic_migrations/versions/000632983ada_add_template_source_column.py new file mode 100644 index 0000000..9fb3f73 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/000632983ada_add_template_source_column.py @@ -0,0 +1,33 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add template_source column + +Revision ID: 000632983ada +Revises: 0ae5b1ce3024 +Create Date: 2016-12-22 20:30:03.931290 + +""" + +# revision identifiers, used by Alembic. 
+revision = '000632983ada' +down_revision = '0ad3bbce1c19' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.add_column('mead', sa.Column('template_source', sa.String(length=255))) diff --git a/apmec/db/migration/alembic_migrations/versions/0ad3bbce1c18_create_of_network_service_tables.py b/apmec/db/migration/alembic_migrations/versions/0ad3bbce1c18_create_of_network_service_tables.py new file mode 100644 index 0000000..25d7884 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/0ad3bbce1c18_create_of_network_service_tables.py @@ -0,0 +1,73 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""create of Network service tables + +Revision ID: 0ad3bbce1c18 +Revises: 8f7145914cb0 +Create Date: 2016-12-17 19:41:01.906138 + +""" + +# revision identifiers, used by Alembic. 
+revision = '0ad3bbce1c18' +down_revision = '8f7145914cb0' + +from alembic import op +import sqlalchemy as sa + +from apmec.db import types + + +def upgrade(active_plugins=None, options=None): + op.create_table('mesd', + sa.Column('tenant_id', sa.String(length=64), nullable=False), + sa.Column('id', types.Uuid(length=36), nullable=False), + sa.Column('created_at', sa.DateTime(), nullable=True), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.Column('deleted_at', sa.DateTime(), nullable=True), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('description', sa.Text(), nullable=True), + sa.Column('meads', types.Json, nullable=True), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + op.create_table('mes', + sa.Column('tenant_id', sa.String(length=64), nullable=False), + sa.Column('id', types.Uuid(length=36), nullable=False), + sa.Column('created_at', sa.DateTime(), nullable=True), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.Column('deleted_at', sa.DateTime(), nullable=True), + sa.Column('mesd_id', types.Uuid(length=36), nullable=True), + sa.Column('vim_id', sa.String(length=64), nullable=False), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('description', sa.Text(), nullable=True), + sa.Column('mea_ids', sa.TEXT(length=65535), nullable=True), + sa.Column('mgmt_urls', sa.TEXT(length=65535), nullable=True), + sa.Column('status', sa.String(length=64), nullable=False), + sa.Column('error_reason', sa.Text(), nullable=True), + sa.ForeignKeyConstraint(['mesd_id'], ['mesd.id'], ), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + op.create_table('mesd_attribute', + sa.Column('id', types.Uuid(length=36), nullable=False), + sa.Column('mesd_id', types.Uuid(length=36), nullable=False), + sa.Column('key', sa.String(length=255), nullable=False), + sa.Column('value', sa.TEXT(length=65535), nullable=True), + sa.ForeignKeyConstraint(['mesd_id'], ['mesd.id'], ), + 
sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) diff --git a/apmec/db/migration/alembic_migrations/versions/0ad3bbce1c19_increase_vim_password_size.py b/apmec/db/migration/alembic_migrations/versions/0ad3bbce1c19_increase_vim_password_size.py new file mode 100644 index 0000000..4296f90 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/0ad3bbce1c19_increase_vim_password_size.py @@ -0,0 +1,35 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""increase_vim_password_size + +Revision ID: 0ad3bbce1c19 +Revises: 0ad3bbce1c18 +Create Date: 2017-01-17 09:50:46.296206 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '0ad3bbce1c19' +down_revision = '0ad3bbce1c18' + + +def upgrade(active_plugins=None, options=None): + op.alter_column('vimauths', + 'password', + type_=sa.String(length=255)) diff --git a/apmec/db/migration/alembic_migrations/versions/0ae5b1ce3024_unique_constraint_name.py b/apmec/db/migration/alembic_migrations/versions/0ae5b1ce3024_unique_constraint_name.py new file mode 100644 index 0000000..28ce0ca --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/0ae5b1ce3024_unique_constraint_name.py @@ -0,0 +1,58 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""unique_constraint_name + +Revision ID: 0ae5b1ce3024 +Revises: 507122918800 +Create Date: 2016-09-15 16:27:08.736673 + +""" + +# revision identifiers, used by Alembic. +revision = '0ae5b1ce3024' +down_revision = '507122918800' + +from alembic import op +import sqlalchemy as sa + + +def _migrate_duplicate_names(table): + + meta = sa.MetaData(bind=op.get_bind()) + t = sa.Table(table, meta, autoload=True) + + session = sa.orm.Session(bind=op.get_bind()) + with session.begin(subtransactions=True): + dup_names = session.query(t.c.name).group_by( + t.c.name).having(sa.func.count() > 1).all() + if dup_names: + for name in dup_names: + duplicate_obj_query = session.query(t).filter(t.c.name == name[ + 0]).all() + for dup_obj in duplicate_obj_query: + name = dup_obj.name[:242] if len(dup_obj.name) > 242 else \ + dup_obj.name + new_name = '{0}-{1}'.format(name, dup_obj.id[-12:]) + session.execute(t.update().where( + t.c.id == dup_obj.id).values(name=new_name)) + session.commit() + + +def upgrade(active_plugins=None, options=None): + + _migrate_duplicate_names('mea') + _migrate_duplicate_names('mead') + _migrate_duplicate_names('vims') diff --git a/apmec/db/migration/alembic_migrations/versions/12a57080b277_add_service_db.py b/apmec/db/migration/alembic_migrations/versions/12a57080b277_add_service_db.py new file mode 100644 index 0000000..ddaf32a --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/12a57080b277_add_service_db.py @@ -0,0 +1,98 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); 
you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add Service related dbs + +Revision ID: 12a57080b277 +Revises: 5958429bcb3c +Create Date: 2015-11-26 15:18:19.623170 + +""" + +# revision identifiers, used by Alembic. +revision = '12a57080b277' +down_revision = '5958429bcb3c' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + # commands auto generated by Alembic - please adjust! # + op.create_table( + 'servicetypes', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('template_id', sa.String(length=36), nullable=False), + sa.Column('service_type', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['template_id'], ['devicetemplates.id'], ), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + op.create_table( + 'deviceservicecontexts', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('device_id', sa.String(length=36), nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.Column('subnet_id', sa.String(length=36), nullable=True), + sa.Column('port_id', sa.String(length=36), nullable=True), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.Column('role', sa.String(length=255), nullable=True), + sa.Column('index', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + op.create_table( + 'serviceinstances', + 
sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('service_type_id', sa.String(length=36), nullable=True), + sa.Column('service_table_id', sa.String(length=36), nullable=True), + sa.Column('managed_by_user', sa.Boolean(), nullable=True), + sa.Column('mgmt_driver', sa.String(length=255), nullable=True), + sa.Column('mgmt_url', sa.String(length=255), nullable=True), + sa.Column('status', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['service_type_id'], ['servicetypes.id'], ), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + op.create_table( + 'servicecontexts', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('service_instance_id', sa.String(length=36), nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.Column('subnet_id', sa.String(length=36), nullable=True), + sa.Column('port_id', sa.String(length=36), nullable=True), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.Column('role', sa.String(length=255), nullable=True), + sa.Column('index', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['service_instance_id'], + ['serviceinstances.id'], ), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + op.create_table( + 'servicedevicebindings', + sa.Column('service_instance_id', sa.String(length=36), nullable=False), + sa.Column('device_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ), + sa.ForeignKeyConstraint(['service_instance_id'], + ['serviceinstances.id'], ), + sa.PrimaryKeyConstraint('service_instance_id', 'device_id'), + mysql_engine='InnoDB' + ) + # end Alembic commands # diff --git a/apmec/db/migration/alembic_migrations/versions/12a57080b278_alter_devices.py b/apmec/db/migration/alembic_migrations/versions/12a57080b278_alter_devices.py new file mode 
100644 index 0000000..72acd31 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/12a57080b278_alter_devices.py @@ -0,0 +1,42 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Alter devices + +Revision ID: 12a57080b278 +Revises: 12a57080b277 +Create Date: 2015-11-26 15:18:19.623170 + +""" + +# revision identifiers, used by Alembic. +revision = '12a57080b278' +down_revision = '12a57080b277' + +from alembic import op +from sqlalchemy.dialects import mysql +from apmec.db import migration + + +def upgrade(active_plugins=None, options=None): + # commands auto generated by Alembic - please adjust! 
# + fk_constraint = ('deviceattributes', ) + with migration.modify_foreign_keys_constraint(fk_constraint): + op.alter_column(u'deviceattributes', 'device_id', + existing_type=mysql.VARCHAR(length=255), + nullable=False) + op.alter_column(u'devices', 'status', existing_type=mysql.VARCHAR( + length=255), nullable=False) + # end Alembic commands # diff --git a/apmec/db/migration/alembic_migrations/versions/13c0e0661015_add_descrition_to_vnf.py b/apmec/db/migration/alembic_migrations/versions/13c0e0661015_add_descrition_to_vnf.py new file mode 100644 index 0000000..1942e00 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/13c0e0661015_add_descrition_to_vnf.py @@ -0,0 +1,35 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add descrition to mea + +Revision ID: 13c0e0661015 +Revises: 4c31092895b8 +Create Date: 2015-05-18 18:47:22.180962 + +""" + +# revision identifiers, used by Alembic. 
+revision = '13c0e0661015' +down_revision = '4c31092895b8' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.add_column('devices', + sa.Column('description', sa.String(255), + nullable=True, server_default='')) diff --git a/apmec/db/migration/alembic_migrations/versions/1c6b0d82afcd_servicevm_framework.py b/apmec/db/migration/alembic_migrations/versions/1c6b0d82afcd_servicevm_framework.py new file mode 100644 index 0000000..5122133 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/1c6b0d82afcd_servicevm_framework.py @@ -0,0 +1,72 @@ +# Copyright 2013 Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add tables for apmec framework + +Revision ID: 1c6b0d82afcd +Revises: 2db5203cb7a9 +Create Date: 2013-11-25 18:06:13.980301 + +""" + +# revision identifiers, used by Alembic. 
+revision = '1c6b0d82afcd' +down_revision = None + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.create_table( + 'devicetemplates', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('infra_driver', sa.String(length=255), nullable=True), + sa.Column('mgmt_driver', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + op.create_table( + 'devicetemplateattributes', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('template_id', sa.String(length=36), nullable=False), + sa.Column('key', sa.String(length=255), nullable=False), + sa.Column('value', sa.String(length=4096), nullable=True), + sa.ForeignKeyConstraint(['template_id'], ['devicetemplates.id'], ), + sa.PrimaryKeyConstraint('id'), + ) + op.create_table( + 'devices', + sa.Column('id', sa.String(length=255), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('template_id', sa.String(length=36), nullable=True), + sa.Column('instance_id', sa.String(length=255), nullable=True), + sa.Column('mgmt_url', sa.String(length=255), nullable=True), + sa.Column('status', sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint(['template_id'], ['devicetemplates.id'], ), + sa.PrimaryKeyConstraint('id'), + ) + op.create_table( + 'deviceattributes', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('device_id', sa.String(length=255)), + sa.Column('key', sa.String(length=255), nullable=False), + sa.Column('value', sa.String(length=4096), nullable=True), + sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ), + sa.PrimaryKeyConstraint('id'), + ) diff --git 
a/apmec/db/migration/alembic_migrations/versions/22f5385a3d3f_add_status_to_vims.py b/apmec/db/migration/alembic_migrations/versions/22f5385a3d3f_add_status_to_vims.py new file mode 100644 index 0000000..c180de6 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/22f5385a3d3f_add_status_to_vims.py @@ -0,0 +1,35 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add status to vims + +Revision ID: 22f5385a3d3f +Revises: 5f88e86b35c7 +Create Date: 2016-05-12 13:29:30.615609 + +""" + +# revision identifiers, used by Alembic. +revision = '22f5385a3d3f' +down_revision = '5f88e86b35c7' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None):
 + op.add_column('vims', + sa.Column('status', sa.String(255), + nullable=False, server_default='')) diff --git a/apmec/db/migration/alembic_migrations/versions/22f5385a3d4f_remove_proxydb.py b/apmec/db/migration/alembic_migrations/versions/22f5385a3d4f_remove_proxydb.py new file mode 100644 index 0000000..17bc823 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/22f5385a3d4f_remove_proxydb.py @@ -0,0 +1,35 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove proxydb related + +Revision ID: 22f5385a3d4f +Revises: d4f265e8eb9d +Create Date: 2016-08-01 15:47:51.161749 + +""" + +# revision identifiers, used by Alembic. +revision = '22f5385a3d4f' +down_revision = 'd4f265e8eb9d' + +from alembic import op + + +def upgrade(active_plugins=None, options=None): + # commands auto generated by Alembic - please adjust! # + op.drop_table('proxymgmtports') + op.drop_table('proxyserviceports') + # end Alembic commands # diff --git a/apmec/db/migration/alembic_migrations/versions/22f5385a3d50_rename_devicedb.py b/apmec/db/migration/alembic_migrations/versions/22f5385a3d50_rename_devicedb.py new file mode 100644 index 0000000..1f07942 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/22f5385a3d50_rename_devicedb.py @@ -0,0 +1,52 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""rename device db tables + +Revision ID: 22f5385a3d50 +Revises: 22f5385a3d4f +Create Date: 2016-08-01 15:47:51.161749 + +""" + +# revision identifiers, used by Alembic. 
+revision = '22f5385a3d50' +down_revision = '22f5385a3d4f' + +from alembic import op +import sqlalchemy as sa + +from apmec.db import migration + + +def upgrade(active_plugins=None, options=None): + # commands auto generated by Alembic - please adjust! # + op.rename_table('devicetemplates', 'mead') + op.rename_table('devicetemplateattributes', 'mead_attribute') + op.rename_table('devices', 'mea') + op.rename_table('deviceattributes', 'mea_attribute') + migration.modify_foreign_keys_constraint_with_col_change( + 'mead_attribute', 'template_id', 'mead_id', + sa.String(length=36)) + migration.modify_foreign_keys_constraint_with_col_change( + 'servicetypes', 'template_id', 'mead_id', + sa.String(length=36)) + migration.modify_foreign_keys_constraint_with_col_change( + 'mea', 'template_id', 'mead_id', + sa.String(length=36)) + migration.modify_foreign_keys_constraint_with_col_change( + 'mea_attribute', 'device_id', 'mea_id', + sa.String(length=36)) + # end Alembic commands # diff --git a/apmec/db/migration/alembic_migrations/versions/24bec5f211c7_alter_value_in_deviceattributes.py b/apmec/db/migration/alembic_migrations/versions/24bec5f211c7_alter_value_in_deviceattributes.py new file mode 100644 index 0000000..3d9fad0 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/24bec5f211c7_alter_value_in_deviceattributes.py @@ -0,0 +1,34 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""Alter value in deviceattributes + +Revision ID: 24bec5f211c7 +Revises: 2774a42c7163 +Create Date: 2016-01-24 19:21:03.410029 + +""" + +# revision identifiers, used by Alembic. +revision = '24bec5f211c7' +down_revision = '2774a42c7163' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.alter_column('deviceattributes', + 'value', type_=sa.TEXT(65535), nullable=True) diff --git a/apmec/db/migration/alembic_migrations/versions/2774a42c7163_remove_service_related.py b/apmec/db/migration/alembic_migrations/versions/2774a42c7163_remove_service_related.py new file mode 100644 index 0000000..047181d --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/2774a42c7163_remove_service_related.py @@ -0,0 +1,37 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove service related + +Revision ID: 2774a42c7163 +Revises: 12a57080b278 +Create Date: 2015-11-26 15:47:51.161749 + +""" + +# revision identifiers, used by Alembic. +revision = '2774a42c7163' +down_revision = '12a57080b278' + +from alembic import op + + +def upgrade(active_plugins=None, options=None): + # commands auto generated by Alembic - please adjust! 
# + op.drop_table('servicecontexts') + op.drop_table('deviceservicecontexts') + op.drop_table('servicedevicebindings') + op.drop_table('serviceinstances') + # end Alembic commands # diff --git a/apmec/db/migration/alembic_migrations/versions/2ff0a0e360f1_audit_support.py b/apmec/db/migration/alembic_migrations/versions/2ff0a0e360f1_audit_support.py new file mode 100644 index 0000000..557babd --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/2ff0a0e360f1_audit_support.py @@ -0,0 +1,37 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""audit support + +Revision ID: 2ff0a0e360f1 +Revises: 22f5385a3d50 +Create Date: 2016-06-02 15:14:31.888078 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2ff0a0e360f1' +down_revision = '22f5385a3d50' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + for table in ['vims', 'mea', 'mead']: + op.add_column(table, + sa.Column('created_at', sa.DateTime(), nullable=True)) + op.add_column(table, + sa.Column('updated_at', sa.DateTime(), nullable=True)) diff --git a/apmec/db/migration/alembic_migrations/versions/31acbaeb8299_change_vim_shared_property_to_false.py b/apmec/db/migration/alembic_migrations/versions/31acbaeb8299_change_vim_shared_property_to_false.py new file mode 100644 index 0000000..6ac4ebc --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/31acbaeb8299_change_vim_shared_property_to_false.py @@ -0,0 +1,36 @@ +# Copyright 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""change_vim_shared_property_to_false + +Revision ID: 31acbaeb8299 +Revises: e7993093baf1 +Create Date: 2017-05-30 23:46:20.034085 + +""" + +# revision identifiers, used by Alembic. 
+revision = '31acbaeb8299' +down_revision = 'e7993093baf1' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.alter_column('vims', 'shared', + existing_type=sa.Boolean(), + server_default=sa.text('false'), + nullable=False) diff --git a/apmec/db/migration/alembic_migrations/versions/354de64ba129_set_mandatory_columns_not_null.py b/apmec/db/migration/alembic_migrations/versions/354de64ba129_set_mandatory_columns_not_null.py new file mode 100644 index 0000000..31f0bf4 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/354de64ba129_set_mandatory_columns_not_null.py @@ -0,0 +1,37 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set-mandatory-columns-not-null + +Revision ID: 354de64ba129 +Revises: b07673bb8654 +Create Date: 2016-06-02 10:05:22.299780 + +""" + +# revision identifiers, used by Alembic. 
+revision = '354de64ba129' +down_revision = 'b07673bb8654' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + for table in ['devices', 'devicetemplates', 'vims', 'servicetypes']: + op.alter_column(table, + 'tenant_id', + existing_type=sa.String(64), + nullable=False) diff --git a/apmec/db/migration/alembic_migrations/versions/4c31092895b8_remove_service_instance.py b/apmec/db/migration/alembic_migrations/versions/4c31092895b8_remove_service_instance.py new file mode 100644 index 0000000..5ba349e --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/4c31092895b8_remove_service_instance.py @@ -0,0 +1,30 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove service instance + +Revision ID: 4c31092895b8 +Revises: 81ffa86020d +Create Date: 2014-08-01 11:48:10.319498 + +""" + +# revision identifiers, used by Alembic. 
+revision = '4c31092895b8' +down_revision = '81ffa86020d' + + +def upgrade(active_plugins=None, options=None): + pass diff --git a/apmec/db/migration/alembic_migrations/versions/4ee19c8a6d0a_audit_support_events.py b/apmec/db/migration/alembic_migrations/versions/4ee19c8a6d0a_audit_support_events.py new file mode 100644 index 0000000..fb17982 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/4ee19c8a6d0a_audit_support_events.py @@ -0,0 +1,45 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""audit_support_events + +Revision ID: 4ee19c8a6d0a +Revises: 941b5a6fff9e +Create Date: 2016-06-07 03:16:53.513392 + +""" + +# revision identifiers, used by Alembic. 
+revision = '4ee19c8a6d0a' +down_revision = '941b5a6fff9e' + +from alembic import op +import sqlalchemy as sa + +from apmec.db import types + + +def upgrade(active_plugins=None, options=None): + op.create_table('events', + sa.Column('id', sa.Integer, nullable=False, autoincrement=True), + sa.Column('resource_id', types.Uuid, nullable=False), + sa.Column('resource_state', sa.String(64), nullable=False), + sa.Column('resource_type', sa.String(64), nullable=False), + sa.Column('event_type', sa.String(64), nullable=False), + sa.Column('timestamp', sa.DateTime, nullable=False), + sa.Column('event_details', types.Json), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) diff --git a/apmec/db/migration/alembic_migrations/versions/507122918800_adds_vnffg.py b/apmec/db/migration/alembic_migrations/versions/507122918800_adds_vnffg.py new file mode 100644 index 0000000..e1b1f0c --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/507122918800_adds_vnffg.py @@ -0,0 +1,145 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""adds_NFY + +Revision ID: 507122918800 +Revises: 4ee19c8a6d0a +Create Date: 2016-07-29 21:48:18.816277 + +""" + +# revision identifiers, used by Alembic. 
+revision = '507122918800' +down_revision = '4ee19c8a6d0a' + +import sqlalchemy as sa + +from alembic import op +from apmec.db.types import Json + + +def upgrade(active_plugins=None, options=None): + + op.create_table( + 'NANYtemplates', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=64), nullable=False), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('template', Json), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + + op.create_table( + 'NANYs', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=64), nullable=False), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('NANYD_id', sa.String(length=36), nullable=False), + sa.Column('status', sa.String(length=255), nullable=False), + sa.Column('mea_mapping', Json), + sa.ForeignKeyConstraint(['NANYD_id'], ['NANYtemplates.id'], ), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + + op.create_table( + 'NANYnfps', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=64), nullable=False), + sa.Column('NANY_id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('status', sa.String(length=255), nullable=False), + sa.Column('path_id', sa.String(length=255), nullable=False), + sa.Column('symmetrical', sa.Boolean, default=False), + sa.ForeignKeyConstraint(['NANY_id'], ['NANYs.id'], ), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + + op.create_table( + 'NANYchains', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=64), nullable=False), + sa.Column('instance_id', sa.String(length=255), nullable=True), + sa.Column('nfp_id', sa.String(length=36), 
nullable=False), + sa.Column('status', sa.String(length=255), nullable=False), + sa.Column('path_id', sa.String(length=255), nullable=False), + sa.Column('symmetrical', sa.Boolean, default=False), + sa.Column('chain', Json), + sa.ForeignKeyConstraint(['nfp_id'], ['NANYnfps.id'], ), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + + op.create_table( + 'NANYclassifiers', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=64), nullable=False), + sa.Column('nfp_id', sa.String(length=36), nullable=False), + sa.Column('instance_id', sa.String(length=255), nullable=True), + sa.Column('chain_id', sa.String(length=36), nullable=False), + sa.Column('status', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['nfp_id'], ['NANYnfps.id'], ), + sa.ForeignKeyConstraint(['chain_id'], ['NANYchains.id'], ), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + + op.create_table( + 'aclmatchcriterias', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('NANYc_id', sa.String(length=36), nullable=False), + sa.Column('eth_src', sa.String(length=36), nullable=True), + sa.Column('eth_dst', sa.String(length=36), nullable=True), + sa.Column('eth_type', sa.String(length=36), nullable=True), + sa.Column('vlan_id', sa.Integer, nullable=True), + sa.Column('vlan_pcp', sa.Integer, nullable=True), + sa.Column('mpls_label', sa.Integer, nullable=True), + sa.Column('mpls_tc', sa.Integer, nullable=True), + sa.Column('ip_dscp', sa.Integer, nullable=True), + sa.Column('ip_ecn', sa.Integer, nullable=True), + sa.Column('ip_src_prefix', sa.String(length=36), nullable=True), + sa.Column('ip_dst_prefix', sa.String(length=36), nullable=True), + sa.Column('source_port_min', sa.Integer, nullable=True), + sa.Column('source_port_max', sa.Integer, nullable=True), + sa.Column('destination_port_min', sa.Integer, nullable=True), + sa.Column('destination_port_max', sa.Integer, nullable=True), + 
sa.Column('ip_proto', sa.Integer, nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.Column('network_src_port_id', sa.String(length=36), nullable=True), + sa.Column('network_dst_port_id', sa.String(length=36), nullable=True), + sa.Column('tenant_id', sa.String(length=64), nullable=True), + sa.Column('icmpv4_type', sa.Integer, nullable=True), + sa.Column('icmpv4_code', sa.Integer, nullable=True), + sa.Column('arp_op', sa.Integer, nullable=True), + sa.Column('arp_spa', sa.Integer, nullable=True), + sa.Column('arp_tpa', sa.Integer, nullable=True), + sa.Column('arp_sha', sa.Integer, nullable=True), + sa.Column('arp_tha', sa.Integer, nullable=True), + sa.Column('ipv6_src', sa.String(36), nullable=True), + sa.Column('ipv6_dst', sa.String(36), nullable=True), + sa.Column('ipv6_flabel', sa.Integer, nullable=True), + sa.Column('icmpv6_type', sa.Integer, nullable=True), + sa.Column('icmpv6_code', sa.Integer, nullable=True), + sa.Column('ipv6_nd_target', sa.String(36), nullable=True), + sa.Column('ipv6_nd_sll', sa.String(36), nullable=True), + sa.Column('ipv6_nd_tll', sa.String(36), nullable=True), + sa.ForeignKeyConstraint(['NANYc_id'], ['NANYclassifiers.id'], ), + sa.PrimaryKeyConstraint('id'), + ) diff --git a/apmec/db/migration/alembic_migrations/versions/5246a6bd410f_multisite_vim.py b/apmec/db/migration/alembic_migrations/versions/5246a6bd410f_multisite_vim.py new file mode 100644 index 0000000..6d06de6 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/5246a6bd410f_multisite_vim.py @@ -0,0 +1,60 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""multisite_vim + +Revision ID: 5246a6bd410f +Revises: 24bec5f211c7 +Create Date: 2016-03-22 14:05:15.129330 + +""" + +# revision identifiers, used by Alembic. +revision = '5246a6bd410f' +down_revision = '24bec5f211c7' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.create_table('vims', + sa.Column('id', sa.String(length=255), nullable=False), + sa.Column('type', sa.String(length=255), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('placement_attr', sa.PickleType(), nullable=True), + sa.Column('shared', sa.Boolean(), server_default=sa.text(u'true'), + nullable=False), + sa.PrimaryKeyConstraint('id'), + mysql_engine='InnoDB' + ) + op.create_table('vimauths', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('vim_id', sa.String(length=255), nullable=False), + sa.Column('password', sa.String(length=128), nullable=False), + sa.Column('auth_url', sa.String(length=255), nullable=False), + sa.Column('vim_project', sa.PickleType(), nullable=False), + sa.Column('auth_cred', sa.PickleType(), nullable=False), + sa.ForeignKeyConstraint(['vim_id'], ['vims.id'], ), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('auth_url') + ) + op.add_column(u'devices', sa.Column('placement_attr', sa.PickleType(), + nullable=True)) + op.add_column(u'devices', sa.Column('vim_id', sa.String(length=36), + nullable=False)) + 
op.create_foreign_key(None, 'devices', 'vims', ['vim_id'], ['id']) diff --git a/apmec/db/migration/alembic_migrations/versions/5958429bcb3c_modify_datatype_of_value.py b/apmec/db/migration/alembic_migrations/versions/5958429bcb3c_modify_datatype_of_value.py new file mode 100644 index 0000000..67a3d40 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/5958429bcb3c_modify_datatype_of_value.py @@ -0,0 +1,34 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""modify datatype of value + +Revision ID: 5958429bcb3c +Revises: 13c0e0661015 +Create Date: 2015-10-05 17:09:24.710961 + +""" + +# revision identifiers, used by Alembic. 
+revision = '5958429bcb3c' +down_revision = '13c0e0661015' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.alter_column('devicetemplateattributes', + 'value', type_=sa.TEXT(65535), nullable=True) diff --git a/apmec/db/migration/alembic_migrations/versions/5f88e86b35c7_make_vnfd_vnf_vim_name_mandatory.py b/apmec/db/migration/alembic_migrations/versions/5f88e86b35c7_make_vnfd_vnf_vim_name_mandatory.py new file mode 100644 index 0000000..8ce49ae --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/5f88e86b35c7_make_vnfd_vnf_vim_name_mandatory.py @@ -0,0 +1,41 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""make MEAD/MEA/VIM name mandatory + +Revision ID: 5f88e86b35c7 +Revises: 354de64ba129 +Create Date: 2016-06-14 11:16:16.303343 + +""" + +# revision identifiers, used by Alembic. 
+revision = '5f88e86b35c7' +down_revision = '354de64ba129' + +from alembic import op +from sqlalchemy.dialects import mysql + + +def upgrade(active_plugins=None, options=None): + op.alter_column('devices', 'name', + existing_type=mysql.VARCHAR(length=255), + nullable=False) + op.alter_column('devicetemplates', 'name', + existing_type=mysql.VARCHAR(length=255), + nullable=False) + op.alter_column('vims', 'name', + existing_type=mysql.VARCHAR(length=255), + nullable=False) diff --git a/apmec/db/migration/alembic_migrations/versions/6e56d4474b2a_blob_to_json_text.py b/apmec/db/migration/alembic_migrations/versions/6e56d4474b2a_blob_to_json_text.py new file mode 100644 index 0000000..8469f5f --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/6e56d4474b2a_blob_to_json_text.py @@ -0,0 +1,55 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""blob-to-json-text + +Revision ID: 6e56d4474b2a +Revises: f958f58e5daa +Create Date: 2016-06-01 09:50:46.296206 + +""" + +import json +import pickle + +from alembic import op +import sqlalchemy as sa + +from apmec.db import types + +# revision identifiers, used by Alembic. 
+revision = '6e56d4474b2a' +down_revision = 'f958f58e5daa' + + +def _migrate_data(table, column_name): + meta = sa.MetaData(bind=op.get_bind()) + t = sa.Table(table, meta, autoload=True) + + for r in t.select().execute(): + stmt = t.update().where(t.c.id == r.id).values( + {column_name: json.dumps(pickle.loads(getattr(r, column_name)))}) + op.execute(stmt) + + op.alter_column(table, + column_name, + type_=types.Json) + + +def upgrade(active_plugins=None, options=None): + _migrate_data('vims', 'placement_attr') + _migrate_data('vimauths', 'vim_project') + _migrate_data('vimauths', 'auth_cred') + _migrate_data('devices', 'placement_attr') diff --git a/apmec/db/migration/alembic_migrations/versions/81ffa86020d_rpc_proxy.py b/apmec/db/migration/alembic_migrations/versions/81ffa86020d_rpc_proxy.py new file mode 100644 index 0000000..465e211 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/81ffa86020d_rpc_proxy.py @@ -0,0 +1,54 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""rpc_proxy + +Revision ID: 81ffa86020d +Revises: 1c6b0d82afcd +Create Date: 2014-03-19 15:50:11.712686 + +""" + +# revision identifiers, used by Alembic. 
+revision = '81ffa86020d' +down_revision = '1c6b0d82afcd' + +# Change to ['*'] if this migration applies to all plugins + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.create_table( + 'proxymgmtports', + sa.Column('device_id', sa.String(255)), + sa.Column('port_id', sa.String(36), nullable=False), + sa.Column('dst_transport_url', sa.String(255)), + sa.Column('svr_proxy_id', sa.String(36)), + sa.Column('svr_mes_proxy_id', sa.String(36)), + sa.Column('clt_proxy_id', sa.String(36)), + sa.Column('clt_mes_proxy_id', sa.String(36)), + sa.PrimaryKeyConstraint('device_id'), + ) + op.create_table( + 'proxyserviceports', + sa.Column('service_instance_id', sa.String(255)), + sa.Column('svr_proxy_id', sa.String(36)), + sa.Column('svr_mes_proxy_id', sa.String(36)), + sa.Column('clt_proxy_id', sa.String(36)), + sa.Column('clt_mes_proxy_id', sa.String(36)), + sa.PrimaryKeyConstraint('service_instance_id'), + ) diff --git a/apmec/db/migration/alembic_migrations/versions/8f7145914cb0_remove_infra_driver_column.py b/apmec/db/migration/alembic_migrations/versions/8f7145914cb0_remove_infra_driver_column.py new file mode 100644 index 0000000..68a2266 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/8f7145914cb0_remove_infra_driver_column.py @@ -0,0 +1,32 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""remove infra_driver column + +Revision ID: 8f7145914cb0 +Revises: 0ae5b1ce3024 +Create Date: 2016-12-08 17:28:26.609343 + +""" + +# revision identifiers, used by Alembic. +revision = '8f7145914cb0' +down_revision = '0ae5b1ce3024' + +from alembic import op + + +def upgrade(active_plugins=None, options=None): + op.drop_column('mead', 'infra_driver') diff --git a/apmec/db/migration/alembic_migrations/versions/941b5a6fff9e_enable_soft_delete.py b/apmec/db/migration/alembic_migrations/versions/941b5a6fff9e_enable_soft_delete.py new file mode 100644 index 0000000..b330b0d --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/941b5a6fff9e_enable_soft_delete.py @@ -0,0 +1,39 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""enable soft delete + +Revision ID: 941b5a6fff9e +Revises: 2ff0a0e360f1 +Create Date: 2016-06-06 10:12:49.787430 + +""" + +# revision identifiers, used by Alembic. 
+revision = '941b5a6fff9e' +down_revision = '2ff0a0e360f1' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + for table in ['vims', 'mea', 'mead']: + op.add_column(table, + sa.Column('deleted_at', sa.DateTime(), nullable=True)) + + # unique constraint is taken care by the meo_db plugin to support + # soft deletion of vim + op.drop_index('auth_url', table_name='vimauths') diff --git a/apmec/db/migration/alembic_migrations/versions/HEAD b/apmec/db/migration/alembic_migrations/versions/HEAD new file mode 100644 index 0000000..656f806 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/HEAD @@ -0,0 +1 @@ +e9a1e47fb0b5 diff --git a/apmec/db/migration/alembic_migrations/versions/README b/apmec/db/migration/alembic_migrations/versions/README new file mode 100644 index 0000000..5b7a981 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/README @@ -0,0 +1,5 @@ +This directory contains the migration scripts for the Apmec project. Please +see the README in apmec/db/migration on how to use and generate new +migrations. + + diff --git a/apmec/db/migration/alembic_migrations/versions/acf941e54075_add_error_reason_to_device.py b/apmec/db/migration/alembic_migrations/versions/acf941e54075_add_error_reason_to_device.py new file mode 100644 index 0000000..55efd0b --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/acf941e54075_add_error_reason_to_device.py @@ -0,0 +1,34 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add error_reason to device + +Revision ID: acf941e54075 +Revises: 5246a6bd410f +Create Date: 2016-04-07 23:53:56.623647 + +""" + +# revision identifiers, used by Alembic. +revision = 'acf941e54075' +down_revision = '5246a6bd410f' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.add_column('devices', sa.Column('error_reason', + sa.Text(), nullable=True)) diff --git a/apmec/db/migration/alembic_migrations/versions/b07673bb8654_set_status_type_tenant_id_length.py b/apmec/db/migration/alembic_migrations/versions/b07673bb8654_set_status_type_tenant_id_length.py new file mode 100644 index 0000000..dfd112b --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/b07673bb8654_set_status_type_tenant_id_length.py @@ -0,0 +1,54 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set-status-type-tenant-id-length + +Revision ID: b07673bb8654 +Revises: c7cde2f45f82 +Create Date: 2016-06-01 12:46:07.499279 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'b07673bb8654' +down_revision = 'c7cde2f45f82' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + for table in ['devices', 'devicetemplates', 'vims', 'servicetypes']: + op.alter_column(table, + 'tenant_id', + type_=sa.String(64), nullable=False) + op.alter_column('vims', + 'type', + type_=sa.String(64)) + op.alter_column('devices', + 'instance_id', + type_=sa.String(64), nullable=True) + op.alter_column('devices', + 'status', + type_=sa.String(64)) + op.alter_column('proxymgmtports', + 'device_id', + type_=sa.String(64), nullable=False) + op.alter_column('proxyserviceports', + 'service_instance_id', + type_=sa.String(64), nullable=False) + op.alter_column('servicetypes', + 'service_type', + type_=sa.String(64)) diff --git a/apmec/db/migration/alembic_migrations/versions/c256228ed37c_unique_constraint_on_name_and_id.py b/apmec/db/migration/alembic_migrations/versions/c256228ed37c_unique_constraint_on_name_and_id.py new file mode 100644 index 0000000..8704f3d --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/c256228ed37c_unique_constraint_on_name_and_id.py @@ -0,0 +1,40 @@ +# Copyright 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""unique constraint on name and id + +Revision ID: c256228ed37c +Revises: ef14f8026327 +Create Date: 2017-03-01 12:28:58.467900 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'c256228ed37c' +down_revision = 'ef14f8026327' + +from alembic import op + + +def _add_unique_constraint(table): + op.create_unique_constraint( + constraint_name='uniq_%s0tenant_id0name' % table, + table_name=table, + columns=['tenant_id', 'name']) + + +def upgrade(active_plugins=None, options=None): + for table in ['mea', 'mead', 'vims', 'mes', 'mesd']: + _add_unique_constraint(table) diff --git a/apmec/db/migration/alembic_migrations/versions/c7cde2f45f82_set_description_to_text.py b/apmec/db/migration/alembic_migrations/versions/c7cde2f45f82_set_description_to_text.py new file mode 100644 index 0000000..9513b3a --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/c7cde2f45f82_set_description_to_text.py @@ -0,0 +1,41 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set-description-to-text + +Revision ID: c7cde2f45f82 +Revises: 6e56d4474b2a +Create Date: 2016-06-01 10:58:43.022668 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'c7cde2f45f82' +down_revision = '6e56d4474b2a' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.alter_column('vims', + 'description', + type_=sa.Text) + op.alter_column('devices', + 'description', + type_=sa.Text) + op.alter_column('devicetemplates', + 'description', + type_=sa.Text) diff --git a/apmec/db/migration/alembic_migrations/versions/d4f265e8eb9d_add_default_to_vim.py b/apmec/db/migration/alembic_migrations/versions/d4f265e8eb9d_add_default_to_vim.py new file mode 100644 index 0000000..933323d --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/d4f265e8eb9d_add_default_to_vim.py @@ -0,0 +1,37 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add default to vim + +Revision ID: d4f265e8eb9d +Revises: 22f5385a3d3f +Create Date: 2016-07-14 11:07:28.115225 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'd4f265e8eb9d' +down_revision = '22f5385a3d3f' + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + + +def upgrade(active_plugins=None, options=None): + op.add_column('vims', sa.Column('is_default', + sa.Boolean(), + server_default=sql.false(), + nullable=False)) diff --git a/apmec/db/migration/alembic_migrations/versions/e7993093baf1_add_unique_constraint_on_deleted_at.py b/apmec/db/migration/alembic_migrations/versions/e7993093baf1_add_unique_constraint_on_deleted_at.py new file mode 100644 index 0000000..7ac5b4a --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/e7993093baf1_add_unique_constraint_on_deleted_at.py @@ -0,0 +1,47 @@ +# Copyright 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add unique constraint on deleted_at + +Revision ID: e7993093baf1 +Revises: c256228ed37c +Create Date: 2017-04-19 10:57:22.157326 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'e7993093baf1' +down_revision = 'c256228ed37c' + +from alembic import op + + +def _drop_unique_constraint(table): + op.drop_constraint( + constraint_name='uniq_%s0tenant_id0name' % table, + table_name=table, type_='unique') + + +def _add_unique_constraint(table): + op.create_unique_constraint( + constraint_name='uniq_%s0tenant_id0name0deleted_at' % table, + table_name=table, + columns=['tenant_id', 'name', 'deleted_at']) + + +def upgrade(active_plugins=None, options=None): + for table in ['mea', 'mead', 'vims', 'mes', 'mesd']: + _drop_unique_constraint(table) + _add_unique_constraint(table) diff --git a/apmec/db/migration/alembic_migrations/versions/e8918cda6433_add_attributes_to_vnffg.py b/apmec/db/migration/alembic_migrations/versions/e8918cda6433_add_attributes_to_vnffg.py new file mode 100644 index 0000000..559f32a --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/e8918cda6433_add_attributes_to_vnffg.py @@ -0,0 +1,34 @@ +# Copyright 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add_attributes_to_NANY + +Revision ID: e8918cda6433 +Revises: 000632983ada +Create Date: 2017-02-09 00:11:08.081746 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'e8918cda6433' +down_revision = '000632983ada' + +from alembic import op +import sqlalchemy as sa +from apmec.db.types import Json + + +def upgrade(active_plugins=None, options=None): + op.add_column('NANYs', sa.Column('attributes', Json)) diff --git a/apmec/db/migration/alembic_migrations/versions/e9a1e47fb0b5_add_template_source_status_to_vnffgtemplate_and_nsd.py b/apmec/db/migration/alembic_migrations/versions/e9a1e47fb0b5_add_template_source_status_to_vnffgtemplate_and_nsd.py new file mode 100644 index 0000000..7a3385f --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/e9a1e47fb0b5_add_template_source_status_to_vnffgtemplate_and_nsd.py @@ -0,0 +1,45 @@ +# Copyright 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add onboarded status for NANYD and mesd + +Revision ID: e9a1e47fb0b5 +Revises: f5c1c3b0f6b4 +Create Date: 2017-07-17 10:02:37.572587 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'e9a1e47fb0b5' +down_revision = 'f5c1c3b0f6b4' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.add_column('NANYtemplates', + sa.Column('template_source', + sa.String(length=255), + server_default='onboarded')) + op.execute("UPDATE NANYtemplates set template_source='onboarded'" + " WHERE template_source is NULL") + + op.add_column('mesd', + sa.Column('template_source', + sa.String(length=255), + server_default='onboarded')) + op.execute("UPDATE mesd set template_source='onboarded'" + " WHERE template_source is NULL") diff --git a/apmec/db/migration/alembic_migrations/versions/ef14f8026327_add_default_onboarded_template_source.py b/apmec/db/migration/alembic_migrations/versions/ef14f8026327_add_default_onboarded_template_source.py new file mode 100644 index 0000000..2537a39 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/ef14f8026327_add_default_onboarded_template_source.py @@ -0,0 +1,36 @@ +# Copyright 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add default onboarded template source + +Revision ID: ef14f8026327 +Revises: e8918cda6433 +Create Date: 2017-02-10 12:10:09.606460 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'ef14f8026327' +down_revision = 'e8918cda6433' + +from alembic import op + + +def upgrade(active_plugins=None, options=None): + op.alter_column('mead', 'template_source', + server_default="onboarded") + + op.execute("UPDATE mead set template_source='onboarded'" + " WHERE template_source is NULL") diff --git a/apmec/db/migration/alembic_migrations/versions/f5c1c3b0f6b4_set_default_value_for_deleted_at.py b/apmec/db/migration/alembic_migrations/versions/f5c1c3b0f6b4_set_default_value_for_deleted_at.py new file mode 100644 index 0000000..ae1bca3 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/f5c1c3b0f6b4_set_default_value_for_deleted_at.py @@ -0,0 +1,46 @@ +# Copyright 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set default value for deleted_at + +Revision ID: f5c1c3b0f6b4 +Revises: 31acbaeb8299 +Create Date: 2017-06-23 03:03:12.200270 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'f5c1c3b0f6b4' +down_revision = '31acbaeb8299' + +from alembic import op +from datetime import datetime + + +def upgrade(active_plugins=None, options=None): + op.execute(("UPDATE mead set deleted_at='%s'" + " WHERE deleted_at is NULL") % datetime.min) + + op.execute(("UPDATE mea set deleted_at='%s'" + " WHERE deleted_at is NULL") % datetime.min) + + op.execute(("UPDATE vims set deleted_at='%s'" + " WHERE deleted_at is NULL") % datetime.min) + + op.execute(("UPDATE mes set deleted_at='%s'" + " WHERE deleted_at is NULL") % datetime.min) + + op.execute(("UPDATE mesd set deleted_at='%s'" + " WHERE deleted_at is NULL") % datetime.min) diff --git a/apmec/db/migration/alembic_migrations/versions/f958f58e5daa_uuid_consistency.py b/apmec/db/migration/alembic_migrations/versions/f958f58e5daa_uuid_consistency.py new file mode 100644 index 0000000..73d9691 --- /dev/null +++ b/apmec/db/migration/alembic_migrations/versions/f958f58e5daa_uuid_consistency.py @@ -0,0 +1,66 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""uuid consistency + +Revision ID: f958f58e5daa +Revises: acf941e54075 +Create Date: 2016-05-28 07:13:07.125562 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'f958f58e5daa' +down_revision = 'acf941e54075' + + +from alembic import op + +from apmec.db import migration +from apmec.db import types + +FK_MAP = {'vims': ('vimauths', 'devices'), 'devices': ('deviceattributes', + 'proxymgmtports'), 'devicetemplates': ('devices', 'servicetypes', + 'devicetemplateattributes')} + + +def upgrade(active_plugins=None, options=None): + + pk_id_tables = ('vims', 'vimauths', 'devices', 'deviceattributes', + 'servicetypes', 'devicetemplates', + 'devicetemplateattributes') + for table in pk_id_tables: + with migration.modify_foreign_keys_constraint(FK_MAP.get(table, [])): + op.alter_column(table, 'id', type_=types.Uuid, + nullable=False) + + fk_template_id_tables = ('devices', 'servicetypes', + 'devicetemplateattributes') + for table in fk_template_id_tables: + with migration.modify_foreign_keys_constraint(fk_template_id_tables): + op.alter_column(table, 'template_id', type_=types.Uuid, + nullable=False) + + fk_vim_id_tables = ('devices', 'vimauths') + for table in fk_vim_id_tables: + with migration.modify_foreign_keys_constraint(fk_vim_id_tables): + op.alter_column(table, 'vim_id', type_=types.Uuid, + nullable=False) + + fk_device_id_tables = ('deviceattributes', 'proxymgmtports') + for table in fk_device_id_tables: + with migration.modify_foreign_keys_constraint(fk_device_id_tables): + op.alter_column(table, 'device_id', type_=types.Uuid, + nullable=False) diff --git a/apmec/db/migration/cli.py b/apmec/db/migration/cli.py new file mode 100644 index 0000000..d32ddb2 --- /dev/null +++ b/apmec/db/migration/cli.py @@ -0,0 +1,177 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from alembic import command as alembic_command +from alembic import config as alembic_config +from alembic import script as alembic_script +from alembic import util as alembic_util +from oslo_config import cfg + +from apmec.db.migration.models import head # noqa +from apmec.db.migration import purge_tables + +HEAD_FILENAME = 'HEAD' + + +_db_opts = [ + cfg.StrOpt('connection', + deprecated_name='sql_connection', + default='', + secret=True, + help=_('URL to database')), + cfg.StrOpt('engine', + default='', + help=_('Database engine')), +] + +CONF = cfg.ConfigOpts() +CONF.register_cli_opts(_db_opts, 'database') + + +def do_alembic_command(config, cmd, *args, **kwargs): + try: + getattr(alembic_command, cmd)(config, *args, **kwargs) + except alembic_util.CommandError as e: + alembic_util.err(str(e)) + + +def do_check_migration(config, cmd): + do_alembic_command(config, 'branches') + validate_head_file(config) + + +def do_upgrade(config, cmd): + if not CONF.command.revision and not CONF.command.delta: + raise SystemExit(_('You must provide a revision or relative delta')) + + revision = CONF.command.revision + + if CONF.command.delta: + revision = '+%s' % str(CONF.command.delta) + else: + revision = CONF.command.revision + + do_alembic_command(config, cmd, revision, sql=CONF.command.sql) + + +def do_stamp(config, cmd): + do_alembic_command(config, cmd, + CONF.command.revision, + sql=CONF.command.sql) + + +def do_revision(config, cmd): + do_alembic_command(config, cmd, + message=CONF.command.message, + autogenerate=CONF.command.autogenerate, + 
sql=CONF.command.sql) + update_head_file(config) + + +def validate_head_file(config): + script = alembic_script.ScriptDirectory.from_config(config) + if len(script.get_heads()) > 1: + alembic_util.err(_('Timeline branches unable to generate timeline')) + + head_path = os.path.join(script.versions, HEAD_FILENAME) + if (os.path.isfile(head_path) and + open(head_path).read().strip() == script.get_current_head()): + return + else: + alembic_util.err(_('HEAD file does not match migration timeline head')) + + +def update_head_file(config): + script = alembic_script.ScriptDirectory.from_config(config) + if len(script.get_heads()) > 1: + alembic_util.err(_('Timeline branches unable to generate timeline')) + + head_path = os.path.join(script.versions, HEAD_FILENAME) + with open(head_path, 'w+') as f: + f.write(script.get_current_head()) + + +def purge_deleted(config, cmd): + """Remove database records that have been previously soft deleted.""" + purge_tables.purge_deleted(config.apmec_config, + CONF.command.resource, + CONF.command.age, + CONF.command.granularity) + + +def add_command_parsers(subparsers): + for name in ['current', 'history', 'branches']: + parser = subparsers.add_parser(name) + parser.set_defaults(func=do_alembic_command) + + parser = subparsers.add_parser('check_migration') + parser.set_defaults(func=do_check_migration) + + parser = subparsers.add_parser('upgrade') + parser.add_argument('--delta', type=int) + parser.add_argument('--sql', action='store_true') + parser.add_argument('revision', nargs='?') + parser.set_defaults(func=do_upgrade) + + parser = subparsers.add_parser('stamp') + parser.add_argument('--sql', action='store_true') + parser.add_argument('revision') + parser.set_defaults(func=do_stamp) + + parser = subparsers.add_parser('revision') + parser.add_argument('-m', '--message') + parser.add_argument('--autogenerate', action='store_true') + parser.add_argument('--sql', action='store_true') + parser.set_defaults(func=do_revision) + + parser = 
subparsers.add_parser('purge_deleted') + parser.set_defaults(func=purge_deleted) + # positional parameter + parser.add_argument( + 'resource', + choices=['all', 'events', 'mea', 'mead', 'vims'], + help=_('Resource name for which deleted entries are to be purged.')) + # optional parameter, can be skipped. default='90' + parser.add_argument('-a', '--age', nargs='?', default='90', + help=_('How long to preserve deleted data,' + 'defaults to 90')) + # optional parameter, can be skipped. default='days' + parser.add_argument( + '-g', '--granularity', default='days', + choices=['days', 'hours', 'minutes', 'seconds'], + help=_('Granularity to use for age argument, defaults to days.')) + + +command_opt = cfg.SubCommandOpt('command', + title='Command', + help=_('Available commands'), + handler=add_command_parsers) + +CONF.register_cli_opt(command_opt) + + +def main(): + config = alembic_config.Config( + os.path.join(os.path.dirname(__file__), 'alembic.ini') + ) + config.set_main_option('script_location', + 'apmec.db.migration:alembic_migrations') + # attach the Apmec conf to the Alembic conf + config.apmec_config = CONF + + CONF() + # TODO(gongysh) enable logging + CONF.command.func(config, CONF.command.name) diff --git a/apmec/db/migration/models/__init__.py b/apmec/db/migration/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/db/migration/models/head.py b/apmec/db/migration/models/head.py new file mode 100644 index 0000000..bf6a85d --- /dev/null +++ b/apmec/db/migration/models/head.py @@ -0,0 +1,28 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The module provides all database models at current HEAD. + +Its purpose is to create comparable metadata with current database schema. +Based on this comparison database can be healed with healing migration. + +""" + +from apmec.db import model_base + + +def get_metadata(): + return model_base.BASE.metadata diff --git a/apmec/db/migration/purge_tables.py b/apmec/db/migration/purge_tables.py new file mode 100644 index 0000000..18eeaf7 --- /dev/null +++ b/apmec/db/migration/purge_tables.py @@ -0,0 +1,121 @@ +# Copyright 2016 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import datetime +import sqlalchemy +from sqlalchemy import and_ +from sqlalchemy import create_engine, pool +from sqlalchemy import inspect + +from oslo_utils import timeutils + +from apmec.common import exceptions + + +GRANULARITY = {'days': 86400, 'hours': 3600, 'minutes': 60, 'seconds': 1} + + +def _generate_associated_tables_map(inspector): + assoc_map = {} + table_names = inspector.get_table_names() + for t in table_names: + fk_list = inspector.get_foreign_keys(t) + for fk in fk_list: + k = str(fk['referred_table']) + v = str(fk['constrained_columns'][0]) + if k not in assoc_map.keys(): + assoc_map[k] = {str(t): v} + else: + assoc_map[k][str(t)] = v + assoc_keys = assoc_map.keys() + for k, v in assoc_map.items(): + for k1 in v.keys(): + if k1 in assoc_keys: + del assoc_map[k][k1] + return assoc_map + + +def _purge_resource_tables(t, meta, engine, time_line, assoc_map): + table_load = sqlalchemy.Table(t, meta, autoload=True) + table_del_query = table_load.delete().where( + table_load.c.deleted_at <= time_line) + if t in assoc_map.keys(): + select_id_query = sqlalchemy.select([table_load.c.id]).where( + table_load.c.deleted_at <= time_line) + resource_ids = [i[0] for i in list(engine.execute(select_id_query))] + if resource_ids: + for key, val in assoc_map[t].items(): + assoc_table_load = sqlalchemy.Table(key, meta, autoload=True) + assoc_table_del_query = assoc_table_load.delete().where( + assoc_table_load.c[val].in_(resource_ids)) + engine.execute(assoc_table_del_query) + engine.execute(table_del_query) + + +def _purge_events_table(meta, engine, time_line): + tname = "events" + event_table_load = sqlalchemy.Table(tname, meta, autoload=True) + event_select_query = sqlalchemy.select( + [event_table_load.c.resource_id] + ).where( + and_(event_table_load.c.event_type == 'DELETE', + event_table_load.c.timestamp <= time_line + ) + ) + resource_ids = [i[0] for i in list(engine.execute(event_select_query))] + if resource_ids: + event_delete_query = 
event_table_load.delete().where( + event_table_load.c.resource_id.in_(resource_ids) + ) + engine.execute(event_delete_query) + + +def purge_deleted(apmec_config, table_name, age, granularity='days'): + try: + age = int(age) + except ValueError: + msg = _("'%s' - age should be an integer") % age + raise exceptions.InvalidInput(error_message=msg) + if age < 0: + msg = _("'%s' - age should be a positive integer") % age + raise exceptions.InvalidInput(error_message=msg) + + if granularity not in GRANULARITY.keys(): + msg = _("'%s' granularity should be days, hours, minutes, " + "or seconds") % granularity + raise exceptions.InvalidInput(error_message=msg) + + age *= GRANULARITY[granularity] + + time_line = timeutils.utcnow() - datetime.timedelta(seconds=age) + engine = get_engine(apmec_config) + meta = sqlalchemy.MetaData() + meta.bind = engine + inspector = inspect(engine) + assoc_map = _generate_associated_tables_map(inspector) + + if table_name == 'events': + _purge_events_table(meta, engine, time_line) + elif table_name == 'all': + _purge_events_table(meta, engine, time_line) + for t in ['mea', 'mead', 'vims']: + _purge_resource_tables(t, meta, engine, time_line, assoc_map) + else: + _purge_resource_tables(table_name, meta, engine, time_line, assoc_map) + + +def get_engine(apmec_config): + return create_engine(apmec_config.database.connection, + poolclass=pool.NullPool) diff --git a/apmec/db/model_base.py b/apmec/db/model_base.py new file mode 100644 index 0000000..0fe44f7 --- /dev/null +++ b/apmec/db/model_base.py @@ -0,0 +1,51 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_db.sqlalchemy import models +from sqlalchemy.ext import declarative +from sqlalchemy import orm + + +class ApmecBase(models.ModelBase): + """Base class for Apmec Models.""" + + __table_args__ = {'mysql_engine': 'InnoDB'} + + def __iter__(self): + self._i = iter(orm.object_mapper(self).columns) + return self + + def next(self): + n = next(self._i).name + return n, getattr(self, n) + + def __repr__(self): + """sqlalchemy based automatic __repr__ method.""" + items = ['%s=%r' % (col.name, getattr(self, col.name)) + for col in self.__table__.columns] + return "<%s.%s[object at %x] {%s}>" % (self.__class__.__module__, + self.__class__.__name__, + id(self), ', '.join(items)) + + +class ApmecBaseV1(ApmecBase): + + @declarative.declared_attr + def __tablename__(cls): + # NOTE(jkoelker) use the pluralized name of the class as the table + return cls.__name__.lower() + 's' + + +BASE = declarative.declarative_base(cls=ApmecBaseV1) diff --git a/apmec/db/models_v1.py b/apmec/db/models_v1.py new file mode 100644 index 0000000..f166a54 --- /dev/null +++ b/apmec/db/models_v1.py @@ -0,0 +1,43 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import timeutils +from oslo_utils import uuidutils +import sqlalchemy as sa + +from apmec.db import types + + +class HasTenant(object): + """Tenant mixin, add to subclasses that have a tenant.""" + + tenant_id = sa.Column(sa.String(64), nullable=False) + + +class HasId(object): + """id mixin, add to subclasses that have an id.""" + + id = sa.Column(types.Uuid, + primary_key=True, + default=uuidutils.generate_uuid) + + +class Audit(object): + """Helps to add time stamp for create, update and delete actions. """ + + created_at = sa.Column(sa.DateTime, + default=lambda: timeutils.utcnow()) + updated_at = sa.Column(sa.DateTime) + deleted_at = sa.Column(sa.DateTime) diff --git a/apmec/db/sqlalchemyutils.py b/apmec/db/sqlalchemyutils.py new file mode 100644 index 0000000..1642c4d --- /dev/null +++ b/apmec/db/sqlalchemyutils.py @@ -0,0 +1,103 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from six import moves +import sqlalchemy +from sqlalchemy.orm.properties import RelationshipProperty + +from apmec.common import exceptions as n_exc + + +def paginate_query(query, model, limit, sorts, marker_obj=None): + """Returns a query with sorting / pagination criteria added. + + Pagination works by requiring a unique sort key, specified by sorts. + (If sort keys is not unique, then we risk looping through values.) + We use the last row in the previous page as the 'marker' for pagination. + So we must return values that follow the passed marker in the order. + With a single-valued sort key, this would be easy: sort_key > X. + With a compound-values sort key, (k1, k2, k3) we must do this to repeat + the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + The reason of didn't use OFFSET clause was it don't scale, please refer + discussion at https://lists.launchpad.net/openstack/msg02547.html + + We also have to cope with different sort directions. + + Typically, the id of the last row is used as the client-facing pagination + marker, then the actual marker object must be fetched from the db and + passed in to us as marker. + + :param query: the query object to which we should add paging/sorting + :param model: the ORM model class + :param limit: maximum number of items to return + :param sorts: array of attributes and direction by which results should + be sorted + :param marker: the last item of the previous page; we returns the next + results after this value. + :rtype: sqlalchemy.orm.query.Query + :return: The query with sorting/pagination added. 
+ """ + if not sorts: + return query + + # A primary key must be specified in sort keys + assert not (limit and + len(set(dict(sorts).keys()) & + set(model.__table__.primary_key.columns.keys())) == 0) + + # Add sorting + for sort_key, sort_direction in sorts: + sort_dir_func = sqlalchemy.asc if sort_direction else sqlalchemy.desc + try: + sort_key_attr = getattr(model, sort_key) + except AttributeError: + # Extension attribute doesn't support for sorting. Because it + # existed in attr_info, it will be catched at here + msg = _("%s is invalid attribute for sort_key") % sort_key + raise n_exc.BadRequest(resource=model.__tablename__, msg=msg) + if isinstance(sort_key_attr.property, RelationshipProperty): + msg = _("The attribute '%(attr)s' is reference to other " + "resource, can't used by sort " + "'%(resource)s'") % {'attr': sort_key, + 'resource': model.__tablename__} + raise n_exc.BadRequest(resource=model.__tablename__, msg=msg) + query = query.order_by(sort_dir_func(sort_key_attr)) + + # Add pagination + if marker_obj: + marker_values = [getattr(marker_obj, sort[0]) for sort in sorts] + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i, sort in enumerate(sorts): + crit_attrs = [(getattr(model, sorts[j][0]) == marker_values[j]) + for j in moves.xrange(i)] + model_attr = getattr(model, sort[0]) + if sort[1]: + crit_attrs.append((model_attr > marker_values[i])) + else: + crit_attrs.append((model_attr < marker_values[i])) + + criteria = sqlalchemy.sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sqlalchemy.sql.or_(*criteria_list) + query = query.filter(f) + + if limit: + query = query.limit(limit) + + return query diff --git a/apmec/db/types.py b/apmec/db/types.py new file mode 100644 index 0000000..ae563ab --- /dev/null +++ b/apmec/db/types.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import uuid + +from sqlalchemy.types import String +from sqlalchemy.types import Text +from sqlalchemy.types import TypeDecorator + + +class Uuid(TypeDecorator): + impl = String(36) + + def process_bind_param(self, value, dialect): + if value is not None: + try: + uuid.UUID(value, version=4) + except ValueError: + raise ValueError( + "Invalid format. It should be in UUID v4 format") + + return value + + def process_result_value(self, value, dialect): + return value + + +class Json(TypeDecorator): + impl = Text + + def process_bind_param(self, value, dialect): + return json.dumps(value) + + def process_result_value(self, value, dialect): + if value is None: + return None + return json.loads(value) diff --git a/apmec/extensions/__init__.py b/apmec/extensions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/extensions/common_services.py b/apmec/extensions/common_services.py new file mode 100644 index 0000000..1bc3e08 --- /dev/null +++ b/apmec/extensions/common_services.py @@ -0,0 +1,153 @@ +# Copyright 2016 Brocade Communications Systems Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six

from apmec._i18n import _
from apmec.api import extensions
from apmec.api.v1 import attributes as attr
from apmec.api.v1 import resource_helper
from apmec.common import exceptions
from apmec.plugins.common import constants
from apmec.services import service_base

# NOTE(review): the original module used _() without importing it,
# relying on gettext installing _ into builtins at startup.  The import
# above makes the dependency explicit, matching apmec/extensions/meo.py.


class EventCreationFailureException(exceptions.ApmecException):
    message = _("Failed to create an event: %(error_str)s")


class EventNotFoundException(exceptions.ApmecException):
    message = _("Specified Event id %(evt_id)s is invalid. Please verify and "
                "pass a valid Event id")


class InvalidModelException(exceptions.ApmecException):
    message = _("Specified model is invalid, only Event model supported")


class InputValuesMissing(exceptions.InvalidInput):
    message = _("Parameter input values missing for the key '%(key)s'")


class ParamYAMLInputMissing(exceptions.InvalidInput):
    message = _("Parameter YAML input missing")


# API attribute map for the read-only 'events' collection: every field is
# server-generated (allow_post/allow_put False) and visible to clients.
RESOURCE_ATTRIBUTE_MAP = {

    'events': {
        'id': {
            'allow_post': False,
            'allow_put': False,
            'is_visible': True,
        },
        'resource_id': {
            'allow_post': False,
            'allow_put': False,
            'is_visible': True
        },
        'resource_type': {
            'allow_post': False,
            'allow_put': False,
            'is_visible': True
        },
        'resource_state': {
            'allow_post': False,
            'allow_put': False,
            'is_visible': True
        },
        'timestamp': {
            'allow_post': False,
            'allow_put': False,
            'is_visible': True,
        },
        'event_details': {
            'allow_post': False,
            'allow_put': False,
            'is_visible': True,
        },
        'event_type': {
            'allow_post': False,
            'allow_put': False,
            'is_visible': True,
        },
    }
}


class Common_services(extensions.ExtensionDescriptor):
    """API extension descriptor exposing the read-only events resource."""

    @classmethod
    def get_name(cls):
        return 'COMMONSERVICES'

    @classmethod
    def get_alias(cls):
        return 'Commonservices'

    @classmethod
    def get_description(cls):
        return "Extension for CommonServices"

    @classmethod
    def get_namespace(cls):
        return 'http://wiki.openstack.org/Apmec'

    @classmethod
    def get_updated(cls):
        return "2016-06-06T13:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Build the API resources for this extension from the attr map."""
        special_mappings = {}
        plural_mappings = resource_helper.build_plural_mappings(
            special_mappings, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plural_mappings)
        return resource_helper.build_resource_info(
            plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.COMMONSERVICES,
            translate_name=True)

    @classmethod
    def get_plugin_interface(cls):
        return CommonServicesPluginBase

    def update_attributes_map(self, attributes):
        super(Common_services, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        # Only API version 1.0 carries these resources.
        version_map = {'1.0': RESOURCE_ATTRIBUTE_MAP}
        return version_map.get(version, {})


@six.add_metaclass(abc.ABCMeta)
class CommonServicesPluginBase(service_base.MECPluginBase):
    """Abstract interface a CommonServices plugin must implement."""

    def get_plugin_name(self):
        return constants.COMMONSERVICES

    def get_plugin_type(self):
        return constants.COMMONSERVICES

    def get_plugin_description(self):
        return 'Apmec CommonServices plugin'

    @abc.abstractmethod
    def get_event(self, context, event_id, fields=None):
        pass

    @abc.abstractmethod
    def get_events(self, context, filters=None, fields=None, sorts=None,
                   limit=None, marker_obj=None, page_reverse=False):
        pass


# --- apmec/extensions/mem.py (file header) ------------------------------
# Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from oslo_log import log as logging +import six + +from apmec.api import extensions +from apmec.api.v1 import attributes as attr +from apmec.api.v1 import base +from apmec.api.v1 import resource_helper +from apmec.common import exceptions +from apmec import manager +from apmec.plugins.common import constants +from apmec.services import service_base + + +LOG = logging.getLogger(__name__) + + +class MultipleMGMTDriversSpecified(exceptions.InvalidInput): + message = _('More than one MGMT Driver per mead is not supported') + + +class ServiceTypesNotSpecified(exceptions.InvalidInput): + message = _('service types are not specified') + + +class MEADInUse(exceptions.InUse): + message = _('MEAD %(mead_id)s is still in use') + + +class MEAInUse(exceptions.InUse): + message = _('MEA %(mea_id)s is still in use') + + +class InvalidInfraDriver(exceptions.InvalidInput): + message = _('VIM type %(vim_name)s is not supported as an infra driver ') + + +class InvalidServiceType(exceptions.InvalidInput): + message = _('invalid service type %(service_type)s') + + +class MEACreateFailed(exceptions.ApmecException): + message = _('creating MEA based on %(mead_id)s failed') + + +class MEACreateWaitFailed(exceptions.ApmecException): + message = _('%(reason)s') + + +class MEAScaleWaitFailed(exceptions.ApmecException): + message = _('%(reason)s') + + +class MEADeleteWaitFailed(exceptions.ApmecException): + message = _('%(reason)s') + + +class MEADNotFound(exceptions.NotFound): + message = _('MEAD %(mead_id)s could not be found') + + +class 
ServiceTypeNotFound(exceptions.NotFound): + message = _('service type %(service_type_id)s could not be found') + + +class MEANotFound(exceptions.NotFound): + message = _('MEA %(mea_id)s could not be found') + + +class ParamYAMLNotWellFormed(exceptions.InvalidInput): + message = _("Parameter YAML not well formed - %(error_msg_details)s") + + +class ToscaParserFailed(exceptions.InvalidInput): + message = _("tosca-parser failed: - %(error_msg_details)s") + + +class HeatTranslatorFailed(exceptions.InvalidInput): + message = _("heat-translator failed: - %(error_msg_details)s") + + +class HeatClientException(exceptions.ApmecException): + message = _("%(msg)s") + + +class UserDataFormatNotFound(exceptions.NotFound): + message = _("user_data and/or user_data_format not provided") + + +class IPAddrInvalidInput(exceptions.InvalidInput): + message = _("IP Address input values should be in a list format") + + +class HugePageSizeInvalidInput(exceptions.InvalidInput): + message = _("Value specified for mem_page_size is invalid:" + "%(error_msg_details)s. The valid values are 'small', 'large'," + "'any' or an integer value in MB") + + +class CpuAllocationInvalidKeys(exceptions.InvalidInput): + message = _("Invalid keys specified in MEAD - %(error_msg_details)s." + "Supported keys are: %(valid_keys)s") + + +class NumaNodesInvalidKeys(exceptions.InvalidInput): + message = _("Invalid keys specified in MEAD - %(error_msg_details)s." + "Supported keys are: %(valid_keys)s") + + +class FilePathMissing(exceptions.InvalidInput): + message = _("'file' attribute is missing for " + "tosca.artifacts.Deployment.Image.VM artifact type") + + +class InfraDriverUnreachable(exceptions.ServiceUnavailable): + message = _("Could not retrieve MEA resource IDs and" + " types. 
Please check %(service)s status.") + + +class MEAInactive(exceptions.InvalidInput): + message = _("MEA %(mea_id)s is not in Active state %(message)s") + + +class MetadataNotMatched(exceptions.InvalidInput): + message = _("Metadata for alarm policy is not matched") + + +class InvalidSubstitutionMapping(exceptions.InvalidInput): + message = _("Input for substitution mapping requirements are not" + " valid for %(requirement)s. They must be in the form" + " of list with two entries") + + +class SMRequirementMissing(exceptions.InvalidInput): + message = _("All the requirements for substitution_mappings are not" + " provided. Missing requirement for %(requirement)s") + + +class InvalidParamsForSM(exceptions.InvalidInput): + message = _("Please provide parameters for substitution mappings") + + +def _validate_service_type_list(data, valid_values=None): + if not isinstance(data, list): + msg = _("invalid data format for service list: '%s'") % data + LOG.debug(msg) + return msg + if not data: + msg = _("empty list is not allowed for service list. 
'%s'") % data + LOG.debug(msg) + return msg + key_specs = { + 'service_type': { + 'type:string': None, + } + } + for service in data: + msg = attr._validate_dict(service, key_specs) + if msg: + LOG.debug(msg) + return msg + + +attr.validators['type:service_type_list'] = _validate_service_type_list + + +RESOURCE_ATTRIBUTE_MAP = { + + 'meads': { + 'id': { + 'allow_post': False, + 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True, + }, + 'tenant_id': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True, + }, + 'name': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + }, + 'description': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + 'default': '', + }, + 'service_types': { + 'allow_post': True, + 'allow_put': False, + 'convert_to': attr.convert_to_list, + 'validate': {'type:service_type_list': None}, + 'is_visible': True, + 'default': attr.ATTR_NOT_SPECIFIED, + }, + 'attributes': { + 'allow_post': True, + 'allow_put': False, + 'convert_to': attr.convert_none_to_empty_dict, + 'validate': {'type:dict_or_nodata': None}, + 'is_visible': True, + 'default': None, + }, + 'created_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'updated_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'template_source': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + 'default': 'onboarded' + }, + }, + + 'meas': { + 'id': { + 'allow_post': False, + 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True + }, + 'tenant_id': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True + }, + 'mead_id': { + 'allow_post': True, + 'allow_put': False, + 
'validate': {'type:uuid': None}, + 'is_visible': True, + 'default': None + }, + 'vim_id': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True, + 'default': '', + }, + 'name': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + }, + 'description': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + 'default': '', + }, + 'instance_id': { + 'allow_post': False, + 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True, + }, + 'mgmt_url': { + 'allow_post': False, + 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True, + }, + 'attributes': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:dict_or_none': None}, + 'is_visible': True, + 'default': {}, + }, + 'placement_attr': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:dict_or_none': None}, + 'is_visible': True, + 'default': {}, + }, + 'status': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'error_reason': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'created_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'updated_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'mead_template': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:dict_or_none': None}, + 'is_visible': True, + 'default': None, + }, + }, +} + + +SUB_RESOURCE_ATTRIBUTE_MAP = { + 'actions': { + 'parent': { + 'collection_name': 'meas', + 'member_name': 'mea' + }, + 'members': { + 'scale': { + 'parameters': { + 'policy': { + 'allow_post': True, + 'allow_put': False, + 'is_visible': True, + 'validate': {'type:string': None} + }, + 'type': { + 'allow_post': True, + 'allow_put': False, + 'is_visible': True, + 'validate': {'type:string': None} + }, + 'tenant_id': { + 'allow_post': True, 
+ 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': False, + 'is_visible': False + }, + } + }, + } + }, + 'triggers': { + 'parent': { + 'collection_name': 'meas', + 'member_name': 'mea' + }, + 'members': { + 'trigger': { + 'parameters': { + 'policy_name': { + 'allow_post': True, + 'allow_put': False, + 'is_visible': True, + 'validate': {'type:string': None} + }, + 'action_name': { + 'allow_post': True, + 'allow_put': False, + 'is_visible': True, + 'validate': {'type:string': None} + }, + 'params': { + 'allow_post': True, + 'allow_put': False, + 'is_visible': True, + 'validate': {'type:dict_or_none': None} + }, + 'tenant_id': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': False, + 'is_visible': False + } + } + }, + } + }, + 'resources': { + 'parent': { + 'collection_name': 'meas', + 'member_name': 'mea' + }, + 'members': { + 'resource': { + 'parameters': { + 'name': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'type': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'id': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + } + } + } + } +} + + +class Mem(extensions.ExtensionDescriptor): + @classmethod + def get_name(cls): + return 'MEA Manager' + + @classmethod + def get_alias(cls): + return 'MEM' + + @classmethod + def get_description(cls): + return "Extension for MEA Manager" + + @classmethod + def get_namespace(cls): + return 'http://wiki.openstack.org/Apmec' + + @classmethod + def get_updated(cls): + return "2013-11-19T10:00:00-00:00" + + @classmethod + def get_resources(cls): + special_mappings = {} + plural_mappings = resource_helper.build_plural_mappings( + special_mappings, RESOURCE_ATTRIBUTE_MAP) + plural_mappings['service_types'] = 'service_type' + attr.PLURALS.update(plural_mappings) + resources = resource_helper.build_resource_info( + plural_mappings, RESOURCE_ATTRIBUTE_MAP, 
constants.MEM, + translate_name=True) + plugin = manager.ApmecManager.get_service_plugins()[ + constants.MEM] + for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: + parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name]['parent'] + + for resource_name in SUB_RESOURCE_ATTRIBUTE_MAP[ + collection_name]['members']: + params = SUB_RESOURCE_ATTRIBUTE_MAP[ + collection_name]['members'][resource_name]['parameters'] + + controller = base.create_resource(collection_name, + resource_name, + plugin, params, + allow_bulk=True, + parent=parent) + + resource = extensions.ResourceExtension( + collection_name, + controller, parent, + attr_map=params) + resources.append(resource) + return resources + + @classmethod + def get_plugin_interface(cls): + return MEMPluginBase + + def update_attributes_map(self, attributes): + super(Mem, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_extended_resources(self, version): + version_map = {'1.0': RESOURCE_ATTRIBUTE_MAP} + return version_map.get(version, {}) + + +@six.add_metaclass(abc.ABCMeta) +class MEMPluginBase(service_base.MECPluginBase): + def get_plugin_name(self): + return constants.MEM + + def get_plugin_type(self): + return constants.MEM + + def get_plugin_description(self): + return 'Apmec MEA Manager plugin' + + @abc.abstractmethod + def create_mead(self, context, mead): + pass + + @abc.abstractmethod + def delete_mead(self, context, mead_id): + pass + + @abc.abstractmethod + def get_mead(self, context, mead_id, fields=None): + pass + + @abc.abstractmethod + def get_meads(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_meas(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_mea(self, context, mea_id, fields=None): + pass + + @abc.abstractmethod + def get_mea_resources(self, context, mea_id, fields=None, filters=None): + pass + + @abc.abstractmethod + def create_mea(self, context, mea): + pass + + @abc.abstractmethod 
+ def update_mea( + self, context, mea_id, mea): + pass + + @abc.abstractmethod + def delete_mea(self, context, mea_id): + pass + + @abc.abstractmethod + def create_mea_scale( + self, context, mea_id, scale): + pass + + @abc.abstractmethod + def create_mea_trigger( + self, context, mea_id, trigger): + pass diff --git a/apmec/extensions/meo.py b/apmec/extensions/meo.py new file mode 100644 index 0000000..832c94d --- /dev/null +++ b/apmec/extensions/meo.py @@ -0,0 +1,526 @@ +# Copyright 2016 Brocade Communications Systems Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import abc

import six

from apmec._i18n import _
from apmec.api import extensions
from apmec.api.v1 import attributes as attr
from apmec.api.v1 import resource_helper
from apmec.common import exceptions
from apmec.plugins.common import constants
from apmec.services import service_base


# --- VIM (Virtualized Infrastructure Manager) errors ---------------------

class VimUnauthorizedException(exceptions.ApmecException):
    message = _("%(message)s")


class VimConnectionException(exceptions.ApmecException):
    message = _("%(message)s")


class VimInUseException(exceptions.ApmecException):
    message = _("VIM %(vim_id)s is still in use by MEA")


class VimDefaultNotDefined(exceptions.ApmecException):
    message = _("Default VIM is not defined.")


class VimDefaultDuplicateException(exceptions.ApmecException):
    message = _("Default VIM already exists %(vim_id)s.")


class VimNotFoundException(exceptions.ApmecException):
    message = _("Specified VIM id %(vim_id)s is invalid. Please verify and "
                "pass a valid VIM id")


class VimRegionNotFoundException(exceptions.ApmecException):
    message = _("Unknown VIM region name %(region_name)s")


class VimKeyNotFoundException(exceptions.ApmecException):
    message = _("Unable to find key file for VIM %(vim_id)s")


class VimUnsupportedResourceTypeException(exceptions.ApmecException):
    message = _("Resource type %(type)s is unsupported by VIM")


class VimGetResourceException(exceptions.ApmecException):
    message = _("Error while trying to issue %(cmd)s to find resource type "
                "%(type)s by resource name %(name)s")


class VimGetResourceNameNotUnique(exceptions.ApmecException):
    message = _("Getting resource id from VIM with resource name %(name)s "
                "by %(cmd)s returns more than one")


class VimGetResourceNotFoundException(exceptions.ApmecException):
    message = _("Getting resource id from VIM with resource name %(name)s "
                "by %(cmd)s returns nothing")


class VimFromMeaNotFoundException(exceptions.NotFound):
    message = _('VIM from MEA %(mea_id)s could not be found')


# --- Template / descriptor (TOSCA, NFY, MEAD) errors ---------------------

class ToscaParserFailed(exceptions.InvalidInput):
    message = _("tosca-parser failed: - %(error_msg_details)s")


class NfydInvalidTemplate(exceptions.InvalidInput):
    message = _("Invalid NFY template input: %(template)s")


class NfydDuplicateForwarderException(exceptions.InvalidInput):
    message = _("Invalid Forwarding Path contains duplicate forwarder not in "
                "order: %(forwarder)s")


class NfydDuplicateCPException(exceptions.InvalidInput):
    message = _("Invalid Forwarding Path contains duplicate connection point "
                ": %(cp)s")


class NfydCpNotFoundException(exceptions.NotFound):
    message = _("Specified CP %(cp_id)s could not be found in MEAD "
                "%(mead_name)s. Please check MEAD for correct Connection "
                "Point.")


class NfydCpNoForwardingException(exceptions.ApmecException):
    message = _("Specified CP %(cp_id)s in MEAD %(mead_name)s "
                "does not have forwarding capability, which is required to be "
                "included in forwarding path")


class NfydWrongEndpointNumber(exceptions.ApmecException):
    message = _("Specified number_of_endpoints %(number)s is not equal to "
                "the number of connection_point %(cps)s")


class NfyInvalidMappingException(exceptions.ApmecException):
    message = _("Matching MEA Instance for MEAD %(mead_name)s could not be "
                "found. Please create an instance of this MEAD before "
                "creating/updating NFY.")


class NfyParamValueFormatError(exceptions.ApmecException):
    message = _("Param values %(param_value)s is not in dict format.")


class NfyParamValueNotUsed(exceptions.ApmecException):
    message = _("Param input %(param_key)s not used.")


class NfyCpNotFoundException(exceptions.NotFound):
    message = _("Specified CP %(cp_id)s could not be found in MEA "
                "%(mea_id)s.")


class NfyMeaNotFoundException(exceptions.NotFound):
    message = _("Specified MEA instance %(mea_name)s in MEA Mapping could not "
                "be found")


# --- NFP (Network Forwarding Path) errors --------------------------------

class NfpAttributeNotFoundException(exceptions.NotFound):
    message = _('NFP attribute %(attribute)s could not be found')


class NfpNotFoundException(exceptions.NotFound):
    message = _('NFP %(nfp_id)s could not be found')


class NfpInUse(exceptions.InUse):
    message = _('NFP %(nfp_id)s is still in use')


class NfpPolicyCriteriaError(exceptions.PolicyCheckError):
    message = _('%(error)s in policy')


class NfpPolicyNotFoundException(exceptions.NotFound):
    message = _('Policy not found in NFP %(nfp)s')


class NfpPolicyTypeError(exceptions.PolicyCheckError):
    message = _('Unsupported Policy Type: %(type)s')


class NfpForwarderNotFoundException(exceptions.NotFound):
    message = _('MEAD Forwarder %(mead)s not found in MEA Mapping %(mapping)s')


class NfpRequirementsException(exceptions.ApmecException):
    message = _('MEAD Forwarder %(mead)s specified more than twice in '
                'requirements path')


# --- SFC / classifier errors ---------------------------------------------

class SfcInUse(exceptions.InUse):
    message = _('SFC %(sfc_id)s is still in use')


class SfcNotFoundException(exceptions.NotFound):
    message = _('Service Function Chain %(sfc_id)s could not be found')


class ClassifierInUse(exceptions.InUse):
    message = _('Classifier %(classifier_id)s is still in use')


class ClassifierNotFoundException(exceptions.NotFound):
    message = _('Classifier %(classifier_id)s could not be found')
+class MESDInUse(exceptions.InUse): + message = _('MESD %(mesd_id)s is still in use') + + +class MESInUse(exceptions.InUse): + message = _('MES %(mes_id)s is still in use') + + +class NoTasksException(exceptions.ApmecException): + message = _('No tasks to run for %(action)s on %(resource)s') + + +RESOURCE_ATTRIBUTE_MAP = { + + 'vims': { + 'id': { + 'allow_post': False, + 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True, + }, + 'tenant_id': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True + }, + 'type': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True + }, + 'auth_url': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True + }, + 'auth_cred': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:dict_or_nodata': None}, + 'is_visible': True, + }, + 'vim_project': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:dict_or_nodata': None}, + 'is_visible': True, + }, + 'name': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + }, + 'description': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + 'default': '', + }, + 'status': { + 'allow_post': False, + 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True, + }, + 'placement_attr': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + 'default': None, + }, + 'shared': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': False, + 'convert_to': attr.convert_to_boolean, + 'required_by_policy': True + }, + 'is_default': { + 'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': False + }, + 'created_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 
'updated_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + }, + 'mesds': { + 'id': { + 'allow_post': False, + 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True, + }, + 'tenant_id': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True, + }, + 'name': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + }, + 'description': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + 'default': '', + }, + 'created_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'updated_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'attributes': { + 'allow_post': True, + 'allow_put': False, + 'convert_to': attr.convert_none_to_empty_dict, + 'validate': {'type:dict_or_nodata': None}, + 'is_visible': True, + 'default': None, + }, + 'template_source': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + 'default': 'onboarded' + }, + + }, + + 'mess': { + 'id': { + 'allow_post': False, + 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True, + }, + 'tenant_id': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True, + }, + 'name': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + }, + 'description': { + 'allow_post': True, + 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, + 'default': '', + }, + 'created_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'updated_at': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'mea_ids': { + 'allow_post': True, + 'allow_put': False, + 'validate': 
{'type:string': None}, + 'is_visible': True, + 'default': '', + }, + 'mesd_id': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'default': None, + }, + 'vim_id': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True, + 'default': '', + }, + 'status': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'error_reason': { + 'allow_post': False, + 'allow_put': False, + 'is_visible': True, + }, + 'attributes': { + 'allow_post': True, + 'allow_put': False, + 'convert_to': attr.convert_none_to_empty_dict, + 'validate': {'type:dict_or_nodata': None}, + 'is_visible': True, + 'default': None, + }, + 'mgmt_urls': { + 'allow_post': False, + 'allow_put': False, + 'convert_to': attr.convert_none_to_empty_dict, + 'validate': {'type:dict_or_nodata': None}, + 'is_visible': True, + }, + 'mesd_template': { + 'allow_post': True, + 'allow_put': False, + 'validate': {'type:dict_or_nodata': None}, + 'is_visible': True, + 'default': None, + }, + }, + +} + + +class Meo(extensions.ExtensionDescriptor): + @classmethod + def get_name(cls): + return 'MEC Orchestrator' + + @classmethod + def get_alias(cls): + return 'MEO' + + @classmethod + def get_description(cls): + return "Extension for MEC Orchestrator" + + @classmethod + def get_namespace(cls): + return 'http://wiki.openstack.org/Apmec' + + @classmethod + def get_updated(cls): + return "2015-12-21T10:00:00-00:00" + + @classmethod + def get_resources(cls): + special_mappings = {} + plural_mappings = resource_helper.build_plural_mappings( + special_mappings, RESOURCE_ATTRIBUTE_MAP) + attr.PLURALS.update(plural_mappings) + return resource_helper.build_resource_info( + plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.MEO, + translate_name=True) + + @classmethod + def get_plugin_interface(cls): + return MEOPluginBase + + def update_attributes_map(self, attributes): + super(Meo, self).update_attributes_map( + 
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_extended_resources(self, version): + version_map = {'1.0': RESOURCE_ATTRIBUTE_MAP} + return version_map.get(version, {}) + + +@six.add_metaclass(abc.ABCMeta) +class MEOPluginBase(service_base.MECPluginBase): + def get_plugin_name(self): + return constants.MEO + + def get_plugin_type(self): + return constants.MEO + + def get_plugin_description(self): + return 'Apmec MEC Orchestrator plugin' + + @abc.abstractmethod + def create_vim(self, context, vim): + pass + + @abc.abstractmethod + def update_vim(self, context, vim_id, vim): + pass + + @abc.abstractmethod + def delete_vim(self, context, vim_id): + pass + + @abc.abstractmethod + def get_vim(self, context, vim_id, fields=None, mask_password=True): + pass + + @abc.abstractmethod + def get_vims(self, context, filters=None, fields=None): + pass + + def get_vim_by_name(self, context, vim_name, fields=None, + mask_password=True): + raise NotImplementedError() + + def get_default_vim(self, context): + raise NotImplementedError() diff --git a/apmec/extensions/meo_plugins/__init__.py b/apmec/extensions/meo_plugins/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/extensions/meo_plugins/edge_service.py b/apmec/extensions/meo_plugins/edge_service.py new file mode 100644 index 0000000..0e887f5 --- /dev/null +++ b/apmec/extensions/meo_plugins/edge_service.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import abc

import six

from apmec._i18n import _
from apmec.common import exceptions
from apmec.services import service_base

# NOTE(review): the original module used _() in the exceptions below
# without importing it (relying on gettext builtins injection); the
# explicit import above matches apmec/extensions/meo.py.


@six.add_metaclass(abc.ABCMeta)
class MESPluginBase(service_base.MECPluginBase):
    """Abstract CRUD interface for MES descriptors (MESD) and instances."""

    @abc.abstractmethod
    def create_mesd(self, context, mesd):
        pass

    @abc.abstractmethod
    def delete_mesd(self, context, mesd_id):
        pass

    @abc.abstractmethod
    def get_mesd(self, context, mesd_id, fields=None):
        pass

    @abc.abstractmethod
    def get_mesds(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def create_mes(self, context, mes):
        pass

    @abc.abstractmethod
    def get_mess(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_mes(self, context, mes_id, fields=None):
        pass

    @abc.abstractmethod
    def delete_mes(self, context, mes_id):
        pass


class MESDNotFound(exceptions.NotFound):
    message = _('MESD %(mesd_id)s could not be found')


class MESNotFound(exceptions.NotFound):
    message = _('MES %(mes_id)s could not be found')


# --- apmec/hacking/checks.py (file header) ------------------------------
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re

import pep8

"""
Guidelines for writing new hacking checks

 - Use only for Apmec specific tests. OpenStack general tests
   should be submitted to the common 'hacking' module.
 - Pick numbers in the range N3xx. Find the current test with
   the highest allocated number and then pick the next value.
 - Keep the test method code in the source file ordered based
   on the N3xx value.
 - List the new rule in the top level HACKING.rst file
 - Add test cases for each new rule to
   apmec/tests/unit/test_hacking.py

"""

# Matches LOG.<level>( followed directly by a bare string literal, i.e. a
# log call whose message was not wrapped in a translation function.
log_translation = re.compile(
    r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")


def validate_log_translations(logical_line, physical_line, filename):
    """N320 - flag log messages that are not marked for translation."""
    # Translations are not required in the test directory
    if "apmec/tests" in filename:
        return
    # Respect inline  # noqa  suppression.
    if pep8.noqa(physical_line):
        return
    msg = "N320: Log messages require translations!"
    if log_translation.match(logical_line):
        yield (0, msg)


def factory(register):
    """Hacking entry point: register all Apmec-local checks."""
    register(validate_log_translations)


# --- apmec/keymgr/__init__.py (file header) -----------------------------
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_config import cfg +from oslo_utils import importutils + +key_manager_opts = [ + cfg.StrOpt('api_class', + default='apmec.keymgr.barbican_key_manager' + '.BarbicanKeyManager', + help='The full class name of the key manager API class'), +] + + +def config_opts(): + return [('key_manager', key_manager_opts)] + + +def API(auth_url, configuration=None): + conf = configuration or cfg.CONF + conf.register_opts(key_manager_opts, group='key_manager') + + cls = importutils.import_class(conf.key_manager.api_class) + return cls(auth_url) diff --git a/apmec/keymgr/barbican_key_manager.py b/apmec/keymgr/barbican_key_manager.py new file mode 100644 index 0000000..c2d8119 --- /dev/null +++ b/apmec/keymgr/barbican_key_manager.py @@ -0,0 +1,251 @@ +# Copyright (c) The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Key manager implementation for Barbican +""" +from barbicanclient import client as barbican_client +from barbicanclient import exceptions as barbican_exception +from keystoneauth1 import identity +from keystoneauth1 import session +from oslo_log import log as logging + +from apmec._i18n import _ +from apmec.keymgr import exception +from apmec.keymgr import key_manager + + +LOG = logging.getLogger(__name__) + + +class BarbicanKeyManager(key_manager.KeyManager): + """Key Manager Interface that wraps the Barbican client API.""" + + def __init__(self, auth_url): + self._barbican_client = None + self._base_url = None + self._auth_url = auth_url + + def _get_barbican_client(self, context): + """Creates a client to connect to the Barbican service. + + :param context: the user context for authentication + :return: a Barbican Client object + :raises Forbidden: if the context is empty + :raises KeyManagerError: if context is missing tenant or tenant is + None or error occurs while creating client + """ + + # Confirm context is provided, if not raise forbidden + if not context: + msg = _("User is not authorized to use key manager.") + LOG.error(msg) + raise exception.Forbidden(msg) + + if self._barbican_client and self._current_context == context: + return self._barbican_client + + try: + auth = self._get_keystone_auth(context) + sess = session.Session(auth=auth) + + self._barbican_endpoint = self._get_barbican_endpoint(auth, sess) + if self._barbican_endpoint[-1] != '/': + self._barbican_endpoint += '/' + self._barbican_client = barbican_client.Client( + session=sess, + endpoint=self._barbican_endpoint) + self._current_context = context + + except Exception as e: + LOG.error("Error creating Barbican client: %s", e) + raise exception.KeyManagerError(reason=e) + + self._base_url = self._create_base_url(auth, + sess, + self._barbican_endpoint) + + return self._barbican_client + + def _get_keystone_auth(self, context): + + if context.__class__.__name__ is 
'KeystonePassword': + return identity.Password( + auth_url=self._auth_url, + username=context.username, + password=context.password, + user_id=context.user_id, + user_domain_id=context.user_domain_id, + user_domain_name=context.user_domain_name, + trust_id=context.trust_id, + domain_id=context.domain_id, + domain_name=context.domain_name, + project_id=context.project_id, + project_name=context.project_name, + project_domain_id=context.project_domain_id, + project_domain_name=context.project_domain_name, + reauthenticate=context.reauthenticate) + elif context.__class__.__name__ is 'KeystoneToken': + return identity.Token( + auth_url=self._auth_url, + token=context.token, + trust_id=context.trust_id, + domain_id=context.domain_id, + domain_name=context.domain_name, + project_id=context.project_id, + project_name=context.project_name, + project_domain_id=context.project_domain_id, + project_domain_name=context.project_domain_name, + reauthenticate=context.reauthenticate) + # this will be kept for oslo.context compatibility until + # projects begin to use utils.credential_factory + elif (context.__class__.__name__ is 'RequestContext' or + context.__class__.__name__ is 'Context'): + return identity.Token( + auth_url=self._auth_url, + token=context.auth_token, + project_id=context.tenant) + else: + msg = _("context must be of type KeystonePassword, " + "KeystoneToken, RequestContext, or Context.") + LOG.error(msg) + raise exception.Forbidden(reason=msg) + + def _get_barbican_endpoint(self, auth, sess): + service_parameters = {'service_type': 'key-manager', + 'service_name': 'barbican', + 'interface': 'internal'} + return auth.get_endpoint(sess, **service_parameters) + + def _create_base_url(self, auth, sess, endpoint): + discovery = auth.get_discovery(sess, url=endpoint) + raw_data = discovery.raw_version_data() + if len(raw_data) == 0: + msg = _( + "Could not find discovery information for %s") % endpoint + LOG.error(msg) + raise exception.KeyManagerError(reason=msg) + 
latest_version = raw_data[-1] + api_version = latest_version.get('id') + base_url = "%s%s/" % (endpoint, api_version) + return base_url + + def store(self, context, secret, expiration=None): + """Stores a secret with the key manager. + + :param context: contains information of the user and the environment + for the request + :param secret: a secret object with unencrypted payload. + Known as "secret" to the barbicanclient api + :param expiration: the expiration time of the secret in ISO 8601 + format + :returns: the UUID of the stored object + :raises KeyManagerError: if object store fails + """ + barbican_client = self._get_barbican_client(context) + + try: + secret = barbican_client.secrets.create( + payload=secret, + secret_type='opaque') + secret.expiration = expiration + secret_ref = secret.store() + return self._retrieve_secret_uuid(secret_ref) + except (barbican_exception.HTTPAuthError, + barbican_exception.HTTPClientError, + barbican_exception.HTTPServerError) as e: + LOG.error("Error storing object: %s", e) + raise exception.KeyManagerError(reason=e) + + def _create_secret_ref(self, object_id): + """Creates the URL required for accessing a secret. + + :param object_id: the UUID of the key to copy + :return: the URL of the requested secret + """ + if not object_id: + msg = _("Key ID is None") + raise exception.KeyManagerError(reason=msg) + return "%ssecrets/%s" % (self._base_url, object_id) + + def _retrieve_secret_uuid(self, secret_ref): + """Retrieves the UUID of the secret from the secret_ref. + + :param secret_ref: the href of the secret + :return: the UUID of the secret + """ + + # The secret_ref is assumed to be of a form similar to + # http://host:9311/v1/secrets/d152fa13-2b41-42ca-a934-6c21566c0f40 + # with the UUID at the end. This command retrieves everything + # after the last '/', which is the UUID. 
+ return secret_ref.rpartition('/')[2] + + def _is_secret_not_found_error(self, error): + if (isinstance(error, barbican_exception.HTTPClientError) and + error.status_code == 404): + return True + else: + return False + + def get(self, context, managed_object_id, metadata_only=False): + """Retrieves the specified managed object. + + :param context: contains information of the user and the environment + for the request + :param managed_object_id: the UUID of the object to retrieve + :param metadata_only: whether secret data should be included + :return: ManagedObject representation of the managed object + :raises KeyManagerError: if object retrieval fails + :raises ManagedObjectNotFoundError: if object not found + """ + barbican_client = self._get_barbican_client(context) + + try: + secret_ref = self._create_secret_ref(managed_object_id) + return barbican_client.secrets.get(secret_ref) + except (barbican_exception.HTTPAuthError, + barbican_exception.HTTPClientError, + barbican_exception.HTTPServerError) as e: + LOG.error("Error retrieving object: %s", e) + if self._is_secret_not_found_error(e): + raise exception.ManagedObjectNotFoundError( + uuid=managed_object_id) + else: + raise exception.KeyManagerError(reason=e) + + def delete(self, context, managed_object_id): + """Deletes the specified managed object. 
+ + :param context: contains information of the user and the environment + for the request + :param managed_object_id: the UUID of the object to delete + :raises KeyManagerError: if object deletion fails + :raises ManagedObjectNotFoundError: if the object could not be found + """ + barbican_client = self._get_barbican_client(context) + + try: + secret_ref = self._create_secret_ref(managed_object_id) + barbican_client.secrets.delete(secret_ref) + except (barbican_exception.HTTPAuthError, + barbican_exception.HTTPClientError, + barbican_exception.HTTPServerError) as e: + LOG.error("Error deleting object: %s", e) + if self._is_secret_not_found_error(e): + raise exception.ManagedObjectNotFoundError( + uuid=managed_object_id) + else: + raise exception.KeyManagerError(reason=e) diff --git a/apmec/keymgr/exception.py b/apmec/keymgr/exception.py new file mode 100644 index 0000000..e776a36 --- /dev/null +++ b/apmec/keymgr/exception.py @@ -0,0 +1,43 @@ +# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Exception for keymgr +""" + +from apmec._i18n import _ +from apmec.common.exceptions import ApmecException + + +class Forbidden(ApmecException): + message = _("You are not authorized to complete this action.") + + +class KeyManagerError(ApmecException): + message = _("Key manager error: %(reason)s") + + +class ManagedObjectNotFoundError(ApmecException): + message = _("Key not found, uuid: %(uuid)s") + + +class AuthTypeInvalidError(ApmecException): + message = _("Invalid auth_type was specified, auth_type: %(type)s") + + +class InsufficientCredentialDataError(ApmecException): + message = _('Insufficient credential data was provided, either ' + '"token" must be set in the passed conf, or a context ' + 'with an "auth_token" property must be passed.') diff --git a/apmec/keymgr/key_manager.py b/apmec/keymgr/key_manager.py new file mode 100644 index 0000000..ff62cf1 --- /dev/null +++ b/apmec/keymgr/key_manager.py @@ -0,0 +1,87 @@ +# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Key manager API +""" + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class KeyManager(object): + """Base Key Manager Interface + + A Key Manager is responsible for creating, reading, and deleting keys. + """ + + @abc.abstractmethod + def __init__(self, auth_url): + """Instantiate a KeyManager object. 
+ + Creates a KeyManager object with implementation specific details + obtained from the supplied configuration. + """ + pass + + @abc.abstractmethod + def store(self, context, managed_object, expiration=None): + """Stores a managed object with the key manager. + + This method stores the specified managed object and returns its UUID + that identifies it within the key manager. If the specified context + does not permit the creation of keys, then a NotAuthorized exception + should be raised. + """ + pass + + @abc.abstractmethod + def get(self, context, managed_object_id, metadata_only=False): + """Retrieves the specified managed object. + + Implementations should verify that the caller has permissions to + retrieve the managed object by checking the context object passed in + as context. If the user lacks permission then a NotAuthorized + exception is raised. + + If the caller requests only metadata, then the object that is + returned will contain only the secret metadata and no secret bytes. + + If the specified object does not exist, then a KeyError should be + raised. Implementations should preclude users from discerning the + UUIDs of objects that belong to other users by repeatedly calling + this method. That is, objects that belong to other users should be + considered "non-existent" and completely invisible. + """ + pass + + @abc.abstractmethod + def delete(self, context, managed_object_id): + """Deletes the specified managed object. + + Implementations should verify that the caller has permission to delete + the managed object by checking the context object (context). A + NotAuthorized exception should be raised if the caller lacks + permission. + + If the specified object does not exist, then a KeyError should be + raised. Implementations should preclude users from discerning the + UUIDs of objects that belong to other users by repeatedly calling this + method. 
That is, objects that belong to other users should be + considered "non-existent" and completely invisible. + """ + pass diff --git a/apmec/locale/es/LC_MESSAGES/apmec.po b/apmec/locale/es/LC_MESSAGES/apmec.po new file mode 100644 index 0000000..28099a1 --- /dev/null +++ b/apmec/locale/es/LC_MESSAGES/apmec.po @@ -0,0 +1,1259 @@ +# OpenStack Infra , 2015. #zanata +# Tom Cocozzello , 2015. #zanata +# Andreas Jaeger , 2016. #zanata +# Eduardo Gonzalez Gutierrez , 2017. #zanata +msgid "" +msgstr "" +"Project-Id-Version: apmec 0.8.1.dev35\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2017-10-13 13:49+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2017-09-27 03:29+0000\n" +"Last-Translator: Eduardo Gonzalez Gutierrez \n" +"Language-Team: Spanish\n" +"Language: es\n" +"X-Generator: Zanata 3.9.6\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" + +#, python-format +msgid "" +"\n" +"Command: %(cmd)s\n" +"Exit code: %(code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"\n" +"Comando: %(cmd)s\n" +"Código de salida: %(code)s\n" +" Salida estándar: %(stdout)r\n" +"Salida de error: %(stderr)r" + +msgid " Cannot fetch details" +msgstr "No ha sido posible extraer detalles" + +#, python-format +msgid "%(_type)s already exist with given %(entry)s" +msgstr "%(_type)s ya existe con %(entry)s dado" + +#, python-format +msgid "%(error)s in policy" +msgstr "%(error)s en regla" + +#, python-format +msgid "" +"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " +"and '%(desc)s'" +msgstr "" +"%(invalid_dirs)s es un valor no válido para sort_dirs, los valores válidos " +"son '%(asc)s' y '%(desc)s'" + +#, python-format +msgid "%(message)s" +msgstr "%(message)s" + +#, python-format +msgid "%(msg)s" +msgstr "%(msg)s" + +#, python-format +msgid "%(reason)s" +msgstr "%(reason)s" + +#, python-format +msgid "%(resource)s with 
name %(name)s already exists" +msgstr "%(resource)s con nombre %(name)s ya existe" + +#, python-format +msgid "%(url)s returned a fault: %(exception)s" +msgstr "%(url)s ha devuelto un error: %(exception)s" + +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "Se ha devuelto %(url)s con HTTP %(status)d" + +#, python-format +msgid "%s is invalid attribute for sort_key" +msgstr "%s es un atributo no válido para sort_key" + +#, python-format +msgid "%s is invalid attribute for sort_keys" +msgstr "%s es un atributo no válido para sort_keys" + +#, python-format +msgid "%s is not a valid VLAN tag" +msgstr "%s no es una etiqueta VLAN válida" + +#, python-format +msgid "'%(data)s' exceeds maximum length of %(max_len)s" +msgstr "'%(data)s' supera la longitud máxima de %(max_len)s" + +#, python-format +msgid "'%(data)s' is not in %(valid_values)s" +msgstr "'%(data)s' no está en %(valid_values)s" + +#, python-format +msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" +msgstr "'%(data)s' es muy grande, no debe ser más grande que '%(limit)d'" + +#, python-format +msgid "'%(data)s' is too small - must be at least '%(limit)d'" +msgstr "'%(data)s' es muy pequeño, debe ser al menos '%(limit)d'" + +#, python-format +msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" +msgstr "" +"'%(data)s' no es un cidr de subred IP reconocido, se recomienda '%(cidr)s'" + +#, python-format +msgid "'%s' - age should be a positive integer" +msgstr "'%s' - edad debe ser un entero positivo" + +#, python-format +msgid "'%s' - age should be an integer" +msgstr "'%s' - edad debe ser un entero" + +#, python-format +msgid "'%s' Blank strings are not permitted" +msgstr "No se permiten '%s' series en blanco" + +#, python-format +msgid "'%s' cannot be converted to boolean" +msgstr "'%s' no se puede convertir a booleano" + +#, python-format +msgid "'%s' contains whitespace" +msgstr "'%s' contiene espacios en blanco" + +#, python-format +msgid 
"'%s' granularity should be days, hours, minutes, or seconds" +msgstr "'%s' granularidad debe ser días, horas, minutos o segundos" + +#, python-format +msgid "'%s' is not a dictionary" +msgstr "'%s' no es un diccionario" + +#, python-format +msgid "'%s' is not a list" +msgstr "'%s' no es una lista" + +#, python-format +msgid "'%s' is not a valid IP address" +msgstr "'%s' no es una dirección IP válida" + +#, python-format +msgid "'%s' is not a valid IP subnet" +msgstr "'%s' no es una subred IP válida" + +#, python-format +msgid "'%s' is not a valid MAC address" +msgstr "'%s' no es una dirección MAC válida" + +#, python-format +msgid "'%s' is not a valid UUID" +msgstr "'%s' no es un UUID válido" + +#, python-format +msgid "'%s' is not a valid boolean value" +msgstr "'%s' no es un valor booleano" + +#, python-format +msgid "'%s' is not a valid input" +msgstr "'%s' no es una entrada válida" + +#, python-format +msgid "'%s' is not a valid nameserver" +msgstr "%s no es un nombre de servidor valido" + +#, python-format +msgid "'%s' is not a valid string" +msgstr "'%s' no es una serie válida" + +#, python-format +msgid "'%s' is not an integer" +msgstr "'%s' no es un entero" + +#, python-format +msgid "'%s' is not of the form =[value]" +msgstr "'%s' no tiene el formato =[valor]" + +#, python-format +msgid "'%s' should be non-negative" +msgstr "'%s' debe ser no negativo" + +msgid "" +"'file' attribute is missing for tosca.artifacts.Deployment.Image.VM artifact " +"type" +msgstr "" +"Falta atributo 'file' para tipo de artefacto tosca.artifacts.Deployment." 
+"Image.VM" + +#, python-format +msgid "Added output for %s" +msgstr "Salida añadida para %s" + +msgid "Address which drivers use to trigger" +msgstr "Dirección que los driver usan para notificar" + +msgid "" +"Alarm monitoring driver to communicate with Hosting MEA/logical service " +"instance apmec plugin will use" +msgstr "" +"Driver de monitor de alarmas para comunicar con la instancia Hosting MEA/" +"servicio logico que apmec plugin usara" + +#, python-format +msgid "" +"All the requirements for substitution_mappings are not provided. Missing " +"requirement for %(requirement)s" +msgstr "" +"Todos los requerimientos para substitution_mappings no han sido " +"proporcionados. Requerimientos que faltan %(requirement)s" + +msgid "Allow the usage of the bulk API" +msgstr "Permitir el uso de la API masiva" + +msgid "Allow the usage of the pagination" +msgstr "Permitir el uso de la paginación" + +msgid "Allow the usage of the sorting" +msgstr "Permitir el uso de la ordenación" + +#, python-format +msgid "An invalid value was provided for %(opt_name)s: %(opt_value)s" +msgstr "" +"Se ha proporcionado un valor no válido para %(opt_name)s: %(opt_value)s" + +msgid "An unknown exception occurred." +msgstr "Una excepción desconocida ha ocurrido" + +#, python-format +msgid "Attribute '%s' not allowed in POST" +msgstr "El atributo '%s' no está permitido en POST" + +msgid "Available commands" +msgstr "Mandatos disponibles" + +#, python-format +msgid "Bad %(resource)s request: %(msg)s" +msgstr "Solicitud %(resource)s incorrecta: %(msg)s" + +#, python-format +msgid "Bridge %(bridge)s does not exist." +msgstr "El puente %(bridge)s no existe." 
+ +msgid "Bulk operation not supported" +msgstr "No se soporta operación masiva" + +msgid "CA certificate file to use to verify connecting clients" +msgstr "" +"Archivo de certificado CA para usar para verificar los clientes de conexión" + +msgid "Cannot create resource for another tenant" +msgstr "No se puede crear el recurso para otro arrendatario" + +msgid "Cannot understand JSON" +msgstr "No se puede entender JSON" + +#, python-format +msgid "Cannot update read-only attribute %s" +msgstr "No se puede actualizar el atributo de sólo lectura %s" + +msgid "Certificate file to use when starting the server securely" +msgstr "" +"Archivo del certificado para usar al iniciar el servidor de manera segura" + +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: %(unders)s" +msgstr "" +"El cambio produciría un uso inferior a 0 para los recursos siguientes: " +"%(unders)s." + +msgid "Class not found." +msgstr "No se ha encontrado la clase." + +#, python-format +msgid "Classifier %(classifier_id)s could not be found" +msgstr "Classifier %(classifier_id)s no ha sido encontrado" + +#, python-format +msgid "Classifier %(classifier_id)s is still in use" +msgstr "Classifier %(classifier_id)s sigue en uso" + +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" +msgstr "" +"No se puede enlazar a %(host)s:%(port)s después de intentar por %(time)d " +"segundos" + +msgid "Could not deserialize data" +msgstr "No se han podido deserializar los datos" + +#, python-format +msgid "Could not find discovery information for %s" +msgstr "No ha sido posible encontrar la información de descubrimiento para %s" + +#, python-format +msgid "" +"Could not retrieve MEA resource IDs and types. Please check %(service)s " +"status." +msgstr "" +"No ha sido posible conseguir IDs y tipos del recurso MEA. Por favor " +"comprueba el estado del servicio %(service)s." + + +#, python-format +msgid "Creation failed. 
%(dev_name)s already exists." +msgstr "La creación ha fallado. %(dev_name)s ya existe." + +#, python-format +msgid "" +"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " +"to update." +msgstr "" +"IP de puerta de enlace actual %(ip_address)s ya está en uso por el puerto " +"%(port_id)s. No es posible actualizar." + +msgid "Database engine" +msgstr "Motor de base de datos" + +#, python-format +msgid "Default VIM already exists %(vim_id)s." +msgstr "VIM por defecto ya existe %(vim_id)s." + +msgid "Default VIM is not defined." +msgstr "VIM por defecto no definido." + +#, python-format +msgid "Duplicate IP address '%s'" +msgstr "Dirección IP duplicada '%s'" + +#, python-format +msgid "Duplicate hostroute '%s'" +msgstr "Ruta de host '%s' duplicada" + +#, python-format +msgid "Duplicate items in the list: '%s'" +msgstr "Elementos duplicados en la lista: '%s'" + +#, python-format +msgid "Duplicate nameserver '%s'" +msgstr "Servidor de nombres '%s' duplicado" + +#, python-format +msgid "ERROR: %s" +msgstr "ERROR: %s" + +msgid "" +"ERROR: Unable to find configuration file via the default search paths (~/." +"apmec/, ~/, /etc/apmec/, /etc/) and the '--config-file' option!" 
+msgstr "" +"ERROR: No ha sido posible encontrar el fichero de configuración en las rutas " +"de búsqueda por defecto (~/.apmec/, ~/, /etc/apmec/, /etc/) y en la opción " +"'--config-file'" + +msgid "Enable SSL on the API server" +msgstr "Habilitar SSL en el servidor API" + +msgid "End of VLAN range is less than start of VLAN range" +msgstr "El final del rango VLAN es menor que el inicio del rango VLAN" + +#, python-format +msgid "" +"Error while trying to issue %(cmd)s to find resource type %(type)s by " +"resource name %(name)s" +msgstr "" +"Error al intentar ejecutar %(cmd)s para buscar tipo de recurso %(type)s por " +"nombre de recurso %(name)s" + +#, python-format +msgid "Extension with alias %s does not exist" +msgstr "La ampliación con el alias %s no existe" + +#, python-format +msgid "Extensions not found: %(extensions)s" +msgstr "Extensiones no encontradas: %(extensions)s" + +#, python-format +msgid "Failed to check policy %(policy)s because %(reason)s" +msgstr "No se ha podido comprobar la política %(policy)s debido a %(reason)s" + +#, python-format +msgid "Failed to create an event: %(error_str)s" +msgstr "Fallo al crear evento: %(error_str)s" + +#, python-format +msgid "Failed to init policy %(policy)s because %(reason)s" +msgstr "No se ha podido iniciar la política %(policy)s debido a %(reason)s" + +#, python-format +msgid "Failed to parse request. Parameter '%s' not specified" +msgstr "" +"No se ha podido analizar la solicitud. No se ha especificado el parámetro " +"'%s'" + +#, python-format +msgid "Failed to parse request. Required attribute '%s' not specified" +msgstr "" +"No se ha podido analizar la solicitud. 
No se ha especificado el atributo " +"necesario '%s'" + +msgid "Flavor Extra Specs" +msgstr "Especificaciones extra de sabor" + +#, python-format +msgid "Found duplicate extension: %(alias)s" +msgstr "Se ha encontrado ampliación duplicada: %(alias)s" + +#, python-format +msgid "" +"Found overlapping allocation pools:%(pool_1)s %(pool_2)s for subnet " +"%(subnet_cidr)s." +msgstr "" +"Se ha encontrado solapamiento de agrupaciones de asignación:%(pool_1)s " +"%(pool_2)s para la subred %(subnet_cidr)s." + +#, python-format +msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s" +msgstr "" +"La IP de pasarela %(ip_address)s está en conflicto con la agrupación de " +"asignación %(pool)s" + +#, python-format +msgid "" +"Getting resource id from VIM with resource name %(name)s by %(cmd)s returns " +"more than one" +msgstr "" +"Obteniendo ID de recurso desde VIM con nombre de recurso %(name)s por " +"%(cmd)s responde con mas de uno" + +#, python-format +msgid "" +"Getting resource id from VIM with resource name %(name)s by %(cmd)s returns " +"nothing" +msgstr "" +"Obteniendo ID de recurso desde VIM con nombre de recurso %(name)s por " +"%(cmd)s no responde nada" + +msgid "Granularity to use for age argument, defaults to days." +msgstr "Granularidad para usar por argumento de edad, por defecto a días." 
+ +msgid "HEAD file does not match migration timeline head" +msgstr "" +"Fichero HEAD no coincide con HEAD de la linea del tiempo de la migración" + +msgid "HTTP port number to send request" +msgstr "Numero de puerto HTTP para enviar petición" + +msgid "Hosting mea drivers apmec plugin will use" +msgstr "Drivers MEA de hospedaje que plugin de apmec va a usar" + +msgid "How long to preserve deleted data,defaults to 90" +msgstr "Cuánto tiempo preservar los datos eliminados, por defecto 90" + +msgid "IP Address input values should be in a list format" +msgstr "Valores de entrada de direcciones IP deben ser en formato de lista" + +#, python-format +msgid "" +"Input for substitution mapping requirements are not valid for " +"%(requirement)s. They must be in the form of list with two entries" +msgstr "" +"Requerimientos de entrada para el mapeo de sustitución no son validos para " +"%(requirement)s. Deben ser en formato de lista con dos entradas." + +msgid "" +"Insufficient credential data was provided, either \"token\" must be set in " +"the passed conf, or a context with an \"auth_token\" property must be passed." +msgstr "" +"Datos de credenciales proporcionadas son insuficientes, o \"token\" debe ser " +"pasado en la configuración, o un contexto con una propiedad \"auth_token\" " +"debe ser pasado." 
+ +msgid "Interval to check for VIM health" +msgstr "Intervalo para comprobar salud del VIM" + +#, python-format +msgid "Invalid CIDR %(input)s given as IP prefix" +msgstr "Se ha proporcionado un CIDR %(input)s no válido como prefijo de IP" + +#, python-format +msgid "Invalid Forwarding Path contains duplicate connection point : %(cp)s" +msgstr "" +"Forwarding Path no válido, contiene punto de conexión duplicado: %(cp)s" + +#, python-format +msgid "" +"Invalid Forwarding Path contains duplicate forwarder not in order: " +"%(forwarder)s" +msgstr "" +"Forwarding Path no válido, contiene el forwarder duplicado sin orden: " +"%(forwarder)s" + +#, python-format +msgid "Invalid alarm url for MEA %(mea_id)s" +msgstr "Url de alarma invalida para MEA %(mea_id)s" + +#, python-format +msgid "Invalid auth_type was specified, auth_type: %(type)s" +msgstr "auth_type especificado no es válido, auth_type: %(type)s" + +#, python-format +msgid "Invalid content type %(content_type)s" +msgstr "Tipo de contenido no válido %(content_type)s" + +#, python-format +msgid "Invalid data format for IP pool: '%s'" +msgstr "Formato de datos no válido para agrupación de IP: '%s'" + +#, python-format +msgid "Invalid data format for fixed IP: '%s'" +msgstr "Formato de datos no válido para IP fija: '%s'" + +#, python-format +msgid "Invalid data format for hostroute: '%s'" +msgstr "Formato de datos no válido para ruta de host: '%s'" + +#, python-format +msgid "Invalid data format for nameserver: '%s'" +msgstr "Formato de datos no válido para servidor de nombres: '%s'" + +#, python-format +msgid "Invalid extension environment: %(reason)s" +msgstr "Entorno de ampliación no válido: %(reason)s" + +#, python-format +msgid "Invalid format: %s" +msgstr "Formato no válido: %s" + +#, python-format +msgid "Invalid input for %(attr)s. Reason: %(reason)s." +msgstr "Entrada no válida para %(attr)s. Razón: %(reason)s." + +#, python-format +msgid "Invalid input for operation: %(error_message)s." 
+msgstr "Entrada no válida para operación: %(error_message)s." + +#, python-format +msgid "" +"Invalid input. '%(target_dict)s' must be a dictionary with keys: " +"%(expected_keys)s" +msgstr "" +"Entrada no válida. '%(target_dict)s' debe ser un diccionario con claves: " +"%(expected_keys)s" + +#, python-format +msgid "" +"Invalid keys specified in MEAD - %(error_msg_details)s.Supported keys are: " +"%(valid_keys)s" +msgstr "" +"Claves invalidas especificadas en MEAD - %(error_msg_details)s. Claves " +"soportadas son: %(valid_keys)s" + +#, python-format +msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'" +msgstr "Rango de VLAN de red no válido: '%(vlan_range)s' - '%(error)s'" + +#, python-format +msgid "Invalid network VXLAN port range: '%(vxlan_range)s'" +msgstr "Rango de puerto VXLAN de red no válido: '%(vxlan_range)s'" + +#, python-format +msgid "" +"Invalid type %(type)s for policy %(policy)s, should be one of %(valid_types)s" +msgstr "" +"Tipo %(type)s invalido para regla %(policy)s, debe ser uno de %(valid_types)s" + +msgid "Key ID is None" +msgstr "ID de clave es None" + +#, python-format +msgid "Key manager error: %(reason)s" +msgstr "Error del gestor de claves: %(reason)s" + +#, python-format +msgid "Key not found, uuid: %(uuid)s" +msgstr "Clave no ha sido encontrada, uuid: %(uuid)s" + +#, python-format +msgid "Limit must be an integer 0 or greater and not '%d'" +msgstr "El límite debe ser un entero mayor o igual a 0 y no '%d'" + +msgid "" +"MGMT driver to communicate with Hosting MEA/logical service instance apmec " +"plugin will use" +msgstr "" +"Driver de MGMT para comunicar con la instancia Hosting MEA/servicio logico " +"que apmec plugin usara" + +msgid "Malformed request body" +msgstr "Cuerpo de solicitud incorrecto" + +#, python-format +msgid "Malformed request body: %(reason)s" +msgstr "Cuerpo de solicitud formado incorrectamente: %(reason)s" + +#, python-format +msgid "" +"Matching MEA Instance for MEAD %(mead_name)s could not be 
found. Please " +"create an instance of this MEAD before creating/updating NFY." +msgstr "" +"Instancia MEA correspondiente a MEAD %(mead_name)s no ha sido encontrada. " +"Por favor crea una instancia de este MEAD antes de crear/actualizar el NFY." + +msgid "Max header line to accommodate large tokens" +msgstr "Máximo de línea de cabecera para acomodar señales grandes" + +msgid "Metadata for alarm policy is not matched" +msgstr "Meta datos para la política de alarmas no coincide." + +msgid "" +"Monitor driver to communicate with Hosting MEA/logical service instance " +"apmec plugin will use" +msgstr "" +"Driver de monitor para comunicar con la instancia Hosting MEA/servicio " +"logico que apmec plugin usara" + +msgid "More than one MGMT Driver per mead is not supported" +msgstr "Más de un Driver MGMT por mead no está soportado" + +msgid "More than one external network exists" +msgstr "Existe más de una red externa" + +#, python-format +msgid "Multiple plugins for service %s were configured" +msgstr "Se han configurado varios complementos para el servicio %s" + +#, python-format +msgid "NFP %(nfp_id)s could not be found" +msgstr "NFP %(nfp_id)s no ha sido encontrado" + +#, python-format +msgid "NFP %(nfp_id)s is still in use" +msgstr "NFP %(nfp_id)s sigue en uso" + +#, python-format +msgid "NFP attribute %(attribute)s could not be found" +msgstr "Atributo NFP %(attribute)s no ha sido encontrado" + +#, python-format +msgid "MES %(ns_id)s could not be found" +msgstr "MES %(ns_id)s no ha sido encontrado" + +#, python-format +msgid "MES %(ns_id)s is still in use" +msgstr "MES %(ns_id)s sigue en uso" + +#, python-format +msgid "MESD %(mesd_id)s could not be found" +msgstr "MESD %(mesd_id)s no ha sido encontrado" + +#, python-format +msgid "MESD %(mesd_id)s is still in use" +msgstr "MESD %(mesd_id)s sigue en uso" + +msgid "Native pagination depend on native sorting" +msgstr "La paginación nativa depende de la ordenación nativa" + +#, python-format +msgid "Network 
%(net_id)s could not be found" +msgstr "No se ha podido encontrar la red %(net_id)s." + +msgid "No details." +msgstr "Sin detalles." + +#, python-format +msgid "No more IP addresses available on network %(net_id)s." +msgstr "No hay más direcciones IP disponibles en la red %(net_id)s. " + +#, python-format +msgid "No tasks to run for %(action)s on %(resource)s" +msgstr "No hay tareas para ejecutar %(action)s en %(resource)s" + +msgid "Not authorized." +msgstr "No Autorizado" + +msgid "Number of attempts to retry for stack creation/deletion" +msgstr "Numero de intentos para re intentar creación/borrado de pila" + +msgid "Number of backlog requests to configure the socket with" +msgstr "" +"Número de solicitudes de registro de reserva para configurar el socket con" + +msgid "Number of seconds to keep retrying to listen" +msgstr "Número de segundos en seguir intentando escuchar" + +msgid "Number of separate worker processes for service" +msgstr "Numero de procesos independientes por servicio" + +#, python-format +msgid "Param input %(param_key)s not used." +msgstr "Parámetro de entrada %(param_key)s no usado." + +#, python-format +msgid "Param values %(param_value)s is not in dict format." +msgstr "" +"Valores de los parámetros %(param_value)s no están en formato de diccionario." + +msgid "Parameter YAML input missing" +msgstr "Falta parámetro de entrada YAML" + +#, python-format +msgid "Parameter YAML not well formed - %(error_msg_details)s" +msgstr "Parameter YAML no bien formateado - %(error_msg_details)s" + +#, python-format +msgid "Parameter input values missing for the key '%(key)s'" +msgstr "Faltan valores de parámetros de entrada para la clave '%(key)s'" + +msgid "Please provide parameters for substitution mappings" +msgstr "Por favor, proporcione parámetros para los mapeo de sustitución" + +#, python-format +msgid "Plugin '%s' not found." +msgstr "No se ha encontrado el plugin '%s'." 
+
+#, python-format
+msgid "Policy %(policy)s does not exist for MEA %(mea_id)s"
+msgstr "Regla %(policy)s no existe para MEA %(mea_id)s"
+
+msgid "Policy configuration policy.json could not be found"
+msgstr "No se ha podido encontrar el policy.json de configuración de política"
+
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr "La política no permite que la %(action)s se realice"
+
+#, python-format
+msgid "Policy not found in NFP %(nfp)s"
+msgstr "Regla no encontrada en NFP %(nfp)s"
+
+msgid "Private key file to use when starting the server securely"
+msgstr ""
+"Archivo de clave privada para usar al iniciar el servidor de manera segura"
+
+#, python-format
+msgid "Quota exceeded for resources: %(overs)s"
+msgstr "Cuota superada para recursos: %(overs)s"
+
+msgid ""
+"Range of seconds to randomly delay when starting the periodic task scheduler "
+"to reduce stampeding. (Disable by setting to 0)"
+msgstr ""
+"Rango de segundos para retrasar aleatoriamente al iniciar el planificador "
+"de tareas periódicas para reducir avalanchas. (Inhabilitar al establecer "
+"en 0)"
+
+msgid "Request Failed: internal server error while processing your request."
+msgstr ""
+"Ha fallado la solicitud: error interno de servidor al procesar la solicitud."
+
+msgid "Resource body required"
+msgstr "Se necesita cuerpo de recurso"
+
+msgid ""
+"Resource cleanup for mea is not completed within {wait} seconds as deletion "
+"of Stack {stack} is not completed"
+msgstr ""
+"Eliminado de recursos no ha sido completado en {wait} segundos porque el "
+"eliminado de pila {stack} no ha sido completado"
+
+msgid ""
+"Resource creation is not completed within {wait} seconds as creation of "
+"stack {stack} is not completed"
+msgstr ""
+"Creación de recursos no ha sido completada en {wait} segundos porque la "
+"creación de pila {stack} no ha sido completada"
+
+msgid "Resource name for which deleted entries are to be purged."
+msgstr "Nombre de recurso para el cual las entradas serán eliminadas" + +msgid "Resource not found." +msgstr "Recurso no encontrado." + +msgid "Resources required" +msgstr "Recursos necesarios " + +msgid "Running without keystone AuthN requires that tenant_id is specified" +msgstr "Ejecución sin keystone AuthN requiere que tenant_id sea especificado" + +#, python-format +msgid "SFC %(sfc_id)s is still in use" +msgstr "SFC %(sfc_id)s sigue en uso" + +#, python-format +msgid "" +"Scaling Policy actionskipped due to status:%(status)s for mea: %(meaid)s" +msgstr "" +"Acciones de Scaling Policy no ejecutadas debido a estado: %(status)s de mea: " +"%(meaid)s" + +msgid "Seconds between running components report states" +msgstr "Segundos para reportar estado de componentes ejecutándose" + +msgid "Seconds between running periodic tasks" +msgstr "Segundos entre tareas periódicas en ejecución" + +#, python-format +msgid "Service Function Chain %(sfc_id)s could not be found" +msgstr "Service Function Chain %(sfc_id)s no ha sido encontrado" + +msgid "" +"Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not " +"supported on OS X." +msgstr "" +"Establece el valor de TCP_KEEPIDLE en segundos para cada socket del " +"servidor. No soportado en OS X." + +#, python-format +msgid "Specified CP %(cp_id)s could not be found in MEA %(mea_id)s." +msgstr "CP especificado %(cp_id)s no ha sido encontrado en MEA %(mea_id)s." + +#, python-format +msgid "" +"Specified CP %(cp_id)s could not be found in MEAD %(mead_name)s. Please " +"check MEAD for correct Connection Point." +msgstr "" +"CP especificado %(cp_id)s no ha sido encontrado en MEAD %(mead_name)s. Por " +"favor comprueba MEAD para el correcto Punto de Conexión." 
+ +#, python-format +msgid "" +"Specified CP %(cp_id)s in MEAD %(mead_name)s does not have forwarding " +"capability, which is required to be included in forwarding path" +msgstr "" +"CP %(cp_id)s especificado en MEAD %(mead_name)s no tiene capacidad de " +"reenvío, que es requerida para ser incluido en el forwarding path" + +#, python-format +msgid "" +"Specified Event id %(evt_id)s is invalid. Please verify and pass a valid " +"Event id" +msgstr "" +"ID de evento especificado %(evt_id)s es invalido. Por favor, verifique y " +"pasa un ID de evento valido." + +#, python-format +msgid "" +"Specified VIM id %(vim_id)s is invalid. Please verify and pass a valid VIM id" +msgstr "" +"VIM id especificado %(vim_id)s no es valido. Por favor, verifique y pasa un " +"VIM id valido" + +#, python-format +msgid "Specified MEA instance %(mea_name)s in MEA Mapping could not be found" +msgstr "" +"Instancia MEA %(mea_name)s especificada en MEA Mapping no ha sido encontrada" + +#, python-format +msgid "" +"Specified MEAD %(mead_name)s in NFYD does not exist. Please create MEADs " +"before creating NFY" +msgstr "" +"MEAD especificado %(mead_name)s en NFYD no existe. Por favor crea MEADs " +"antes de crear NFY" + +msgid "Specified model is invalid, only Event model supported" +msgstr "Modelo especificado es invalido, solo modelo Event soportado" + +#, python-format +msgid "" +"Specified number_of_endpoints %(number)s is not equal to the number of " +"connection_point %(cps)s" +msgstr "" +"number_of_endpoints %(number)s especificado no es igual al número de " +"connection_point %(cps)s" + +msgid "" +"Specifying 'tenant_id' other than authenticated tenant in request requires " +"admin privileges" +msgstr "" +"Para especificar un 'tenant_id' distinto del arrendatario autenticado en la " +"solicitud requiere privilegios administrativos" + +msgid "Sudo privilege is required to run this command." +msgstr "Privilegios de sudo son necesarios para ejecutar este comando." 
+ +msgid "Tenant-id was missing from Quota request" +msgstr "El ID de arrendatario faltaba en la solicitud de cuota" + +msgid "The API paste config file to use" +msgstr "El archivo de configuración de pegar de API a utilizar" + +#, python-format +msgid "The allocation pool %(pool)s is not valid." +msgstr "La agrupación de asignación %(pool)s no es válida. " + +#, python-format +msgid "" +"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." +msgstr "" +"La agrupación de asignación %(pool)s abarca más allá de cidr de subred " +"%(subnet_cidr)s." + +#, python-format +msgid "" +"The attribute '%(attr)s' is reference to other resource, can't used by sort " +"'%(resource)s'" +msgstr "" +"Otro recurso hace referencia al atributo '%(attr)s', la ordenación " +"'%(resource)s no puede usarlo'" + +#, python-format +msgid "" +"The following device_id %(device_id)s is not owned by your tenant or matches " +"another tenants router." +msgstr "" +"El siguiente device_id %(device_id)s no es propiedad de su arrendatario o " +"coincide con el direccionador de otros arrendatarios." + +msgid "The host IP to bind to" +msgstr "El IP de host al que enlazar" + +msgid "The hostname Apmec is running on" +msgstr "El nombre de servidor donde Apmec está ejecutándose" + +msgid "" +"The maximum number of items returned in a single response, value was " +"'infinite' or negative integer means no limit" +msgstr "" +"El número máximo de elementos devueltos en una única respuesta, el valor " +"'infinite' o un entero negativo significa que no hay límite" + +msgid "The number of sort_keys and sort_dirs must be same" +msgstr "El número de sort_keys y sort_dirs debe ser igual" + +msgid "The path for API extensions" +msgstr "La vía de acceso para ampliaciones de API" + +msgid "The policy file to use" +msgstr "El fichero de reglas a usar" + +msgid "The port to bind to" +msgstr "El puerto al que enlazar" + +#, python-format +msgid "The requested content type %s is invalid." 
+msgstr "El tipo de contenido solicitado %s no es válido." + +msgid "The resource could not be found." +msgstr "El recurso no se ha podido encontrar." + +msgid "The resource is inuse" +msgstr "El recurso se está utilizando" + +msgid "The service is unavailable" +msgstr "El servicio no está disponible" + +msgid "The service plugins Apmec will use" +msgstr "Los plugins de servicio que Apmec va a usar" + +msgid "The type of authentication to use" +msgstr "El tipo de autenticación a utilizar" + +msgid "Time interval to wait for VM to boot" +msgstr "Intervalo de tiempo a esperar para que inicie la instancia" + +msgid "Timeline branches unable to generate timeline" +msgstr "Las ramas de la línea de tiempo no pueden generar la línea de tiempo" + +#, python-format +msgid "Trigger %(trigger_name)s does not exist for MEA %(mea_id)s" +msgstr "Disparador %(trigger_name)s no existe para MEA %(mea_id)s" + +msgid "URL to database" +msgstr "URL en base de datos" + +#, python-format +msgid "Unable to find '%s' in request body" +msgstr "No se puede encontrar '%s' en el cuerpo de solicitud " + +#, python-format +msgid "Unable to find any IP address on external network %(net_id)s." +msgstr "" +"No se ha podido encontrar ninguna dirección IP en la red externa %(net_id)s." + +#, python-format +msgid "Unable to find key file for VIM %(vim_id)s" +msgstr "No ha sido posible encontrar el fichero de clave para VIM %(vim_id)s" + +#, python-format +msgid "Unable to find ssl_ca_file : %s" +msgstr "No se ha podido encontrar ssl_ca_file : %s" + +#, python-format +msgid "Unable to find ssl_cert_file : %s" +msgstr "No se ha podido encontrar ssl_cert_file : %s" + +#, python-format +msgid "Unable to find ssl_key_file : %s" +msgstr "No se ha podido encontrar ssl_key_file : %s" + +#, python-format +msgid "Unable to generate unique mac on network %(net_id)s." +msgstr "No se puede generar mac exclusivo en la red %(net_id)s. " + +#, python-format +msgid "" +"Unable to identify a target field from:%s. 
Match should be in the form " +"%%()s" +msgstr "" +"No ha sido posible identificar celda objetivo de: %s. Coincidencia debe se " +"en forma de %%()s" + +#, python-format +msgid "Unable to load %(app_name)s from configuration file %(config_path)s." +msgstr "" +"No se puede cargar %(app_name)s del archivo de configuración %(config_path)s." + +#, python-format +msgid "" +"Unable to reconfigure sharing settings for network %(network)s. Multiple " +"tenants are using it" +msgstr "" +"No se ha podido volver a configurar los valores para la red %(network)s. " +"Varios arrendatarios la están utilizando" + +msgid "Unknown API version specified" +msgstr "Versión API desconocida especificada" + +#, python-format +msgid "Unknown VIM region name %(region_name)s" +msgstr "Nombre de región del VIM desconocido %(region_name)s" + +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "Recursos de cuota desconocidos %(unknown)s." + +msgid "Unmapped error" +msgstr "Error no correlacionado" + +#, python-format +msgid "Unrecognized attribute(s) '%s'" +msgstr "Atributo(s) no reconocido(s) '%s'" + +msgid "Unsupported Content-Type" +msgstr "Tipo de contenido no soportado" + +#, python-format +msgid "Unsupported Policy Type: %(type)s" +msgstr "Tipo de regla no soportada: %(type)s" + +#, python-format +msgid "Unsupported port state: %(port_state)s" +msgstr "Estado de puerto no soportado: %(port_state)s " + +msgid "" +"Use barbican to encrypt vim password if True, save vim credentials in local " +"file system if False" +msgstr "" +"Usa Barbican para encriptar la contraseña de VIM si True, guarda " +"credenciales de VIM en fichero local del sistema is False" + +#, python-format +msgid "User does not have admin privileges: %(reason)s" +msgstr "El usuario no tiene privilegios de administración: %(reason)s" + +msgid "User is not authorized to use key manager." +msgstr "Usuario no autorizado para usar el gestor de claves." 
+ +msgid "User name for alarm monitoring" +msgstr "Nombre de usuario para motorización de alarmas" + +#, python-format +msgid "VIM %(vim_id)s is still in use by MEA" +msgstr "VIM %(vim_id)s sigue en uso por MEA" + +msgid "VIM driver for launching MEAs" +msgstr "Driver VIM para ejecutar MEAs" + +#, python-format +msgid "VIM from MEA %(mea_id)s could not be found" +msgstr "VIM de MEA %(mea_id)s no ha sido encontrado" + +#, python-format +msgid "VIM type %(vim_name)s is not supported as an infra driver " +msgstr "Tipo de VIM %(vim_name)s no soportado como driver de infra" + +#, python-format +msgid "MEA %(mea_id)s could not be found" +msgstr "MEA %(mea_id)s no ha sido encontrado" + +#, python-format +msgid "MEA %(mea_id)s is not in Active state %(message)s" +msgstr "MEA %(mea_id)s no está en estado Activo %(message)s" + +#, python-format +msgid "MEA %(mea_id)s is still in use" +msgstr "MEA %(mea_id)s sigue en uso" + +msgid "MEA configuration failed" +msgstr "Fallo la configuración de MEA" + +#, python-format +msgid "MEA scaling failed for stack %(stack)s with error %(error)s" +msgstr "Fallo escalado de MEA para pila %(stack)s con error %(error)s" + +#, python-format +msgid "" +"MEA scaling failed to complete within %{wait}s seconds while waiting for the " +"stack %(stack)s to be scaled." +msgstr "" +"Fallo para completar escalado de MEA en %{wait}s segundos mientras esperaba " +"a que la pila %(stack)s escalara." + +#, python-format +msgid "MEAD %(mead_id)s could not be found" +msgstr "MEAD %(mead_id)s no ha sido encontrado" + +#, python-format +msgid "MEAD %(mead_id)s is still in use" +msgstr "MEAD %(mead_id)s sigue en uso" + +#, python-format +msgid "MEAD Forwarder %(mead)s not found in MEA Mapping %(mapping)s" +msgstr "MEAD Forwarder %(mead)s no encontrado en MEA Mapping %(mapping)s" + + +msgid "VXLAN Network unsupported." +msgstr "Red VXLAN no soportada." 
+ +#, python-format +msgid "" +"Validation of dictionary's keys failed.Expected keys: %(expected_keys)s " +"Provided keys: %(provided_keys)s" +msgstr "" +"Validación de claves en diccionario fallo: Claves esperadas: " +"%(expected_keys)s Claves proporcionadas: %(provided_keys)s" + +#, python-format +msgid "Validator '%s' does not exist." +msgstr "El validador '%s' no existe." + +#, python-format +msgid "" +"Value specified for mem_page_size is invalid:%(error_msg_details)s. The " +"valid values are 'small', 'large','any' or an integer value in MB" +msgstr "" +"Valor especificado para mem_page_size es invalido: %(error_msg_details)s. " +"Valores validos son 'small', 'large', 'any' o un valor entero en MB" + +msgid "Wait time (in seconds) between consecutive stack create/delete retries" +msgstr "" +"Tiempo de espera (en segundos) entre re intentos consecutivos de creación/" +"borrado de pila" + +msgid "" +"Where to store Apmec state files. This directory must be writable by the " +"agent." +msgstr "" +"Lugar para almacenar ficheros de estado de Apmec. El agente debe poder " +"escribir en este directorio." + +msgid "You are not authorized to complete this action." +msgstr "No está autorizado a completar esta acción." + +msgid "You must implement __call__" +msgstr "Debe implementar __call__" + +msgid "You must provide a revision or relative delta" +msgstr "Debe proporcionar una revisión o delta relativa" + +#, python-format +msgid "" +"can't load driver '%(new_driver)s' because driver '%(old_driver)s' is " +"already registered for driver '%(type)s'" +msgstr "" +"No se puede cargar driver '%(new_driver)s' porque el driver '%(old_driver)s' " +"continua registrado para driver '%(type)s'" + +msgid "check interval for monitor" +msgstr "Intervalo de comprobación para el monitor" + +msgid "" +"context must be of type KeystonePassword, KeystoneToken, RequestContext, or " +"Context." 
+msgstr "" +"contexto debe ser del tipo KeystonePassword, KeystoneToken, RequestContext, " +"o Context." + +#, python-format +msgid "creating MEA based on %(mead_id)s failed" +msgstr "Fallo en la creación de MEA basado en %(mead_id)s" + +#, python-format +msgid "" +"driver '%(new_driver)s' ignored because driver '%(old_driver)s' is already " +"registered for driver '%(type)s'" +msgstr "" +"Driver '%(new_driver)s' ignorado porque el driver '%(old_driver)s' continua " +"registrado para driver '%(type)s'" + +#, python-format +msgid "empty list is not allowed for service list. '%s'" +msgstr "Lista vacía no esta permitida para lista de servicio. '%s'" + +#, python-format +msgid "heat-translator failed: - %(error_msg_details)s" +msgstr "heat-translator fallo: - %(error_msg_details)s" + +#, python-format +msgid "invalid data format for service list: '%s'" +msgstr "Formato de datos inválidos para lista de servicio: '%s'" + +#, python-format +msgid "invalid service type %(service_type)s" +msgstr "Tipo de servicio invalido %(service_type)s" + +msgid "number of ICMP packets to send" +msgstr "Numero de paquetes ICMP a enviar" + +msgid "number of seconds to wait between packets" +msgstr "Numero de segundos a esperar entre paquetes" + +msgid "number of seconds to wait for a response" +msgstr "Numero de segundos a esperar para la repuesta" + +msgid "number of times to retry" +msgstr "Numero de veces para re intentar" + +msgid "password for alarm monitoring" +msgstr "Contraseña para motorización de alarmas" + +msgid "password to login openwrt" +msgstr "Contraseña para conectarse en openwrt" + +msgid "port number which drivers use to trigger" +msgstr "Numero de puerto que los driver usan para notificar" + +msgid "project name for alarm monitoring" +msgstr "Nombre de proyecto para motorización de alarmas" + +#, python-format +msgid "service type %(service_type_id)s could not be found" +msgstr "Tipo de servicio %(service_type_id)s no ha sido encontrado" + +msgid "service types are 
not specified" +msgstr "Tipos de servicio no especificados" + +#, python-format +msgid "tosca-parser failed: - %(error_msg_details)s" +msgstr "tosca-parser falló: - %(error_msg_details)s" + +msgid "user name to login openwrt" +msgstr "Nombre de usuario para conectarse en openwrt" + +msgid "user_data and/or user_data_format not provided" +msgstr "user_data y/o user_data_format no proporcionado" + +msgid "mea {mea_id} deletion is not completed. {stack_status}" +msgstr "Eliminado de mea {mea_id} no ha sido completado. {stack_status}" + +msgid "" +"when signal occurred within cool down window, no events generated from heat, " +"so ignore it" +msgstr "" +"Cuando señal ocurre junto a ventana de espera, no hay eventos generados de " +"Heat, ignóralos" diff --git a/apmec/manager.py b/apmec/manager.py new file mode 100644 index 0000000..904af2e --- /dev/null +++ b/apmec/manager.py @@ -0,0 +1,191 @@ +# Copyright 2011 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging +from oslo_service import periodic_task + +from apmec.common import utils + + +LOG = logging.getLogger(__name__) + + +class Manager(periodic_task.PeriodicTasks): + + # Set RPC API version to 1.0 by default. 
+ target = oslo_messaging.Target(version='1.0') + + def __init__(self, host=None): + if not host: + host = cfg.CONF.host + self.host = host + conf = getattr(self, "conf", cfg.CONF) + super(Manager, self).__init__(conf) + + def periodic_tasks(self, context, raise_on_error=False): + self.run_periodic_tasks(context, raise_on_error=raise_on_error) + + def init_host(self): + """Handle initialization if this is a standalone service. + + Child classes should override this method. + + """ + pass + + def after_start(self): + """Handler post initialization stuff. + + Child classes can override this method. + """ + pass + + +def validate_post_plugin_load(): + """Checks if the configuration variables are valid. + + If the configuration is invalid then the method will return an error + message. If all is OK then it will return None. + """ + message = None + return message + + +def validate_pre_plugin_load(): + """Checks if the configuration variables are valid. + + If the configuration is invalid then the method will return an error + message. If all is OK then it will return None. + """ + message = None + return message + + +class ApmecManager(object): + """Apmec's Manager class. + + Apmec's Manager class is responsible for parsing a config file and + instantiating the correct plugin that concretely implement + apmec_plugin_base class. + The caller should make sure that ApmecManager is a singleton. 
+ """ + _instance = None + + def __init__(self, options=None, config_file=None): + # If no options have been provided, create an empty dict + if not options: + options = {} + + msg = validate_pre_plugin_load() + if msg: + LOG.critical(msg) + raise Exception(msg) + + msg = validate_post_plugin_load() + if msg: + LOG.critical(msg) + raise Exception(msg) + + self.service_plugins = {} + self._load_service_plugins() + + @staticmethod + def load_class_for_provider(namespace, plugin_provider): + """Loads plugin using alias or class name + + Load class using stevedore alias or the class name + :param namespace: namespace where alias is defined + :param plugin_provider: plugin alias or class name + :returns: plugin that is loaded + :raises ImportError: if fails to load plugin + """ + + try: + return utils.load_class_by_alias_or_classname(namespace, + plugin_provider) + except ImportError: + raise ImportError(_("Plugin '%s' not found.") % plugin_provider) + + def _get_plugin_instance(self, namespace, plugin_provider): + plugin_class = self.load_class_for_provider(namespace, plugin_provider) + return plugin_class() + + def _load_service_plugins(self): + """Loads service plugins. + + Starts from the core plugin and checks if it supports + advanced services then loads classes provided in configuration. 
+ """ + plugin_providers = cfg.CONF.service_plugins + if 'commonservices' not in plugin_providers: + plugin_providers.append('commonservices') + LOG.debug("Loading service plugins: %s", plugin_providers) + for provider in plugin_providers: + if provider == '': + continue + LOG.info("Loading Plugin: %s", provider) + + plugin_inst = self._get_plugin_instance('apmec.service_plugins', + provider) + # only one implementation of svc_type allowed + # specifying more than one plugin + # for the same type is a fatal exception + if plugin_inst.get_plugin_type() in self.service_plugins: + raise ValueError(_("Multiple plugins for service " + "%s were configured"), + plugin_inst.get_plugin_type()) + + self.service_plugins[plugin_inst.get_plugin_type()] = plugin_inst + # # search for possible agent notifiers declared in service plugin + # # (needed by agent management extension) + # if (hasattr(self.plugin, 'agent_notifiers') and + # hasattr(plugin_inst, 'agent_notifiers')): + # self.plugin.agent_notifiers.update(plugin_inst.agent_notifiers) + + LOG.debug("Successfully loaded %(type)s plugin. 
" + "Description: %(desc)s", + {"type": plugin_inst.get_plugin_type(), + "desc": plugin_inst.get_plugin_description()}) + + @classmethod + @utils.synchronized("manager") + def _create_instance(cls): + if cls._instance is None: + cls._instance = cls() + + @classmethod + def get_instance(cls): + # double checked locking + if cls._instance is None: + cls._create_instance() + return cls._instance + + @classmethod + def get_plugin(cls): + return cls.get_instance().plugin + + @classmethod + def get_service_plugins(cls): + return cls.get_instance().service_plugins + + @classmethod + def has_instance(cls): + return cls._instance is not None + + @classmethod + def clear_instance(cls): + cls._instance = None diff --git a/apmec/mem/__init__.py b/apmec/mem/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/mem/infra_drivers/__init__.py b/apmec/mem/infra_drivers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/mem/infra_drivers/abstract_driver.py b/apmec/mem/infra_drivers/abstract_driver.py new file mode 100644 index 0000000..13aadad --- /dev/null +++ b/apmec/mem/infra_drivers/abstract_driver.py @@ -0,0 +1,71 @@ +# Copyright 2013, 2014 Intel Corporation. +# All Rights Reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import abc

import six

from apmec.api import extensions


@six.add_metaclass(abc.ABCMeta)
class DeviceAbstractDriver(extensions.PluginInterface):
    """Abstract interface that every hosting-MEA infra driver implements."""

    @abc.abstractmethod
    def get_type(self):
        """Return one of the predefined types of the hosting mea drivers."""

    @abc.abstractmethod
    def get_name(self):
        """Return a symbolic name for the service VM plugin."""

    @abc.abstractmethod
    def get_description(self):
        """Return a human-readable description of the driver."""

    @abc.abstractmethod
    def create(self, plugin, context, mea):
        """Create mea and return its id."""

    @abc.abstractmethod
    def create_wait(self, plugin, context, mea_dict, mea_id):
        """Wait for mea creation to complete."""

    @abc.abstractmethod
    def update(self, plugin, context, mea_id, mea_dict, mea):
        """Update a mea.

        ``mea_dict`` is the old mea dict to be updated; ``mea`` carries the
        requested update.
        """

    @abc.abstractmethod
    def update_wait(self, plugin, context, mea_id):
        """Wait for mea update to complete."""

    @abc.abstractmethod
    def delete(self, plugin, context, mea_id):
        """Delete a mea."""

    @abc.abstractmethod
    def delete_wait(self, plugin, context, mea_id):
        """Wait for mea deletion to complete."""

    @abc.abstractmethod
    def get_resource_info(self, plugin, context, mea_info, auth_attr,
                          region_name=None):
        """Fetches optional details of a MEA."""
# TODO(yamahata): once unittests are implemented, move this there
from oslo_log import log as logging
from oslo_utils import uuidutils

from apmec.common import log
from apmec.mem.infra_drivers import abstract_driver


LOG = logging.getLogger(__name__)


class DeviceNoop(abstract_driver.DeviceAbstractDriver):
    """Noop driver of hosting mea for tests."""

    def __init__(self):
        super(DeviceNoop, self).__init__()
        # Ids handed out by create(); update()/delete() validate against it.
        self._instances = set()

    def get_type(self):
        return 'noop'

    def get_name(self):
        return 'noop'

    def get_description(self):
        return 'Apmec infra noop driver'

    @log.log
    def create(self, **kwargs):
        """Pretend to create an instance and hand back a fresh uuid."""
        new_id = uuidutils.generate_uuid()
        self._instances.add(new_id)
        return new_id

    @log.log
    def create_wait(self, plugin, context, mea_dict, mea_id):
        """Nothing to wait for in the noop driver."""

    @log.log
    def update(self, plugin, context, mea_id, mea_dict, mea):
        """Reject updates for ids this driver never handed out."""
        if mea_id in self._instances:
            return
        LOG.debug('not found')
        raise ValueError('No instance %s' % mea_id)

    @log.log
    def update_wait(self, plugin, context, mea_id):
        """Nothing to wait for in the noop driver."""

    @log.log
    def delete(self, plugin, context, mea_id):
        """Forget a previously created id (KeyError if unknown)."""
        self._instances.remove(mea_id)

    @log.log
    def delete_wait(self, plugin, context, mea_id):
        """Nothing to wait for in the noop driver."""

    def get_resource_info(self, plugin, context, mea_info, auth_attr,
                          region_name=None):
        return {'noop': {'id': uuidutils.generate_uuid(), 'type': 'noop'}}
# apmec/mem/infra_drivers/openstack/heat_client.py
import sys

from heatclient import exc as heatException
from oslo_log import log as logging

from apmec.common import clients
from apmec.extensions import mem

LOG = logging.getLogger(__name__)


class HeatClient(object):
    """Thin convenience wrapper over the Heat client.

    Exposes only the stack and resource operations the OpenStack infra
    driver needs, translating Heat errors into apmec exceptions.
    """

    def __init__(self, auth_attr, region_name=None):
        # context, password are unused
        self.heat = clients.OpenstackClients(auth_attr, region_name).heat
        self.stacks = self.heat.stacks
        self.resource_types = self.heat.resource_types
        self.resources = self.heat.resources

    def create(self, fields):
        """Create a stack with a fixed timeout and rollback disabled."""
        request = fields.copy()
        request.update({
            'timeout_mins': 10,
            'disable_rollback': True})
        if 'password' in request.get('template', {}):
            request['password'] = request['template']['password']

        try:
            return self.stacks.create(**request)
        except heatException.HTTPException:
            type_, value, tb = sys.exc_info()
            raise mem.HeatClientException(msg=value)

    def delete(self, stack_id):
        """Delete the stack; a missing stack is logged, not raised."""
        try:
            self.stacks.delete(stack_id)
        except heatException.HTTPNotFound:
            LOG.warning("Stack %(stack)s created by service chain driver is "
                        "not found at cleanup", {'stack': stack_id})

    def get(self, stack_id):
        return self.stacks.get(stack_id)

    def resource_attr_support(self, resource_name, property_name):
        # True when the deployed Heat exposes the attribute on that type.
        resource = self.resource_types.get(resource_name)
        return property_name in resource['attributes']

    def resource_get_list(self, stack_id, nested_depth=0):
        return self.heat.resources.list(stack_id,
                                        nested_depth=nested_depth)

    def resource_signal(self, stack_id, rsc_name):
        return self.heat.resources.signal(stack_id, rsc_name)

    def resource_get(self, stack_id, rsc_name):
        return self.heat.resources.get(stack_id, rsc_name)

    def resource_event_list(self, stack_id, rsc_name, **kwargs):
        return self.heat.events.list(stack_id, rsc_name, **kwargs)

    def resource_metadata(self, stack_id, rsc_name):
        return self.heat.resources.metadata(stack_id, rsc_name)
# apmec/mem/infra_drivers/openstack/openstack.py
import time

from heatclient import exc as heatException
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import yaml

from apmec.common import log
from apmec.common import utils
from apmec.extensions import mem
from apmec.mem.infra_drivers import abstract_driver
from apmec.mem.infra_drivers.openstack import heat_client as hc
from apmec.mem.infra_drivers.openstack import translate_template
from apmec.mem.infra_drivers import scale_driver


LOG = logging.getLogger(__name__)
CONF = cfg.CONF

OPTS = [
    cfg.IntOpt('stack_retries',
               default=60,
               help=_("Number of attempts to retry for stack"
                      " creation/deletion")),
    cfg.IntOpt('stack_retry_wait',
               default=10,
               help=_("Wait time (in seconds) between consecutive stack"
                      " create/delete retries")),
]

CONF.register_opts(OPTS, group='openstack_vim')


def config_opts():
    """Expose this module's config options for oslo.config discovery."""
    return [('openstack_vim', OPTS)]


# Global map of individual resource type and
# incompatible properties, alternate properties pair for
# upgrade/downgrade across all Heat template versions (starting Kilo)
#
# Maintains a dictionary of {"resource type": {dict of "incompatible
# property": "alternate_prop"}}

HEAT_VERSION_INCOMPATIBILITY_MAP = {'OS::Neutron::Port': {
    'port_security_enabled': 'value_specs', }, }

HEAT_TEMPLATE_BASE = """
heat_template_version: 2013-05-23
"""

OUTPUT_PREFIX = 'mgmt_ip-'
ALARMING_POLICY = 'tosca.policies.apmec.Alarming'
SCALING_POLICY = 'tosca.policies.apmec.Scaling'


def get_scaling_policy_name(action, policy_name):
    """Return the Heat resource name of the scaling policy for *action*."""
    return '%s_scale_%s' % (policy_name, action)


class OpenStack(abstract_driver.DeviceAbstractDriver,
                scale_driver.MeaScaleAbstractDriver):
    """Openstack infra driver for hosting meas"""

    def __init__(self):
        super(OpenStack, self).__init__()
        self.STACK_RETRIES = cfg.CONF.openstack_vim.stack_retries
        self.STACK_RETRY_WAIT = cfg.CONF.openstack_vim.stack_retry_wait

    def get_type(self):
        return 'openstack'

    def get_name(self):
        return 'openstack'

    def get_description(self):
        return 'Openstack infra driver'

    @log.log
    def create(self, plugin, context, mea, auth_attr):
        """Translate the MEA's TOSCA template to HOT and create a stack.

        Returns the id of the newly created Heat stack.
        """
        LOG.debug('mea %s', mea)

        region_name = mea.get('placement_attr', {}).get('region_name', None)
        heatclient = hc.HeatClient(auth_attr, region_name)

        tth = translate_template.TOSCAToHOT(mea, heatclient)
        tth.generate_hot()
        stack = self._create_stack(heatclient, tth.mea, tth.fields)
        return stack['stack']['id']

    @log.log
    def _create_stack(self, heatclient, mea, fields):
        # Derive a stack name from module/class/mea-id unless one was given;
        # respawn attempts are suffixed so the names stay unique.
        if 'stack_name' not in fields:
            name = __name__ + '_' + self.__class__.__name__ + '-' + mea['id']
            if mea['attributes'].get('failure_count'):
                name += ('-RESPAWN-%s') % str(mea['attributes'][
                    'failure_count'])
            fields['stack_name'] = name

        # service context is ignored
        LOG.debug('service_context: %s', mea.get('service_context', []))
        LOG.debug('fields: %s', fields)
        LOG.debug('template: %s', fields['template'])
        stack = heatclient.create(fields)

        return stack

    @log.log
    def create_wait(self, plugin, context, mea_dict, mea_id, auth_attr):
        """Poll Heat until stack creation ends, then record mgmt IPs.

        Raises MEACreateWaitFailed on timeout or on stack failure.
        """
        region_name = mea_dict.get('placement_attr', {}).get(
            'region_name', None)
        heatclient = hc.HeatClient(auth_attr, region_name)

        stack = heatclient.get(mea_id)
        status = stack.stack_status
        stack_retries = self.STACK_RETRIES
        error_reason = None
        while status == 'CREATE_IN_PROGRESS' and stack_retries > 0:
            time.sleep(self.STACK_RETRY_WAIT)
            try:
                stack = heatclient.get(mea_id)
            except Exception:
                LOG.warning("MEA Instance setup may not have "
                            "happened because Heat API request failed "
                            "while waiting for the stack %(stack)s to be "
                            "created", {'stack': mea_id})
                # continue to avoid temporary connection error to target
                # VIM
            status = stack.stack_status
            LOG.debug('status: %s', status)
            stack_retries = stack_retries - 1

        LOG.debug('stack status: %(stack)s %(status)s',
                  {'stack': str(stack), 'status': status})
        if stack_retries == 0 and status != 'CREATE_COMPLETE':
            error_reason = _("Resource creation is not completed within"
                             " {wait} seconds as creation of stack {stack}"
                             " is not completed").format(
                                 wait=(self.STACK_RETRIES *
                                       self.STACK_RETRY_WAIT),
                                 stack=mea_id)
            LOG.warning("MEA Creation failed: %(reason)s",
                        {'reason': error_reason})
            raise mem.MEACreateWaitFailed(reason=error_reason)

        elif stack_retries != 0 and status != 'CREATE_COMPLETE':
            error_reason = stack.stack_status_reason
            raise mem.MEACreateWaitFailed(reason=error_reason)

        def _find_mgmt_ips(outputs):
            LOG.debug('outputs %s', outputs)
            mgmt_ips = dict((output['output_key'][len(OUTPUT_PREFIX):],
                             output['output_value'])
                            for output in outputs
                            if output.get('output_key',
                                          '').startswith(OUTPUT_PREFIX))
            return mgmt_ips

        # scaling enabled
        if mea_dict['attributes'].get('scaling_group_names'):
            group_names = jsonutils.loads(
                mea_dict['attributes'].get('scaling_group_names')).values()
            mgmt_ips = self._find_mgmt_ips_from_groups(heatclient,
                                                       mea_id,
                                                       group_names)
        else:
            mgmt_ips = _find_mgmt_ips(stack.outputs)

        if mgmt_ips:
            mea_dict['mgmt_url'] = jsonutils.dumps(mgmt_ips)

    @log.log
    def update(self, plugin, context, mea_id, mea_dict, mea,
               auth_attr):
        """Deep-merge the update's 'config' attribute into the MEA's."""
        region_name = mea_dict.get('placement_attr', {}).get(
            'region_name', None)
        heatclient = hc.HeatClient(auth_attr, region_name)
        heatclient.get(mea_id)

        # update config attribute
        config_yaml = mea_dict.get('attributes', {}).get('config', '')
        update_yaml = mea['mea'].get('attributes', {}).get('config', '')
        LOG.debug('yaml orig %(orig)s update %(update)s',
                  {'orig': config_yaml, 'update': update_yaml})

        # If config_yaml is None, yaml.safe_load() will raise Attribute Error.
        # So set config_yaml to {}, if it is None.
        if not config_yaml:
            config_dict = {}
        else:
            config_dict = yaml.safe_load(config_yaml) or {}
        update_dict = yaml.safe_load(update_yaml)
        if not update_dict:
            return

        LOG.debug('dict orig %(orig)s update %(update)s',
                  {'orig': config_dict, 'update': update_dict})
        utils.deep_update(config_dict, update_dict)
        LOG.debug('dict new %(new)s update %(update)s',
                  {'new': config_dict, 'update': update_dict})
        new_yaml = yaml.safe_dump(config_dict)
        mea_dict.setdefault('attributes', {})['config'] = new_yaml

    @log.log
    def update_wait(self, plugin, context, mea_id, auth_attr,
                    region_name=None):
        # do nothing but checking if the stack exists at the moment
        heatclient = hc.HeatClient(auth_attr, region_name)
        heatclient.get(mea_id)

    @log.log
    def delete(self, plugin, context, mea_id, auth_attr, region_name=None):
        heatclient = hc.HeatClient(auth_attr, region_name)
        heatclient.delete(mea_id)

    @log.log
    def delete_wait(self, plugin, context, mea_id, auth_attr,
                    region_name=None):
        """Poll Heat until stack deletion ends.

        Raises MEADeleteWaitFailed on timeout or on stack failure.
        """
        heatclient = hc.HeatClient(auth_attr, region_name)

        stack = heatclient.get(mea_id)
        status = stack.stack_status
        error_reason = None
        stack_retries = self.STACK_RETRIES
        while (status == 'DELETE_IN_PROGRESS' and stack_retries > 0):
            time.sleep(self.STACK_RETRY_WAIT)
            try:
                stack = heatclient.get(mea_id)
            except heatException.HTTPNotFound:
                # Stack is gone: deletion succeeded.
                return
            except Exception:
                LOG.warning("MEA Instance cleanup may not have "
                            "happened because Heat API request failed "
                            "while waiting for the stack %(stack)s to be "
                            "deleted", {'stack': mea_id})
                # Just like create wait, ignore the exception to
                # avoid temporary connection error.
            status = stack.stack_status
            stack_retries = stack_retries - 1

        if stack_retries == 0 and status != 'DELETE_COMPLETE':
            error_reason = _("Resource cleanup for mea is"
                             " not completed within {wait} seconds as "
                             "deletion of Stack {stack} is "
                             "not completed").format(
                                 stack=mea_id,
                                 wait=(self.STACK_RETRIES *
                                       self.STACK_RETRY_WAIT))
            LOG.warning(error_reason)
            raise mem.MEADeleteWaitFailed(reason=error_reason)

        if stack_retries != 0 and status != 'DELETE_COMPLETE':
            error_reason = _("mea {mea_id} deletion is not completed. "
                             "{stack_status}").format(mea_id=mea_id,
                                                      stack_status=status)
            LOG.warning(error_reason)
            raise mem.MEADeleteWaitFailed(reason=error_reason)

    @classmethod
    def _find_mgmt_ips_from_groups(cls, heat_client, instance_id, group_names):
        """Collect mgmt IPs from every member of the given scale groups.

        Returns {vdu_name: [ip, ...]} gathered from resource attributes
        whose keys start with OUTPUT_PREFIX.
        """

        def _find_mgmt_ips(attributes):
            mgmt_ips = {}
            for k, v in attributes.items():
                if k.startswith(OUTPUT_PREFIX):
                    mgmt_ips[k.replace(OUTPUT_PREFIX, '')] = v

            return mgmt_ips

        mgmt_ips = {}
        for group_name in group_names:
            # Get scale group
            grp = heat_client.resource_get(instance_id, group_name)
            for rsc in heat_client.resource_get_list(grp.physical_resource_id):
                # Get list of resources in scale group
                scale_rsc = heat_client.resource_get(grp.physical_resource_id,
                                                     rsc.resource_name)

                # findout the mgmt ips from attributes
                for k, v in _find_mgmt_ips(scale_rsc.attributes).items():
                    if k not in mgmt_ips:
                        mgmt_ips[k] = [v]
                    else:
                        mgmt_ips[k].append(v)

        return mgmt_ips

    @log.log
    def scale(self, context, plugin, auth_attr, policy, region_name):
        """Signal the scaling policy resource; return the last event id."""
        heatclient = hc.HeatClient(auth_attr, region_name)
        policy_rsc = get_scaling_policy_name(policy_name=policy['name'],
                                             action=policy['action'])
        events = heatclient.resource_event_list(policy['instance_id'],
                                                policy_rsc, limit=1,
                                                sort_dir='desc',
                                                sort_keys='event_time')

        heatclient.resource_signal(policy['instance_id'], policy_rsc)
        return events[0].id

    @log.log
    def scale_wait(self, context, plugin, auth_attr, policy, region_name,
                   last_event_id):
        """Wait for the scaling signal to complete; return new mgmt IPs."""
        heatclient = hc.HeatClient(auth_attr, region_name)

        # TODO(kanagaraj-manickam) make wait logic into separate utility method
        # and make use of it here and other actions like create and delete
        stack_retries = self.STACK_RETRIES
        while (True):
            try:
                time.sleep(self.STACK_RETRY_WAIT)
                stack_id = policy['instance_id']
                policy_name = get_scaling_policy_name(
                    policy_name=policy['name'], action=policy['action'])
                events = heatclient.resource_event_list(stack_id, policy_name,
                                                        limit=1,
                                                        sort_dir='desc',
                                                        sort_keys='event_time')

                if events[0].id != last_event_id:
                    if events[0].resource_status == 'SIGNAL_COMPLETE':
                        break
            except Exception as e:
                error_reason = _("MEA scaling failed for stack %(stack)s with "
                                 "error %(error)s") % {
                                     'stack': policy['instance_id'],
                                     'error': str(e)}
                LOG.warning(error_reason)
                raise mem.MEAScaleWaitFailed(mea_id=policy['mea']['id'],
                                             reason=error_reason)

            if stack_retries == 0:
                metadata = heatclient.resource_metadata(stack_id, policy_name)
                if not metadata['scaling_in_progress']:
                    error_reason = _('when signal occurred within cool down '
                                     'window, no events generated from heat, '
                                     'so ignore it')
                    LOG.warning(error_reason)
                    break
                # BUG FIX: the original used "%{wait}s", which is not valid
                # %-formatting (raises "unsupported format character '{'")
                # the moment this timeout branch runs; "%(wait)s" is correct.
                error_reason = _(
                    "MEA scaling failed to complete within %(wait)s seconds "
                    "while waiting for the stack %(stack)s to be "
                    "scaled.") % {'stack': stack_id,
                                  'wait': self.STACK_RETRIES *
                                  self.STACK_RETRY_WAIT}
                LOG.warning(error_reason)
                raise mem.MEAScaleWaitFailed(mea_id=policy['mea']['id'],
                                             reason=error_reason)
            stack_retries -= 1

        def _fill_scaling_group_name():
            mea = policy['mea']
            scaling_group_names = mea['attributes']['scaling_group_names']
            policy['group_name'] = jsonutils.loads(
                scaling_group_names)[policy['name']]

        _fill_scaling_group_name()

        mgmt_ips = self._find_mgmt_ips_from_groups(heatclient,
                                                   policy['instance_id'],
                                                   [policy['group_name']])

        return jsonutils.dumps(mgmt_ips)

    @log.log
    def get_resource_info(self, plugin, context, mea_info, auth_attr,
                          region_name=None):
        """Return {resource_name: {id, type}} for the MEA's stack."""
        instance_id = mea_info['instance_id']
        heatclient = hc.HeatClient(auth_attr, region_name)
        try:
            # nested_depth=2 is used to get VDU resources
            # in case of nested template
            resources_ids =\
                heatclient.resource_get_list(instance_id, nested_depth=2)
            details_dict = {resource.resource_name:
                            {"id": resource.physical_resource_id,
                             "type": resource.resource_type}
                            for resource in resources_ids}
            return details_dict
        # Raise exception when Heat API service is not available
        except Exception:
            raise mem.InfraDriverUnreachable(service="Heat API service")
# apmec/mem/infra_drivers/openstack/translate_template.py
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from toscaparser import tosca_template
from toscaparser.utils import yamlparser
from translator.hot import tosca_translator
import yaml

from apmec.common import log
from apmec.extensions import common_services as cs
from apmec.extensions import mem
from apmec.catalogs.tosca import utils as toscautils


LOG = logging.getLogger(__name__)
CONF = cfg.CONF

OPTS = [
    cfg.DictOpt('flavor_extra_specs',
                default={},
                help=_("Flavor Extra Specs")),
]

CONF.register_opts(OPTS, group='openstack_vim')

HEAT_VERSION_INCOMPATIBILITY_MAP = {'OS::Neutron::Port': {
    'port_security_enabled': 'value_specs', }, }

HEAT_TEMPLATE_BASE = """
heat_template_version: 2013-05-23
"""

ALARMING_POLICY = 'tosca.policies.apmec.Alarming'
SCALING_POLICY = 'tosca.policies.apmec.Scaling'


class TOSCAToHOT(object):
    """Convert TOSCA template to HOT template."""

    def __init__(self, mea, heatclient):
        self.mea = mea
        self.heatclient = heatclient
        self.attributes = {}
        self.mead_yaml = None
        self.unsupported_props = {}
        self.heat_template_yaml = None
        self.monitoring_dict = None
        self.nested_resources = dict()
        self.fields = None
        self.STACK_FLAVOR_EXTRA = cfg.CONF.openstack_vim.flavor_extra_specs

    @log.log
    def generate_hot(self):
        """Drive the full MEAD -> HOT translation pipeline."""

        self._get_mead()
        dev_attrs = self._update_fields()

        mead_dict = yamlparser.simple_ordered_parse(self.mead_yaml)
        LOG.debug('mead_dict %s', mead_dict)
        self._get_unsupported_resource_props(self.heatclient)

        self._generate_hot_from_tosca(mead_dict, dev_attrs)
        self.fields['template'] = self.heat_template_yaml
        if not self.mea['attributes'].get('heat_template'):
            self.mea['attributes']['heat_template'] = self.fields['template']
        if self.monitoring_dict:
            self.mea['attributes']['monitoring_policy'] = jsonutils.dumps(
                self.monitoring_dict)

    @log.log
    def _get_mead(self):
        """Pop the MEAD yaml out of the mea's mead attributes."""
        self.attributes = self.mea['mead']['attributes'].copy()
        self.mead_yaml = self.attributes.pop('mead', None)
        if self.mead_yaml is None:
            # TODO(kangaraj-manickam) raise user level exception
            LOG.info("MEAD is not provided, so no mea is created !!")
            return
        LOG.debug('mead_yaml %s', self.mead_yaml)

    @log.log
    def _update_fields(self):
        """Build heat stack fields from MEAD and per-mea attributes.

        Per-mea (dev_attrs) values override MEAD-level values; returns
        the remaining dev_attrs for later parameter processing.
        """
        attributes = self.attributes
        fields = dict((key, attributes.pop(key)) for key
                      in ('stack_name', 'template_url', 'template')
                      if key in attributes)
        for key in ('files', 'parameters'):
            if key in attributes:
                fields[key] = jsonutils.loads(attributes.pop(key))

        # overwrite parameters with given dev_attrs for mea creation
        dev_attrs = self.mea['attributes'].copy()
        fields.update(dict((key, dev_attrs.pop(key)) for key
                      in ('stack_name', 'template_url', 'template')
                      if key in dev_attrs))
        for key in ('files', 'parameters'):
            if key in dev_attrs:
                fields.setdefault(key, {}).update(
                    jsonutils.loads(dev_attrs.pop(key)))

        self.attributes = attributes
        self.fields = fields
        return dev_attrs

    @log.log
    def _update_params(self, original, paramvalues, match=False):
        """Recursively substitute TOSCA get_input references with values.

        Raises InputValuesMissing when a referenced input has no value.
        """
        for key, value in (original).items():
            if not isinstance(value, dict) or 'get_input' not in str(value):
                pass
            elif isinstance(value, dict):
                if not match:
                    if key in paramvalues and 'param' in paramvalues[key]:
                        self._update_params(value, paramvalues[key]['param'],
                                            True)
                    elif key in paramvalues:
                        self._update_params(value, paramvalues[key], False)
                    else:
                        LOG.debug('Key missing Value: %s', key)
                        raise cs.InputValuesMissing(key=key)
                elif 'get_input' in value:
                    if value['get_input'] in paramvalues:
                        original[key] = paramvalues[value['get_input']]
                    else:
                        LOG.debug('Key missing Value: %s', key)
                        raise cs.InputValuesMissing(key=key)
                else:
                    self._update_params(value, paramvalues, True)

    @log.log
    def _process_parameterized_input(self, dev_attrs, mead_dict):
        """Apply user-supplied param_values to a parameterized MEAD."""
        param_vattrs_yaml = dev_attrs.pop('param_values', None)
        if param_vattrs_yaml:
            try:
                param_vattrs_dict = yaml.safe_load(param_vattrs_yaml)
                # BUG FIX: the original passed param_vattrs_dict as a lazy
                # format argument with no placeholder in the message,
                # which makes the logging module raise a formatting error.
                LOG.debug('param_vattrs_yaml %s', param_vattrs_dict)
            except Exception as e:
                LOG.debug("Not Well Formed: %s", str(e))
                raise mem.ParamYAMLNotWellFormed(
                    error_msg_details=str(e))
            else:
                self._update_params(mead_dict, param_vattrs_dict)
        else:
            raise cs.ParamYAMLInputMissing()

    @log.log
    def _process_vdu_network_interfaces(self, vdu_id, vdu_dict, properties,
                                        template_dict):
        """Turn each VDU network_interface into a Neutron port resource."""

        networks_list = []
        properties['networks'] = networks_list
        for network_param in vdu_dict['network_interfaces'].values():
            port = None
            if 'addresses' in network_param:
                ip_list = network_param.pop('addresses', [])
                if not isinstance(ip_list, list):
                    raise mem.IPAddrInvalidInput()
                mgmt_flag = network_param.pop('management', False)
                port, template_dict =\
                    self._handle_port_creation(vdu_id, network_param,
                                               template_dict,
                                               ip_list, mgmt_flag)
            # When 'addresses' was present, 'management' has already been
            # popped above, so this second check only fires for interfaces
            # without fixed addresses.
            if network_param.pop('management', False):
                port, template_dict = self._handle_port_creation(vdu_id,
                                                                 network_param,
                                                                 template_dict,
                                                                 [], True)
            if port is not None:
                network_param = {
                    'port': {'get_resource': port}
                }
            networks_list.append(dict(network_param))
        return vdu_dict, template_dict

    @log.log
    def _make_port_dict(self):
        """Build a bare OS::Neutron::Port resource with port security off.

        Older Heat versions only accept port_security_enabled through
        value_specs; self.unsupported_props tells us which form to use.
        """
        port_dict = {'type': 'OS::Neutron::Port'}
        if self.unsupported_props:
            port_dict['properties'] = {
                'value_specs': {
                    'port_security_enabled': False
                }
            }
        else:
            port_dict['properties'] = {
                'port_security_enabled': False
            }
        port_dict['properties'].setdefault('fixed_ips', [])
        return port_dict

    @log.log
    def _make_mgmt_outputs_dict(self, vdu_id, port, template_dict):
        """Expose the port's first fixed IP as a mgmt_ip-<vdu> output."""
        mgmt_ip = 'mgmt_ip-%s' % vdu_id
        outputs_dict = template_dict['outputs']
        outputs_dict[mgmt_ip] = {
            'description': 'management ip address',
            'value': {
                'get_attr': [port, 'fixed_ips', 0, 'ip_address']
            }
        }
        template_dict['outputs'] = outputs_dict
        return template_dict

    @log.log
    def _handle_port_creation(self, vdu_id, network_param,
                              template_dict, ip_list=None,
                              mgmt_flag=False):
        """Add a port resource (and mgmt output if flagged) to the template."""
        ip_list = ip_list or []
        port = '%s-%s-port' % (vdu_id, network_param['network'])
        port_dict = self._make_port_dict()
        if mgmt_flag:
            template_dict = self._make_mgmt_outputs_dict(vdu_id, port,
                                                         template_dict)
        for ip in ip_list:
            port_dict['properties']['fixed_ips'].append({"ip_address": ip})
        port_dict['properties'].update(network_param)
        template_dict['resources'][port] = port_dict
        return port, template_dict

    @log.log
    def _get_unsupported_resource_props(self, heat_client):
        """Probe Heat for properties unsupported by this deployment."""
        unsupported_resource_props = {}

        for res, prop_dict in (HEAT_VERSION_INCOMPATIBILITY_MAP).items():
            unsupported_props = {}
            for prop, val in (prop_dict).items():
                if not heat_client.resource_attr_support(res, prop):
                    unsupported_props.update(prop_dict)
            if unsupported_props:
                unsupported_resource_props[res] = unsupported_props
        self.unsupported_props = unsupported_resource_props

    @log.log
    def _generate_hot_from_tosca(self, mead_dict, dev_attrs):
        """Parse the MEAD with tosca-parser and translate it to HOT."""
        parsed_params = {}
        if 'param_values' in dev_attrs and dev_attrs['param_values'] != "":
            try:
                parsed_params = yaml.safe_load(dev_attrs['param_values'])
            except Exception as e:
                LOG.debug("Params not Well Formed: %s", str(e))
                raise mem.ParamYAMLNotWellFormed(error_msg_details=str(e))

        block_storage_details = toscautils.get_block_storage_details(mead_dict)
        toscautils.updateimports(mead_dict)
        if 'substitution_mappings' in str(mead_dict):
            toscautils.check_for_substitution_mappings(mead_dict,
                                                       parsed_params)

        try:
            tosca = tosca_template.ToscaTemplate(parsed_params=parsed_params,
                                                 a_file=False,
                                                 yaml_dict_tpl=mead_dict)

        except Exception as e:
            LOG.debug("tosca-parser error: %s", str(e))
            raise mem.ToscaParserFailed(error_msg_details=str(e))

        metadata = toscautils.get_vdu_metadata(tosca)
        alarm_resources =\
            toscautils.pre_process_alarm_resources(self.mea, tosca, metadata)
        monitoring_dict = toscautils.get_vdu_monitoring(tosca)
        mgmt_ports = toscautils.get_mgmt_ports(tosca)
        nested_resource_name = toscautils.get_nested_resources_name(tosca)
        res_tpl = toscautils.get_resources_dict(tosca,
                                                self.STACK_FLAVOR_EXTRA)
        toscautils.post_process_template(tosca)
        scaling_policy_names = toscautils.get_scaling_policy(tosca)
        try:
            translator = tosca_translator.TOSCATranslator(tosca,
                                                          parsed_params)
            heat_template_yaml = translator.translate()
            if nested_resource_name:
                sub_heat_template_yaml =\
                    translator.translate_to_yaml_files_dict(
                        nested_resource_name, True)
                nested_resource_yaml =\
                    sub_heat_template_yaml[nested_resource_name]
                self.nested_resources[nested_resource_name] =\
                    nested_resource_yaml

        except Exception as e:
            LOG.debug("heat-translator error: %s", str(e))
            raise mem.HeatTranslatorFailed(error_msg_details=str(e))

        if self.nested_resources:
            nested_tpl = toscautils.update_nested_scaling_resources(
                self.nested_resources, mgmt_ports, metadata,
                res_tpl, self.unsupported_props)
            self.fields['files'] = nested_tpl
            self.mea['attributes'][nested_resource_name] =\
                nested_tpl[nested_resource_name]
            mgmt_ports.clear()

        if scaling_policy_names:
            scaling_group_dict = toscautils.get_scaling_group_dict(
                heat_template_yaml, scaling_policy_names)
            self.mea['attributes']['scaling_group_names'] =\
                jsonutils.dumps(scaling_group_dict)

        heat_template_yaml = toscautils.post_process_heat_template(
            heat_template_yaml, mgmt_ports, metadata, alarm_resources,
            res_tpl, block_storage_details, self.unsupported_props)

        self.heat_template_yaml = heat_template_yaml
        self.monitoring_dict = monitoring_dict
        self.metadata = metadata

    @log.log
    def represent_odict(self, dump, tag, mapping, flow_style=None):
        """yaml representer that keeps an OrderedDict's key order."""
        value = []
        node = yaml.MappingNode(tag, value, flow_style=flow_style)
        if dump.alias_key is not None:
            dump.represented_objects[dump.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            mapping = mapping.items()
        for item_key, item_value in mapping:
            node_key = dump.represent_data(item_key)
            node_value = dump.represent_data(item_value)
            if not (isinstance(node_key, yaml.ScalarNode)
                    and not node_key.style):
                best_style = False
            if not (isinstance(node_value, yaml.ScalarNode)
                    and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if dump.default_flow_style is not None:
                node.flow_style = dump.default_flow_style
            else:
                node.flow_style = best_style
        return node


# ---- apmec/mem/infra_drivers/scale_driver.py ----

import abc

import six

from apmec.api import extensions


@six.add_metaclass(abc.ABCMeta)
class MeaScaleAbstractDriver(extensions.PluginInterface):
    """Interface every infra driver supporting MEA scaling implements."""

    @abc.abstractmethod
    def scale(self,
              context,
              plugin,
              auth_attr,
              policy,
              region_name):
        pass

    # NOTE(review): the OpenStack implementation's scale_wait additionally
    # takes last_event_id; abstractmethod signatures are not enforced by
    # Python, but the mismatch should be reconciled — TODO confirm intended
    # signature with callers.
    @abc.abstractmethod
    def scale_wait(self,
                   context,
                   plugin,
                   auth_attr,
                   policy,
                   region_name):
        pass
# apmec/mem/keystone.py
import os

from cryptography import fernet
from keystoneauth1 import exceptions
from keystoneauth1 import identity
from keystoneauth1 import session
from keystoneclient import client
from oslo_config import cfg
from oslo_log import log as logging


LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class Keystone(object):
    """Keystone module for OpenStack VIM

    Handles identity operations for a given OpenStack
    instance such as version, session and client
    """

    def get_version(self, base_url=None):
        # Connection failures are deliberately re-raised to the caller.
        try:
            ks_client = client.Client(auth_url=base_url)
        except exceptions.ConnectionError:
            raise
        return ks_client.version

    def get_session(self, auth_plugin):
        return session.Session(auth=auth_plugin)

    def get_endpoint(self, ses, service_type, region_name=None):
        return ses.get_endpoint(service_type, region_name)

    def initialize_client(self, version, **kwargs):
        """Build a keystone client for v2.0 or v3 from auth kwargs."""
        # v2 and v3 differ both in client module and identity plugin family.
        if version == 'v2.0':
            from keystoneclient.v2_0 import client
            plugin_family = identity.v2
        else:
            from keystoneclient.v3 import client
            plugin_family = identity.v3
        # Token auth wins when a token was supplied; otherwise password auth.
        if 'token' in kwargs:
            auth_plugin = plugin_family.Token(**kwargs)
        else:
            auth_plugin = plugin_family.Password(**kwargs)
        ses = self.get_session(auth_plugin=auth_plugin)
        return client.Client(session=ses)

    @staticmethod
    def create_key_dir(path):
        # Create the fernet key repository with owner-only permissions.
        if not os.access(path, os.F_OK):
            LOG.info('[fernet_tokens] key_repository does not appear to '
                     'exist; attempting to create it')
            try:
                os.makedirs(path, 0o700)
            except OSError:
                LOG.error(
                    'Failed to create [fernet_tokens] key_repository: either'
                    'it already exists or you don\'t have sufficient'
                    'permissions to create it')

    def create_fernet_key(self):
        key = fernet.Fernet.generate_key()
        return key, fernet.Fernet(key)
# apmec/mem/mgmt_drivers/abstract_driver.py
import abc

import six

from apmec.api import extensions


@six.add_metaclass(abc.ABCMeta)
class DeviceMGMTAbstractDriver(extensions.PluginInterface):
    """Base interface for MEA management drivers.

    Concrete drivers must identify themselves (type/name/description)
    and implement mgmt_url/mgmt_call; the lifecycle hooks are optional
    no-ops they may override.
    """

    @abc.abstractmethod
    def get_type(self):
        """Return one of predefined type of the hosting mea drivers."""
        pass

    @abc.abstractmethod
    def get_name(self):
        """Return a symbolic name for the service VM plugin."""
        pass

    @abc.abstractmethod
    def get_description(self):
        pass

    # Optional lifecycle hooks — default to doing nothing.
    def mgmt_create_pre(self, plugin, context, mea):
        pass

    def mgmt_create_post(self, plugin, context, mea):
        pass

    def mgmt_update_pre(self, plugin, context, mea):
        pass

    def mgmt_update_post(self, plugin, context, mea):
        pass

    def mgmt_delete_pre(self, plugin, context, mea):
        pass

    def mgmt_delete_post(self, plugin, context, mea):
        pass

    def mgmt_get_config(self, plugin, context, mea):
        """Get a dict of objects.

        Returns dict of file-like objects which will be passed to hosting
        mea.
        It depends on drivers how to use it.
        for nova case, it can be used for meta data, file injection or
        config drive
        i.e.
        metadata case: nova --meta <key>=<value>
        file injection case: nova --file <dst-path>:<src-path>
        config drive case: nova --config-drive=true --file \
                           <dst-path>:<src-path>
        """
        return {}

    @abc.abstractmethod
    def mgmt_url(self, plugin, context, mea):
        pass

    @abc.abstractmethod
    def mgmt_call(self, plugin, context, mea, kwargs):
        pass
# Keys of the kwargs dict handed to DeviceMGMTAbstractDriver.mgmt_call().
KEY_ACTION = 'action'    # which lifecycle action triggered the call
KEY_KWARGS = 'kwargs'    # action-specific payload

# ACTION type: allowed values for the KEY_ACTION entry.
ACTION_CREATE_MEA = 'create_mea'
ACTION_UPDATE_MEA = 'update_mea'
ACTION_DELETE_MEA = 'delete_mea'
from oslo_log import log as logging

from apmec.mem.mgmt_drivers import abstract_driver


LOG = logging.getLogger(__name__)


class DeviceMgmtNoop(abstract_driver.DeviceMGMTAbstractDriver):
    """Management driver that does nothing.

    Useful as a placeholder for MEAs that need no post-boot configuration:
    every hook logs (at debug level) and returns a fixed value.
    """

    def get_type(self):
        """Driver type identifier."""
        return 'noop'

    def get_name(self):
        """Symbolic driver name."""
        return 'noop'

    def get_description(self):
        """Human-readable driver description."""
        return 'Apmec MEAMgmt Noop Driver'

    def mgmt_url(self, plugin, context, mea):
        """Return a placeholder management URL."""
        LOG.debug('mgmt_url %s', mea)
        return 'noop-mgmt-url'

    def mgmt_call(self, plugin, context, mea, kwargs):
        """Log the requested action and do nothing."""
        call_info = {'mea': mea, 'kwargs': kwargs}
        LOG.debug('mgmt_call %(mea)s %(kwargs)s', call_info)
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import yaml

from apmec.common import cmd_executer
from apmec.common import exceptions
from apmec.common import log
from apmec.mem.mgmt_drivers import abstract_driver
from apmec.mem.mgmt_drivers import constants as mgmt_constants


LOG = logging.getLogger(__name__)
# SSH credentials used to log into the OpenWRT instance; registered under
# the [openwrt] config group.
OPTS = [
    cfg.StrOpt('user', default='root', help=_('user name to login openwrt')),
    cfg.StrOpt('password', default='', help=_('password to login openwrt')),
]
cfg.CONF.register_opts(OPTS, 'openwrt')


def config_opts():
    """Return this module's config options for option discovery."""
    return [('openwrt', OPTS)]


class DeviceMgmtOpenWRT(abstract_driver.DeviceMGMTAbstractDriver):
    """Management driver that pushes UCI config to OpenWRT-based MEAs."""

    def get_type(self):
        return 'openwrt'

    def get_name(self):
        return 'openwrt'

    def get_description(self):
        return 'Apmec MEAMgmt OpenWRT Driver'

    def mgmt_url(self, plugin, context, mea):
        """Return the MEA's management URL (empty string if unset)."""
        LOG.debug('mgmt_url %s', mea)
        return mea.get('mgmt_url', '')

    @log.log
    def _config_service(self, mgmt_ip_address, service, config):
        """Push *config* for *service* to the device over SSH.

        Imports the config via `uci import` (config is fed on stdin) and
        restarts the corresponding init.d service.  Any failure is wrapped
        in MgmtDriverException.
        """
        user = cfg.CONF.openwrt.user
        password = cfg.CONF.openwrt.password
        try:
            cmd = "uci import %s; /etc/init.d/%s restart" % (service, service)
            LOG.debug('execute command: %(cmd)s on mgmt_ip_address '
                      '%(mgmt_ip)s',
                      {'cmd': cmd,
                       'mgmt_ip': mgmt_ip_address})
            commander = cmd_executer.RemoteCommandExecutor(
                user, password, mgmt_ip_address)
            # config is passed as stdin to `uci import`
            commander.execute_command(cmd, input_data=config)
        except Exception as ex:
            LOG.error("While executing command on remote "
                      "%(mgmt_ip)s: %(exception)s",
                      {'mgmt_ip': mgmt_ip_address,
                       'exception': ex})
            raise exceptions.MgmtDriverException()

    @log.log
    def mgmt_call(self, plugin, context, mea, kwargs):
        """Apply per-VDU service configuration on MEA update.

        Only ACTION_UPDATE_MEA is handled.  The MEA attribute 'config' is
        expected to be YAML of the form {'vdus': {vdu: {'config': {...}}}};
        only the 'firewall' and 'network' services are configured.
        mgmt_url maps each VDU to one management IP or a list of them.
        """
        if (kwargs[mgmt_constants.KEY_ACTION] !=
                mgmt_constants.ACTION_UPDATE_MEA):
            return
        dev_attrs = mea.get('attributes', {})

        mgmt_url = jsonutils.loads(mea.get('mgmt_url', '{}'))
        if not mgmt_url:
            return

        vdus_config = dev_attrs.get('config', '')
        config_yaml = yaml.safe_load(vdus_config)
        if not config_yaml:
            return
        vdus_config_dict = config_yaml.get('vdus', {})
        for vdu, vdu_dict in vdus_config_dict.items():
            config = vdu_dict.get('config', {})
            for key, conf_value in config.items():
                KNOWN_SERVICES = ('firewall', 'network')
                if key not in KNOWN_SERVICES:
                    continue
                mgmt_ip_address = mgmt_url.get(vdu, '')
                if not mgmt_ip_address:
                    LOG.warning('tried to configure unknown mgmt '
                                'address on MEA %(mea)s VDU %(vdu)s',
                                {'mea': mea.get('name'),
                                 'vdu': vdu})
                    continue

                # A VDU may expose one management IP or several.
                if isinstance(mgmt_ip_address, list):
                    for ip_address in mgmt_ip_address:
                        self._config_service(ip_address, key, conf_value)
                else:
                    self._config_service(mgmt_ip_address, key, conf_value)
import inspect
import threading
import time

from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils


from apmec.common import driver_manager
from apmec import context as t_context
from apmec.db.common_services import common_services_db_plugin
from apmec.plugins.common import constants

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
OPTS = [
    cfg.IntOpt('check_intvl',
               default=10,
               help=_("check interval for monitor")),
]
CONF.register_opts(OPTS, group='monitor')


def config_opts():
    """Return config options of this module for option discovery."""
    return [('monitor', OPTS),
            ('apmec', MEAMonitor.OPTS),
            ('apmec', MEAAlarmMonitor.OPTS), ]


def _log_monitor_events(context, mea_dict, evt_details):
    """Record a RES_EVT_MONITOR event for *mea_dict* in the events DB."""
    _cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
    _cos_db_plg.create_event(context, res_id=mea_dict['id'],
                             res_type=constants.RES_TYPE_MEA,
                             res_state=mea_dict['status'],
                             evt_type=constants.RES_EVT_MONITOR,
                             tstamp=timeutils.utcnow(),
                             details=evt_details)


class MEAMonitor(object):
    """MEA Monitor.

    Process-wide singleton that polls registered MEAs from a background
    thread and invokes the configured action callback when a monitor
    driver reports a condition (e.g. 'failure').
    """

    _instance = None
    _hosting_meas = dict()   # mea_id => dict of parameters
    _status_check_intvl = 0
    # guards _hosting_meas, which is mutated from API threads and read
    # from the polling thread
    _lock = threading.RLock()

    OPTS = [
        cfg.ListOpt(
            'monitor_driver', default=['ping', 'http_ping'],
            help=_('Monitor driver to communicate with '
                   'Hosting MEA/logical service '
                   'instance apmec plugin will use')),
    ]
    cfg.CONF.register_opts(OPTS, 'apmec')

    def __new__(cls, boot_wait, check_intvl=None):
        # Classic singleton: every instantiation returns the same object.
        # NOTE(review): __init__ still runs on each call, re-spawning the
        # polling thread — presumably only constructed once per process.
        if not cls._instance:
            cls._instance = super(MEAMonitor, cls).__new__(cls)
        return cls._instance

    def __init__(self, boot_wait, check_intvl=None):
        self._monitor_manager = driver_manager.DriverManager(
            'apmec.apmec.monitor.drivers',
            cfg.CONF.apmec.monitor_driver)

        self.boot_wait = boot_wait
        if check_intvl is None:
            check_intvl = cfg.CONF.monitor.check_intvl
        self._status_check_intvl = check_intvl
        LOG.debug('Spawning MEA monitor thread')
        threading.Thread(target=self.__run__).start()

    def __run__(self):
        # Background polling loop; never exits.
        while(1):
            time.sleep(self._status_check_intvl)

            with self._lock:
                for hosting_mea in self._hosting_meas.values():
                    if hosting_mea.get('dead', False):
                        LOG.debug('monitor skips dead mea %s', hosting_mea)
                        continue

                    self.run_monitor(hosting_mea)

    @staticmethod
    def to_hosting_mea(mea_dict, action_cb):
        """Build the internal monitoring record for a MEA.

        mgmt_url and the monitoring_policy attribute are stored as JSON
        strings and decoded here.
        """
        return {
            'id': mea_dict['id'],
            'management_ip_addresses': jsonutils.loads(
                mea_dict['mgmt_url']),
            'action_cb': action_cb,
            'mea': mea_dict,
            'monitoring_policy': jsonutils.loads(
                mea_dict['attributes']['monitoring_policy'])
        }

    def add_hosting_mea(self, new_mea):
        """Register a MEA for monitoring and log an audit event."""
        LOG.debug('Adding host %(id)s, Mgmt IP %(ips)s',
                  {'id': new_mea['id'],
                   'ips': new_mea['management_ip_addresses']})
        new_mea['boot_at'] = timeutils.utcnow()
        with self._lock:
            self._hosting_meas[new_mea['id']] = new_mea

        attrib_dict = new_mea['mea']['attributes']
        mon_policy_dict = attrib_dict['monitoring_policy']
        evt_details = (("MEA added for monitoring. "
                        "mon_policy_dict = %s,") % (mon_policy_dict))
        _log_monitor_events(t_context.get_admin_context(), new_mea['mea'],
                            evt_details)

    def delete_hosting_mea(self, mea_id):
        """Unregister a MEA; silently ignores unknown ids."""
        LOG.debug('deleting mea_id %(mea_id)s', {'mea_id': mea_id})
        with self._lock:
            hosting_mea = self._hosting_meas.pop(mea_id, None)
            if hosting_mea:
                LOG.debug('deleting mea_id %(mea_id)s, Mgmt IP %(ips)s',
                          {'mea_id': mea_id,
                           'ips': hosting_mea['management_ip_addresses']})

    def run_monitor(self, hosting_mea):
        """Run every configured monitor driver for each VDU of a MEA.

        Respects per-VDU 'monitoring_delay' (falling back to the MEA-level
        delay, then boot_wait) so freshly booted VMs are not probed too
        early.  If a driver's return value matches a configured action
        name, the MEA's action callback is invoked with that action.
        """
        mgmt_ips = hosting_mea['management_ip_addresses']
        vdupolicies = hosting_mea['monitoring_policy']['vdus']

        mea_delay = hosting_mea['monitoring_policy'].get(
            'monitoring_delay', self.boot_wait)

        for vdu in vdupolicies.keys():
            if hosting_mea.get('dead'):
                return

            policy = vdupolicies[vdu]
            for driver in policy.keys():
                params = policy[driver].get('monitoring_params', {})

                vdu_delay = params.get('monitoring_delay', mea_delay)

                # Skip until the VDU has been up for at least vdu_delay.
                if not timeutils.is_older_than(
                    hosting_mea['boot_at'],
                        vdu_delay):
                        continue

                actions = policy[driver].get('actions', {})
                if 'mgmt_ip' not in params:
                    params['mgmt_ip'] = mgmt_ips[vdu]

                driver_return = self.monitor_call(driver,
                                                  hosting_mea['mea'],
                                                  params)

                LOG.debug('driver_return %s', driver_return)

                if driver_return in actions:
                    action = actions[driver_return]
                    hosting_mea['action_cb'](action)

    def mark_dead(self, mea_id):
        # Flag the MEA so the polling loop skips it from now on.
        self._hosting_meas[mea_id]['dead'] = True

    def _invoke(self, driver, **kwargs):
        # Dispatch to the monitor driver method named after the caller
        # (e.g. calling from monitor_url invokes the driver's monitor_url).
        method = inspect.stack()[1][3]
        return self._monitor_manager.invoke(
            driver, method, **kwargs)

    def monitor_get_config(self, mea_dict):
        return self._invoke(
            mea_dict, monitor=self, mea=mea_dict)

    def monitor_url(self, mea_dict):
        return self._invoke(
            mea_dict, monitor=self, mea=mea_dict)

    def monitor_call(self, driver, mea_dict, kwargs):
        return self._invoke(driver,
                            mea=mea_dict, kwargs=kwargs)


class MEAAlarmMonitor(object):
    """MEA Alarm monitor.

    Bridges alarm-based monitoring policies (e.g. ceilometer) to the
    alarm-monitor driver manager: sets up alarm callback URLs for a MEA
    and processes incoming alarm notifications.
    """
    OPTS = [
        cfg.ListOpt(
            'alarm_monitor_driver', default=['ceilometer'],
            help=_('Alarm monitoring driver to communicate with '
                   'Hosting MEA/logical service '
                   'instance apmec plugin will use')),
    ]
    cfg.CONF.register_opts(OPTS, 'apmec')

    # get alarm here
    def __init__(self):
        self._alarm_monitor_manager = driver_manager.DriverManager(
            'apmec.apmec.alarm_monitor.drivers',
            cfg.CONF.apmec.alarm_monitor_driver)

    def update_mea_with_alarm(self, plugin, context, mea, policy_dict):
        """Create alarm URLs for every trigger of an alarming policy.

        Returns a dict mapping trigger name -> alarm URL, or None (after
        logging) when a trigger has no action configured.  Scaling actions
        are rewritten to the backend '<policy>-<in|out>' form based on the
        trigger's comparison operator.
        """
        triggers = policy_dict['triggers']
        alarm_url = dict()
        for trigger_name, trigger_dict in triggers.items():
            params = dict()
            params['mea_id'] = mea['id']
            params['mon_policy_name'] = trigger_name
            driver = trigger_dict['event_type']['implementation']
            # TODO(Tung Doan) trigger_dict.get('actions') needs to be used
            policy_action = trigger_dict.get('action')
            if len(policy_action) == 0:
                _log_monitor_events(t_context.get_admin_context(),
                                    mea,
                                    "Alarm not set: policy action missing")
                return
            # Other backend policies with the construct (policy, action)
            # ex: (SP1, in), (SP1, out)

            def _refactor_backend_policy(bk_policy_name, bk_action_name):
                policy = '%(policy_name)s-%(action_name)s' % {
                    'policy_name': bk_policy_name,
                    'action_name': bk_action_name}
                return policy

            for index, policy_action_name in enumerate(policy_action):
                filters = {'name': policy_action_name}
                bkend_policies =\
                    plugin.get_mea_policies(context, mea['id'], filters)
                if bkend_policies:
                    bkend_policy = bkend_policies[0]
                    if bkend_policy['type'] == constants.POLICY_SCALING:
                        # 'gt' means a high-watermark alarm -> scale out;
                        # anything else scales in.
                        cp = trigger_dict['condition'].\
                            get('comparison_operator')
                        scaling_type = 'out' if cp == 'gt' else 'in'
                        policy_action[index] = _refactor_backend_policy(
                            policy_action_name, scaling_type)

            # Support multiple action. Ex: respawn % notify
            action_name = '%'.join(policy_action)

            params['mon_policy_action'] = action_name
            alarm_url[trigger_name] =\
                self.call_alarm_url(driver, mea, params)
            details = "Alarm URL set successfully: %s" % alarm_url
            _log_monitor_events(t_context.get_admin_context(),
                                mea,
                                details)
        return alarm_url

    def process_alarm_for_mea(self, mea, trigger):
        '''call in plugin'''
        params = trigger['params']
        mon_prop = trigger['trigger']
        alarm_dict = dict()
        alarm_dict['alarm_id'] = params['data'].get('alarm_id')
        alarm_dict['status'] = params['data'].get('current')
        trigger_name, trigger_dict = list(mon_prop.items())[0]
        driver = trigger_dict['event_type']['implementation']
        return self.process_alarm(driver, mea, alarm_dict)

    def _invoke(self, driver, **kwargs):
        # Dispatch to the driver method named after the calling method.
        method = inspect.stack()[1][3]
        return self._alarm_monitor_manager.invoke(
            driver, method, **kwargs)

    def call_alarm_url(self, driver, mea_dict, kwargs):
        return self._invoke(driver,
                            mea=mea_dict, kwargs=kwargs)

    def process_alarm(self, driver, mea_dict, kwargs):
        return self._invoke(driver,
                            mea=mea_dict, kwargs=kwargs)
import abc

import six

from apmec.api import extensions


@six.add_metaclass(abc.ABCMeta)
class MEAMonitorAbstractDriver(extensions.PluginInterface):
    """Abstract interface for MEA monitor drivers (ping, http_ping, ...)."""

    @abc.abstractmethod
    def get_type(self):
        """Return one of predefined type of the hosting mea drivers."""
        pass

    @abc.abstractmethod
    def get_name(self):
        """Return a symbolic name for the MEA Monitor plugin."""
        pass

    @abc.abstractmethod
    def get_description(self):
        """Return description of MEA Monitor plugin."""
        pass

    def monitor_get_config(self, plugin, context, mea):
        """Return dict of monitor configuration data.

        :param plugin:
        :param context:
        :param mea:
        :returns: dict
        :returns: dict of monitor configuration data
        """
        return {}

    @abc.abstractmethod
    def monitor_url(self, plugin, context, mea):
        """Return the url of mea to monitor.

        :param plugin:
        :param context:
        :param mea:
        :returns: string
        :returns: url of mea to monitor
        """
        pass

    @abc.abstractmethod
    def monitor_call(self, mea, kwargs):
        """Monitor.

        Return boolean value True if MEA is healthy
        or return an event string like 'failure' or 'calls-capacity-reached'
        for specific MEA health condition.

        :param mea:
        :param kwargs:
        :returns: boolean
        :returns: True if MEA is healthy
        """
        pass

    def monitor_service_driver(self, plugin, context, mea,
                               service_instance):
        # use same monitor driver to communicate with service
        return self.get_name()
from oslo_config import cfg
from oslo_log import log as logging
import random
import string
from apmec.common import utils
from apmec.mem.monitor_drivers import abstract_driver


LOG = logging.getLogger(__name__)

# Address/port this service listens on for alarm callbacks from
# ceilometer; registered under the [ceilometer] config group.
OPTS = [
    cfg.HostAddressOpt('host', default=utils.get_hostname(),
                       help=_('Address which drivers use to trigger')),
    cfg.PortOpt('port', default=9896,
                help=_('port number which drivers use to trigger'))
]
cfg.CONF.register_opts(OPTS, group='ceilometer')


def config_opts():
    """Return this module's config options for option discovery."""
    return [('ceilometer', OPTS)]

# Field names of a ceilometer alarm payload; ALARM is the state value
# that marks a fired alarm.
ALARM_INFO = (
    ALARM_ACTIONS, OK_ACTIONS, REPEAT_ACTIONS, ALARM,
    INSUFFICIENT_DATA_ACTIONS, DESCRIPTION, ENABLED, TIME_CONSTRAINTS,
    SEVERITY,
) = ('alarm_actions', 'ok_actions', 'repeat_actions', 'alarm',
     'insufficient_data_actions', 'description', 'enabled', 'time_constraints',
     'severity',
     )


class MEAMonitorCeilometer(
        abstract_driver.MEAMonitorAbstractDriver):
    """Alarm-monitor driver that builds callback URLs for ceilometer."""

    def get_type(self):
        return 'ceilometer'

    def get_name(self):
        return 'ceilometer'

    def get_description(self):
        return 'Apmec MEAMonitor Ceilometer Driver'

    def _create_alarm_url(self, mea_id, mon_policy_name, mon_policy_action):
        # alarm_url = 'http://host:port/v1.0/meas/mea-uuid/monitoring-policy
        # -name/action-name?key=8785'
        host = cfg.CONF.ceilometer.host
        port = cfg.CONF.ceilometer.port
        LOG.info("Apmec in heat listening on %(host)s:%(port)s",
                 {'host': host,
                  'port': port})
        origin = "http://%(host)s:%(port)s/v1.0/meas" % {
            'host': host, 'port': port}
        # Random 8-char access key embedded in the URL so that only
        # callers who received the URL can trigger the action.
        access_key = ''.join(
            random.SystemRandom().choice(
                string.ascii_lowercase + string.digits)
            for _ in range(8))
        alarm_url = "".join([origin, '/', mea_id, '/', mon_policy_name, '/',
                             mon_policy_action, '/', access_key])
        return alarm_url

    def call_alarm_url(self, mea, kwargs):
        '''must be used after call heat-create in plugin'''
        return self._create_alarm_url(**kwargs)

    def _process_alarm(self, alarm_id, status):
        # True only for a fired alarm; otherwise implicitly returns None.
        if alarm_id and status == ALARM:
            return True

    def process_alarm(self, mea, kwargs):
        '''Check alarm state. if available, will be processed'''
        return self._process_alarm(**kwargs)

    def monitor_url(self, plugin, context, mea):
        # Not applicable to alarm-based monitoring.
        pass

    def monitor_call(self, mea, kwargs):
        # Not applicable to alarm-based monitoring.
        pass
from oslo_config import cfg
from oslo_log import log as logging
import six.moves.urllib.error as urlerr
import six.moves.urllib.request as urlreq

from apmec.common import log
from apmec.mem.monitor_drivers import abstract_driver


LOG = logging.getLogger(__name__)
# Defaults for the HTTP probe; registered under [monitor_http_ping].
OPTS = [
    cfg.IntOpt('retry', default=5,
               help=_('number of times to retry')),
    cfg.IntOpt('timeout', default=1,
               help=_('number of seconds to wait for a response')),
    cfg.IntOpt('port', default=80,
               help=_('HTTP port number to send request'))
]
cfg.CONF.register_opts(OPTS, 'monitor_http_ping')


def config_opts():
    """Return this module's config options for option discovery."""
    return [('monitor_http_ping', OPTS)]


class MEAMonitorHTTPPing(abstract_driver.MEAMonitorAbstractDriver):
    """Monitor driver that probes a MEA's HTTP endpoint."""

    def get_type(self):
        return 'http_ping'

    def get_name(self):
        return 'HTTP ping'

    def get_description(self):
        return 'Apmec HTTP Ping Driver for MEA'

    def monitor_url(self, plugin, context, mea):
        """Return the MEA's monitor URL (empty string if unset)."""
        LOG.debug('monitor_url %s', mea)
        return mea.get('monitor_url', '')

    def _is_pingable(self, mgmt_ip='', retry=5, timeout=5, port=80, **kwargs):
        """Checks whether the server is reachable by using urllib.

        Waits for connectivity for `timeout` seconds,
        and if connection refused, it will retry `retry`
        times.
        :param mgmt_ip: IP to check
        :param retry: times to reconnect if connection refused
        :param timeout: seconds to wait for connection
        :param port: port number to check connectivity
        :return: True if reachable, the string 'failure' otherwise.
        """
        url = 'http://' + mgmt_ip + ':' + str(port)
        for retry_index in range(int(retry)):
            try:
                # Close the response explicitly: the original code leaked
                # one socket per successful probe, which adds up in the
                # long-running monitor thread.
                response = urlreq.urlopen(url, timeout=timeout)
                response.close()
                return True
            except urlerr.URLError:
                LOG.warning('Unable to reach to the url %s', url)
        return 'failure'

    @log.log
    def monitor_call(self, mea, kwargs):
        """Probe the MEA; returns None when no mgmt_ip is configured."""
        if not kwargs['mgmt_ip']:
            return

        return self._is_pingable(**kwargs)
from oslo_config import cfg
from oslo_log import log as logging

from apmec.agent.linux import utils as linux_utils
from apmec.common import log
from apmec.mem.monitor_drivers import abstract_driver


LOG = logging.getLogger(__name__)
# Defaults for the ICMP probe; registered under [monitor_ping].  Values
# are strings because they are spliced directly into the ping argv.
OPTS = [
    cfg.StrOpt('count', default='1',
               help=_('number of ICMP packets to send')),
    cfg.StrOpt('timeout', default='1',
               help=_('number of seconds to wait for a response')),
    cfg.StrOpt('interval', default='1',
               help=_('number of seconds to wait between packets'))
]
cfg.CONF.register_opts(OPTS, 'monitor_ping')


def config_opts():
    """Return this module's config options for option discovery."""
    return [('monitor_ping', OPTS)]


class MEAMonitorPing(abstract_driver.MEAMonitorAbstractDriver):
    """Monitor driver that probes a MEA with ICMP echo (ping)."""

    def get_type(self):
        return 'ping'

    def get_name(self):
        return 'ping'

    def get_description(self):
        return 'Apmec MEAMonitor Ping Driver'

    def monitor_url(self, plugin, context, mea):
        """Return the MEA's monitor URL (empty string if unset)."""
        LOG.debug('monitor_url %s', mea)
        return mea.get('monitor_url', '')

    def _is_pingable(self, mgmt_ip="", count=5, timeout=1, interval='0.2',
                     **kwargs):
        """Checks whether an IP address is reachable by pinging.

        Use linux utils to execute the ping (ICMP ECHO) command.
        Sends `count` packets with an interval of `interval` seconds and
        timeout of `timeout` seconds. Runtime error implies unreachability
        else IP is pingable.
        :param mgmt_ip: IP to check
        :return: bool - True or string 'failure' depending on pingability.
        """
        ping_cmd = ['ping',
                    '-c', count,
                    '-W', timeout,
                    '-i', interval,
                    mgmt_ip]

        try:
            # check_exit_code=True raises RuntimeError on non-zero exit.
            linux_utils.execute(ping_cmd, check_exit_code=True)
            return True
        except RuntimeError:
            LOG.warning("Cannot ping ip address: %s", mgmt_ip)
            return 'failure'

    @log.log
    def monitor_call(self, mea, kwargs):
        """Probe the MEA; returns None when no mgmt_ip is configured."""
        if not kwargs['mgmt_ip']:
            return

        return self._is_pingable(**kwargs)
from keystoneauth1.identity import v3
from keystoneauth1 import session


class Token(object):
    """Obtain a Keystone v3 token via password authentication."""

    def __init__(self, username, password, project_name,
                 auth_url, user_domain_name, project_domain_name):
        # Credentials and scope are stored as-is and used lazily by
        # create_token().
        self.username = username
        self.password = password
        self.auth_url = auth_url
        self.project_name = project_name
        self.user_domain_name = user_domain_name
        self.project_domain_name = project_domain_name

    def create_token(self):
        """Authenticate against Keystone and return the token id."""
        password_auth = v3.Password(
            auth_url=self.auth_url,
            username=self.username,
            password=self.password,
            project_name=self.project_name,
            user_domain_name=self.user_domain_name,
            project_domain_name=self.project_domain_name)
        keystone_session = session.Session(auth=password_auth)
        return keystone_session.auth.get_token(keystone_session)
+ +import inspect +import six +import yaml + +import eventlet +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import uuidutils +from toscaparser.tosca_template import ToscaTemplate + +from apmec.api.v1 import attributes +from apmec.common import driver_manager +from apmec.common import exceptions +from apmec.common import utils +from apmec.db.mem import mem_db +from apmec.extensions import mem +from apmec.plugins.common import constants +from apmec.mem.mgmt_drivers import constants as mgmt_constants +from apmec.mem import monitor +from apmec.mem import vim_client + +from apmec.catalogs.tosca import utils as toscautils + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +def config_opts(): + return [('apmec', MEMMgmtMixin.OPTS), + ('apmec', MEMPlugin.OPTS_INFRA_DRIVER), + ('apmec', MEMPlugin.OPTS_POLICY_ACTION)] + + +class MEMMgmtMixin(object): + OPTS = [ + cfg.ListOpt( + 'mgmt_driver', default=['noop', 'openwrt'], + help=_('MGMT driver to communicate with ' + 'Hosting MEA/logical service ' + 'instance apmec plugin will use')), + cfg.IntOpt('boot_wait', default=30, + help=_('Time interval to wait for VM to boot')) + ] + cfg.CONF.register_opts(OPTS, 'apmec') + + def __init__(self): + super(MEMMgmtMixin, self).__init__() + self._mgmt_manager = driver_manager.DriverManager( + 'apmec.apmec.mgmt.drivers', cfg.CONF.apmec.mgmt_driver) + + def _invoke(self, mea_dict, **kwargs): + method = inspect.stack()[1][3] + return self._mgmt_manager.invoke( + self._mgmt_driver_name(mea_dict), method, **kwargs) + + def mgmt_create_pre(self, context, mea_dict): + return self._invoke( + mea_dict, plugin=self, context=context, mea=mea_dict) + + def mgmt_create_post(self, context, mea_dict): + return self._invoke( + mea_dict, plugin=self, context=context, mea=mea_dict) + + def mgmt_update_pre(self, context, mea_dict): + return self._invoke( + mea_dict, plugin=self, context=context, mea=mea_dict) + + def 
mgmt_update_post(self, context, mea_dict): + return self._invoke( + mea_dict, plugin=self, context=context, mea=mea_dict) + + def mgmt_delete_pre(self, context, mea_dict): + return self._invoke( + mea_dict, plugin=self, context=context, mea=mea_dict) + + def mgmt_delete_post(self, context, mea_dict): + return self._invoke( + mea_dict, plugin=self, context=context, mea=mea_dict) + + def mgmt_get_config(self, context, mea_dict): + return self._invoke( + mea_dict, plugin=self, context=context, mea=mea_dict) + + def mgmt_url(self, context, mea_dict): + return self._invoke( + mea_dict, plugin=self, context=context, mea=mea_dict) + + def mgmt_call(self, context, mea_dict, kwargs): + return self._invoke( + mea_dict, plugin=self, context=context, mea=mea_dict, + kwargs=kwargs) + + +class MEMPlugin(mem_db.MEMPluginDb, MEMMgmtMixin): + """MEMPlugin which supports MEM framework. + + Plugin which supports Apmec framework + """ + OPTS_INFRA_DRIVER = [ + cfg.ListOpt( + 'infra_driver', default=['noop', 'openstack'], + help=_('Hosting mea drivers apmec plugin will use')), + ] + cfg.CONF.register_opts(OPTS_INFRA_DRIVER, 'apmec') + + OPTS_POLICY_ACTION = [ + cfg.ListOpt( + 'policy_action', default=['autoscaling', 'respawn', + 'log', 'log_and_kill'], + help=_('Hosting mea drivers apmec plugin will use')), + ] + cfg.CONF.register_opts(OPTS_POLICY_ACTION, 'apmec') + + supported_extension_aliases = ['mem'] + + def __init__(self): + super(MEMPlugin, self).__init__() + self._pool = eventlet.GreenPool() + self.boot_wait = cfg.CONF.apmec.boot_wait + self.vim_client = vim_client.VimClient() + self._mea_manager = driver_manager.DriverManager( + 'apmec.apmec.mem.drivers', + cfg.CONF.apmec.infra_driver) + self._mea_action = driver_manager.DriverManager( + 'apmec.apmec.policy.actions', + cfg.CONF.apmec.policy_action) + self._mea_monitor = monitor.MEAMonitor(self.boot_wait) + self._mea_alarm_monitor = monitor.MEAAlarmMonitor() + + def spawn_n(self, function, *args, **kwargs): + 
self._pool.spawn_n(function, *args, **kwargs) + + def create_mead(self, context, mead): + mead_data = mead['mead'] + template = mead_data['attributes'].get('mead') + if isinstance(template, dict): + # TODO(sripriya) remove this yaml dump once db supports storing + # json format of yaml files in a separate column instead of + # key value string pairs in mea attributes table + mead_data['attributes']['mead'] = yaml.safe_dump( + template) + elif isinstance(template, str): + self._report_deprecated_yaml_str() + if "tosca_definitions_version" not in template: + raise exceptions.Invalid('Not a valid template: ' + 'tosca_definitions_version is missing.') + + LOG.debug('mead %s', mead_data) + + service_types = mead_data.get('service_types') + if not attributes.is_attr_set(service_types): + LOG.debug('service type must be specified') + raise mem.ServiceTypesNotSpecified() + for service_type in service_types: + # TODO(yamahata): + # framework doesn't know what services are valid for now. + # so doesn't check it here yet. 
+ pass + if 'template_source' in mead_data: + template_source = mead_data.get('template_source') + else: + template_source = 'onboarded' + mead['mead']['template_source'] = template_source + + self._parse_template_input(mead) + return super(MEMPlugin, self).create_mead( + context, mead) + + def _parse_template_input(self, mead): + mead_dict = mead['mead'] + mead_yaml = mead_dict['attributes'].get('mead') + if mead_yaml is None: + return + + inner_mead_dict = yaml.safe_load(mead_yaml) + LOG.debug('mead_dict: %s', inner_mead_dict) + + # Prepend the apmec_defs.yaml import file with the full + # path to the file + toscautils.updateimports(inner_mead_dict) + + try: + tosca = ToscaTemplate(a_file=False, + yaml_dict_tpl=inner_mead_dict) + except Exception as e: + LOG.exception("tosca-parser error: %s", str(e)) + raise mem.ToscaParserFailed(error_msg_details=str(e)) + + if ('description' not in mead_dict or + mead_dict['description'] == ''): + mead_dict['description'] = inner_mead_dict.get( + 'description', '') + if (('name' not in mead_dict or + not len(mead_dict['name'])) and + 'metadata' in inner_mead_dict): + mead_dict['name'] = inner_mead_dict['metadata'].get( + 'template_name', '') + + mead_dict['mgmt_driver'] = toscautils.get_mgmt_driver( + tosca) + LOG.debug('mead %s', mead) + + def add_mea_to_monitor(self, context, mea_dict): + dev_attrs = mea_dict['attributes'] + mgmt_url = mea_dict['mgmt_url'] + if 'monitoring_policy' in dev_attrs and mgmt_url: + def action_cb(action): + LOG.debug('policy action: %s', action) + self._mea_action.invoke( + action, 'execute_action', plugin=self, context=context, + mea_dict=hosting_mea['mea'], args={}) + + hosting_mea = self._mea_monitor.to_hosting_mea( + mea_dict, action_cb) + LOG.debug('hosting_mea: %s', hosting_mea) + self._mea_monitor.add_hosting_mea(hosting_mea) + + def add_alarm_url_to_mea(self, context, mea_dict): + mead_yaml = mea_dict['mead']['attributes'].get('mead', '') + mead_dict = yaml.safe_load(mead_yaml) + if 
mead_dict and mead_dict.get('tosca_definitions_version'): + polices = mead_dict['topology_template'].get('policies', []) + for policy_dict in polices: + name, policy = list(policy_dict.items())[0] + if policy['type'] in constants.POLICY_ALARMING: + alarm_url =\ + self._mea_alarm_monitor.update_mea_with_alarm( + self, context, mea_dict, policy) + mea_dict['attributes']['alarming_policy'] = mea_dict['id'] + mea_dict['attributes'].update(alarm_url) + break + + def config_mea(self, context, mea_dict): + config = mea_dict['attributes'].get('config') + if not config: + return + eventlet.sleep(self.boot_wait) # wait for vm to be ready + mea_id = mea_dict['id'] + update = { + 'mea': { + 'id': mea_id, + 'attributes': {'config': config}, + } + } + self.update_mea(context, mea_id, update) + + def _get_infra_driver(self, context, mea_info): + vim_res = self.get_vim(context, mea_info) + return vim_res['vim_type'], vim_res['vim_auth'] + + def _create_mea_wait(self, context, mea_dict, auth_attr, driver_name): + mea_id = mea_dict['id'] + instance_id = self._instance_id(mea_dict) + create_failed = False + + try: + self._mea_manager.invoke( + driver_name, 'create_wait', plugin=self, context=context, + mea_dict=mea_dict, mea_id=instance_id, + auth_attr=auth_attr) + except mem.MEACreateWaitFailed as e: + LOG.error("MEA Create failed for mea_id %s", mea_id) + create_failed = True + mea_dict['status'] = constants.ERROR + self.set_mea_error_status_reason(context, mea_id, + six.text_type(e)) + + if instance_id is None or create_failed: + mgmt_url = None + else: + # mgmt_url = self.mgmt_url(context, mea_dict) + # FIXME(yamahata): + mgmt_url = mea_dict['mgmt_url'] + + self._create_mea_post( + context, mea_id, instance_id, mgmt_url, mea_dict) + self.mgmt_create_post(context, mea_dict) + + if instance_id is None or create_failed: + return + + mea_dict['mgmt_url'] = mgmt_url + + kwargs = { + mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_CREATE_MEA, + mgmt_constants.KEY_KWARGS: {'mea': 
mea_dict}, + } + new_status = constants.ACTIVE + try: + self.mgmt_call(context, mea_dict, kwargs) + except exceptions.MgmtDriverException: + LOG.error('MEA configuration failed') + new_status = constants.ERROR + self.set_mea_error_status_reason(context, mea_id, + 'Unable to configure VDU') + mea_dict['status'] = new_status + self._create_mea_status(context, mea_id, new_status) + + def get_vim(self, context, mea): + region_name = mea.setdefault('placement_attr', {}).get( + 'region_name', None) + vim_res = self.vim_client.get_vim(context, mea['vim_id'], + region_name) + mea['placement_attr']['vim_name'] = vim_res['vim_name'] + mea['vim_id'] = vim_res['vim_id'] + return vim_res + + def _create_mea(self, context, mea, vim_auth, driver_name): + mea_dict = self._create_mea_pre( + context, mea) if not mea.get('id') else mea + mea_id = mea_dict['id'] + LOG.debug('mea_dict %s', mea_dict) + self.mgmt_create_pre(context, mea_dict) + self.add_alarm_url_to_mea(context, mea_dict) + try: + instance_id = self._mea_manager.invoke( + driver_name, 'create', plugin=self, + context=context, mea=mea_dict, auth_attr=vim_auth) + except Exception: + LOG.debug('Fail to create mea %s in infra_driver, ' + 'so delete this mea', + mea_dict['id']) + with excutils.save_and_reraise_exception(): + self.delete_mea(context, mea_id) + + if instance_id is None: + self._create_mea_post(context, mea_id, None, None, + mea_dict) + return + mea_dict['instance_id'] = instance_id + return mea_dict + + def create_mea(self, context, mea): + mea_info = mea['mea'] + name = mea_info['name'] + + # if mead_template specified, create mead from template + # create template dictionary structure same as needed in create_mead() + if mea_info.get('mead_template'): + mead_name = utils.generate_resource_name(name, 'inline') + mead = {'mead': {'attributes': {'mead': mea_info['mead_template']}, + 'name': mead_name, + 'template_source': 'inline', + 'service_types': [{'service_type': 'mead'}]}} + mea_info['mead_id'] = 
self.create_mead(context, mead).get('id') + + mea_attributes = mea_info['attributes'] + if mea_attributes.get('param_values'): + param = mea_attributes['param_values'] + if isinstance(param, dict): + # TODO(sripriya) remove this yaml dump once db supports storing + # json format of yaml files in a separate column instead of + # key value string pairs in mea attributes table + mea_attributes['param_values'] = yaml.safe_dump(param) + else: + self._report_deprecated_yaml_str() + if mea_attributes.get('config'): + config = mea_attributes['config'] + if isinstance(config, dict): + # TODO(sripriya) remove this yaml dump once db supports storing + # json format of yaml files in a separate column instead of + # key value string pairs in mea attributes table + mea_attributes['config'] = yaml.safe_dump(config) + else: + self._report_deprecated_yaml_str() + infra_driver, vim_auth = self._get_infra_driver(context, mea_info) + if infra_driver not in self._mea_manager: + LOG.debug('unknown vim driver ' + '%(infra_driver)s in %(drivers)s', + {'infra_driver': infra_driver, + 'drivers': cfg.CONF.apmec.infra_driver}) + raise mem.InvalidInfraDriver(vim_name=infra_driver) + + mea_dict = self._create_mea(context, mea_info, vim_auth, infra_driver) + + def create_mea_wait(): + self._create_mea_wait(context, mea_dict, vim_auth, infra_driver) + if mea_dict['status'] is not constants.ERROR: + self.add_mea_to_monitor(context, mea_dict) + self.config_mea(context, mea_dict) + self.spawn_n(create_mea_wait) + return mea_dict + + # not for wsgi, but for service to create hosting mea + # the mea is NOT added to monitor. 
+ def create_mea_sync(self, context, mea): + infra_driver, vim_auth = self._get_infra_driver(context, mea) + mea_dict = self._create_mea(context, mea, vim_auth, infra_driver) + self._create_mea_wait(context, mea_dict, vim_auth, infra_driver) + return mea_dict + + def _update_mea_wait(self, context, mea_dict, vim_auth, driver_name): + instance_id = self._instance_id(mea_dict) + kwargs = { + mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_UPDATE_MEA, + mgmt_constants.KEY_KWARGS: {'mea': mea_dict}, + } + new_status = constants.ACTIVE + placement_attr = mea_dict['placement_attr'] + region_name = placement_attr.get('region_name') + + try: + self._mea_manager.invoke( + driver_name, 'update_wait', plugin=self, + context=context, mea_id=instance_id, auth_attr=vim_auth, + region_name=region_name) + self.mgmt_call(context, mea_dict, kwargs) + except exceptions.MgmtDriverException as e: + LOG.error('MEA configuration failed') + new_status = constants.ERROR + self._mea_monitor.delete_hosting_mea(mea_dict['id']) + self.set_mea_error_status_reason(context, mea_dict['id'], + six.text_type(e)) + mea_dict['status'] = new_status + self.mgmt_update_post(context, mea_dict) + + self._update_mea_post(context, mea_dict['id'], + new_status, mea_dict) + + def update_mea(self, context, mea_id, mea): + mea_attributes = mea['mea']['attributes'] + if mea_attributes.get('config'): + config = mea_attributes['config'] + if isinstance(config, dict): + # TODO(sripriya) remove this yaml dump once db supports storing + # json format of yaml files in a separate column instead of + # key value string pairs in mea attributes table + mea_attributes['config'] = yaml.safe_dump(config) + else: + self._report_deprecated_yaml_str() + mea_dict = self._update_mea_pre(context, mea_id) + driver_name, vim_auth = self._get_infra_driver(context, mea_dict) + instance_id = self._instance_id(mea_dict) + + try: + self.mgmt_update_pre(context, mea_dict) + self._mea_manager.invoke( + driver_name, 'update', plugin=self, 
context=context, + mea_id=instance_id, mea_dict=mea_dict, + mea=mea, auth_attr=vim_auth) + except Exception as e: + with excutils.save_and_reraise_exception(): + mea_dict['status'] = constants.ERROR + self._mea_monitor.delete_hosting_mea(mea_id) + self.set_mea_error_status_reason(context, + mea_dict['id'], + six.text_type(e)) + self.mgmt_update_post(context, mea_dict) + self._update_mea_post(context, mea_id, + constants.ERROR, + mea_dict) + + self.spawn_n(self._update_mea_wait, context, mea_dict, vim_auth, + driver_name) + return mea_dict + + def _delete_mea_wait(self, context, mea_dict, auth_attr, driver_name): + instance_id = self._instance_id(mea_dict) + e = None + if instance_id: + placement_attr = mea_dict['placement_attr'] + region_name = placement_attr.get('region_name') + try: + self._mea_manager.invoke( + driver_name, + 'delete_wait', + plugin=self, + context=context, + mea_id=instance_id, + auth_attr=auth_attr, + region_name=region_name) + except Exception as e_: + e = e_ + mea_dict['status'] = constants.ERROR + mea_dict['error_reason'] = six.text_type(e) + LOG.exception('_delete_mea_wait') + self.set_mea_error_status_reason(context, mea_dict['id'], + mea_dict['error_reason']) + + self.mgmt_delete_post(context, mea_dict) + self._delete_mea_post(context, mea_dict, e) + + def delete_mea(self, context, mea_id): + mea_dict = self._delete_mea_pre(context, mea_id) + driver_name, vim_auth = self._get_infra_driver(context, mea_dict) + self._mea_monitor.delete_hosting_mea(mea_id) + instance_id = self._instance_id(mea_dict) + placement_attr = mea_dict['placement_attr'] + region_name = placement_attr.get('region_name') + kwargs = { + mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_DELETE_MEA, + mgmt_constants.KEY_KWARGS: {'mea': mea_dict}, + } + try: + self.mgmt_delete_pre(context, mea_dict) + self.mgmt_call(context, mea_dict, kwargs) + if instance_id: + self._mea_manager.invoke(driver_name, + 'delete', + plugin=self, + context=context, + mea_id=instance_id, + 
auth_attr=vim_auth, + region_name=region_name) + except Exception as e: + # TODO(yamahata): when the devaice is already deleted. mask + # the error, and delete row in db + # Other case mark error + with excutils.save_and_reraise_exception(): + mea_dict['status'] = constants.ERROR + mea_dict['error_reason'] = six.text_type(e) + self.set_mea_error_status_reason(context, mea_dict['id'], + mea_dict['error_reason']) + self.mgmt_delete_post(context, mea_dict) + self._delete_mea_post(context, mea_dict, e) + + self.spawn_n(self._delete_mea_wait, context, mea_dict, vim_auth, + driver_name) + + def _handle_mea_scaling(self, context, policy): + # validate + def _validate_scaling_policy(): + type = policy['type'] + + if type not in constants.POLICY_ACTIONS.keys(): + raise exceptions.MeaPolicyTypeInvalid( + type=type, + valid_types=constants.POLICY_ACTIONS.keys(), + policy=policy['name'] + ) + action = policy['action'] + + if action not in constants.POLICY_ACTIONS[type]: + raise exceptions.MeaPolicyActionInvalid( + action=action, + valid_actions=constants.POLICY_ACTIONS[type], + policy=policy['name'] + ) + + LOG.debug("Policy %s is validated successfully", policy['name']) + + def _get_status(): + if policy['action'] == constants.ACTION_SCALE_IN: + status = constants.PENDING_SCALE_IN + else: + status = constants.PENDING_SCALE_OUT + + return status + + # pre + def _handle_mea_scaling_pre(): + status = _get_status() + result = self._update_mea_scaling_status(context, + policy, + [constants.ACTIVE], + status) + LOG.debug("Policy %(policy)s mea is at %(status)s", + {'policy': policy['name'], + 'status': status}) + return result + + # post + def _handle_mea_scaling_post(new_status, mgmt_url=None): + status = _get_status() + result = self._update_mea_scaling_status(context, + policy, + [status], + new_status, + mgmt_url) + LOG.debug("Policy %(policy)s mea is at %(status)s", + {'policy': policy['name'], + 'status': new_status}) + return result + + # action + def _mea_policy_action(): + 
try: + last_event_id = self._mea_manager.invoke( + infra_driver, + 'scale', + plugin=self, + context=context, + auth_attr=vim_auth, + policy=policy, + region_name=region_name + ) + LOG.debug("Policy %s action is started successfully", + policy['name']) + return last_event_id + except Exception as e: + LOG.error("Policy %s action is failed to start", + policy) + with excutils.save_and_reraise_exception(): + mea['status'] = constants.ERROR + self.set_mea_error_status_reason( + context, + policy['mea']['id'], + six.text_type(e)) + _handle_mea_scaling_post(constants.ERROR) + + # wait + def _mea_policy_action_wait(): + try: + LOG.debug("Policy %s action is in progress", + policy['name']) + mgmt_url = self._mea_manager.invoke( + infra_driver, + 'scale_wait', + plugin=self, + context=context, + auth_attr=vim_auth, + policy=policy, + region_name=region_name, + last_event_id=last_event_id + ) + LOG.debug("Policy %s action is completed successfully", + policy['name']) + _handle_mea_scaling_post(constants.ACTIVE, mgmt_url) + # TODO(kanagaraj-manickam): Add support for config and mgmt + except Exception as e: + LOG.error("Policy %s action is failed to complete", + policy['name']) + with excutils.save_and_reraise_exception(): + self.set_mea_error_status_reason( + context, + policy['mea']['id'], + six.text_type(e)) + _handle_mea_scaling_post(constants.ERROR) + + _validate_scaling_policy() + + mea = _handle_mea_scaling_pre() + policy['instance_id'] = mea['instance_id'] + + infra_driver, vim_auth = self._get_infra_driver(context, mea) + region_name = mea.get('placement_attr', {}).get('region_name', None) + last_event_id = _mea_policy_action() + self.spawn_n(_mea_policy_action_wait) + + return policy + + def _report_deprecated_yaml_str(self): + utils.deprecate_warning(what='yaml as string', + as_of='N', in_favor_of='yaml as dictionary') + + def _make_policy_dict(self, mea, name, policy): + p = {} + p['type'] = policy.get('type') + p['properties'] = policy.get('properties') or 
policy.get('triggers') + p['mea'] = mea + p['name'] = name + p['id'] = uuidutils.generate_uuid() + return p + + def get_mea_policies( + self, context, mea_id, filters=None, fields=None): + mea = self.get_mea(context, mea_id) + mead_tmpl = yaml.safe_load(mea['mead']['attributes']['mead']) + policy_list = [] + + polices = mead_tmpl['topology_template'].get('policies', []) + for policy_dict in polices: + for name, policy in policy_dict.items(): + def _add(policy): + p = self._make_policy_dict(mea, name, policy) + p['name'] = name + policy_list.append(p) + + # Check for filters + if filters.get('name') or filters.get('type'): + if name == filters.get('name'): + _add(policy) + break + elif policy['type'] == filters.get('type'): + _add(policy) + break + else: + continue + + _add(policy) + + return policy_list + + def get_mea_policy( + self, context, policy_id, mea_id, fields=None): + policies = self.get_mea_policies(context, + mea_id, + filters={'name': policy_id}) + if policies: + return policies[0] + else: + return None + + def create_mea_scale(self, context, mea_id, scale): + policy_ = self.get_mea_policy(context, + scale['scale']['policy'], + mea_id) + if not policy_: + raise exceptions.MeaPolicyNotFound(policy=scale['scale']['policy'], + mea_id=mea_id) + policy_.update({'action': scale['scale']['type']}) + self._handle_mea_scaling(context, policy_) + + return scale['scale'] + + def get_mea_policy_by_type(self, context, mea_id, policy_type=None, fields=None): # noqa + policies = self.get_mea_policies(context, + mea_id, + filters={'type': policy_type}) + if policies: + return policies[0] + + raise exceptions.MeaPolicyTypeInvalid(type=constants.POLICY_ALARMING, + mea_id=mea_id) + + def _validate_alarming_policy(self, context, mea_id, trigger): + # validate alarm status + if not self._mea_alarm_monitor.process_alarm_for_mea(mea_id, trigger): + raise exceptions.AlarmUrlInvalid(mea_id=mea_id) + + # validate policy action. if action is composite, split it. 
+ # ex: respawn%notify + action = trigger['action_name'] + action_list = action.split('%') + pl_action_dict = dict() + pl_action_dict['policy_actions'] = dict() + pl_action_dict['policy_actions']['def_actions'] = list() + pl_action_dict['policy_actions']['custom_actions'] = dict() + for action in action_list: + # validate policy action. if action is composite, split it. + # ex: SP1-in, SP1-out + action_ = None + if action in constants.DEFAULT_ALARM_ACTIONS: + pl_action_dict['policy_actions']['def_actions'].append(action) + policy_ = self.get_mea_policy(context, action, mea_id) + if not policy_: + sp_action = action.split('-') + if len(sp_action) == 2: + bk_policy_name = sp_action[0] + bk_policy_action = sp_action[1] + policies_ = self.get_mea_policies( + context, mea_id, filters={'name': bk_policy_name}) + if policies_: + policy_ = policies_[0] + action_ = bk_policy_action + if policy_: + pl_action_dict['policy_actions']['custom_actions'].update( + {policy_['id']: {'bckend_policy': policy_, + 'bckend_action': action_}}) + + LOG.debug("Trigger %s is validated successfully", trigger) + + return pl_action_dict + # validate url + + def _get_mea_triggers(self, context, mea_id, filters=None, fields=None): + policy = self.get_mea_policy_by_type( + context, mea_id, policy_type=constants.POLICY_ALARMING) + triggers = policy['properties'] + mea_trigger = dict() + for trigger_name, trigger_dict in triggers.items(): + if trigger_name == filters.get('name'): + mea_trigger['trigger'] = {trigger_name: trigger_dict} + mea_trigger['mea'] = policy['mea'] + break + + return mea_trigger + + def get_mea_trigger(self, context, mea_id, trigger_name): + trigger = self._get_mea_triggers( + context, mea_id, filters={'name': trigger_name}) + if not trigger: + raise exceptions.TriggerNotFound( + trigger_name=trigger_name, + mea_id=mea_id + ) + return trigger + + def _handle_mea_monitoring(self, context, trigger): + mea_dict = trigger['mea'] + if trigger['action_name'] in 
constants.DEFAULT_ALARM_ACTIONS: + action = trigger['action_name'] + LOG.debug('mea for monitoring: %s', mea_dict) + self._mea_action.invoke( + action, 'execute_action', plugin=self, context=context, + mea_dict=mea_dict, args={}) + + # Multiple actions support + if trigger.get('policy_actions'): + policy_actions = trigger['policy_actions'] + if policy_actions.get('def_actions'): + for action in policy_actions['def_actions']: + self._mea_action.invoke( + action, 'execute_action', plugin=self, context=context, + mea_dict=mea_dict, args={}) + if policy_actions.get('custom_actions'): + custom_actions = policy_actions['custom_actions'] + for pl_action, pl_action_dict in custom_actions.items(): + bckend_policy = pl_action_dict['bckend_policy'] + bckend_action = pl_action_dict['bckend_action'] + bckend_policy_type = bckend_policy['type'] + if bckend_policy_type == constants.POLICY_SCALING: + if mea_dict['status'] != constants.ACTIVE: + LOG.info(_("Scaling Policy action" + "skipped due to status:" + "%(status)s for mea: %(meaid)s"), + {"status": mea_dict['status'], + "meaid": mea_dict['id']}) + return + action = 'autoscaling' + scale = {} + scale.setdefault('scale', {}) + scale['scale']['type'] = bckend_action + scale['scale']['policy'] = bckend_policy['name'] + self._mea_action.invoke( + action, 'execute_action', plugin=self, + context=context, mea_dict=mea_dict, args=scale) + + def create_mea_trigger( + self, context, mea_id, trigger): + trigger_ = self.get_mea_trigger( + context, mea_id, trigger['trigger']['policy_name']) + # action_name before analyzing + trigger_.update({'action_name': trigger['trigger']['action_name']}) + trigger_.update({'params': trigger['trigger']['params']}) + policy_actions = self._validate_alarming_policy( + context, mea_id, trigger_) + if policy_actions: + trigger_.update(policy_actions) + self._handle_mea_monitoring(context, trigger_) + return trigger['trigger'] + + def get_mea_resources(self, context, mea_id, fields=None, filters=None): + 
mea_info = self.get_mea(context, mea_id) + infra_driver, vim_auth = self._get_infra_driver(context, mea_info) + if mea_info['status'] == constants.ACTIVE: + mea_details = self._mea_manager.invoke(infra_driver, + 'get_resource_info', + plugin=self, + context=context, + mea_info=mea_info, + auth_attr=vim_auth) + resources = [{'name': name, + 'type': info.get('type'), + 'id': info.get('id')} + for name, info in mea_details.items()] + return resources + # Raise exception when MEA.status != ACTIVE + else: + raise mem.MEAInactive(mea_id=mea_id, + message=_(' Cannot fetch details')) diff --git a/apmec/mem/policy_actions/__init__.py b/apmec/mem/policy_actions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/mem/policy_actions/abstract_action.py b/apmec/mem/policy_actions/abstract_action.py new file mode 100644 index 0000000..c39c14c --- /dev/null +++ b/apmec/mem/policy_actions/abstract_action.py @@ -0,0 +1,38 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import abc

import six


@six.add_metaclass(abc.ABCMeta)
class AbstractPolicyAction(object):
    """Contract for MEM policy-action drivers.

    Concrete actions (autoscaling, log, log_and_kill, respawn, ...)
    implement this interface and are invoked through the MEM plugin's
    policy-action driver manager.
    """

    @abc.abstractmethod
    def get_type(self):
        """Return one of the predefined types of the hosting mea drivers."""

    @abc.abstractmethod
    def get_name(self):
        """Return a symbolic name for the service VM plugin."""

    @abc.abstractmethod
    def get_description(self):
        """Return a human-readable description of this action."""

    @abc.abstractmethod
    def execute_action(self, plugin, context, mea_dict, args):
        """Run the action; *args* carries any additional policy arguments."""
from oslo_log import log as logging
from oslo_utils import timeutils

from apmec.db.common_services import common_services_db_plugin
from apmec.plugins.common import constants
from apmec.mem.policy_actions import abstract_action

LOG = logging.getLogger(__name__)


def _log_monitor_events(context, mea_dict, evt_details):
    # Persist a MONITOR event for this MEA in the common-services event DB.
    event_db = common_services_db_plugin.CommonServicesPluginDb()
    event_db.create_event(context,
                          res_id=mea_dict['id'],
                          res_type=constants.RES_TYPE_MEA,
                          res_state=mea_dict['status'],
                          evt_type=constants.RES_EVT_MONITOR,
                          tstamp=timeutils.utcnow(),
                          details=evt_details)


class MEAActionAutoscaling(abstract_action.AbstractPolicyAction):
    """Policy action that scales a MEA through the plugin's scale API."""

    def get_type(self):
        return 'autoscaling'

    def get_name(self):
        return 'autoscaling'

    def get_description(self):
        return 'Apmec MEA auto-scaling policy'

    def execute_action(self, plugin, context, mea_dict, args):
        """Record the trigger, then ask the plugin to scale this MEA."""
        _log_monitor_events(context, mea_dict,
                            "ActionAutoscalingHeat invoked")
        plugin.create_mea_scale(context, mea_dict['id'], args)
from oslo_log import log as logging
from oslo_utils import timeutils

from apmec.db.common_services import common_services_db_plugin
from apmec.plugins.common import constants
from apmec.mem.policy_actions import abstract_action

LOG = logging.getLogger(__name__)


def _log_monitor_events(context, mea_dict, evt_details):
    # Persist a MONITOR event for this MEA in the common-services event DB.
    event_db = common_services_db_plugin.CommonServicesPluginDb()
    event_db.create_event(context,
                          res_id=mea_dict['id'],
                          res_type=constants.RES_TYPE_MEA,
                          res_state=mea_dict['status'],
                          evt_type=constants.RES_EVT_MONITOR,
                          tstamp=timeutils.utcnow(),
                          details=evt_details)


class MEAActionLog(abstract_action.AbstractPolicyAction):
    """Policy action that only records the failure of a MEA."""

    def get_type(self):
        return 'log'

    def get_name(self):
        return 'log'

    def get_description(self):
        return 'Apmec MEA logging policy'

    def execute_action(self, plugin, context, mea_dict, args):
        """Log the dead MEA and record a monitor event; no remediation."""
        LOG.error('mea %s dead', mea_dict['id'])
        _log_monitor_events(context, mea_dict,
                            "ActionLogOnly invoked")


class MEAActionLogAndKill(abstract_action.AbstractPolicyAction):
    """Policy action that logs a dead MEA and then deletes it."""

    def get_type(self):
        return 'log_and_kill'

    def get_name(self):
        return 'log_and_kill'

    def get_description(self):
        return 'Apmec MEA log_and_kill policy'

    def execute_action(self, plugin, context, mea_dict, args):
        """Mark the MEA dead, delete it via the plugin, and log the event."""
        mea_id = mea_dict['id']
        _log_monitor_events(context, mea_dict,
                            "ActionLogAndKill invoked")
        if plugin._mark_mea_dead(mea_id):
            if mea_dict['attributes'].get('monitoring_policy'):
                # Stop the monitor thread from acting on it again.
                plugin._mea_monitor.mark_dead(mea_id)
            plugin.delete_mea(context, mea_id)
        LOG.error('mea %s dead', mea_id)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_log import log as logging
from oslo_utils import timeutils

from apmec.db.common_services import common_services_db_plugin
from apmec.plugins.common import constants
from apmec.mem.infra_drivers.openstack import heat_client as hc
from apmec.mem.policy_actions import abstract_action
from apmec.mem import vim_client

LOG = logging.getLogger(__name__)


def _log_monitor_events(context, mea_dict, evt_details):
    # Record a monitor event for this MEA in the common-services event DB.
    _cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
    _cos_db_plg.create_event(context, res_id=mea_dict['id'],
                             res_type=constants.RES_TYPE_MEA,
                             res_state=mea_dict['status'],
                             evt_type=constants.RES_EVT_MONITOR,
                             tstamp=timeutils.utcnow(),
                             details=evt_details)


class MEAActionRespawn(abstract_action.AbstractPolicyAction):
    """Policy action that tears down a dead MEA's heat stack and re-creates it."""

    def get_type(self):
        return 'respawn'

    def get_name(self):
        return 'respawn'

    def get_description(self):
        return 'Apmec MEA respawning policy'

    def execute_action(self, plugin, context, mea_dict, args):
        """Respawn the MEA identified by mea_dict.

        Flow: mark the MEA dead (no-op if already dead), bump the failure
        count, resolve its VIM credentials, delete the old heat stack and
        create a fresh MEA.  Behavior differs slightly depending on whether
        the MEA carries a 'monitoring_policy' or an 'alarming_policy'
        attribute.
        """
        mea_id = mea_dict['id']
        LOG.info('mea %s is dead and needs to be respawned', mea_id)
        attributes = mea_dict['attributes']
        vim_id = mea_dict['vim_id']

        def _update_failure_count():
            # failure_count is stored as a string attribute; keep a record of
            # each dead instance id keyed by the count at which it died.
            failure_count = int(attributes.get('failure_count', '0')) + 1
            failure_count_str = str(failure_count)
            LOG.debug("mea %(mea_id)s failure count %(failure_count)s",
                      {'mea_id': mea_id, 'failure_count': failure_count_str})
            attributes['failure_count'] = failure_count_str
            attributes['dead_instance_id_' + failure_count_str] = mea_dict[
                'instance_id']

        def _fetch_vim(vim_uuid):
            # Resolve VIM auth/placement info for the MEA's VIM.
            vim_res = vim_client.VimClient().get_vim(context, vim_uuid)
            return vim_res

        def _delete_heat_stack(vim_auth):
            # Delete the dead instance's heat stack in the correct region,
            # then persist a monitor event for the respawn action.
            placement_attr = mea_dict.get('placement_attr', {})
            region_name = placement_attr.get('region_name')
            heatclient = hc.HeatClient(auth_attr=vim_auth,
                                       region_name=region_name)
            heatclient.delete(mea_dict['instance_id'])
            LOG.debug("Heat stack %s delete initiated",
                      mea_dict['instance_id'])
            _log_monitor_events(context, mea_dict, "ActionRespawnHeat invoked")

        def _respawn_mea():
            # Synchronously create a replacement MEA and push its config.
            update_mea_dict = plugin.create_mea_sync(context, mea_dict)
            LOG.info('respawned new mea %s', update_mea_dict['id'])
            plugin.config_mea(context, update_mea_dict)
            return update_mea_dict

        # _mark_mea_dead acts as a guard so two concurrent triggers don't
        # both respawn the same MEA.
        if plugin._mark_mea_dead(mea_dict['id']):
            _update_failure_count()
            vim_res = _fetch_vim(vim_id)
            if mea_dict['attributes'].get('monitoring_policy'):
                # Monitored MEA: stop the monitor, respawn, then re-register
                # the new instance with the monitor thread.
                plugin._mea_monitor.mark_dead(mea_dict['id'])
                _delete_heat_stack(vim_res['vim_auth'])
                updated_mea = _respawn_mea()
                plugin.add_mea_to_monitor(context, updated_mea)
                LOG.debug("MEA %s added to monitor thread",
                          updated_mea['id'])
            if mea_dict['attributes'].get('alarming_policy'):
                # Alarm-driven MEA: the alarming_policy attribute is dropped
                # before respawn so the new MEA starts clean.
                _delete_heat_stack(vim_res['vim_auth'])
                mea_dict['attributes'].pop('alarming_policy')
                _respawn_mea()
# Copyright 2015-2016 Brocade Communications Systems Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from cryptography import fernet
from oslo_config import cfg
from oslo_log import log as logging

from apmec.extensions import meo
from apmec.keymgr import API as KEYMGR_API
from apmec import manager
from apmec.plugins.common import constants

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class VimClient(object):
    """Resolves VIM records and decrypts their stored credentials."""

    def get_vim(self, context, vim_id=None, region_name=None):
        """Get VIM information for the provided VIM id.

        Queries the MEO plugin for the VIM (falling back to the default VIM
        when no id is given), validates the requested region, and returns a
        dict with decrypted auth plus id/name/type.

        :raises: meo.VimDefaultNotDefined, meo.VimNotFoundException,
                 meo.VimRegionNotFoundException
        """
        meo_plugin = manager.ApmecManager.get_service_plugins().get(
            constants.MEO)

        if not vim_id:
            LOG.debug('VIM id not provided. Attempting to find default '
                      'VIM information')
            try:
                vim_info = meo_plugin.get_default_vim(context)
            except Exception as ex:
                LOG.debug('Fail to get default vim due to %s', ex)
                raise meo.VimDefaultNotDefined()
        else:
            try:
                # mask_password=False: the encrypted password is needed below.
                vim_info = meo_plugin.get_vim(context, vim_id,
                                              mask_password=False)
            except Exception:
                raise meo.VimNotFoundException(vim_id=vim_id)
            LOG.debug('VIM info found for vim id %s', vim_id)
        if region_name and not self.region_valid(vim_info['placement_attr']
                                                 ['regions'], region_name):
            raise meo.VimRegionNotFoundException(region_name=region_name)

        vim_auth = self._build_vim_auth(context, vim_info)
        vim_res = {'vim_auth': vim_auth, 'vim_id': vim_info['id'],
                   'vim_name': vim_info.get('name', vim_info['id']),
                   'vim_type': vim_info['type']}
        return vim_res

    @staticmethod
    def region_valid(vim_regions, region_name):
        """Return True if region_name is one of the VIM's known regions."""
        return region_name in vim_regions

    def _build_vim_auth(self, context, vim_info):
        """Return auth_cred with the password decrypted and key metadata removed."""
        LOG.debug('VIM id is %s', vim_info['id'])
        vim_auth = vim_info['auth_cred']
        vim_auth['password'] = self._decode_vim_auth(context,
                                                     vim_info['id'],
                                                     vim_auth)
        vim_auth['auth_url'] = vim_info['auth_url']

        # These attributes are needless for authentication
        # from keystone, so we remove them.
        for attr in ('key_type', 'secret_uuid'):
            vim_auth.pop(attr, None)
        return vim_auth

    def _decode_vim_auth(self, context, vim_id, auth):
        """Decode VIM credentials.

        Decrypt the stored VIM password using a fernet key fetched from
        barbican (key_type == 'barbican_key') or the local file system.

        :raises: meo.VimNotFoundException when the key is invalid or the
                 ciphertext cannot be decrypted.
        """
        cred = auth['password'].encode('utf-8')
        if auth.get('key_type') == 'barbican_key':
            keystone_conf = CONF.keystone_authtoken
            secret_uuid = auth['secret_uuid']
            keymgr_api = KEYMGR_API(keystone_conf.auth_url)
            secret_obj = keymgr_api.get(context, secret_uuid)
            vim_key = secret_obj.payload
        else:
            vim_key = self._find_vim_key(vim_id)

        # BUG FIX: the original checked `if not f:` on a Fernet instance,
        # which is always truthy, so decode failures were never caught.
        # An invalid key raises ValueError/TypeError from the constructor
        # and a bad ciphertext raises fernet.InvalidToken from decrypt().
        try:
            return fernet.Fernet(vim_key).decrypt(cred)
        except (ValueError, TypeError, fernet.InvalidToken):
            LOG.warning('Unable to decode VIM auth')
            raise meo.VimNotFoundException(
                'Unable to decode VIM auth key')

    @staticmethod
    def _find_vim_key(vim_id):
        """Read the fernet key for vim_id from the configured key directory.

        :raises: IOError (FileNotFoundError) when the key file is missing.
        """
        key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
        LOG.debug('Attempting to open key file for vim id %s', vim_id)
        # BUG FIX: the original logged the "key not found" warning after an
        # unconditional `return` inside the `with` block, making it
        # unreachable.  Log on the failure path instead, then re-raise.
        try:
            with open(key_file, 'r') as f:
                return f.read()
        except IOError:
            LOG.warning('VIM id invalid or key not found for %s', vim_id)
            raise

import abc

import six

from apmec.api import extensions


@six.add_metaclass(abc.ABCMeta)
class VimAbstractDriver(extensions.PluginInterface):
    """Abstract interface every VIM driver must implement.

    Concrete drivers (e.g. the OpenStack driver) register, authenticate and
    deregister VIMs, manage VIM credential storage, and resolve VIM resource
    ids by type/name.
    """

    @abc.abstractmethod
    def get_type(self):
        """Get VIM Driver type

        Return one of predefined types of VIMs.
        """
        pass

    @abc.abstractmethod
    def get_name(self):
        """Get VIM name

        Return a symbolic name for the VIM driver.
        """
        pass

    @abc.abstractmethod
    def get_description(self):
        """Return a human-readable description of the VIM driver."""
        pass

    @abc.abstractmethod
    def register_vim(self, context, vim_obj):
        """Register VIM object in to MEO plugin

        Validate, encode and store VIM information for deploying MEAs.
        """
        pass

    @abc.abstractmethod
    def deregister_vim(self, context, vim_obj):
        """Deregister VIM object from MEO plugin

        Cleanup VIM data and delete VIM information
        """
        pass

    @abc.abstractmethod
    def authenticate_vim(self, context, vim_obj):
        """Authenticate VIM connection parameters

        Validate authentication credentials and connectivity of VIM
        """
        pass

    @abc.abstractmethod
    def encode_vim_auth(self, context, vim_id, auth):
        """Encrypt VIM credentials

        Encrypt and store VIM sensitive information such as password
        """
        pass

    @abc.abstractmethod
    def delete_vim_auth(self, context, vim_id, auth):
        """Delete VIM auth keys

        Delete VIM sensitive information such as keys from file system or DB
        """
        pass

    @abc.abstractmethod
    def get_vim_resource_id(self, vim_obj, resource_type, resource_name):
        """Parses a VIM resource ID from a given type and name

        :param vim_obj: VIM information
        :param resource_type: type of resource, such as network, compute
        :param resource_name: name of resource, such at "test-network"
        :return: ID of resource
        """
        pass
# Copyright 2016 Brocade Communications System, Inc.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import six
import yaml

from keystoneauth1 import exceptions
from keystoneauth1 import identity
from keystoneauth1.identity import v2
from keystoneauth1.identity import v3
from keystoneauth1 import session
from neutronclient.common import exceptions as nc_exceptions
from neutronclient.v2_0 import client as neutron_client
from oslo_config import cfg
from oslo_log import log as logging

from apmec._i18n import _
from apmec.common import log
from apmec.extensions import meo
from apmec.keymgr import API as KEYMGR_API
from apmec.mistral import mistral_client
from apmec.meo.drivers.vim import abstract_vim_driver
from apmec.meo.drivers.workflow import workflow_generator
from apmec.mem import keystone


LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Registered under the [vim_keys] config group.
OPTS = [cfg.StrOpt('openstack', default='/etc/apmec/vim/fernet_keys',
                   help='Dir.path to store fernet keys.'),
        cfg.BoolOpt('use_barbican', default=False,
                    help=_('Use barbican to encrypt vim password if True, '
                           'save vim credentials in local file system '
                           'if False'))
        ]

# same params as we used in ping monitor driver
OPENSTACK_OPTS = [
    cfg.StrOpt('count', default='1',
               help=_('number of ICMP packets to send')),
    cfg.StrOpt('timeout', default='1',
               help=_('number of seconds to wait for a response')),
    cfg.StrOpt('interval', default='1',
               help=_('number of seconds to wait between packets'))
]
cfg.CONF.register_opts(OPTS, 'vim_keys')
cfg.CONF.register_opts(OPENSTACK_OPTS, 'vim_monitor')

# Maps a resource type to the client class, the client method ('cmd') used
# to list it, the key of the list in the response, and the filter attribute.
_VALID_RESOURCE_TYPES = {'network': {'client': neutron_client.Client,
                                     'cmd': 'list_networks',
                                     'vim_res_name': 'networks',
                                     'filter_attr': 'name'
                                     }
                         }

# Maps apmec flow-classifier field names to networking-sfc field names.
FC_MAP = {'name': 'name',
          'description': 'description',
          'eth_type': 'ethertype',
          'ip_src_prefix': 'source_ip_prefix',
          'ip_dst_prefix': 'destination_ip_prefix',
          'source_port_min': 'source_port_range_min',
          'source_port_max': 'source_port_range_max',
          'destination_port_min': 'destination_port_range_min',
          'destination_port_max': 'destination_port_range_max',
          'network_src_port_id': 'logical_source_port',
          'network_dst_port_id': 'logical_destination_port'}

CONNECTION_POINT = 'connection_points'


def config_opts():
    """Expose this module's config options for oslo.config discovery."""
    return [('vim_keys', OPTS), ('vim_monitor', OPENSTACK_OPTS)]


class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver):
    """Driver for OpenStack VIM

    OpenStack driver handles interactions with local as well as
    remote OpenStack instances. The driver invokes keystone service for VIM
    authorization and validation. The driver is also responsible for
    discovering placement attributes such as regions, availability zones
    """

    def __init__(self):
        self.keystone = keystone.Keystone()
        # Ensure the fernet key directory exists before any VIM is registered.
        self.keystone.create_key_dir(CONF.vim_keys.openstack)

    def get_type(self):
        return 'openstack'

    def get_name(self):
        return 'OpenStack VIM Driver'

    def get_description(self):
        return 'OpenStack VIM Driver'

    def authenticate_vim(self, vim_obj):
        """Validate VIM auth attributes

        Initialize keystoneclient with provided authentication attributes.

        NOTE(review): signature omits the `context` parameter declared by
        VimAbstractDriver.authenticate_vim(context, vim_obj) — confirm
        intended; all in-module callers use this one-argument form.
        """
        auth_url = vim_obj['auth_url']
        keystone_version = self._validate_auth_url(auth_url)
        auth_cred = self._get_auth_creds(keystone_version, vim_obj)
        return self._initialize_keystone(keystone_version, auth_cred)

    def _get_auth_creds(self, keystone_version, vim_obj):
        """Build keystone auth kwargs for the detected keystone version.

        Side effect: appends the version to vim_obj['auth_url'] when missing,
        and mutates vim_obj['auth_cred'] in place (project/tenant fields).
        """
        auth_url = vim_obj['auth_url']
        auth_cred = vim_obj['auth_cred']
        vim_project = vim_obj['vim_project']

        if keystone_version not in auth_url:
            vim_obj['auth_url'] = auth_url + '/' + keystone_version
        if keystone_version == 'v3':
            auth_cred['project_id'] = vim_project.get('id')
            auth_cred['project_name'] = vim_project.get('name')
            auth_cred['project_domain_name'] = vim_project.get(
                'project_domain_name')
        else:
            auth_cred['tenant_id'] = vim_project.get('id')
            auth_cred['tenant_name'] = vim_project.get('name')
            # pop stuff not supported in keystone v2
            auth_cred.pop('user_domain_name', None)
            auth_cred.pop('user_id', None)
        auth_cred['auth_url'] = vim_obj['auth_url']
        return auth_cred

    def _get_auth_plugin(self, version, **kwargs):
        # v2.0 and v3 use different keystoneauth1 password plugins.
        if version == 'v2.0':
            auth_plugin = v2.Password(**kwargs)
        else:
            auth_plugin = v3.Password(**kwargs)

        return auth_plugin

    def _validate_auth_url(self, auth_url):
        """Probe the auth URL and return the keystone version string.

        :raises: meo.VimConnectionException when the URL is unreachable.
        """
        try:
            keystone_version = self.keystone.get_version(auth_url)
        except Exception as e:
            LOG.error('VIM Auth URL invalid')
            raise meo.VimConnectionException(message=str(e))
        return keystone_version

    def _initialize_keystone(self, version, auth):
        ks_client = self.keystone.initialize_client(version=version, **auth)
        return ks_client

    def _find_regions(self, ks_client):
        """Return the list of region names visible to the keystone client.

        For keystone v2.0 regions are derived from the orchestration (heat)
        service's endpoints; for v3 the regions API is used directly.
        """
        if ks_client.version == 'v2.0':
            service_list = ks_client.services.list()
            heat_service_id = None
            for service in service_list:
                if service.type == 'orchestration':
                    heat_service_id = service.id
            endpoints_list = ks_client.endpoints.list()
            region_list = [endpoint.region for endpoint in
                           endpoints_list if endpoint.service_id ==
                           heat_service_id]
        else:
            region_info = ks_client.regions.list()
            region_list = [region.id for region in region_info]
        return region_list

    def discover_placement_attr(self, vim_obj, ks_client):
        """Fetch VIM placement information

        Attributes can include regions, AZ.
        """
        try:
            regions_list = self._find_regions(ks_client)
        except (exceptions.Unauthorized, exceptions.BadRequest) as e:
            LOG.warning("Authorization failed for user")
            raise meo.VimUnauthorizedException(message=e.message)
        vim_obj['placement_attr'] = {'regions': regions_list}
        return vim_obj

    @log.log
    def register_vim(self, context, vim_obj):
        """Validate and set VIM placements."""

        # Stale key metadata from a previous registration must not leak into
        # the new auth record; encode_vim_auth() re-creates these fields.
        if 'key_type' in vim_obj['auth_cred']:
            vim_obj['auth_cred'].pop(u'key_type')
        if 'secret_uuid' in vim_obj['auth_cred']:
            vim_obj['auth_cred'].pop(u'secret_uuid')

        ks_client = self.authenticate_vim(vim_obj)
        self.discover_placement_attr(vim_obj, ks_client)
        self.encode_vim_auth(context, vim_obj['id'], vim_obj['auth_cred'])
        LOG.debug('VIM registration completed for %s', vim_obj)

    @log.log
    def deregister_vim(self, context, vim_obj):
        """Deregister VIM from MEO

        Delete VIM keys from file system
        """
        self.delete_vim_auth(context, vim_obj['id'], vim_obj['auth_cred'])

    @log.log
    def delete_vim_auth(self, context, vim_id, auth):
        """Delete vim information

        Delete vim key stored in file system
        """
        LOG.debug('Attempting to delete key for vim id %s', vim_id)

        if auth.get('key_type') == 'barbican_key':
            # Key stored in barbican; delete via the key manager API.
            try:
                keystone_conf = CONF.keystone_authtoken
                secret_uuid = auth['secret_uuid']
                keymgr_api = KEYMGR_API(keystone_conf.auth_url)
                keymgr_api.delete(context, secret_uuid)
                LOG.debug('VIM key deleted successfully for vim %s',
                          vim_id)
            except Exception as ex:
                LOG.warning('VIM key deletion failed for vim %s due to %s',
                            vim_id,
                            ex)
                raise
        else:
            # Key stored as a local file named after the vim id.
            key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
            try:
                os.remove(key_file)
                LOG.debug('VIM key deleted successfully for vim %s',
                          vim_id)
            except OSError:
                LOG.warning('VIM key deletion failed for vim %s',
                            vim_id)

    @log.log
    def encode_vim_auth(self, context, vim_id, auth):
        """Encode VIM credentials

        Store VIM auth using fernet key encryption

        Side effect: mutates `auth` in place — 'password' becomes ciphertext
        and 'key_type'/'secret_uuid' are added.
        """
        fernet_key, fernet_obj = self.keystone.create_fernet_key()
        encoded_auth = fernet_obj.encrypt(auth['password'].encode('utf-8'))
        auth['password'] = encoded_auth

        if CONF.vim_keys.use_barbican:
            try:
                keystone_conf = CONF.keystone_authtoken
                keymgr_api = KEYMGR_API(keystone_conf.auth_url)
                secret_uuid = keymgr_api.store(context, fernet_key)

                auth['key_type'] = 'barbican_key'
                auth['secret_uuid'] = secret_uuid
                LOG.debug('VIM auth successfully stored for vim %s',
                          vim_id)
            except Exception as ex:
                LOG.warning('VIM key creation failed for vim %s due to %s',
                            vim_id,
                            ex)
                raise

        else:
            auth['key_type'] = 'fernet_key'
            key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
            try:
                with open(key_file, 'w') as f:
                    # create_fernet_key presumably returns bytes on PY2 and
                    # str on PY3 — TODO confirm; hence the version branch.
                    if six.PY2:
                        f.write(fernet_key.decode('utf-8'))
                    else:
                        f.write(fernet_key)
                LOG.debug('VIM auth successfully stored for vim %s',
                          vim_id)
            except IOError:
                raise meo.VimKeyNotFoundException(vim_id=vim_id)

    @log.log
    def get_vim_resource_id(self, vim_obj, resource_type, resource_name):
        """Locates openstack resource by type/name and returns ID

        :param vim_obj: VIM info used to access openstack instance
        :param resource_type: type of resource to find
        :param resource_name: name of resource to locate
        :return: ID of resource
        :raises: meo.VimUnsupportedResourceTypeException,
                 meo.VimGetResourceException,
                 meo.VimGetResourceNameNotUnique,
                 meo.VimGetResourceNotFoundException
        """
        if resource_type in _VALID_RESOURCE_TYPES.keys():
            res_cmd_map = _VALID_RESOURCE_TYPES[resource_type]
            client_type = res_cmd_map['client']
            cmd = res_cmd_map['cmd']
            filter_attr = res_cmd_map.get('filter_attr')
            vim_res_name = res_cmd_map['vim_res_name']
        else:
            raise meo.VimUnsupportedResourceTypeException(type=resource_type)

        client = self._get_client(vim_obj, client_type)
        cmd_args = {}
        if filter_attr:
            cmd_args[filter_attr] = resource_name

        try:
            resources = getattr(client, "%s" % cmd)(**cmd_args)[vim_res_name]
            LOG.debug('resources output %s', resources)
        except Exception:
            raise meo.VimGetResourceException(
                cmd=cmd, name=resource_name, type=resource_type)

        # Exactly one match is required for an unambiguous id.
        if len(resources) > 1:
            raise meo.VimGetResourceNameNotUnique(
                cmd=cmd, name=resource_name)
        elif len(resources) < 1:
            raise meo.VimGetResourceNotFoundException(
                cmd=cmd, name=resource_name)

        return resources[0]['id']

    @log.log
    def _get_client(self, vim_obj, client_type):
        """Initializes and returns an openstack client

        :param vim_obj: VIM Information
        :param client_type: openstack client to initialize
        :return: initialized client
        """
        auth_url = vim_obj['auth_url']
        keystone_version = self._validate_auth_url(auth_url)
        auth_cred = self._get_auth_creds(keystone_version, vim_obj)
        auth_plugin = self._get_auth_plugin(keystone_version, **auth_cred)
        sess = session.Session(auth=auth_plugin)
        return client_type(session=sess)

    def get_mistral_client(self, auth_dict):
        """Return a mistral client built from the given keystone auth dict."""
        if not auth_dict:
            LOG.warning("auth dict required to instantiate mistral client")
            raise EnvironmentError('auth dict required for'
                                   ' mistral workflow driver')
        return mistral_client.MistralClient(
            keystone.Keystone().initialize_client('2', **auth_dict),
            auth_dict['token']).get_client()

    def prepare_and_create_workflow(self, resource, action,
                                    kwargs, auth_dict=None):
        """Generate a mistral workflow for resource/action and create it.

        :return: dict with the created workflow 'id' and its 'input' dict.
        :raises: meo.NoTasksException when the generator produced no tasks.
        """
        # NOTE(review): local name shadows the module-level `mistral_client`
        # import for the rest of this method.
        mistral_client = self.get_mistral_client(auth_dict)
        wg = workflow_generator.WorkflowGenerator(resource, action)
        wg.task(**kwargs)
        if not wg.get_tasks():
            raise meo.NoTasksException(resource=resource, action=action)
        definition_yaml = yaml.safe_dump(wg.definition)
        workflow = mistral_client.workflows.create(definition_yaml)
        return {'id': workflow[0].id, 'input': wg.get_input_dict()}

    def execute_workflow(self, workflow, auth_dict=None):
        return self.get_mistral_client(auth_dict)\
            .executions.create(
                workflow_identifier=workflow['id'],
                workflow_input=workflow['input'],
                wf_params={})

    def get_execution(self, execution_id, auth_dict=None):
        return self.get_mistral_client(auth_dict)\
            .executions.get(execution_id)

    def delete_execution(self, execution_id, auth_dict=None):
        return self.get_mistral_client(auth_dict).executions\
            .delete(execution_id)

    def delete_workflow(self, workflow_id, auth_dict=None):
        return self.get_mistral_client(auth_dict)\
            .workflows.delete(workflow_id)


class NeutronClient(object):
    """Neutron Client class for networking-sfc driver"""

    def __init__(self, auth_attr):
        # auth_attr must contain keystoneauth1 Password plugin kwargs.
        auth = identity.Password(**auth_attr)
        sess = session.Session(auth=auth)
        self.client = neutron_client.Client(session=sess)

import ast
from oslo_utils import uuidutils

from apmec.mistral import workflow_generator


# Per-workflow list of output variable prefixes published for each node.
OUTPUT = {
    'create_mea': ['mea_id', 'vim_id', 'mgmt_url', 'status']
}


class WorkflowGenerator(workflow_generator.WorkflowGeneratorBase):
    """Builds Mistral workflow task graphs for MES create/delete."""

    def _add_create_mea_tasks(self, mes):
        """Return one create-MEA task per MEAD instance node.

        Each task publishes mea_id/vim_id/mgmt_url/status variables suffixed
        with the node name and chains into the node's wait task.
        """
        meads = mes['mead_details']
        task_dict = dict()
        for mead_name, mead_info in meads.items():
            nodes = mead_info['instances']
            for node in nodes:
                task = self.wf_name + '_' + node
                task_dict[task] = {
                    'action': 'apmec.create_mea body=<% $.mea.{0} '
                              '%>'.format(node),
                    'input': {'body': '<% $.mea.{0} %>'.format(node)},
                    'publish': {
                        'mea_id_' + node: '<% task({0}).result.mea.id '
                                          '%>'.format(task),
                        'vim_id_' + node: '<% task({0}).result.mea.vim_id'
                                          ' %>'.format(task),
                        'mgmt_url_' + node: '<% task({0}).result.mea.mgmt_url'
                                            ' %>'.format(task),
                        'status_' + node: '<% task({0}).result.mea.status'
                                          ' %>'.format(task),
                    },
                    'on-success': ['wait_mea_active_%s' % node]
                }
        return task_dict

    def _add_wait_mea_tasks(self, mes):
        """Return one poll-until-terminal-state task per MEAD instance node.

        The task retries apmec.show_mea until the MEA leaves
        PENDING_CREATE, and deletes the MEA if it lands in ERROR.
        """
        meads = mes['mead_details']
        task_dict = dict()
        for mead_name, mead_info in meads.items():
            nodes = mead_info['instances']
            for node in nodes:
                task = 'wait_mea_active_%s' % node
                task_dict[task] = {
                    'action': 'apmec.show_mea mea=<% $.mea_id_{0} '
                              '%>'.format(node),
                    'retry': {
                        'count': 10,
                        'delay': 10,
                        # BUG FIX: the original dict literal had two
                        # 'break-on' keys, so the ACTIVE condition was
                        # silently overwritten and retries only stopped on
                        # ERROR.  Combine both terminal states in a single
                        # YAQL expression instead.
                        'break-on': '<% $.status_{0} = "ACTIVE" or '
                                    '$.status_{0} = "ERROR" %>'.format(node),
                        'continue-on': '<% $.status_{0} = "PENDING_CREATE" '
                                       '%>'.format(node),
                    },
                    'publish': {
                        'mgmt_url_' + node: ' <% task({0}).result.mea.'
                                            'mgmt_url %>'.format(task),
                        'status_' + node: '<% task({0}).result.mea.status'
                                          ' %>'.format(task),
                    },
                    'on-success': [
                        {'delete_mea_' + node: '<% $.status_{0}='
                                               '"ERROR" %>'.format(node)}
                    ]
                }
        return task_dict

    def _add_delete_mea_tasks(self, mes):
        """Return one delete-MEA task per MEAD instance node."""
        meads = mes['mead_details']
        task_dict = dict()
        for mead_name, mead_info in meads.items():
            nodes = mead_info['instances']
            for node in nodes:
                task = 'delete_mea_%s' % node
                task_dict[task] = {
                    'action': 'apmec.delete_mea mea=<% $.mea_id_{0}'
                              '%>'.format(node),
                }
        return task_dict

    def _build_output_dict(self, mes):
        """Map each published per-node variable into the workflow output."""
        meads = mes['mead_details']
        task_dict = dict()
        for mead_name, mead_info in meads.items():
            nodes = mead_info['instances']
            for node in nodes:
                for op_name in OUTPUT[self.wf_name]:
                    task_dict[op_name + '_' + node] = \
                        '<% $.{0}_{1} %>'.format(op_name, node)
        return task_dict

    def get_input_dict(self):
        """Return the workflow input built by build_input()/delete_mea()."""
        return self.input_dict

    def build_input(self, mes, params):
        """Build the create-workflow input: one MEA body per instance node.

        :param mes: MES dict carrying 'mead_details' and the target vim_id.
        :param params: per-MEAD parameter values for the MEA attributes.
        """
        meads = mes['mead_details']
        # One uuid shared by all node names keeps this run's MEAs grouped.
        id = uuidutils.generate_uuid()
        self.input_dict = {'mea': {}}
        for mead_name, mead_info in meads.items():
            nodes = mead_info['instances']
            for node in nodes:
                self.input_dict['mea'][node] = dict()
                self.input_dict['mea'][node]['mea'] = {
                    'attributes': {},
                    'vim_id': mes['mes'].get('vim_id', ''),
                    'mead_id': mead_info['id'],
                    'name': 'create_mea_%s_%s' % (mead_info['id'], id)
                }
                if params.get(mead_name):
                    self.input_dict['mea'][node]['mea']['attributes'] = {
                        'param_values': params.get(mead_name)
                    }

    def create_mea(self, **kwargs):
        """Assemble the full create-MEA workflow: tasks, output and input."""
        mes = kwargs.get('mes')
        params = kwargs.get('params')
        # TODO(anyone): Keep this statements in a loop and
        # remove in all the methods.
        self.definition[self.wf_identifier]['tasks'] = dict()
        self.definition[self.wf_identifier]['tasks'].update(
            self._add_create_mea_tasks(mes))
        self.definition[self.wf_identifier]['tasks'].update(
            self._add_wait_mea_tasks(mes))
        self.definition[self.wf_identifier]['tasks'].update(
            self._add_delete_mea_tasks(mes))
        self.definition[self.wf_identifier]['output'] = \
            self._build_output_dict(mes)
        self.build_input(mes, params)

    def delete_mea(self, mes):
        """Assemble the delete-MEA workflow from the MES's stored mea_ids.

        mes['mea_ids'] is a Python-literal string mapping node name to MEA
        id; each id becomes a workflow input and a delete task.
        """
        mes_dict = {'mead_details': {}}
        mea_ids = ast.literal_eval(mes['mea_ids'])
        self.definition[self.wf_identifier]['input'] = []
        for mea in mea_ids.keys():
            mea_key = 'mea_id_' + mea
            self.definition[self.wf_identifier]['input'].append(mea_key)
            # NOTE(review): assumes self.input_dict exists (set by the base
            # class or a prior build_input call) — TODO confirm.
            self.input_dict[mea_key] = mea_ids[mea]
            mes_dict['mead_details'][mea] = {'instances': [mea]}
        self.definition[self.wf_identifier]['tasks'] = dict()
        self.definition[self.wf_identifier]['tasks'].update(
            self._add_delete_mea_tasks(mes_dict))
+ +import copy +import os +import time +import yaml + +from cryptography import fernet +import eventlet +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import strutils +from oslo_utils import uuidutils +from tempfile import mkstemp +from toscaparser.tosca_template import ToscaTemplate + +from apmec._i18n import _ +from apmec.common import driver_manager +from apmec.common import log +from apmec.common import utils +from apmec.db.meo import meo_db_plugin +from apmec.db.meo import mes_db +from apmec.extensions import common_services as cs +from apmec.extensions import meo +from apmec.keymgr import API as KEYMGR_API +from apmec import manager +from apmec.meo.workflows.vim_monitor import vim_monitor_utils +from apmec.plugins.common import constants +from apmec.mem import vim_client +from apmec.nfv.tacker_client import TackerClient as tackerclient + +from apmec.catalogs.tosca import utils as toscautils +from toscaparser import tosca_template + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF +MISTRAL_RETRIES = 30 +MISTRAL_RETRY_WAIT = 6 + + +def config_opts(): + return [('meo_vim', MeoPlugin.OPTS)] + + +class MeoPlugin(meo_db_plugin.MeoPluginDb, mes_db.MESPluginDb): + """MEO reference plugin for MEO extension + + Implements the MEO extension and defines public facing APIs for VIM + operations. MEO internally invokes the appropriate VIM driver in + backend based on configured VIM types. 
Plugin also interacts with MEM + extension for providing the specified VIM information + """ + supported_extension_aliases = ['meo'] + + OPTS = [ + cfg.ListOpt( + 'vim_drivers', default=['openstack'], + help=_('VIM driver for launching MEAs')), + cfg.IntOpt( + 'monitor_interval', default=30, + help=_('Interval to check for VIM health')), + ] + cfg.CONF.register_opts(OPTS, 'meo_vim') + + def __init__(self): + super(MeoPlugin, self).__init__() + self._pool = eventlet.GreenPool() + self._vim_drivers = driver_manager.DriverManager( + 'apmec.meo.vim.drivers', + cfg.CONF.meo_vim.vim_drivers) + self.vim_client = vim_client.VimClient() + + def get_auth_dict(self, context): + auth = CONF.keystone_authtoken + return { + 'auth_url': auth.auth_url + '/v3', + 'token': context.auth_token, + 'project_domain_name': auth.project_domain_name or context.domain, + 'project_name': context.tenant_name + } + + def spawn_n(self, function, *args, **kwargs): + self._pool.spawn_n(function, *args, **kwargs) + + @log.log + def create_vim(self, context, vim): + LOG.debug('Create vim called with parameters %s', + strutils.mask_password(vim)) + vim_obj = vim['vim'] + vim_type = vim_obj['type'] + vim_obj['id'] = uuidutils.generate_uuid() + vim_obj['status'] = 'PENDING' + try: + self._vim_drivers.invoke(vim_type, + 'register_vim', + context=context, + vim_obj=vim_obj) + res = super(MeoPlugin, self).create_vim(context, vim_obj) + except Exception: + with excutils.save_and_reraise_exception(): + self._vim_drivers.invoke(vim_type, + 'delete_vim_auth', + context=context, + vim_id=vim_obj['id'], + auth=vim_obj['auth_cred']) + + try: + self.monitor_vim(context, vim_obj) + except Exception: + LOG.warning("Failed to set up vim monitoring") + return res + + def _get_vim(self, context, vim_id): + if not self.is_vim_still_in_use(context, vim_id): + return self.get_vim(context, vim_id, mask_password=False) + + @log.log + def update_vim(self, context, vim_id, vim): + vim_obj = self._get_vim(context, vim_id) + 
old_vim_obj = copy.deepcopy(vim_obj) + utils.deep_update(vim_obj, vim['vim']) + vim_type = vim_obj['type'] + update_args = vim['vim'] + old_auth_need_delete = False + new_auth_created = False + try: + # re-register the VIM only if there is a change in password. + # auth_url of auth_cred is from vim object which + # is not updatable. so no need to consider it + if 'auth_cred' in update_args: + auth_cred = update_args['auth_cred'] + if 'password' in auth_cred: + vim_obj['auth_cred']['password'] = auth_cred['password'] + # Notice: vim_obj may be updated in vim driver's + self._vim_drivers.invoke(vim_type, + 'register_vim', + context=context, + vim_obj=vim_obj) + new_auth_created = True + + # Check whether old vim's auth need to be deleted + old_key_type = old_vim_obj['auth_cred'].get('key_type') + if old_key_type == 'barbican_key': + old_auth_need_delete = True + + vim_obj = super(MeoPlugin, self).update_vim( + context, vim_id, vim_obj) + if old_auth_need_delete: + try: + self._vim_drivers.invoke(vim_type, + 'delete_vim_auth', + context=context, + vim_id=old_vim_obj['id'], + auth=old_vim_obj['auth_cred']) + except Exception as ex: + LOG.warning("Fail to delete old auth for vim %s due to %s", + vim_id, ex) + return vim_obj + except Exception as ex: + LOG.debug("Got exception when update_vim %s due to %s", + vim_id, ex) + with excutils.save_and_reraise_exception(): + if new_auth_created: + # delete new-created vim auth, old auth is still used. 
+ self._vim_drivers.invoke(vim_type, + 'delete_vim_auth', + context=context, + vim_id=vim_obj['id'], + auth=vim_obj['auth_cred']) + + @log.log + def delete_vim(self, context, vim_id): + vim_obj = self._get_vim(context, vim_id) + self._vim_drivers.invoke(vim_obj['type'], + 'deregister_vim', + context=context, + vim_obj=vim_obj) + try: + auth_dict = self.get_auth_dict(context) + vim_monitor_utils.delete_vim_monitor(context, auth_dict, vim_obj) + except Exception: + LOG.exception("Failed to remove vim monitor") + super(MeoPlugin, self).delete_vim(context, vim_id) + + @log.log + def monitor_vim(self, context, vim_obj): + auth_dict = self.get_auth_dict(context) + vim_monitor_utils.monitor_vim(auth_dict, vim_obj) + + @log.log + def validate_tosca(self, template): + if "tosca_definitions_version" not in template: + raise meo.ToscaParserFailed( + error_msg_details='tosca_definitions_version missing in ' + 'template' + ) + + LOG.debug('template yaml: %s', template) + + toscautils.updateimports(template) + + try: + tosca_template.ToscaTemplate( + a_file=False, yaml_dict_tpl=template) + except Exception as e: + LOG.exception("tosca-parser error: %s", str(e)) + raise meo.ToscaParserFailed(error_msg_details=str(e)) + + def _get_vim_from_mea(self, context, mea_id): + """Figures out VIM based on a MEA + + :param context: SQL Session Context + :param mea_id: MEA ID + :return: VIM or VIM properties if fields are provided + """ + mem_plugin = manager.ApmecManager.get_service_plugins()['MEM'] + vim_id = mem_plugin.get_mea(context, mea_id, fields=['vim_id']) + vim_obj = self.get_vim(context, vim_id['vim_id'], mask_password=False) + if vim_obj is None: + raise meo.VimFromMeaNotFoundException(mea_id=mea_id) + self._build_vim_auth(context, vim_obj) + return vim_obj + + def _build_vim_auth(self, context, vim_info): + LOG.debug('VIM id is %s', vim_info['id']) + vim_auth = vim_info['auth_cred'] + vim_auth['password'] = self._decode_vim_auth(context, + vim_info['id'], + vim_auth) + 
vim_auth['auth_url'] = vim_info['auth_url'] + + # These attributes are needless for authentication + # from keystone, so we remove them. + needless_attrs = ['key_type', 'secret_uuid'] + for attr in needless_attrs: + if attr in vim_auth: + vim_auth.pop(attr, None) + return vim_auth + + def _decode_vim_auth(self, context, vim_id, auth): + """Decode Vim credentials + + Decrypt VIM cred, get fernet Key from local_file_system or + barbican. + """ + cred = auth['password'].encode('utf-8') + if auth.get('key_type') == 'barbican_key': + keystone_conf = CONF.keystone_authtoken + secret_uuid = auth['secret_uuid'] + keymgr_api = KEYMGR_API(keystone_conf.auth_url) + secret_obj = keymgr_api.get(context, secret_uuid) + vim_key = secret_obj.payload + else: + vim_key = self._find_vim_key(vim_id) + + f = fernet.Fernet(vim_key) + if not f: + LOG.warning('Unable to decode VIM auth') + raise meo.VimNotFoundException( + 'Unable to decode VIM auth key') + return f.decrypt(cred) + + @staticmethod + def _find_vim_key(vim_id): + key_file = os.path.join(CONF.vim_keys.openstack, vim_id) + LOG.debug('Attempting to open key file for vim id %s', vim_id) + with open(key_file, 'r') as f: + return f.read() + LOG.warning('VIM id invalid or key not found for %s', vim_id) + + def _vim_resource_name_to_id(self, context, resource, name, mea_id): + """Converts a VIM resource name to its ID + + :param resource: resource type to find (network, subnet, etc) + :param name: name of the resource to find its ID + :param mea_id: A MEA instance ID that is part of the chain to which + the classifier will apply to + :return: ID of the resource name + """ + vim_obj = self._get_vim_from_mea(context, mea_id) + driver_type = vim_obj['type'] + return self._vim_drivers.invoke(driver_type, + 'get_vim_resource_id', + vim_obj=vim_obj, + resource_type=resource, + resource_name=name) + + @log.log + def create_mesd(self, context, mesd): + mesd_data = mesd['mesd'] + template = mesd_data['attributes'].get('mesd') + if 
isinstance(template, dict): + mesd_data['attributes']['mesd'] = yaml.safe_dump( + template) + LOG.debug('mesd %s', mesd_data) + + if 'template_source' in mesd_data: + template_source = mesd_data.get('template_source') + else: + template_source = "onboarded" + mesd['mesd']['template_source'] = template_source + + self._parse_template_input(context, mesd) + return super(MeoPlugin, self).create_mesd( + context, mesd) + + def _parse_template_input(self, context, mesd): + mesd_dict = mesd['mesd'] + mesd_yaml = mesd_dict['attributes'].get('mesd') + inner_mesd_dict = yaml.safe_load(mesd_yaml) + mesd['meads'] = dict() + LOG.debug('mesd_dict: %s', inner_mesd_dict) + # From import we can deploy both NS and MEC Application + nsd_imports = inner_mesd_dict['imports'].get('nsds') + vnffg_imports = inner_mesd_dict['imports'].get('vnffgds') + if nsd_imports: + mesd_dict['attributes']['nsds'] = nsd_imports + if vnffg_imports: + mesd_dict['attributes']['vnffgds'] = vnffg_imports + + # Deploy MEC applications + mem_plugin = manager.ApmecManager.get_service_plugins()['MEM'] + mead_imports = inner_mesd_dict['imports']['meads'] + inner_mesd_dict['imports'] = [] + new_files = [] + for mead_name in mead_imports: + mead = mem_plugin.get_mead(context, mead_name) + # Copy MEA types and MEA names + sm_dict = yaml.safe_load(mead['attributes']['mead'])[ + 'topology_template'][ + 'substitution_mappings'] + mesd['meads'][sm_dict['node_type']] = mead['name'] + # Ugly Hack to validate the child templates + # TODO(tbh): add support in tosca-parser to pass child + # templates as dict + fd, temp_path = mkstemp() + with open(temp_path, 'w') as fp: + fp.write(mead['attributes']['mead']) + os.close(fd) + new_files.append(temp_path) + inner_mesd_dict['imports'].append(temp_path) + # Prepend the apmec_defs.yaml import file with the full + # path to the file + toscautils.updateimports(inner_mesd_dict) + + try: + ToscaTemplate(a_file=False, + yaml_dict_tpl=inner_mesd_dict) + except Exception as e: + 
LOG.exception("tosca-parser error: %s", str(e)) + raise meo.ToscaParserFailed(error_msg_details=str(e)) + finally: + for file_path in new_files: + os.remove(file_path) + inner_mesd_dict['imports'] = mead_imports + + if ('description' not in mesd_dict or + mesd_dict['description'] == ''): + mesd_dict['description'] = inner_mesd_dict.get( + 'description', '') + if (('name' not in mesd_dict or + not len(mesd_dict['name'])) and + 'metadata' in inner_mesd_dict): + mesd_dict['name'] = inner_mesd_dict['metadata'].get( + 'template_name', '') + + LOG.debug('mesd %s', mesd) + + def _get_mead_id(self, mead_name, onboarded_meads): + for mead in onboarded_meads: + if mead_name == mead['name']: + return mead['id'] + + @log.log + def create_mes(self, context, mes): + """Create MES and corresponding MEAs. + + :param mes: mes dict which contains mesd_id and attributes + This method has 3 steps: + step-1: substitute all get_input params to its corresponding values + step-2: Build params dict for substitution mappings case through which + MEAs will actually substitute their requirements. 
        step-3: Create mistral workflow and execute the workflow
        """
        mes_info = mes['mes']
        name = mes_info['name']

        # An inline mesd_template is first on-boarded as a throwaway
        # 'inline' MESD, then treated exactly like a pre-onboarded one.
        if mes_info.get('mesd_template'):
            mesd_name = utils.generate_resource_name(name, 'inline')
            mesd = {'mesd': {
                'attributes': {'mesd': mes_info['mesd_template']},
                'description': mes_info['description'],
                'name': mesd_name,
                'template_source': 'inline',
                'tenant_id': mes_info['tenant_id']}}
            mes_info['mesd_id'] = self.create_mesd(context, mesd).get('id')

        mesd = self.get_mesd(context, mes['mes']['mesd_id'])
        mesd_dict = yaml.safe_load(mesd['attributes']['mesd'])
        mem_plugin = manager.ApmecManager.get_service_plugins()['MEM']
        onboarded_meads = mem_plugin.get_meads(context, [])
        # NOTE(review): 'placement_attr' is set on the outer request dict
        # ('mes'), not on mes['mes'] — confirm which level callers and the
        # DB layer actually read it from.
        region_name = mes.setdefault('placement_attr', {}).get(
            'region_name', None)
        vim_res = self.vim_client.get_vim(context, mes['mes']['vim_id'],
                                          region_name)
        driver_type = vim_res['vim_type']
        if not mes['mes']['vim_id']:
            mes['mes']['vim_id'] = vim_res['vim_id']

        # Deploy any NSDs imported by the MESD through the NFVO (tacker)
        # of the target VIM.
        nsds = mesd['attributes'].get('nsds')
        if nsds:
            for nsd in nsds:
                vim_obj = self.get_vim(context, mes['mes']['vim_id'], mask_password=False)
                self._build_vim_auth(context, vim_obj)
                client = tackerclient(vim_obj['auth_cred'])
                ns_name = nsd + name
                # nsd_instance holds the NSD *ID* returned by
                # TackerClient.nsd_get — the name is misleading.
                nsd_instance = client.nsd_get(nsd)
                ns_arg = {'ns': {'nsd_id': nsd_instance, 'name': ns_name}}
                ns_instance = client.ns_create(ns_arg)

        vnffgds = mesd['attributes'].get('vnffgds')
        if vnffgds:
            for vnffgd in vnffgds:
                vim_obj = self.get_vim(context, mes['mes']['vim_id'], mask_password=False)
                self._build_vim_auth(context, vim_obj)
                client = tackerclient(vim_obj['auth_cred'])
                vnffgd_name = vnffgd + name
                vnffgd_instance = client.vnffgd_get(vnffgd)
                vnffg_arg = {'vnffg': {'vnffgd_id': vnffgd_instance, 'name': vnffgd_name}}
                # NOTE(review): hard 5-minute block of this API worker,
                # presumably waiting for the NS created above to become
                # ACTIVE before chaining it — TODO replace with a status
                # poll on the NS instead of a fixed sleep.
                time.sleep(300)
                vnffg_instance = client.vnffg_create(vnffg_arg)

        # Step-1
        param_values = mes['mes']['attributes'].get('param_values', {})
        if 'get_input' in str(mesd_dict):
self._process_parameterized_input(mes['mes']['attributes'], + mesd_dict) + # Step-2 + meads = mesd['meads'] + # mead_dict is used while generating workflow + mead_dict = dict() + for node_name, node_val in \ + (mesd_dict['topology_template']['node_templates']).items(): + if node_val.get('type') not in meads.keys(): + continue + mead_name = meads[node_val.get('type')] + if not mead_dict.get(mead_name): + mead_dict[mead_name] = { + 'id': self._get_mead_id(mead_name, onboarded_meads), + 'instances': [node_name] + } + else: + mead_dict[mead_name]['instances'].append(node_name) + if not node_val.get('requirements'): + continue + if not param_values.get(mead_name): + param_values[mead_name] = {} + param_values[mead_name]['substitution_mappings'] = dict() + req_dict = dict() + requirements = node_val.get('requirements') + for requirement in requirements: + req_name = list(requirement.keys())[0] + req_val = list(requirement.values())[0] + res_name = req_val + mes['mes']['mesd_id'][:11] + req_dict[req_name] = res_name + if req_val in mesd_dict['topology_template']['node_templates']: + param_values[mead_name]['substitution_mappings'][ + res_name] = mesd_dict['topology_template'][ + 'node_templates'][req_val] + + param_values[mead_name]['substitution_mappings'][ + 'requirements'] = req_dict + mes['mead_details'] = mead_dict + # Step-3 + kwargs = {'mes': mes, 'params': param_values} + + # NOTE NoTasksException is raised if no tasks. 
+ workflow = self._vim_drivers.invoke( + driver_type, + 'prepare_and_create_workflow', + resource='mea', + action='create', + auth_dict=self.get_auth_dict(context), + kwargs=kwargs) + try: + mistral_execution = self._vim_drivers.invoke( + driver_type, + 'execute_workflow', + workflow=workflow, + auth_dict=self.get_auth_dict(context)) + except Exception as ex: + LOG.error('Error while executing workflow: %s', ex) + self._vim_drivers.invoke(driver_type, + 'delete_workflow', + workflow_id=workflow['id'], + auth_dict=self.get_auth_dict(context)) + raise ex + mes_dict = super(MeoPlugin, self).create_mes(context, mes) + + def _create_mes_wait(self_obj, mes_id, execution_id): + exec_state = "RUNNING" + mistral_retries = MISTRAL_RETRIES + while exec_state == "RUNNING" and mistral_retries > 0: + time.sleep(MISTRAL_RETRY_WAIT) + exec_state = self._vim_drivers.invoke( + driver_type, + 'get_execution', + execution_id=execution_id, + auth_dict=self.get_auth_dict(context)).state + LOG.debug('status: %s', exec_state) + if exec_state == 'SUCCESS' or exec_state == 'ERROR': + break + mistral_retries = mistral_retries - 1 + error_reason = None + if mistral_retries == 0 and exec_state == 'RUNNING': + error_reason = _( + "MES creation is not completed within" + " {wait} seconds as creation of mistral" + " execution {mistral} is not completed").format( + wait=MISTRAL_RETRIES * MISTRAL_RETRY_WAIT, + mistral=execution_id) + exec_obj = self._vim_drivers.invoke( + driver_type, + 'get_execution', + execution_id=execution_id, + auth_dict=self.get_auth_dict(context)) + self._vim_drivers.invoke(driver_type, + 'delete_execution', + execution_id=execution_id, + auth_dict=self.get_auth_dict(context)) + self._vim_drivers.invoke(driver_type, + 'delete_workflow', + workflow_id=workflow['id'], + auth_dict=self.get_auth_dict(context)) + super(MeoPlugin, self).create_mes_post(context, mes_id, exec_obj, + mead_dict, error_reason) + + self.spawn_n(_create_mes_wait, self, mes_dict['id'], + 
mistral_execution.id) + return mes_dict + + @log.log + def _update_params(self, original, paramvalues): + for key, value in (original).items(): + if not isinstance(value, dict) or 'get_input' not in str(value): + pass + elif isinstance(value, dict): + if 'get_input' in value: + if value['get_input'] in paramvalues: + original[key] = paramvalues[value['get_input']] + else: + LOG.debug('Key missing Value: %s', key) + raise cs.InputValuesMissing(key=key) + else: + self._update_params(value, paramvalues) + + @log.log + def _process_parameterized_input(self, attrs, mesd_dict): + param_vattrs_dict = attrs.pop('param_values', None) + if param_vattrs_dict: + for node in \ + mesd_dict['topology_template']['node_templates'].values(): + if 'get_input' in str(node): + self._update_params(node, param_vattrs_dict['mesd']) + else: + raise cs.ParamYAMLInputMissing() + + @log.log + def delete_mes(self, context, mes_id): + mes = super(MeoPlugin, self).get_mes(context, mes_id) + vim_res = self.vim_client.get_vim(context, mes['vim_id']) + driver_type = vim_res['vim_type'] + workflow = None + try: + workflow = self._vim_drivers.invoke( + driver_type, + 'prepare_and_create_workflow', + resource='mea', + action='delete', + auth_dict=self.get_auth_dict(context), + kwargs={ + 'mes': mes}) + except meo.NoTasksException: + LOG.warning("No MEA deletion task(s).") + if workflow: + try: + mistral_execution = self._vim_drivers.invoke( + driver_type, + 'execute_workflow', + workflow=workflow, + auth_dict=self.get_auth_dict(context)) + + except Exception as ex: + LOG.error('Error while executing workflow: %s', ex) + self._vim_drivers.invoke(driver_type, + 'delete_workflow', + workflow_id=workflow['id'], + auth_dict=self.get_auth_dict(context)) + + raise ex + super(MeoPlugin, self).delete_mes(context, mes_id) + + def _delete_mes_wait(mes_id, execution_id): + exec_state = "RUNNING" + mistral_retries = MISTRAL_RETRIES + while exec_state == "RUNNING" and mistral_retries > 0: + 
time.sleep(MISTRAL_RETRY_WAIT) + exec_state = self._vim_drivers.invoke( + driver_type, + 'get_execution', + execution_id=execution_id, + auth_dict=self.get_auth_dict(context)).state + LOG.debug('status: %s', exec_state) + if exec_state == 'SUCCESS' or exec_state == 'ERROR': + break + mistral_retries -= 1 + error_reason = None + if mistral_retries == 0 and exec_state == 'RUNNING': + error_reason = _( + "MES deletion is not completed within" + " {wait} seconds as deletion of mistral" + " execution {mistral} is not completed").format( + wait=MISTRAL_RETRIES * MISTRAL_RETRY_WAIT, + mistral=execution_id) + exec_obj = self._vim_drivers.invoke( + driver_type, + 'get_execution', + execution_id=execution_id, + auth_dict=self.get_auth_dict(context)) + self._vim_drivers.invoke(driver_type, + 'delete_execution', + execution_id=execution_id, + auth_dict=self.get_auth_dict(context)) + self._vim_drivers.invoke(driver_type, + 'delete_workflow', + workflow_id=workflow['id'], + auth_dict=self.get_auth_dict(context)) + super(MeoPlugin, self).delete_mes_post(context, mes_id, exec_obj, + error_reason) + if workflow: + self.spawn_n(_delete_mes_wait, mes['id'], mistral_execution.id) + else: + super(MeoPlugin, self).delete_mes_post( + context, mes_id, None, None) + return mes['id'] diff --git a/apmec/meo/workflows/__init__.py b/apmec/meo/workflows/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/meo/workflows/vim_monitor/__init__.py b/apmec/meo/workflows/vim_monitor/__init__.py new file mode 100644 index 0000000..a383bde --- /dev/null +++ b/apmec/meo/workflows/vim_monitor/__init__.py @@ -0,0 +1,15 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +RESOURCE_NAME = 'ping_vim' +PING_VIM_TASK_NAME = 'PingVIMTASK' diff --git a/apmec/meo/workflows/vim_monitor/vim_monitor_utils.py b/apmec/meo/workflows/vim_monitor/vim_monitor_utils.py new file mode 100644 index 0000000..a841f1f --- /dev/null +++ b/apmec/meo/workflows/vim_monitor/vim_monitor_utils.py @@ -0,0 +1,89 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import yaml + +from oslo_config import cfg +from oslo_log import log as logging + +from apmec.common import rpc +from apmec.mistral.actionrpc import kill_action as killaction +from apmec.mistral import mistral_client +from apmec.meo.workflows.vim_monitor import workflow_generator +from apmec.mem import keystone + + +LOG = logging.getLogger(__name__) + + +def get_mistral_client(auth_dict): + return mistral_client.MistralClient( + keystone.Keystone().initialize_client('2', **auth_dict), + auth_dict['token']).get_client() + + +def prepare_and_create_workflow(mistral_client, vim_id, action, + kwargs): + wg = workflow_generator.WorkflowGenerator(vim_id, action) + wg.task(**kwargs) + definition_yaml = yaml.safe_dump(wg.definition, default_flow_style=False) + LOG.debug('vim monitor workflow: %s', definition_yaml) + workflow = mistral_client.workflows.create(definition_yaml) + return {'id': workflow[0].id, 'input': wg.get_input_dict()} + + +def execute_workflow(mistral_client, workflow): + return mistral_client.executions.create( + workflow_identifier=workflow['id'], + workflow_input=workflow['input'], + wf_params={}) + + +def delete_executions(mistral_client, vim_id): + executions = mistral_client.executions.list( + workflow_name='vim_id_' + vim_id) + for execution in executions: + mistral_client.executions.delete(execution.id) + + +def delete_workflow(mistral_client, vim_id): + return mistral_client.workflows.delete('vim_id_' + vim_id) + + +def monitor_vim(auth_dict, vim_obj): + mc = get_mistral_client(auth_dict) + auth_url = vim_obj["auth_url"] + vim_ip = auth_url.split("//")[-1].split(":")[0].split("/")[0] + workflow_input_dict = { + 'vim_id': vim_obj['id'], + 'count': cfg.CONF.vim_monitor.count, + 'timeout': cfg.CONF.vim_monitor.timeout, + 'interval': cfg.CONF.vim_monitor.interval, + 'targetip': vim_ip} + workflow = prepare_and_create_workflow( + mc, vim_obj['id'], 'monitor', + workflow_input_dict) + execute_workflow(mc, workflow) + + +def kill_action(context, 
vim_obj): + target = killaction.MistralActionKillRPC.target + rpc_client = rpc.get_client(target) + cctxt = rpc_client.prepare(server=vim_obj['id']) + cctxt.cast(context, 'killAction') + + +def delete_vim_monitor(context, auth_dict, vim_obj): + mc = get_mistral_client(auth_dict) + delete_executions(mc, vim_obj['id']) + delete_workflow(mc, vim_obj['id']) + kill_action(context, vim_obj) diff --git a/apmec/meo/workflows/vim_monitor/vim_ping_action.py b/apmec/meo/workflows/vim_monitor/vim_ping_action.py new file mode 100644 index 0000000..3c34b1a --- /dev/null +++ b/apmec/meo/workflows/vim_monitor/vim_ping_action.py @@ -0,0 +1,107 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

from mistral.actions import base
from oslo_config import cfg
from oslo_log import log as logging

from apmec.agent.linux import utils as linux_utils
from apmec.common import rpc
from apmec.common import topics
from apmec.conductor.conductorrpc import vim_monitor_rpc
from apmec import context as t_context

LOG = logging.getLogger(__name__)


class PingVimAction(base.Action):
    """Mistral action that pings a VIM endpoint until killed via RPC."""

    def __init__(self, count, targetip, vim_id,
                 interval, timeout):
        # killed is flipped by the killAction RPC endpoint to stop run().
        self.killed = False
        self.count = count
        self.timeout = timeout
        self.interval = interval
        self.targetip = targetip
        self.vim_id = vim_id
        # Last status reported upstream; starts PENDING so the first
        # ping result always triggers an update.
        self.current_status = "PENDING"

    def start_rpc_listeners(self):
        """Start the RPC loop to let the server communicate with actions."""
        # Consumes TOPIC_ACTION_KILL with the vim_id as host so each VIM's
        # monitor can be killed individually.
        self.endpoints = [self]
        self.conn = rpc.create_connection()
        self.conn.create_consumer(topics.TOPIC_ACTION_KILL,
                                  self.endpoints, fanout=False,
                                  host=self.vim_id)
        return self.conn.consume_in_threads()

    # NOTE: camelCase is the RPC method name cast by
    # vim_monitor_utils.kill_action — do not rename to snake_case.
    def killAction(self, context, **kwargs):
        self.killed = True

    def _ping(self):
        # assumes count/timeout/interval were passed as strings suitable
        # for the ping CLI — TODO confirm workflow input types.
        ping_cmd = ['ping', '-c', self.count,
                    '-W', self.timeout,
                    '-i', self.interval,
                    self.targetip]

        try:
            # NOTE(gongysh) since it is called in a loop, the debug log
            # should be disabled to avoid eating up mistral executor.
+ linux_utils.execute(ping_cmd, check_exit_code=True, + debuglog=False) + return 'REACHABLE' + except RuntimeError: + LOG.warning(("Cannot ping ip address: %s"), self.targetip) + return 'UNREACHABLE' + + def _update(self, status): + LOG.info("VIM %s changed to status %s", self.vim_id, status) + target = vim_monitor_rpc.VIMUpdateRPC.target + rpc_client = rpc.get_client(target) + cctxt = rpc_client.prepare() + return cctxt.call(t_context.get_admin_context_without_session(), + 'update_vim', + vim_id=self.vim_id, + status=status) + + def run(self): + servers = [] + try: + rpc.init_action_rpc(cfg.CONF) + servers = self.start_rpc_listeners() + except Exception: + LOG.exception('failed to start rpc in vim action') + return 'FAILED' + try: + while True: + if self.killed: + break + status = self._ping() + if self.current_status != status: + self.current_status = self._update(status) + # TODO(gongysh) If we need to sleep a little time here? + except Exception: + LOG.exception('failed to run mistral action for vim %s', + self.vim_id) + return 'FAILED' + # to stop rpc connection + for server in servers: + try: + server.stop() + except Exception: + LOG.exception( + 'failed to stop rpc connection for vim %s', + self.vim_id) + return 'KILLED' + + def test(self): + return 'REACHABLE' diff --git a/apmec/meo/workflows/vim_monitor/workflow_generator.py b/apmec/meo/workflows/vim_monitor/workflow_generator.py new file mode 100644 index 0000000..4b80772 --- /dev/null +++ b/apmec/meo/workflows/vim_monitor/workflow_generator.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from apmec.mistral import workflow_generator +from apmec.meo.workflows import vim_monitor + +LOG = logging.getLogger(__name__) + + +class WorkflowGenerator(workflow_generator.WorkflowGeneratorBase): + def __init__(self, vim_id, action): + super(WorkflowGenerator, self).__init__( + vim_monitor.RESOURCE_NAME, action) + self.wf_identifier = 'vim_id_' + vim_id + self._build_basic_workflow() + + def _add_ping_vim_tasks(self): + task_dict = dict() + task = self.wf_name + vim_monitor.PING_VIM_TASK_NAME + task_dict[task] = { + 'action': 'apmec.vim_ping_action', + 'input': {'count': self.input_dict_data['count'], + 'targetip': self.input_dict_data['targetip'], + 'vim_id': self.input_dict_data['vim_id'], + 'interval': self.input_dict_data['interval'], + 'timeout': self.input_dict_data['timeout']}, + } + return task_dict + + def get_input_dict(self): + return self.input_dict + + def _build_input(self, vim_id, count, timeout, + interval, targetip): + self.input_dict_data = {'vim_id': vim_id, + 'count': count, + 'timeout': timeout, + 'interval': interval, + 'targetip': targetip} + self.input_dict[self.resource] = self.input_dict_data + + def monitor_ping_vim(self, vim_id=None, count=1, timeout=1, + interval=1, targetip="127.0.0.1"): + self._build_input(vim_id, count, timeout, + interval, targetip) + self.definition[self.wf_identifier]['tasks'] = dict() + self.definition[self.wf_identifier]['tasks'].update( + self._add_ping_vim_tasks()) diff --git a/apmec/mistral/__init__.py b/apmec/mistral/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/mistral/actionrpc/__init__.py b/apmec/mistral/actionrpc/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/mistral/actionrpc/kill_action.py b/apmec/mistral/actionrpc/kill_action.py new file mode 100644 index 0000000..b66268f --- /dev/null +++ 
b/apmec/mistral/actionrpc/kill_action.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import oslo_messaging + +from apmec.common import topics + + +class MistralActionKillRPC(object): + + target = oslo_messaging.Target( + exchange='apmec', + topic=topics.TOPIC_ACTION_KILL, + fanout=False, + version='1.0') + + def killAction(self, context, **kwargs): + pass diff --git a/apmec/mistral/mistral_client.py b/apmec/mistral/mistral_client.py new file mode 100644 index 0000000..a6a3308 --- /dev/null +++ b/apmec/mistral/mistral_client.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from mistralclient.api import client as mistral_client + + +class MistralClient(object): + """Mistral Client class for MESD""" + + def __init__(self, keystone, auth_token): + endpoint = keystone.session.get_endpoint( + service_type='workflowv2', region_name=None) + + self.client = mistral_client.client(auth_token=auth_token, + mistral_url=endpoint) + + def get_client(self): + return self.client diff --git a/apmec/mistral/workflow_generator.py b/apmec/mistral/workflow_generator.py new file mode 100644 index 0000000..d675772 --- /dev/null +++ b/apmec/mistral/workflow_generator.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import uuidutils + + +class WorkflowGeneratorBase(object): + def __init__(self, resource, action): + self.resource = resource + self.action = action + self.wf_name = self.action + '_' + self.resource + self.wf_identifier = 'std.' 
+ self.wf_name + uuidutils.generate_uuid() + self.task = getattr(self, self.wf_name) + self.input_dict = dict() + self._build_basic_workflow() + + def _build_basic_workflow(self): + self.definition = { + 'version': '2.0', + self.wf_identifier: { + 'type': 'direct', + 'input': [self.resource] + } + } + + def get_tasks(self): + return self.definition[self.wf_identifier].get('tasks') diff --git a/apmec/nfv/__init__.py b/apmec/nfv/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/nfv/tacker_client.py b/apmec/nfv/tacker_client.py new file mode 100644 index 0000000..dc8854d --- /dev/null +++ b/apmec/nfv/tacker_client.py @@ -0,0 +1,104 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
from keystoneauth1 import identity
from keystoneauth1 import session
from tackerclient.v1_0 import client as tacker_client


class TackerClient(object):
    """Tacker Client class for VNFM and NFVO negotiation"""

    def __init__(self, auth_attr):
        auth = identity.Password(**auth_attr)
        sess = session.Session(auth=auth)
        self.client = tacker_client.Client(session=sess)

    def nsd_create(self, nsd_dict):
        """Create an NSD; return its ID, or None if creation failed."""
        nsd_instance = self.client.create_nsd(body=nsd_dict)
        if nsd_instance:
            return nsd_instance['nsd']['id']
        else:
            return None

    def nsd_get(self, nsd_name):
        """Return the ID of the last NSD named nsd_name, or None."""
        nsd_dict = self.client.list_nsds()
        nsd_list = nsd_dict['nsds']
        nsd_id = None
        for nsd in nsd_list:
            if nsd['name'] == nsd_name:
                nsd_id = nsd['id']
        return nsd_id

    def ns_create(self, ns_dict):
        """Create an NS instance; return its ID, or None on failure."""
        ns_instance = self.client.create_ns(body=ns_dict)
        if ns_instance:
            return ns_instance['ns']['id']
        else:
            return None

    def ns_get(self, ns_name):
        """Return the ID of the last NS instance named ns_name, or None."""
        # BUG FIX: the original called list_nsds() but indexed 'nss',
        # which always raised KeyError; list NS instances instead.
        ns_dict = self.client.list_nss()
        ns_list = ns_dict['nss']
        ns_id = None
        for ns in ns_list:
            if ns['name'] == ns_name:
                ns_id = ns['id']
        return ns_id

    def ns_delete(self, ns_name):
        """Delete the NS instance named ns_name, if it exists."""
        ns_id = self.ns_get(ns_name)
        if ns_id:
            self.client.delete_ns(ns_id)

    def vnfd_create(self, vnfd_dict):
        """Create a VNFD; return its ID, or None on failure."""
        vnfd_instance = self.client.create_vnfd(body=vnfd_dict)
        if vnfd_instance:
            # BUG FIX: create_vnfd responses are keyed 'vnfd', not 'vnf'.
            return vnfd_instance['vnfd']['id']
        else:
            return None

    def vnf_create(self, vnf_dict):
        """Create a VNF; return its ID, or None on failure."""
        vnf_instance = self.client.create_vnf(body=vnf_dict)
        if vnf_instance:
            return vnf_instance['vnf']['id']
        else:
            return None

    def vnffgd_get(self, vnffgd_name):
        """Return the ID of the last VNFFGD named vnffgd_name, or None."""
        vnffgd_dict = self.client.list_vnffgds()
        vnffgd_list = vnffgd_dict['vnffgds']
        vnffgd_id = None
        for vnffgd in vnffgd_list:
            if vnffgd['name'] == vnffgd_name:
                vnffgd_id = vnffgd['id']
        return vnffgd_id

    def vnffg_create(self, vnffgd_dict):
        """Create a VNFFG; return its ID, or None on failure."""
        vnffg_instance = self.client.create_vnffg(body=vnffgd_dict)
        if vnffg_instance:
            return vnffg_instance['vnffg']['id']
        else:
            return None
vnffg_get(self, vnffg_name): + vnffg_dict = self.client.list_vnffgs() + vnffg_list = vnffg_dict['vnffgs'] + vnffg_id = None + for vnffg in vnffg_list: + if vnffg['name'] == vnffg_name: + vnffg_id = vnffg['id'] + return vnffg_id + + def vnffg_delete(self, vnffg_name): + vnffg_id = self.vnffg_get(vnffg_name) + if vnffg_id: + self.client.delete_vnffg(vnffg_id) \ No newline at end of file diff --git a/apmec/plugins/__init__.py b/apmec/plugins/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/plugins/common/__init__.py b/apmec/plugins/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/plugins/common/constants.py b/apmec/plugins/common/constants.py new file mode 100644 index 0000000..dc245d4 --- /dev/null +++ b/apmec/plugins/common/constants.py @@ -0,0 +1,74 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# Service type identifiers known to the APMEC service framework.
CORE = "CORE"
DUMMY = "DUMMY"
MEM = "MEM"
MEO = "MEO"
COMMONSERVICES = "COMMONSERVICES"

# API URL prefix mounted in front of each service's resources; only the
# dummy test service is exposed under a dedicated sub-path.
COMMON_PREFIXES = {
    CORE: "",
    DUMMY: "/dummy_svc",
    MEM: "",
    MEO: "",
    COMMONSERVICES: "",
}

# Operational status values a managed resource may report.
ACTIVE = "ACTIVE"
DOWN = "DOWN"

# Transitional states used while an operation is in flight.
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
PENDING_SCALE_IN = "PENDING_SCALE_IN"
PENDING_SCALE_OUT = "PENDING_SCALE_OUT"

INACTIVE = "INACTIVE"
DEAD = "DEAD"
ERROR = "ERROR"

# States in which a resource is usable or about to become usable.
ACTIVE_PENDING_STATUSES = (
    ACTIVE,
    PENDING_CREATE,
    PENDING_UPDATE,
)

# TOSCA policy types and the scaling actions they may request.
POLICY_SCALING = 'tosca.policies.apmec.Scaling'
POLICY_SCALING_ACTIONS = (ACTION_SCALE_OUT,
                          ACTION_SCALE_IN) = ('out', 'in')
POLICY_ACTIONS = {POLICY_SCALING: POLICY_SCALING_ACTIONS}
POLICY_ALARMING = 'tosca.policies.apmec.Alarming'
DEFAULT_ALARM_ACTIONS = ['respawn', 'log', 'log_and_kill', 'notify']

# Resource type tags recorded with audit events.
# NOTE(review): RES_TYPE_mes breaks the UPPER_SNAKE_CASE convention of
# its siblings, but renaming it would break importers -- left as-is.
RES_TYPE_MEAD = "mead"
RES_TYPE_MESD = "mesd"
RES_TYPE_mes = "mes"
RES_TYPE_MEA = "mea"
RES_TYPE_VIM = "vim"

# Audit event verbs.
RES_EVT_CREATE = "CREATE"
RES_EVT_DELETE = "DELETE"
RES_EVT_UPDATE = "UPDATE"
RES_EVT_MONITOR = "MONITOR"
RES_EVT_SCALE = "SCALE"
RES_EVT_NA_STATE = "Not Applicable"
RES_EVT_ONBOARDED = "OnBoarded"

# Timestamp field names attached to audit events.
RES_EVT_CREATED_FLD = "created_at"
RES_EVT_DELETED_FLD = "deleted_at"
RES_EVT_UPDATED_FLD = "updated_at"
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Common utilities and helper functions for OpenStack Networking Plugins."""

# BUG FIX: _() was used below but never imported, so verify_vlan_range()
# raised NameError instead of the intended NetworkVlanRangeError.  Import
# it explicitly, consistent with apmec/policy.py.
from apmec._i18n import _
from apmec.common import exceptions as n_exc
from apmec.common import utils
from apmec.plugins.common import constants


def verify_vlan_range(vlan_range):
    """Raise NetworkVlanRangeError for invalid tags or a malformed range.

    :param vlan_range: (vlan_min, vlan_max) tuple of ints.
    """
    for vlan_tag in vlan_range:
        if not utils.is_valid_vlan_tag(vlan_tag):
            raise n_exc.NetworkVlanRangeError(
                vlan_range=vlan_range,
                error=_("%s is not a valid VLAN tag") % vlan_tag)
    if vlan_range[1] < vlan_range[0]:
        raise n_exc.NetworkVlanRangeError(
            vlan_range=vlan_range,
            error=_("End of VLAN range is less than start of VLAN range"))


def parse_network_vlan_range(network_vlan_range):
    """Interpret a string as network[:vlan_begin:vlan_end].

    :param network_vlan_range: "net" or "net:min:max".
    :returns: (network, (min, max)) or (network, None) when no range
        was given.
    :raises NetworkVlanRangeError: when the range is malformed or invalid.
    """
    entry = network_vlan_range.strip()
    if ':' in entry:
        try:
            network, vlan_min, vlan_max = entry.split(':')
            vlan_range = (int(vlan_min), int(vlan_max))
        except ValueError as ex:
            raise n_exc.NetworkVlanRangeError(vlan_range=entry, error=ex)
        verify_vlan_range(vlan_range)
        return network, vlan_range
    else:
        return entry, None


def parse_network_vlan_ranges(network_vlan_ranges_cfg_entries):
    """Interpret a list of strings as network[:vlan_begin:vlan_end] entries.

    :returns: dict mapping network name to a list of (min, max) tuples;
        the list is empty for a network listed without a range.
    """
    networks = {}
    for entry in network_vlan_ranges_cfg_entries:
        network, vlan_range = parse_network_vlan_range(entry)
        if vlan_range:
            networks.setdefault(network, []).append(vlan_range)
        else:
            networks.setdefault(network, [])
    return networks


def in_pending_status(status):
    """Return True if *status* is one of the transitional PENDING states."""
    return status in (constants.PENDING_CREATE,
                      constants.PENDING_UPDATE,
                      constants.PENDING_DELETE)
+ """ + + supported_extension_aliases = ['CommonServices'] + + def __init__(self): + super(CommonServicesPlugin, self).__init__() + + @log.log + def get_event(self, context, event_id, fields=None): + return super(CommonServicesPlugin, self).get_event(context, event_id, + fields) + + @log.log + def get_events(self, context, filters=None, fields=None, sorts=None, + limit=None, marker_obj=None, page_reverse=False): + return super(CommonServicesPlugin, self).get_events(context, filters, + fields, sorts, limit, + marker_obj, + page_reverse) diff --git a/apmec/policy.py b/apmec/policy.py new file mode 100644 index 0000000..2244789 --- /dev/null +++ b/apmec/policy.py @@ -0,0 +1,413 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Policy engine for apmec: wraps oslo.policy with attribute-aware checks."""

import re

try:
    # BUG FIX: collections.Iterable was removed in Python 3.10; on
    # Python 3 the ABCs live in collections.abc.
    from collections import abc as collections_abc
except ImportError:  # Python 2 fallback (file still supports six/py2)
    import collections as collections_abc

from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
from oslo_utils import importutils
import six

from apmec._i18n import _
from apmec.api.v1 import attributes
from apmec.common import exceptions


LOG = logging.getLogger(__name__)

# Module-level singleton enforcer, created lazily by init().
_ENFORCER = None
ADMIN_CTX_POLICY = 'context_is_admin'


def reset():
    """Drop the singleton enforcer (mainly for tests)."""
    global _ENFORCER
    if _ENFORCER:
        _ENFORCER.clear()
        _ENFORCER = None


def init(conf=cfg.CONF, policy_file=None):
    """Init an instance of the Enforcer class.  Idempotent."""
    global _ENFORCER
    if not _ENFORCER:
        _ENFORCER = policy.Enforcer(conf, policy_file=policy_file)
        _ENFORCER.load_rules(True)


def refresh(policy_file=None):
    """Reset policy and init a new instance of Enforcer."""
    reset()
    init(policy_file=policy_file)


def get_resource_and_action(action, pluralized=None):
    """Return resource and enforce_attr_based_check(boolean).

    It is per resource and action extracted from api operation,
    e.g. "create_network" -> ("networks", True).
    """
    data = action.split(':', 1)[0].split('_', 1)
    resource = pluralized or ("%ss" % data[-1])
    # get/delete operate on the object as a whole, so per-attribute
    # policy checks do not apply to them.
    enforce_attr_based_check = data[0] not in ('get', 'delete')
    return (resource, enforce_attr_based_check)


def set_rules(policies, overwrite=True):
    """Set rules based on the provided dict of rules.

    :param policies: New policies to use. It should be an instance of dict.
    :param overwrite: Whether to overwrite current rules or update them
        with the new rules.
    """
    # BUG FIX: ensure the enforcer exists before dereferencing it -- the
    # original logged _ENFORCER.policy_path *before* calling init(),
    # which raises AttributeError when set_rules() is the first policy
    # API called.
    init()
    LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
    _ENFORCER.set_rules(policies, overwrite)


def _is_attribute_explicitly_set(attribute_name, resource, target, action):
    """Verify that an attribute is present and is explicitly set."""
    if 'update' in action:
        # In the case of update, the function should not pay attention to a
        # default value of an attribute, but check whether it was explicitly
        # marked as being updated instead.
        return (attribute_name in target[attributes.ATTRIBUTES_TO_UPDATE] and
                target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED)
    return ('default' in resource[attribute_name] and
            attribute_name in target and
            target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
            target[attribute_name] != resource[attribute_name]['default'])


def _should_validate_sub_attributes(attribute, sub_attr):
    """Verify that sub-attributes are iterable and should be validated."""
    validate = attribute.get('validate')
    return (validate and isinstance(sub_attr, collections_abc.Iterable) and
            any([k.startswith('type:dict') and
                 v for (k, v) in validate.items()]))


def _build_subattr_match_rule(attr_name, attr, action, target):
    """Create the rule to match for sub-attribute policy checks."""
    # TODO(salv-orlando): Instead of relying on validator info, introduce
    # typing for API attributes
    # Expect a dict as type descriptor
    validate = attr['validate']
    key = list(filter(lambda k: k.startswith('type:dict'), validate.keys()))
    if not key:
        LOG.warning("Unable to find data type descriptor for attribute %s",
                    attr_name)
        return
    data = validate[key[0]]
    if not isinstance(data, dict):
        LOG.debug("Attribute type descriptor is not a dict. Unable to "
                  "generate any sub-attr policy rule for %s.",
                  attr_name)
        return
    sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
                                       (action, attr_name,
                                        sub_attr_name)) for
                      sub_attr_name in data if sub_attr_name in
                      target[attr_name]]
    return policy.AndCheck(sub_attr_rules)


def _process_rules_list(rules, match_rule):
    """Recursively walk a policy rule to extract a list of match entries."""
    if isinstance(match_rule, policy.RuleCheck):
        rules.append(match_rule.match)
    elif isinstance(match_rule, policy.AndCheck):
        for rule in match_rule.rules:
            _process_rules_list(rules, rule)
    return rules


def _build_match_rule(action, target, pluralized):
    """Create the rule to match for a given action.

    The policy rule to be matched is built in the following way:
    1) add entries for matching permission on objects
    2) add an entry for the specific action (e.g.: create_network)
    3) add an entry for attributes of a resource for which the action
       is being executed (e.g.: create_network:shared)
    4) add an entry for sub-attributes of a resource for which the
       action is being executed
       (e.g.: create_router:external_gateway_info:network_id)
    """
    match_rule = policy.RuleCheck('rule', action)
    resource, enforce_attr_based_check = get_resource_and_action(
        action, pluralized)
    if enforce_attr_based_check:
        # assigning to variable with short name for improving readability
        res_map = attributes.RESOURCE_ATTRIBUTE_MAP
        if resource in res_map:
            for attribute_name in res_map[resource]:
                if _is_attribute_explicitly_set(attribute_name,
                                                res_map[resource],
                                                target, action):
                    attribute = res_map[resource][attribute_name]
                    if 'enforce_policy' in attribute:
                        attr_rule = policy.RuleCheck('rule', '%s:%s' %
                                                     (action, attribute_name))
                        # Build match entries for sub-attributes
                        if _should_validate_sub_attributes(
                                attribute, target[attribute_name]):
                            attr_rule = policy.AndCheck(
                                [attr_rule, _build_subattr_match_rule(
                                    attribute_name, attribute,
                                    action, target)])
                        match_rule = policy.AndCheck([match_rule, attr_rule])
    return match_rule


# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handling backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
    """Resource ownership check.

    This check verifies the owner of the current resource, or of another
    resource referenced by the one under analysis.
    In the former case it falls back to a regular GenericCheck, whereas
    in the latter case it leverages the plugin to load the referenced
    resource and perform the check.
    """
    def __init__(self, kind, match):
        # Process the match
        try:
            self.target_field = re.findall(r'^\%\((.*)\)s$',
                                           match)[0]
        except IndexError:
            err_reason = (_("Unable to identify a target field from:%s. "
                            "Match should be in the form %%(<field_name>)s") %
                          match)
            LOG.exception(err_reason)
            raise exceptions.PolicyInitError(
                policy="%s:%s" % (kind, match),
                reason=err_reason)
        super(OwnerCheck, self).__init__(kind, match)

    def __call__(self, target, creds, enforcer):
        if self.target_field not in target:
            # policy needs a plugin check
            # target field is in the form resource:field
            # however if they're not separated by a colon, use an underscore
            # as a separator for backward compatibility

            def do_split(separator):
                parent_res, parent_field = self.target_field.split(
                    separator, 1)
                return parent_res, parent_field

            for separator in (':', '_'):
                try:
                    parent_res, parent_field = do_split(separator)
                    break
                except ValueError:
                    LOG.debug("Unable to find ':' as separator in %s.",
                              self.target_field)
            else:
                # If we are here split failed with both separators
                err_reason = ("Unable to find resource name in %s" %
                              self.target_field)
                LOG.error(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
                "%ss" % parent_res, None)
            if not parent_foreign_key:
                err_reason = ("Unable to verify match:%(match)s as the "
                              "parent resource: %(res)s was not found" %
                              {'match': self.match, 'res': parent_res})
                LOG.error(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            # NOTE(salv-orlando): This check currently assumes the parent
            # resource is handled by the core plugin. It might be worth
            # having a way to map resources to plugins so to make this
            # check more general
            # NOTE(ihrachys): if import is put in global, circular
            # import failure occurs
            manager = importutils.import_module('apmec.manager')
            f = getattr(manager.ApmecManager.get_instance().plugin,
                        'get_%s' % parent_res)
            # f *must* exist, if not found it is better to let apmec
            # explode. Check will be performed with admin context
            context = importutils.import_module('apmec.context')
            try:
                data = f(context.get_admin_context(),
                         target[parent_foreign_key],
                         fields=[parent_field])
                target[self.target_field] = data[parent_field]
            except exceptions.NotFound as e:
                # NOTE(kevinbenton): a NotFound exception can occur if a
                # list operation is happening at the same time as one of
                # the parents and its children being deleted. So we issue
                # a RetryRequest so the API will redo the lookup and the
                # problem items will be gone.
                raise db_exc.RetryRequest(e)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception('Policy check error while calling %s!', f)
        match = self.match % target
        if self.kind in creds:
            return match == six.text_type(creds[self.kind])
        return False


@policy.register('field')
class FieldCheck(policy.Check):
    """Check a resource field against a constant or ~regex value."""

    def __init__(self, kind, match):
        # Process the match: "resource:field=value"
        resource, field_value = match.split(':', 1)
        field, value = field_value.split('=', 1)

        super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
                                         (resource, field, value))

        # Value might need conversion - we need help from the attribute map
        try:
            attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
            conv_func = attr['convert_to']
        except KeyError:
            # No converter declared for this field: use it verbatim.
            def conv_func(x):
                return x

        self.field = field
        self.value = conv_func(value)
        # A leading '~' marks the value as a regular expression.
        self.regex = re.compile(value[1:]) if value.startswith('~') else None

    def __call__(self, target_dict, cred_dict, enforcer):
        target_value = target_dict.get(self.field)
        # target_value might be a boolean, explicitly compare with None
        if target_value is None:
            LOG.debug("Unable to find requested field: %(field)s in target: "
                      "%(target_dict)s",
                      {'field': self.field, 'target_dict': target_dict})
            return False
        if self.regex:
            return bool(self.regex.match(target_value))
        return target_value == self.value


def _prepare_check(context, action, target, pluralized):
    """Prepare rule, target, and credentials for the policy engine."""
    # Compare with None to distinguish case in which target is {}
    if target is None:
        target = {}
    match_rule = _build_match_rule(action, target, pluralized)
    credentials = context.to_dict()
    return match_rule, target, credentials


def log_rule_list(match_rule):
    """Debug-log the flattened rule entries of *match_rule*."""
    if LOG.isEnabledFor(logging.DEBUG):
        rules = _process_rules_list([], match_rule)
        LOG.debug("Enforcing rules: %s", rules)


def check(context, action, target, plugin=None, might_not_exist=False,
          pluralized=None):
    """Verifies that the action is valid on the target in this context.

    :param context: apmec context
    :param action: string representing the action to be checked
        this should be colon separated for clarity.
    :param target: dictionary representing the object of the action
        for object creation this should be a dictionary representing the
        location of the object e.g. ``{'project_id': context.project_id}``
    :param plugin: currently unused and deprecated.
        Kept for backward compatibility.
    :param might_not_exist: If True the policy check is skipped (and the
        function returns True) if the specified policy does not exist.
        Defaults to false.
    :param pluralized: pluralized case of resource
        e.g. firewall_policy -> pluralized = "firewall_policies"

    :return: Returns True if access is permitted else False.
    """
    # If we already know the context has admin rights do not perform an
    # additional check and authorize the operation
    if context.is_admin:
        return True
    # BUG FIX: guarantee the enforcer exists before dereferencing it;
    # init() is idempotent so this is free when already initialized.
    init()
    if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules):
        return True
    match_rule, target, credentials = _prepare_check(context,
                                                     action,
                                                     target,
                                                     pluralized)
    result = _ENFORCER.enforce(match_rule,
                               target,
                               credentials,
                               pluralized=pluralized)
    # logging applied rules in case of failure
    if not result:
        log_rule_list(match_rule)
    return result


def enforce(context, action, target, plugin=None, pluralized=None):
    """Verifies that the action is valid on the target in this context.

    :param context: apmec context
    :param action: string representing the action to be checked
        this should be colon separated for clarity.
    :param target: dictionary representing the object of the action
        for object creation this should be a dictionary representing the
        location of the object e.g. ``{'project_id': context.project_id}``
    :param plugin: currently unused and deprecated.
        Kept for backward compatibility.
    :param pluralized: pluralized case of resource
        e.g. firewall_policy -> pluralized = "firewall_policies"

    :raises oslo_policy.policy.PolicyNotAuthorized:
        if verification fails.
    """
    # If we already know the context has admin rights do not perform an
    # additional check and authorize the operation
    if context.is_admin:
        return True
    # BUG FIX: guarantee the enforcer exists before using it below.
    init()
    rule, target, credentials = _prepare_check(context,
                                               action,
                                               target,
                                               pluralized)
    try:
        result = _ENFORCER.enforce(rule, target, credentials, action=action,
                                   do_raise=True)
    except policy.PolicyNotAuthorized:
        with excutils.save_and_reraise_exception():
            log_rule_list(rule)
            LOG.debug("Failed policy check for '%s'", action)
    return result


def check_is_admin(context):
    """Verify context has admin rights according to policy settings."""
    init()
    # the target is user-self
    credentials = context.to_dict()
    if ADMIN_CTX_POLICY not in _ENFORCER.rules:
        return False
    return _ENFORCER.enforce(ADMIN_CTX_POLICY, credentials, credentials)
See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect +import os +import random + +import logging as std_logging + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_service import loopingcall +from oslo_service import service +from oslo_utils import excutils +from oslo_utils import importutils + +from apmec.common import config +from apmec.common import rpc as n_rpc +from apmec import context +from apmec import wsgi + + +service_opts = [ + cfg.IntOpt('report_interval', + default=10, + help=_('Seconds between running components report states')), + cfg.IntOpt('periodic_interval', + default=40, + help=_('Seconds between running periodic tasks')), + cfg.IntOpt('api_workers', + default=0, + help=_('Number of separate worker processes for service')), + cfg.IntOpt('periodic_fuzzy_delay', + default=5, + help=_('Range of seconds to randomly delay when starting the ' + 'periodic task scheduler to reduce stampeding. ' + '(Disable by setting to 0)')), +] +CONF = cfg.CONF +CONF.register_opts(service_opts) + + +def config_opts(): + return [(None, service_opts)] + + +LOG = logging.getLogger(__name__) + + +class WsgiService(service.ServiceBase): + """Base class for WSGI based services. 
+ + For each api you define, you must also define these flags: + :_listen: The address on which to listen + :_listen_port: The port on which to listen + + """ + + def __init__(self, app_name): + self.app_name = app_name + self.wsgi_app = None + + def start(self): + self.wsgi_app = _run_wsgi(self.app_name) + + def wait(self): + if self.wsgi_app: + self.wsgi_app.wait() + + def stop(self): + pass + + def reset(self): + pass + + +class ApmecApiService(WsgiService): + """Class for apmec-api service.""" + + @classmethod + def create(cls, app_name='apmec'): + + # Setup logging early + config.setup_logging(cfg.CONF) + # Dump the initial option values + cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + service = cls(app_name) + return service + + +def serve_wsgi(cls): + + try: + service = cls.create() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception('Unrecoverable error: please check log ' + 'for details.') + + return service + + +def _run_wsgi(app_name): + app = config.load_paste_app(app_name) + if not app: + LOG.error('No known API applications configured.') + return + server = wsgi.Server("Apmec") + server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host, + workers=cfg.CONF.api_workers) + # Dump all option values here after all options are parsed + cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + LOG.info("Apmec service started, listening on %(host)s:%(port)s", + {'host': cfg.CONF.bind_host, + 'port': cfg.CONF.bind_port}) + return server + + +class Service(n_rpc.Service): + """Service object for binaries running on hosts. + + A service takes a manager and enables rpc by listening to queues based + on topic. It also periodically runs tasks on the manager. 
+ """ + + def __init__(self, host, binary, topic, manager, report_interval=None, + periodic_interval=None, periodic_fuzzy_delay=None, + *args, **kwargs): + + self.binary = binary + self.manager_class_name = manager + manager_class = importutils.import_class(self.manager_class_name) + self.manager = manager_class(host=host, *args, **kwargs) + self.report_interval = report_interval + self.periodic_interval = periodic_interval + self.periodic_fuzzy_delay = periodic_fuzzy_delay + self.saved_args, self.saved_kwargs = args, kwargs + self.timers = [] + super(Service, self).__init__(host, topic, manager=self.manager) + + def start(self): + self.manager.init_host() + super(Service, self).start() + if self.report_interval: + pulse = loopingcall.FixedIntervalLoopingCall(self.report_state) + pulse.start(interval=self.report_interval, + initial_delay=self.report_interval) + self.timers.append(pulse) + + if self.periodic_interval: + if self.periodic_fuzzy_delay: + initial_delay = random.randint(0, self.periodic_fuzzy_delay) + else: + initial_delay = None + + periodic = loopingcall.FixedIntervalLoopingCall( + self.periodic_tasks) + periodic.start(interval=self.periodic_interval, + initial_delay=initial_delay) + self.timers.append(periodic) + self.manager.after_start() + + def __getattr__(self, key): + manager = self.__dict__.get('manager', None) + return getattr(manager, key) + + @classmethod + def create(cls, host=None, binary=None, topic=None, manager=None, + report_interval=None, periodic_interval=None, + periodic_fuzzy_delay=None): + """Instantiates class and passes back application object. 
+ + :param host: defaults to cfg.CONF.host + :param binary: defaults to basename of executable + :param topic: defaults to bin_name - 'apmec-' part + :param manager: defaults to cfg.CONF._manager + :param report_interval: defaults to cfg.CONF.report_interval + :param periodic_interval: defaults to cfg.CONF.periodic_interval + :param periodic_fuzzy_delay: defaults to cfg.CONF.periodic_fuzzy_delay + + """ + if not host: + host = cfg.CONF.host + if not binary: + binary = os.path.basename(inspect.stack()[-1][1]) + if not topic: + topic = binary.rpartition('neutron-')[2] + topic = topic.replace("-", "_") + if not manager: + manager = cfg.CONF.get('%s_manager' % topic, None) + if report_interval is None: + report_interval = cfg.CONF.report_interval + if periodic_interval is None: + periodic_interval = cfg.CONF.periodic_interval + if periodic_fuzzy_delay is None: + periodic_fuzzy_delay = cfg.CONF.periodic_fuzzy_delay + service_obj = cls(host, binary, topic, manager, + report_interval=report_interval, + periodic_interval=periodic_interval, + periodic_fuzzy_delay=periodic_fuzzy_delay) + + return service_obj + + def kill(self): + """Destroy the service object.""" + self.stop() + + def stop(self): + super(Service, self).stop() + for x in self.timers: + try: + x.stop() + except Exception: + LOG.exception("Exception occurs when timer stops") + self.timers = [] + + def wait(self): + super(Service, self).wait() + for x in self.timers: + try: + x.wait() + except Exception: + LOG.exception("Exception occurs when waiting for timer") + + def reset(self): + config.reset_service() + + def periodic_tasks(self, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + ctxt = context.get_admin_context() + self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) + + def report_state(self): + """Update the state of this service.""" + # Todo(gongysh) report state to neutron server + pass diff --git a/apmec/services/__init__.py b/apmec/services/__init__.py new file mode 
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six

from apmec.api import extensions


@six.add_metaclass(abc.ABCMeta)
class MECPluginBase(extensions.PluginInterface):
    """Define base interface for any Advanced Service plugin.

    Concrete MEC service plugins must implement the identification
    methods below so the service framework can discover and describe
    them.
    """

    # Extension aliases implemented by the plugin; concrete subclasses
    # override this with the aliases they actually serve.
    supported_extension_aliases = []

    @abc.abstractmethod
    def get_plugin_type(self):
        """Return one of predefined service types.

        See apmec/plugins/common/constants.py for the valid values.
        """
        pass

    @abc.abstractmethod
    def get_plugin_name(self):
        """Return a symbolic name for the plugin.

        Each service plugin should have a symbolic name. This name
        will be used, for instance, by service definitions in service types.
        """
        pass

    @abc.abstractmethod
    def get_plugin_description(self):
        """Return string description of the plugin."""
        pass
class BaseTestCase(testtools.TestCase):
    """Common base class for apmec unit tests.

    Wires up per-test isolation: config overrides, temp directories,
    log capture, fake RPC/notification transports, and core-plugin
    teardown.  Behavior is steered by OS_* environment variables
    (OS_DEBUG, OS_LOG_CAPTURE, OS_POST_MORTEM_DEBUG, ...).
    """

    def cleanup_core_plugin(self):
        """Ensure that the core plugin is deallocated."""
        nm = manager.ApmecManager
        if not nm.has_instance():
            return

        # Perform a check for deallocation only if explicitly
        # configured to do so since calling gc.collect() after every
        # test increases test suite execution time by ~50%.
        check_plugin_deallocation = (
            os.environ.get('OS_CHECK_PLUGIN_DEALLOCATION') in TRUE_STRING)
        if check_plugin_deallocation:
            # Take a weak reference before clearing so we can tell
            # whether anything else still holds the plugin alive.
            plugin = weakref.ref(nm._instance.plugin)

        nm.clear_instance()

        if check_plugin_deallocation:
            gc.collect()

            # TODO(marun) Ensure that mocks are deallocated?
            if plugin() and not isinstance(plugin(), mock.Base):
                self.fail('The plugin for this test was not deallocated.')

    def setup_coreplugin(self, core_plugin=None):
        # Override the core_plugin option only when a plugin is given;
        # passing None leaves the configured default untouched.
        if core_plugin is not None:
            cfg.CONF.set_override('core_plugin', core_plugin)

    def setup_notification_driver(self, notification_driver=None):
        # Route notifications to the in-memory fake notifier (reset on
        # cleanup) unless the test supplies its own driver list.
        self.addCleanup(fake_notifier.reset)
        if notification_driver is None:
            notification_driver = [fake_notifier.__name__]
        cfg.CONF.set_override("notification_driver", notification_driver)

    @staticmethod
    def config_parse(conf=None, args=None):
        """Create the default configurations."""
        # apmec.conf.test includes rpc_backend which needs to be cleaned up
        if args is None:
            args = ['--config-file', etcdir('apmec.conf.test')]
        if conf is None:
            config.init(args=args)
        else:
            conf(args)

    def setUp(self):
        super(BaseTestCase, self).setUp()

        # Ensure plugin cleanup is triggered last so that
        # test-specific cleanup has a chance to release references.
        self.addCleanup(self.cleanup_core_plugin)

        # Configure this first to ensure pm debugging support for setUp()
        if os.environ.get('OS_POST_MORTEM_DEBUG') in TRUE_STRING:
            self.addOnException(post_mortem_debug.exception_handler)

        # Pick the log level from OS_DEBUG; capture logs into a fixture
        # when OS_LOG_CAPTURE is set, otherwise emit them directly.
        if os.environ.get('OS_DEBUG') in TRUE_STRING:
            _level = logging.DEBUG
        else:
            _level = logging.INFO
        capture_logs = os.environ.get('OS_LOG_CAPTURE') in TRUE_STRING
        if not capture_logs:
            logging.basicConfig(format=LOG_FORMAT, level=_level)
        self.log_fixture = self.useFixture(
            fixtures.FakeLogger(
                format=LOG_FORMAT,
                level=_level,
                nuke_handlers=capture_logs,
            ))

        # suppress all but errors here
        self.useFixture(
            fixtures.FakeLogger(
                name='apmec.api.extensions',
                format=LOG_FORMAT,
                level=logging.ERROR,
                nuke_handlers=capture_logs,
            ))

        # OS_TEST_TIMEOUT: 0 or -1 disables the per-test timeout.
        test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0))
        if test_timeout == -1:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

        # If someone does use tempfile directly, ensure that it's cleaned up
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        # Point state_path at a per-test temp dir so tests never touch
        # real state on disk.
        self.temp_dir = self.useFixture(fixtures.TempDir()).path
        cfg.CONF.set_override('state_path', self.temp_dir)

        self.addCleanup(mock.patch.stopall)
        self.addCleanup(CONF.reset)

        if os.environ.get('OS_STDOUT_CAPTURE') in TRUE_STRING:
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if os.environ.get('OS_STDERR_CAPTURE') in TRUE_STRING:
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        # Make ApmecException fatal so bugs surface instead of being
        # swallowed by the exception machinery.
        self.useFixture(fixtures.MonkeyPatch(
            'apmec.common.exceptions.ApmecException.use_fatal_exceptions',
            fake_use_fatal_exceptions))

        self.useFixture(fixtures.MonkeyPatch(
            'oslo_messaging.Notifier', fake_notifier.FakeNotifier))

        # In-memory 'fake' transport keeps RPC local to the test process.
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)

        self.addCleanup(n_rpc.clear_extra_exmods)
        n_rpc.add_extra_exmods('apmec.test')

        self.addCleanup(n_rpc.cleanup)
        n_rpc.init(CONF)

        if sys.version_info < (2, 7) and getattr(self, 'fmt', '') == 'xml':
            raise self.skipException('XML Testing Skipped in Py26')

    def config(self, **kw):
        """Override some configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.

        All overrides are automatically cleared at the end of the current
        test by the fixtures cleanup process.
        """
        group = kw.pop('group', None)
        for k, v in (kw).items():
            CONF.set_override(k, v, group)

    @contextlib.contextmanager
    def assert_max_execution_time(self, max_execution_time=5):
        # Timeout(..., False) does not raise on expiry; instead control
        # falls through to the code after the with-block, so reaching
        # the fail() call means the timed block did not finish in time.
        with eventlet.timeout.Timeout(max_execution_time, False):
            yield
            return
        self.fail('Execution of this test timed out')
# TOSCA policy type used by alarm-based monitoring tests.
POLICY_ALARMING = 'tosca.policies.apmec.Alarming'
# Alarm actions accepted by the alarm receiver.
DEFAULT_ALARM_ACTIONS = ['respawn', 'log', 'log_and_kill', 'notify']
# Timeouts and sleep intervals below are presumably in seconds —
# TODO(review): confirm against the functional-test callers.
MEA_CIRROS_CREATE_TIMEOUT = 300
MEAC_CREATE_TIMEOUT = 600
MEA_CIRROS_DELETE_TIMEOUT = 300
MEA_CIRROS_DEAD_TIMEOUT = 500
ACTIVE_SLEEP_TIME = 3
DEAD_SLEEP_TIME = 1
SCALE_WINDOW_SLEEP_TIME = 120
NS_CREATE_TIMEOUT = 400
NS_DELETE_TIMEOUT = 300
# python-novaclient API version used by the tests.
NOVA_CLIENT_VERSION = 2
Files with .log suffix have their + # suffix changed to .txt (so browsers will know to open the compressed + # files and not download them). + if [ -d "$path" ] + then + sudo find $path -iname "*.log" -type f -exec mv {} {}.txt \; -exec gzip -9 {}.txt \; + sudo mv $path/* /opt/stack/logs/ + fi +} + +function generate_testr_results { + # Give job user rights to access tox logs + sudo -H -u $owner chmod o+rw . + sudo -H -u $owner chmod o+rw -R .stestr + if [ -f ".stestr/0" ] ; then + .tox/$venv/bin/subunit-1to2 < .stestr/0 > ./stestr.subunit + $SCRIPTS_DIR/subunit2html ./stestr.subunit testr_results.html + gzip -9 ./stestr.subunit + gzip -9 ./testr_results.html + sudo mv ./*.gz /opt/stack/logs/ + fi + + if [[ "$venv" == dsvm-functional* ]] + then + generate_test_logs $log_dir + fi +} + +. ${APMEC_DIR}/apmec/tests/contrib/post_test_hook_lib.sh + +if [[ "$venv" == dsvm-functional* ]] +then + owner=stack + sudo_env= + log_dir="/tmp/${venv}-logs" + . $DEVSTACK_DIR/openrc admin admin + fixup_quota + add_key + add_secgrp +fi + +# Set owner permissions according to job's requirements. +cd $APMEC_DIR +sudo chown -R $owner:stack $APMEC_DIR + +# Run tests +echo "Running apmec $venv test suite" +set +e + +sudo -H -u $owner $sudo_env tox -e $venv +testr_exit_code=$? +set -e + +# Collect and parse results +generate_testr_results +exit $testr_exit_code + diff --git a/apmec/tests/contrib/post_test_hook_lib.sh b/apmec/tests/contrib/post_test_hook_lib.sh new file mode 100644 index 0000000..d33c803 --- /dev/null +++ b/apmec/tests/contrib/post_test_hook_lib.sh @@ -0,0 +1,73 @@ +#!/bin/bash -x +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
# Create the nova keypair "userKey" via add_key unless it already exists.
#
# The existence probe runs inside the `if` condition: the caller
# (post_test_hook.sh) runs under `set -e`, so executing
# `openstack keypair show` as a bare statement and testing `$?`
# afterwards would abort the whole hook on a missing keypair before
# the check could ever run.
function add_key_if_not_exist {
    echo "Adding nova key if not exist"
    if ! openstack keypair show userKey >/dev/null; then
        add_key
    else
        echo "Keypair userKey already exists"
    fi
}
!= "0" ]]; then + add_secgrp + else + echo "Nova security group already exists" + fi +} + +# Adding nova security groups (#1591372). +function add_secgrp { + echo "Adding nova security group" + _create_secgrps + _check_secgrps + echo "nova security group is added" +} diff --git a/apmec/tests/etc/api-paste.ini.test b/apmec/tests/etc/api-paste.ini.test new file mode 100644 index 0000000..7d815f8 --- /dev/null +++ b/apmec/tests/etc/api-paste.ini.test @@ -0,0 +1,8 @@ +[pipeline:extensions_app_with_filter] +pipeline = extensions extensions_test_app + +[filter:extensions] +paste.filter_factory = apmec.common.extensions:plugin_aware_extension_middleware_factory + +[app:extensions_test_app] +paste.app_factory = apmec.tests.unit.test_extensions:app_factory diff --git a/apmec/tests/etc/apmec.conf.test b/apmec/tests/etc/apmec.conf.test new file mode 100644 index 0000000..c2572a3 --- /dev/null +++ b/apmec/tests/etc/apmec.conf.test @@ -0,0 +1,24 @@ +[DEFAULT] +# Show debugging output in logs (sets DEBUG log level output) +debug = False + +# Address to bind the API server +bind_host = 0.0.0.0 + +# Port the bind the API server to +bind_port = 9896 + +# Path to the extensions +api_extensions_path = unit/extensions + +# Paste configuration file +api_paste_config = api-paste.ini.test + +# The messaging module to use, defaults to kombu. +rpc_backend = apmec.openstack.common.rpc.impl_fake + +lock_path = $state_path/lock + +[database] +connection = 'sqlite://' + diff --git a/apmec/tests/etc/rootwrap.d/apmec.test.filters b/apmec/tests/etc/rootwrap.d/apmec.test.filters new file mode 100644 index 0000000..73c614b --- /dev/null +++ b/apmec/tests/etc/rootwrap.d/apmec.test.filters @@ -0,0 +1,12 @@ +# apmec-rootwrap command filters for the unit test + +# this file goes with apmec/tests/unit/_test_rootwrap_exec.py. 
+# See the comments there about how to run those unit tests
vdu_hcpu_usage_respawning: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: average + comparison_operator: gt + metadata: VDU1 + action: [respawn] diff --git a/apmec/tests/etc/samples/sample-tosca-alarm-scale.yaml b/apmec/tests/etc/samples/sample-tosca-alarm-scale.yaml new file mode 100644 index 0000000..81c3d91 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-alarm-scale.yaml @@ -0,0 +1,82 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: SG1} + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - SP1: + type: tosca.policies.apmec.Scaling + targets: [VDU1] + properties: + increment: 1 + cooldown: 60 + min_instances: 1 + max_instances: 3 + default_instances: 2 + + - vdu_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_scaling_out: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: average + comparison_operator: gt + metadata: SG1 + action: [SP1] + + vdu_hcpu_usage_scaling_in: + event_type: + type: tosca.events.resource.utilization + implementation: 
ceilometer + meter_name: cpu_util + condition: + threshold: 10 + constraint: utilization less_than 10% + period: 600 + evaluations: 1 + method: average + comparison_operator: lt + metadata: SG1 + action: [SP1] diff --git a/apmec/tests/etc/samples/sample-tosca-mea-values.yaml b/apmec/tests/etc/samples/sample-tosca-mea-values.yaml new file mode 100644 index 0000000..dd73de4 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mea-values.yaml @@ -0,0 +1,10 @@ +{ + image_name: 'cirros-0.3.5-x86_64-disk', + flavor: 'm1.tiny', + zone: 'nova', + network: 'net_mgmt', + management: 'true', + pkt_in_network: 'net0', + pkt_out_network: 'net1', + vendor: 'apmec' +} diff --git a/apmec/tests/etc/samples/sample-tosca-mead-block-storage.yaml b/apmec/tests/etc/samples/sample-tosca-mead-block-storage.yaml new file mode 100644 index 0000000..84712e6 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-block-storage.yaml @@ -0,0 +1,59 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + name: test-vdu-block-storage + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + name: test-cp + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VB1: + type: tosca.nodes.BlockStorage.Apmec + properties: + size: 1 GB + image: cirros-0.3.5-x86_64-disk + + CB1: + type: tosca.nodes.BlockStorageAttachment + properties: + location: /dev/vdb + requirements: + - virtualBinding: + node: VDU1 + - virtualAttachment: + node: VB1 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + 
vendor: Apmec diff --git a/apmec/tests/etc/samples/sample-tosca-mead-flavor.yaml b/apmec/tests/etc/samples/sample-tosca-mead-flavor.yaml new file mode 100644 index 0000000..22b5a31 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-flavor.yaml @@ -0,0 +1,68 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1_flavor_func: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + disk_size: 1 GB + mem_size: 512 MB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1_flavor_func + + CP2: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1_flavor_func + + CP3: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1_flavor_func + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/apmec/tests/etc/samples/sample-tosca-mead-image.yaml b/apmec/tests/etc/samples/sample-tosca-mead-image.yaml new file mode 100644 index 0000000..02bc814 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-image.yaml @@ -0,0 +1,71 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example with auto image creation + +metadata: + template_name: sample-tosca-mead-image + +topology_template: + node_templates: + VDU1_image_func: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + 
availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + artifacts: + MEAImage_image_func: + type: tosca.artifacts.Deployment.Image.VM + file: http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1_image_func + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1_image_func + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1_image_func + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/apmec/tests/etc/samples/sample-tosca-mead-large-template.yaml b/apmec/tests/etc/samples/sample-tosca-mead-large-template.yaml new file mode 100644 index 0000000..0e4e744 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-large-template.yaml @@ -0,0 +1,137 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead-large-template + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo 
"my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + + CP11: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP12: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname 
is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + echo "my hostname is `hostname`" > /tmp/hostname + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec diff --git a/apmec/tests/etc/samples/sample-tosca-mead-monitor.yaml b/apmec/tests/etc/samples/sample-tosca-mead-monitor.yaml new file mode 100644 index 0000000..61c2107 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-monitor.yaml @@ -0,0 +1,52 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: MEAD With Ping Monitor + +metadata: + template_name: sample-tosca-mead-monitor + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + config: | + param0: key1 + 
param1: key2 + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + monitoring_delay: 45 + timeout: 2 + config_drive: true + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + df -h > /tmp/test.txt + sleep 90 + sudo ifdown eth0 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: ACME diff --git a/apmec/tests/etc/samples/sample-tosca-mead-multi-vdu-monitoring.yaml b/apmec/tests/etc/samples/sample-tosca-mead-multi-vdu-monitoring.yaml new file mode 100644 index 0000000..563dec0 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-multi-vdu-monitoring.yaml @@ -0,0 +1,175 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Multi VDU monitoring example + +metadata: + template_name: sample-tosca-multi-vdu-monitoring + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + mgmt_driver: noop + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + monitoring_delay: 45 + timeout: 2 + + CP11: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: True + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP12: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP13: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: 
VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + monitoring_delay: 45 + timeout: 2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU2 + + CP23: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + monitoring_delay: 45 + timeout: 2 + config_drive: true + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + df -h > /home/cirros/diskinfo + sleep 90 + sudo ifdown eth0 + + CP31: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + + CP32: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU3 + + CP33: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU3 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/apmec/tests/etc/samples/sample-tosca-mead-multi-vdu.yaml 
b/apmec/tests/etc/samples/sample-tosca-mead-multi-vdu.yaml new file mode 100644 index 0000000..2555167 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-multi-vdu.yaml @@ -0,0 +1,152 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP11: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP12: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP13: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU2 + + CP23: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + 
param0: key1 + param1: key2 + + CP31: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + + CP32: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU3 + + CP33: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU3 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/apmec/tests/etc/samples/sample-tosca-mead-no-monitor.yaml b/apmec/tests/etc/samples/sample-tosca-mead-no-monitor.yaml new file mode 100644 index 0000000..45dfd3b --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-no-monitor.yaml @@ -0,0 +1,35 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: MEAD With no Monitor + +metadata: + template_name: sample-tosca-mead-no-monitor + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: APMEC diff --git a/apmec/tests/etc/samples/sample-tosca-mead-param.yaml b/apmec/tests/etc/samples/sample-tosca-mead-param.yaml new file mode 100644 index 0000000..eae5e62 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-param.yaml @@ -0,0 +1,101 @@ 
+tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: MEA TOSCA template with input parameters + +metadata: + template_name: sample-tosca-mead + +topology_template: + inputs: + image_name: + type: string + description: Image Name + + flavor: + type: string + description: Flavor Information + + zone: + type: string + description: Zone Information + + network: + type: string + description: mgmt network + + management: + type: string + description: management network + + pkt_in_network: + type: string + description: In network + + pkt_out_network: + type: string + description: Out network + + vendor: + type: string + description: Vendor information + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: { get_input: image_name} + flavor: {get_input: flavor} + availability_zone: { get_input: zone } + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: { get_input: management } + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: { get_input: network } + vendor: {get_input: vendor} + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: { get_input: pkt_in_network } + vendor: {get_input: vendor} + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: { get_input: pkt_out_network } + vendor: {get_input: vendor} diff --git a/apmec/tests/etc/samples/sample-tosca-mead-static-ip.yaml b/apmec/tests/etc/samples/sample-tosca-mead-static-ip.yaml new file mode 100644 index 
0000000..84b55bc --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead-static-ip.yaml @@ -0,0 +1,72 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: MEAD with predefined properties. + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + ip_address: 192.168.120.225 + anti_spoofing_protection: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: true + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + vendor: ACME + network_name: net_mgmt + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/apmec/tests/etc/samples/sample-tosca-mead.yaml b/apmec/tests/etc/samples/sample-tosca-mead.yaml new file mode 100644 index 0000000..48fb885 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-mead.yaml @@ -0,0 +1,82 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + inputs: + vdu-name: + type: string + description: Vdu name + default: test-vdu + cp-name: + type: string + description: Cp name + default: test-cp + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + name: {get_input : 
vdu-name} + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + key_name: userKey + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + name: {get_input : cp-name} + management: true + anti_spoofing_protection: true + security_groups: + - test_secgrp + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/apmec/tests/etc/samples/sample-tosca-scale-all.yaml b/apmec/tests/etc/samples/sample-tosca-scale-all.yaml new file mode 100644 index 0000000..06427a9 --- /dev/null +++ b/apmec/tests/etc/samples/sample-tosca-scale-all.yaml @@ -0,0 +1,50 @@ + +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: sample-tosca-mead-scaling + +metadata: + template_name: sample-tosca-mead-scaling + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + 
policies: + - SP1: + type: tosca.policies.apmec.Scaling + targets: [VDU1] + properties: + increment: 1 + cooldown: 60 + min_instances: 1 + max_instances: 3 + default_instances: 2 diff --git a/apmec/tests/etc/samples/sample_tosca_assign_floatingip_to_vdu.yaml b/apmec/tests/etc/samples/sample_tosca_assign_floatingip_to_vdu.yaml new file mode 100644 index 0000000..49d97ca --- /dev/null +++ b/apmec/tests/etc/samples/sample_tosca_assign_floatingip_to_vdu.yaml @@ -0,0 +1,44 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Example Floating IP - Allocate one IP from floating network and attach to CP. + +metadata: + template_name: sample-tosca-mead-test-fip-with-floating-network + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 1 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + FIP1: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: public + requirements: + - link: + node: CP1 \ No newline at end of file diff --git a/apmec/tests/etc/samples/sample_tosca_meac.yaml b/apmec/tests/etc/samples/sample_tosca_meac.yaml new file mode 100644 index 0000000..63b4d22 --- /dev/null +++ b/apmec/tests/etc/samples/sample_tosca_meac.yaml @@ -0,0 +1,42 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +metadata: + template_name: sample-tosca-mead-for-meac + +topology_template: + node_templates: + firewall_meac: + type: tosca.nodes.mec.MEAC.Apmec + requirements: + - host: VDU1 + interfaces: + Standard: + create: install_meac.sh + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + mgmt_driver: noop + config: | + 
param0: key1 + param1: key2 + artifacts: + fedora: + type: tosca.artifacts.Deployment.Image.VM + file: https://github.com/bharaththiruveedula/dotfiles/raw/master/fedora-sw.qcow2 + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: private + vendor: Apmec diff --git a/apmec/tests/etc/samples/test-ns-nsd.yaml b/apmec/tests/etc/samples/test-ns-nsd.yaml new file mode 100644 index 0000000..d298982 --- /dev/null +++ b/apmec/tests/etc/samples/test-ns-nsd.yaml @@ -0,0 +1,37 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +imports: + - test-mes-mead1 + - test-mes-mead2 + +topology_template: + inputs: + vl1_name: + type: string + description: name of VL1 virtuallink + default: net_mgmt + vl2_name: + type: string + description: name of VL2 virtuallink + default: net0 + node_templates: + MEA1: + type: tosca.nodes.mec.MEA1 + requirements: + - virtualLink1: VL1 + - virtualLink2: VL2 + + MEA2: + type: tosca.nodes.mec.MEA2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: {get_input: vl1_name} + vendor: apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: {get_input: vl2_name} + vendor: apmec + diff --git a/apmec/tests/etc/samples/test-ns-vnfd1.yaml b/apmec/tests/etc/samples/test-ns-vnfd1.yaml new file mode 100644 index 0000000..f6c78b9 --- /dev/null +++ b/apmec/tests/etc/samples/test-ns-vnfd1.yaml @@ -0,0 +1,98 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example +node_types: + tosca.nodes.mec.MEA1: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + - virtualLink2: + type: tosca.nodes.mec.VL + required: true + capabilities: + forwader1: + type: tosca.capabilities.mec.Forwarder + forwader2: + type: tosca.capabilities.mec.Forwarder + +topology_template: + 
substitution_mappings: + node_type: tosca.nodes.mec.MEA1 + requirements: + virtualLink1: [CP11, virtualLink] + virtualLink2: [CP14, virtualLink] + capabilities: + forwarder1: [CP11, forwarder] + forwarder2: [CP14, forwarder] + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP11: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualBinding: + node: VDU1 + + CP12: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP13: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + CP14: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + diff --git a/apmec/tests/etc/samples/test-ns-vnfd2.yaml b/apmec/tests/etc/samples/test-ns-vnfd2.yaml new file mode 100644 index 0000000..dedeb49 --- /dev/null +++ b/apmec/tests/etc/samples/test-ns-vnfd2.yaml @@ -0,0 +1,68 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +node_types: + tosca.nodes.mec.MEA2: + capabilities: + forwarder1: + type: tosca.capabilities.mec.Forwarder +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA2 + capabilities: + forwarder1: [CP21, forwarder] + node_templates: + VDU1: + type: 
tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + diff --git a/apmec/tests/etc/samples/test-nsd-vnfd1.yaml b/apmec/tests/etc/samples/test-nsd-vnfd1.yaml new file mode 100644 index 0000000..f6c78b9 --- /dev/null +++ b/apmec/tests/etc/samples/test-nsd-vnfd1.yaml @@ -0,0 +1,98 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example +node_types: + tosca.nodes.mec.MEA1: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + - virtualLink2: + type: tosca.nodes.mec.VL + required: true + capabilities: + forwader1: + type: tosca.capabilities.mec.Forwarder + forwader2: + type: tosca.capabilities.mec.Forwarder + +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA1 + requirements: + virtualLink1: [CP11, virtualLink] + virtualLink2: [CP14, virtualLink] + capabilities: + forwarder1: [CP11, forwarder] + forwarder2: [CP14, forwarder] + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP11: + type: tosca.nodes.mec.CP.Apmec + 
properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualBinding: + node: VDU1 + + CP12: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP13: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + CP14: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + diff --git a/apmec/tests/etc/samples/test-nsd-vnfd2.yaml b/apmec/tests/etc/samples/test-nsd-vnfd2.yaml new file mode 100644 index 0000000..dedeb49 --- /dev/null +++ b/apmec/tests/etc/samples/test-nsd-vnfd2.yaml @@ -0,0 +1,68 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +node_types: + tosca.nodes.mec.MEA2: + capabilities: + forwarder1: + type: tosca.capabilities.mec.Forwarder +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA2 + capabilities: + forwarder1: [CP21, forwarder] + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + 
availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + diff --git a/apmec/tests/etc/samples/test-nsd.yaml b/apmec/tests/etc/samples/test-nsd.yaml new file mode 100644 index 0000000..d6fa35f --- /dev/null +++ b/apmec/tests/etc/samples/test-nsd.yaml @@ -0,0 +1,37 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +imports: + - test-mesd-mead1 + - test-mesd-mead2 + +topology_template: + inputs: + vl1_name: + type: string + description: name of VL1 virtuallink + default: net_mgmt + vl2_name: + type: string + description: name of VL2 virtuallink + default: net0 + node_templates: + MEA1: + type: tosca.nodes.mec.MEA1 + requirements: + - virtualLink1: VL1 + - virtualLink2: VL2 + + MEA2: + type: tosca.nodes.mec.MEA2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: {get_input: vl1_name} + vendor: apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: {get_input: vl2_name} + vendor: apmec + diff --git a/apmec/tests/fake_notifier.py b/apmec/tests/fake_notifier.py new file mode 100644 index 0000000..2972fd6 --- /dev/null +++ b/apmec/tests/fake_notifier.py @@ -0,0 +1,52 @@ +# Copyright 2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import functools + + +NOTIFICATIONS = [] + + +def reset(): + del NOTIFICATIONS[:] + + +FakeMessage = collections.namedtuple('Message', + ['publisher_id', 'priority', + 'event_type', 'payload']) + + +class FakeNotifier(object): + + def __init__(self, transport, publisher_id=None, + driver=None, topic=None, + serializer=None, retry=None): + self.transport = transport + self.publisher_id = publisher_id + for priority in ('debug', 'info', 'warn', 'error', 'critical'): + setattr(self, priority, + functools.partial(self._notify, priority=priority.upper())) + + def prepare(self, publisher_id=None): + if publisher_id is None: + publisher_id = self.publisher_id + return self.__class__(self.transport, publisher_id) + + def _notify(self, ctxt, event_type, payload, priority): + msg = dict(publisher_id=self.publisher_id, + priority=priority, + event_type=event_type, + payload=payload) + NOTIFICATIONS.append(msg) diff --git a/apmec/tests/functional/__init__.py b/apmec/tests/functional/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/tests/functional/base.py b/apmec/tests/functional/base.py new file mode 100644 index 0000000..1bb39ab --- /dev/null +++ b/apmec/tests/functional/base.py @@ -0,0 +1,222 @@ +# Copyright 2015 Brocade Communications System, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import time +import yaml + +from keystoneauth1.identity import v3 +from keystoneauth1 import session +from neutronclient.v2_0 import client as neutron_client +from novaclient import client as nova_client +from oslo_config import cfg +from tempest.lib import base + +from apmec.plugins.common import constants as evt_constants +from apmec.tests import constants +from apmec.tests.functional import clients +from apmec.tests.utils import read_file +from apmec import version + +from apmecclient.v1_0 import client as apmec_client + + +CONF = cfg.CONF + + +class BaseApmecTest(base.BaseTestCase): + """Base test case class for all Apmec API tests.""" + + @classmethod + def setUpClass(cls): + super(BaseApmecTest, cls).setUpClass() + kwargs = {} + + cfg.CONF(args=['--config-file', '/etc/apmec/apmec.conf'], + project='apmec', + version='%%prog %s' % version.version_info.release_string(), + **kwargs) + + cls.client = cls.apmecclient() + cls.h_client = cls.heatclient() + + @classmethod + def get_credentials(cls): + vim_params = yaml.safe_load(read_file('local-vim.yaml')) + vim_params['auth_url'] += '/v3' + return vim_params + + @classmethod + def apmecclient(cls): + vim_params = cls.get_credentials() + auth = v3.Password(auth_url=vim_params['auth_url'], + username=vim_params['username'], + password=vim_params['password'], + project_name=vim_params['project_name'], + user_domain_name=vim_params['user_domain_name'], + project_domain_name=vim_params['project_domain_name']) + auth_ses = session.Session(auth=auth) + return apmec_client.Client(session=auth_ses) + + @classmethod + def novaclient(cls): + vim_params = cls.get_credentials() + auth = v3.Password(auth_url=vim_params['auth_url'], + username=vim_params['username'], + password=vim_params['password'], + project_name=vim_params['project_name'], + user_domain_name=vim_params['user_domain_name'], + project_domain_name=vim_params['project_domain_name']) + auth_ses = session.Session(auth=auth) + return 
nova_client.Client(constants.NOVA_CLIENT_VERSION, + session=auth_ses) + + @classmethod + def neutronclient(cls): + vim_params = cls.get_credentials() + auth = v3.Password(auth_url=vim_params['auth_url'], + username=vim_params['username'], + password=vim_params['password'], + project_name=vim_params['project_name'], + user_domain_name=vim_params['user_domain_name'], + project_domain_name=vim_params['project_domain_name']) + auth_ses = session.Session(auth=auth) + return neutron_client.Client(session=auth_ses) + + @classmethod + def heatclient(cls): + data = yaml.safe_load(read_file('local-vim.yaml')) + data['auth_url'] = data['auth_url'] + '/v3' + domain_name = data.pop('domain_name') + data['user_domain_name'] = domain_name + data['project_domain_name'] = domain_name + return clients.OpenstackClients(auth_attr=data).heat + + def wait_until_mea_status(self, mea_id, target_status, timeout, + sleep_interval): + start_time = int(time.time()) + while True: + mea_result = self.client.show_mea(mea_id) + status = mea_result['mea']['status'] + if (status == target_status) or ( + (int(time.time()) - start_time) > timeout): + break + time.sleep(sleep_interval) + + self.assertEqual(status, target_status, + "mea %(mea_id)s with status %(status)s is" + " expected to be %(target)s" % + {"mea_id": mea_id, "status": status, + "target": target_status}) + + def wait_until_mea_active(self, mea_id, timeout, sleep_interval): + self.wait_until_mea_status(mea_id, 'ACTIVE', timeout, + sleep_interval) + + def wait_until_mea_delete(self, mea_id, timeout): + start_time = int(time.time()) + while True: + try: + mea_result = self.client.show_mea(mea_id) + time.sleep(1) + except Exception: + return + status = mea_result['mea']['status'] + if (status != 'PENDING_DELETE') or (( + int(time.time()) - start_time) > timeout): + raise Exception("Failed with status: %s" % status) + + def wait_until_mea_dead(self, mea_id, timeout, sleep_interval): + self.wait_until_mea_status(mea_id, 'DEAD', timeout, + 
sleep_interval) + + def validate_mea_instance(self, mead_instance, mea_instance): + self.assertIsNotNone(mea_instance) + self.assertIsNotNone(mea_instance['mea']['id']) + self.assertIsNotNone(mea_instance['mea']['instance_id']) + self.assertEqual(mea_instance['mea']['mead_id'], mead_instance[ + 'mead']['id']) + + def verify_mea_restart(self, mead_instance, mea_instance): + mea_id = mea_instance['mea']['id'] + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + self.validate_mea_instance(mead_instance, mea_instance) + self.assertIsNotNone(self.client.show_mea(mea_id)['mea']['mgmt_url']) + + self.wait_until_mea_dead( + mea_id, + constants.MEA_CIRROS_DEAD_TIMEOUT, + constants.DEAD_SLEEP_TIME) + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + self.validate_mea_instance(mead_instance, mea_instance) + + def verify_mea_monitor_events(self, mea_id, mea_state_list): + for state in mea_state_list: + params = {'resource_id': mea_id, 'resource_state': state, + 'event_type': evt_constants.RES_EVT_MONITOR} + mea_evt_list = self.client.list_mea_events(**params) + mesg = ("%s - state transition expected." 
% state) + self.assertIsNotNone(mea_evt_list['mea_events'], mesg) + + def verify_mea_crud_events(self, mea_id, evt_type, res_state, + tstamp=None, cnt=1): + params = {'resource_id': mea_id, + 'resource_state': res_state, + 'resource_type': evt_constants.RES_TYPE_MEA, + 'event_type': evt_type} + if tstamp: + params['timestamp'] = tstamp + + mea_evt_list = self.client.list_mea_events(**params) + + self.assertIsNotNone(mea_evt_list['mea_events'], + "List of MEA events are Empty") + self.assertEqual(cnt, len(mea_evt_list['mea_events'])) + + def verify_mead_events(self, mead_id, evt_type, res_state, + tstamp=None, cnt=1): + params = {'resource_id': mead_id, + 'resource_state': res_state, + 'resource_type': evt_constants.RES_TYPE_MEAD, + 'event_type': evt_type} + if tstamp: + params['timestamp'] = tstamp + + mead_evt_list = self.client.list_mead_events(**params) + + self.assertIsNotNone(mead_evt_list['mead_events'], + "List of MEAD events are Empty") + self.assertEqual(cnt, len(mead_evt_list['mead_events'])) + + def get_vim(self, vim_list, vim_name): + if len(vim_list.values()) == 0: + assert False, "vim_list is Empty: Default VIM is missing" + + for vim_list in vim_list.values(): + for vim in vim_list: + if vim['name'] == vim_name: + return vim + return None + + def verify_antispoofing_in_stack(self, stack_id, resource_name): + resource_types = self.h_client.resources + resource_details = resource_types.get(stack_id=stack_id, + resource_name=resource_name) + resource_dict = resource_details.to_dict() + self.assertTrue(resource_dict['attributes']['port_security_enabled']) diff --git a/apmec/tests/functional/clients.py b/apmec/tests/functional/clients.py new file mode 100644 index 0000000..e0d8928 --- /dev/null +++ b/apmec/tests/functional/clients.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from heatclient import client as heatclient +from apmec.tests.functional import keystone + + +class OpenstackClients(object): + + def __init__(self, auth_attr, region_name=None): + super(OpenstackClients, self).__init__() + self.keystone_plugin = keystone.Keystone() + self.heat_client = None + self.keystone_client = None + self.region_name = region_name + self.auth_attr = auth_attr + + def _keystone_client(self): + version = self.auth_attr['auth_url'].rpartition('/')[2] + return self.keystone_plugin.initialize_client(version, + **self.auth_attr) + + def _heat_client(self): + endpoint = self.keystone_session.get_endpoint( + service_type='orchestration', region_name=self.region_name) + return heatclient.Client('1', endpoint=endpoint, + session=self.keystone_session) + + @property + def keystone_session(self): + return self.keystone.session + + @property + def keystone(self): + if not self.keystone_client: + self.keystone_client = self._keystone_client() + return self.keystone_client + + @property + def heat(self): + if not self.heat_client: + self.heat_client = self._heat_client() + return self.heat_client diff --git a/apmec/tests/functional/keystone.py b/apmec/tests/functional/keystone.py new file mode 100644 index 0000000..ad8fb0c --- /dev/null +++ b/apmec/tests/functional/keystone.py @@ -0,0 +1,55 @@ +# Copyright 2016 Brocade Communications System, Inc. +# All Rights Reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystoneauth1 import exceptions +from keystoneauth1.identity import v3 +from keystoneauth1 import session +from keystoneclient import client +from oslo_config import cfg +from oslo_log import log as logging + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class Keystone(object): + """Keystone module for OpenStack VIM + + Handles identity operations for a given OpenStack + instance such as version, session and client + """ + + def get_version(self, base_url=None): + try: + keystone_client = client.Client(auth_url=base_url) + except exceptions.ConnectionError: + raise + return keystone_client.version + + def get_session(self, auth_plugin): + ses = session.Session(auth=auth_plugin) + return ses + + def get_endpoint(self, ses, service_type, region_name=None): + return ses.get_endpoint(service_type, region_name) + + def initialize_client(self, version, **kwargs): + from keystoneclient.v3 import client + auth_plugin = v3.Password(**kwargs) + ses = self.get_session(auth_plugin=auth_plugin) + cli = client.Client(session=ses) + return cli diff --git a/apmec/tests/functional/mem/__init__.py b/apmec/tests/functional/mem/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/tests/functional/mem/test_mea.py b/apmec/tests/functional/mem/test_mea.py new file mode 100644 index 0000000..07970e2 --- /dev/null +++ b/apmec/tests/functional/mem/test_mea.py @@ -0,0 +1,106 @@ +# Copyright 2015 Brocade Communications System, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from apmec.plugins.common import constants as evt_constants +from apmec.tests import constants +from apmec.tests.functional import base +from apmec.tests.utils import read_file + +CONF = cfg.CONF +MEA_CIRROS_CREATE_TIMEOUT = 120 + + +class MeaTestCreate(base.BaseApmecTest): + def _test_create_delete_mea(self, mea_name, mead_name, vim_id=None): + data = dict() + data['tosca'] = read_file('sample-tosca-mead-no-monitor.yaml') + toscal = data['tosca'] + tosca_arg = {'mead': {'name': mead_name, + 'attributes': {'mead': toscal}}} + + # Create mead with tosca template + mead_instance = self.client.create_mead(body=tosca_arg) + self.assertIsNotNone(mead_instance) + + # Create mea with mead_id + mead_id = mead_instance['mead']['id'] + mea_arg = {'mea': {'mead_id': mead_id, 'name': mea_name}} + if vim_id: + mea_arg['mea']['vim_id'] = vim_id + mea_instance = self.client.create_mea(body=mea_arg) + self.validate_mea_instance(mead_instance, mea_instance) + + mea_id = mea_instance['mea']['id'] + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + self.assertIsNotNone(self.client.show_mea(mea_id)['mea']['mgmt_url']) + if vim_id: + self.assertEqual(vim_id, mea_instance['mea']['vim_id']) + + # Get mea details when mea is in active state + mea_details = self.client.list_mea_resources(mea_id)['resources'][0] + self.assertIn('name', mea_details) + 
self.assertIn('id', mea_details) + self.assertIn('type', mea_details) + + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, + evt_constants.PENDING_CREATE, cnt=2) + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE) + + # update VIM name when MEAs are active. + # check for exception. + vim0_id = mea_instance['mea']['vim_id'] + msg = "VIM %s is still in use by MEA" % vim0_id + try: + update_arg = {'vim': {'name': "mea_vim"}} + self.client.update_vim(vim0_id, update_arg) + except Exception as err: + self.assertEqual(err.message, msg) + else: + self.assertTrue( + False, + "Name of vim(s) with active mea(s) should not be changed!") + + # Delete mea_instance with mea_id + try: + self.client.delete_mea(mea_id) + except Exception: + assert False, "mea Delete failed" + + self.wait_until_mea_delete(mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE, + evt_constants.PENDING_DELETE, cnt=2) + + # Delete mead_instance + self.addCleanup(self.client.delete_mead, mead_id) + + def test_create_delete_mea_with_default_vim(self): + self._test_create_delete_mea( + mea_name='test_mea_with_cirros_no_monitoring_default_vim', + mead_name='sample_cirros_mea_no_monitoring_default_vim') + + def test_create_delete_mea_with_vim_id(self): + vim_list = self.client.list_vims() + vim0_id = self.get_vim(vim_list, 'VIM0')['id'] + self._test_create_delete_mea( + vim_id=vim0_id, + mea_name='test_mea_with_cirros_vim_id', + mead_name='sample_cirros_mea_no_monitoring_vim_id') diff --git a/apmec/tests/functional/mem/test_mea_monitoring.py b/apmec/tests/functional/mem/test_mea_monitoring.py new file mode 100644 index 0000000..38b20d0 --- /dev/null +++ b/apmec/tests/functional/mem/test_mea_monitoring.py @@ -0,0 +1,67 @@ +# Copyright 2015 Brocade Communications System, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from apmec.plugins.common import constants as evt_constants +from apmec.tests import constants +from apmec.tests.functional import base +from apmec.tests.utils import read_file + + +class MeaTestPingMonitor(base.BaseApmecTest): + + def _test_mea_with_monitoring(self, mead_file, mea_name): + data = dict() + data['tosca'] = read_file(mead_file) + toscal = data['tosca'] + tosca_arg = {'mead': {'name': mea_name, + 'attributes': {'mead': toscal}}} + + # Create mead with tosca template + mead_instance = self.client.create_mead(body=tosca_arg) + self.assertIsNotNone(mead_instance) + + # Create mea with mead_id + mead_id = mead_instance['mead']['id'] + mea_arg = {'mea': {'mead_id': mead_id, 'name': mea_name}} + mea_instance = self.client.create_mea(body=mea_arg) + + # Verify mea goes from ACTIVE->DEAD->ACTIVE states + self.verify_mea_restart(mead_instance, mea_instance) + + # Delete mea_instance with mea_id + mea_id = mea_instance['mea']['id'] + try: + self.client.delete_mea(mea_id) + except Exception: + assert False, ("Failed to delete mea %s after the monitor test" % + mea_id) + + # Verify MEA monitor events captured for states, ACTIVE and DEAD + mea_state_list = [evt_constants.ACTIVE, evt_constants.DEAD] + self.verify_mea_monitor_events(mea_id, mea_state_list) + + # Delete mead_instance + self.addCleanup(self.client.delete_mead, mead_id) + self.addCleanup(self.wait_until_mea_delete, mea_id, + 
constants.MEA_CIRROS_DELETE_TIMEOUT) + + def test_create_delete_mea_monitoring_tosca_template(self): + self._test_mea_with_monitoring( + 'sample-tosca-mead-monitor.yaml', + 'ping monitor mea with tosca template') + + def test_create_delete_mea_multi_vdu_monitoring_tosca_template(self): + self._test_mea_with_monitoring( + 'sample-tosca-mead-multi-vdu-monitoring.yaml', + 'ping monitor multi vdu mea with tosca template') diff --git a/apmec/tests/functional/mem/test_mem_param.py b/apmec/tests/functional/mem/test_mem_param.py new file mode 100644 index 0000000..a2cb433 --- /dev/null +++ b/apmec/tests/functional/mem/test_mem_param.py @@ -0,0 +1,129 @@ +# Copyright 2015 Brocade Communications System, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import yaml + +from apmec.plugins.common import constants as evt_constants +from apmec.tests import constants +from apmec.tests.functional import base +from apmec.tests.utils import read_file + + +class MemTestParam(base.BaseApmecTest): + def _test_mead_create(self, mead_file, mead_name): + yaml_input = read_file(mead_file) + req_dict = {'mead': {'name': mead_name, + 'attributes': {'mead': yaml_input}}} + + # Create mead + mead_instance = self.client.create_mead(body=req_dict) + self.assertIsNotNone(mead_instance) + mead_id = mead_instance['mead']['id'] + self.assertIsNotNone(mead_id) + self.verify_mead_events( + mead_id, evt_constants.RES_EVT_CREATE, + evt_constants.RES_EVT_ONBOARDED) + return mead_instance + + def _test_mead_delete(self, mead_instance): + # Delete mead + mead_id = mead_instance['mead']['id'] + self.assertIsNotNone(mead_id) + try: + self.client.delete_mead(mead_id) + except Exception: + assert False, "mead Delete failed" + self.verify_mead_events(mead_id, evt_constants.RES_EVT_DELETE, + evt_constants.RES_EVT_NA_STATE) + try: + mead_d = self.client.show_mead(mead_id) + except Exception: + assert True, "Mead Delete success" + str(mead_d) + str(Exception) + + def _test_mea_create(self, mead_instance, mea_name, param_values): + # Create the mea with values + mead_id = mead_instance['mead']['id'] + # Create mea with values file + mea_dict = dict() + mea_dict = {'mea': {'mead_id': mead_id, 'name': mea_name, + 'attributes': {'param_values': param_values}}} + mea_instance = self.client.create_mea(body=mea_dict) + + self.validate_mea_instance(mead_instance, mea_instance) + mea_id = mea_instance['mea']['id'] + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + self.assertIsNotNone(self.client.show_mea(mea_id)['mea']['mgmt_url']) + mea_instance = self.client.show_mea(mea_id) + + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, + evt_constants.PENDING_CREATE, cnt=2) + 
self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE) + + # Verify values dictionary is same as param values from mea_show + + param_values = mea_instance['mea']['attributes']['param_values'] + param_values_dict = yaml.safe_load(param_values) + + return mea_instance, param_values_dict + + def _test_mea_delete(self, mea_instance): + # Delete Mea + mea_id = mea_instance['mea']['id'] + try: + self.client.delete_mea(mea_id) + except Exception: + assert False, "mea Delete failed" + self.wait_until_mea_delete(mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE, + evt_constants.PENDING_DELETE, cnt=2) + + try: + mea_d = self.client.show_mea(mea_id) + except Exception: + assert True, "Mea Delete success" + str(mea_d) + str(Exception) + + def test_mead_param_tosca_template(self): + mead_name = 'sample_cirros_mead_tosca' + mead_instance = self._test_mead_create( + 'sample-tosca-mead-param.yaml', mead_name) + self._test_mead_delete(mead_instance) + + def test_mea_param_tosca_template(self): + mead_name = 'cirros_mead_tosca_param' + mead_instance = self._test_mead_create( + 'sample-tosca-mead-param.yaml', mead_name) + values_str = read_file('sample-tosca-mea-values.yaml') + values_dict = yaml.safe_load(values_str) + mea_instance, param_values_dict = self._test_mea_create(mead_instance, + 'test_mea_with_parameters_tosca_template', + values_dict) + self.assertEqual(values_dict, param_values_dict) + self._test_mea_delete(mea_instance) + mea_id = mea_instance['mea']['id'] + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, + evt_constants.PENDING_CREATE, cnt=2) + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE) + self.wait_until_mea_delete(mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE, + evt_constants.PENDING_DELETE, cnt=2) + 
self.addCleanup(self.client.delete_mead, mead_instance['mead']['id']) diff --git a/apmec/tests/functional/mem/test_tosca_mea.py b/apmec/tests/functional/mem/test_tosca_mea.py new file mode 100644 index 0000000..e5f175e --- /dev/null +++ b/apmec/tests/functional/mem/test_tosca_mea.py @@ -0,0 +1,282 @@ +# Copyright 2016 Brocade Communications System, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import time +import unittest +import yaml + +from novaclient import exceptions +from oslo_config import cfg + +from apmec.plugins.common import constants as evt_constants +from apmec.tests import constants +from apmec.tests.functional import base +from apmec.tests.utils import read_file + + +CONF = cfg.CONF +MEA_CIRROS_CREATE_TIMEOUT = 120 + + +class MeaTestToscaCreate(base.BaseApmecTest): + def _test_create_mea(self, mead_file, mea_name, + template_source="onboarded"): + data = dict() + values_str = read_file(mead_file) + data['tosca'] = values_str + toscal = data['tosca'] + tosca_arg = {'mead': {'name': mea_name, + 'attributes': {'mead': toscal}}} + + if template_source == "onboarded": + # Create mead with tosca template + mead_instance = self.client.create_mead(body=tosca_arg) + self.assertIsNotNone(mead_instance) + + # Create mea with mead_id + mead_id = mead_instance['mead']['id'] + mea_arg = {'mea': {'mead_id': mead_id, 'name': mea_name}} + mea_instance = self.client.create_mea(body=mea_arg) + self.validate_mea_instance(mead_instance, mea_instance) + + if 
template_source == 'inline': + # create mea directly from template + template = yaml.safe_load(values_str) + mea_arg = {'mea': {'mead_template': template, 'name': mea_name}} + mea_instance = self.client.create_mea(body=mea_arg) + mead_id = mea_instance['mea']['mead_id'] + + mea_id = mea_instance['mea']['id'] + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + mea_show_out = self.client.show_mea(mea_id)['mea'] + self.assertIsNotNone(mea_show_out['mgmt_url']) + + input_dict = yaml.safe_load(values_str) + prop_dict = input_dict['topology_template']['node_templates'][ + 'CP1']['properties'] + + # Verify if ip_address is static, it is same as in show_mea + if prop_dict.get('ip_address'): + mgmt_url_input = prop_dict.get('ip_address') + mgmt_info = yaml.safe_load( + mea_show_out['mgmt_url']) + self.assertEqual(mgmt_url_input, mgmt_info['VDU1']) + + # Verify anti spoofing settings + stack_id = mea_show_out['instance_id'] + template_dict = input_dict['topology_template']['node_templates'] + for field in template_dict.keys(): + prop_dict = template_dict[field]['properties'] + if prop_dict.get('anti_spoofing_protection'): + self.verify_antispoofing_in_stack(stack_id=stack_id, + resource_name=field) + + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, + evt_constants.PENDING_CREATE, cnt=2) + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE) + return mead_id, mea_id + + def _test_delete_mea(self, mea_id): + # Delete mea_instance with mea_id + try: + self.client.delete_mea(mea_id) + except Exception: + assert False, "mea Delete failed" + + self.wait_until_mea_delete(mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE, + evt_constants.PENDING_DELETE, cnt=2) + + def _test_cleanup_mead(self, mead_id, mea_id): + # Delete mead_instance + self.addCleanup(self.client.delete_mead, mead_id) + 
self.addCleanup(self.wait_until_mea_delete, mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + + def _test_create_delete_mea_tosca(self, mead_file, mea_name, + template_source): + mead_id, mea_id = self._test_create_mea(mead_file, mea_name, + template_source) + servers = self.novaclient().servers.list() + vdus = [] + for server in servers: + vdus.append(server.name) + self.assertIn('test-vdu', vdus) + + port_list = self.neutronclient().list_ports()['ports'] + vdu_ports = [] + for port in port_list: + vdu_ports.append(port['name']) + self.assertIn('test-cp', vdu_ports) + self._test_delete_mea(mea_id) + if template_source == "onboarded": + self._test_cleanup_mead(mead_id, mea_id) + + def test_create_delete_mea_tosca_from_mead(self): + self._test_create_delete_mea_tosca('sample-tosca-mead.yaml', + 'test_tosca_mea_with_cirros', + 'onboarded') + + def test_create_delete_mea_from_template(self): + self._test_create_delete_mea_tosca('sample-tosca-mead.yaml', + 'test_tosca_mea_with_cirros_inline', + 'inline') + + def test_re_create_delete_mea(self): + self._test_create_delete_mea_tosca('sample-tosca-mead.yaml', + 'test_mea', + 'inline') + time.sleep(1) + self._test_create_delete_mea_tosca('sample-tosca-mead.yaml', + 'test_mea', + 'inline') + + def test_create_delete_mea_static_ip(self): + mead_id, mea_id = self._test_create_mea( + 'sample-tosca-mead-static-ip.yaml', + 'test_tosca_mea_with_cirros_no_monitoring') + self._test_delete_mea(mea_id) + self._test_cleanup_mead(mead_id, mea_id) + + +class MeaTestToscaCreateFlavorCreation(base.BaseApmecTest): + def test_create_delete_mea_tosca_no_monitoring(self): + mead_name = 'tosca_mead_with_auto_flavor' + input_yaml = read_file('sample-tosca-mead-flavor.yaml') + tosca_dict = yaml.safe_load(input_yaml) + tosca_arg = {'mead': {'name': mead_name, 'attributes': {'mead': + tosca_dict}}} + + # Create mead with tosca template + mead_instance = self.client.create_mead(body=tosca_arg) + self.assertIsNotNone(mead_instance) + + # Create mea 
with mead_id + mea_name = 'tosca_mea_with_auto_flavor' + mead_id = mead_instance['mead']['id'] + mea_arg = {'mea': {'mead_id': mead_id, 'name': mea_name}} + mea_instance = self.client.create_mea(body=mea_arg) + + self.validate_mea_instance(mead_instance, mea_instance) + + mea_id = mea_instance['mea']['id'] + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + self.assertIsNotNone(self.client.show_mea(mea_id)['mea']['mgmt_url']) + + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, + evt_constants.PENDING_CREATE, cnt=2) + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE) + + servers = self.novaclient().servers.list() + vdu_server = None + for server in servers: + if 'VDU1_flavor_func' in server.name: + vdu_server = server + break + self.assertIsNotNone(vdu_server) + flavor_id = server.flavor["id"] + nova_flavors = self.novaclient().flavors + flavor = nova_flavors.get(flavor_id) + self.assertIsNotNone(flavor) + self.assertEqual(True, "VDU1_flavor_func_flavor" in flavor.name) + # Delete mea_instance with mea_id + try: + self.client.delete_mea(mea_id) + except Exception: + assert False, "mea Delete failed" + + self.wait_until_mea_delete(mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE, + evt_constants.PENDING_DELETE, cnt=2) + + # Delete mead_instance + self.addCleanup(self.client.delete_mead, mead_id) + self.assertRaises(exceptions.NotFound, nova_flavors.delete, + [flavor_id]) + + +class MeaTestToscaCreateImageCreation(base.BaseApmecTest): + + @unittest.skip("Until BUG 1673099") + def test_create_delete_mea_tosca_no_monitoring(self): + mead_name = 'tosca_mead_with_auto_image' + input_yaml = read_file('sample-tosca-mead-image.yaml') + tosca_dict = yaml.safe_load(input_yaml) + tosca_arg = {'mead': {'name': mead_name, 'attributes': {'mead': + tosca_dict}}} + + # Create mead with tosca 
template + mead_instance = self.client.create_mead(body=tosca_arg) + self.assertIsNotNone(mead_instance) + + # Create mea with mead_id + mead_id = mead_instance['mead']['id'] + mea_name = 'tosca_mea_with_auto_image' + mea_arg = {'mea': {'mead_id': mead_id, 'name': mea_name}} + mea_instance = self.client.create_mea(body=mea_arg) + + self.validate_mea_instance(mead_instance, mea_instance) + + mea_id = mea_instance['mea']['id'] + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + self.assertIsNotNone(self.client.show_mea(mea_id)['mea']['mgmt_url']) + + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, + evt_constants.PENDING_CREATE, cnt=2) + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE) + + servers = self.novaclient().servers.list() + vdu_server = None + for server in servers: + if 'VDU1_image_func' in server.name: + vdu_server = server + break + self.assertIsNotNone(vdu_server) + image_id = vdu_server.image["id"] + nova_images = self.novaclient().images + image = nova_images.get(image_id) + self.assertIsNotNone(image) + self.assertEqual(True, "MEAImage_image_func" in image.name) + # Delete mea_instance with mea_id + try: + self.client.delete_mea(mea_id) + except Exception: + assert False, "mea Delete failed" + + self.wait_until_mea_delete(mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE, + evt_constants.PENDING_DELETE, cnt=2) + + # Delete mead_instance + self.addCleanup(self.client.delete_mead, mead_id) + self.assertRaises(exceptions.NotFound, nova_images.delete, + [image_id]) diff --git a/apmec/tests/functional/mem/test_tosca_mea_alarm.py b/apmec/tests/functional/mem/test_tosca_mea_alarm.py new file mode 100644 index 0000000..539d89e --- /dev/null +++ b/apmec/tests/functional/mem/test_tosca_mea_alarm.py @@ -0,0 +1,151 @@ +# +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import json +import time +import unittest + +from apmec.plugins.common import constants as evt_constants +from apmec.tests import constants +from apmec.tests.functional import base +from apmec.tests.utils import read_file + +import yaml + + +class MeaTestAlarmMonitor(base.BaseApmecTest): + + def _test_mea_tosca_alarm(self, mead_file, mea_name): + mea_trigger_path = '/meas/%s/triggers' + data = dict() + data['tosca'] = read_file(mead_file) + tosca_dict = yaml.safe_load(data['tosca']) + toscal = data['tosca'] + tosca_arg = {'mead': {'name': mea_name, + 'attributes': {'mead': toscal}}} + + # Create mead with tosca template + mead_instance = self.client.create_mead(body=tosca_arg) + self.assertIsNotNone(mead_instance) + + # Create mea with mead_id + mead_id = mead_instance['mead']['id'] + mea_arg = {'mea': {'mead_id': mead_id, 'name': mea_name}} + mea_instance = self.client.create_mea(body=mea_arg) + + self.validate_mea_instance(mead_instance, mea_instance) + + mea_id = mea_instance['mea']['id'] + + def _waiting_time(count): + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + mea = self.client.show_mea(mea_id)['mea'] + # {"VDU1": ["10.0.0.14", "10.0.0.5"]} + self.assertEqual(count, len(json.loads(mea['mgmt_url'])['VDU1'])) + + def trigger_mea(mea, policy_name, policy_action): + credential = 'g0jtsxu9' + body = {"trigger": {'policy_name': policy_name, + 'action_name': policy_action, + 'params': { + 'data': {'alarm_id': 
'35a80852-e24f-46ed-bd34-e2f831d00172', 'current': 'alarm'}, # noqa + 'credential': credential} + } + } + self.client.post(mea_trigger_path % mea, body) + + def _inject_monitoring_policy(mead_dict): + polices = mead_dict['topology_template'].get('policies', []) + mon_policy = dict() + for policy_dict in polices: + for name, policy in policy_dict.items(): + if policy['type'] == constants.POLICY_ALARMING: + triggers = policy['triggers'] + for trigger_name, trigger_dict in triggers.items(): + policy_action_list = trigger_dict['action'] + for policy_action_name in policy_action_list: + mon_policy[trigger_name] = policy_action_name + return mon_policy + + def verify_policy(policy_dict, kw_policy): + for name, action in policy_dict.items(): + if kw_policy in name: + return name + + # trigger alarm + monitoring_policy = _inject_monitoring_policy(tosca_dict) + for mon_policy_name, mon_policy_action in monitoring_policy.items(): + if mon_policy_action in constants.DEFAULT_ALARM_ACTIONS: + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + trigger_mea(mea_id, mon_policy_name, mon_policy_action) + else: + if 'scaling_out' in mon_policy_name: + _waiting_time(2) + time.sleep(constants.SCALE_WINDOW_SLEEP_TIME) + # scaling-out backend action + scaling_out_action = mon_policy_action + '-out' + trigger_mea(mea_id, mon_policy_name, scaling_out_action) + + _waiting_time(3) + + scaling_in_name = verify_policy(monitoring_policy, + kw_policy='scaling_in') + if scaling_in_name: + time.sleep(constants.SCALE_WINDOW_SLEEP_TIME) + # scaling-in backend action + scaling_in_action = mon_policy_action + '-in' + trigger_mea(mea_id, scaling_in_name, scaling_in_action) + + _waiting_time(2) + + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_SCALE, + evt_constants.ACTIVE, cnt=2) + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_SCALE, + evt_constants.PENDING_SCALE_OUT, cnt=1) + self.verify_mea_crud_events( + mea_id, 
evt_constants.RES_EVT_SCALE, + evt_constants.PENDING_SCALE_IN, cnt=1) + # Delete mea_instance with mea_id + try: + self.client.delete_mea(mea_id) + except Exception: + assert False, ("Failed to delete mea %s after the monitor test" % + mea_id) + + # Verify MEA monitor events captured for states, ACTIVE and DEAD + mea_state_list = [evt_constants.ACTIVE, evt_constants.DEAD] + self.verify_mea_monitor_events(mea_id, mea_state_list) + + # Delete mead_instance + self.addCleanup(self.client.delete_mead, mead_id) + self.addCleanup(self.wait_until_mea_delete, mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + + def test_mea_alarm_respawn(self): + self._test_mea_tosca_alarm( + 'sample-tosca-alarm-respawn.yaml', + 'alarm and respawn mea') + + @unittest.skip("Skip and wait for releasing Heat Translator") + def test_mea_alarm_scale(self): + self._test_mea_tosca_alarm( + 'sample-tosca-alarm-scale.yaml', + 'alarm and scale mea') diff --git a/apmec/tests/functional/mem/test_tosca_mea_block_storage.py b/apmec/tests/functional/mem/test_tosca_mea_block_storage.py new file mode 100644 index 0000000..ec1bd44 --- /dev/null +++ b/apmec/tests/functional/mem/test_tosca_mea_block_storage.py @@ -0,0 +1,134 @@ +# Copyright 2016 Brocade Communications System, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import yaml + +from oslo_config import cfg + +from apmec.plugins.common import constants as evt_constants +from apmec.tests import constants +from apmec.tests.functional import base +from apmec.tests.utils import read_file + + +CONF = cfg.CONF +MEA_CIRROS_CREATE_TIMEOUT = 120 + + +class MeaBlockStorageTestToscaCreate(base.BaseApmecTest): + def _test_create_mea(self, mead_file, mea_name, + template_source="onboarded"): + data = dict() + values_str = read_file(mead_file) + data['tosca'] = values_str + toscal = data['tosca'] + tosca_arg = {'mead': {'name': mea_name, + 'attributes': {'mead': toscal}}} + + if template_source == "onboarded": + # Create mead with tosca template + mead_instance = self.client.create_mead(body=tosca_arg) + self.assertIsNotNone(mead_instance) + + # Create mea with mead_id + mead_id = mead_instance['mead']['id'] + mea_arg = {'mea': {'mead_id': mead_id, 'name': mea_name}} + mea_instance = self.client.create_mea(body=mea_arg) + self.validate_mea_instance(mead_instance, mea_instance) + + if template_source == 'inline': + # create mea directly from template + template = yaml.safe_load(values_str) + mea_arg = {'mea': {'mead_template': template, 'name': mea_name}} + mea_instance = self.client.create_mea(body=mea_arg) + mead_id = mea_instance['mea']['mead_id'] + + mea_id = mea_instance['mea']['id'] + self.wait_until_mea_active( + mea_id, + constants.MEA_CIRROS_CREATE_TIMEOUT, + constants.ACTIVE_SLEEP_TIME) + mea_show_out = self.client.show_mea(mea_id)['mea'] + self.assertIsNotNone(mea_show_out['mgmt_url']) + + input_dict = yaml.safe_load(values_str) + prop_dict = input_dict['topology_template']['node_templates'][ + 'CP1']['properties'] + + # Verify if ip_address is static, it is same as in show_mea + if prop_dict.get('ip_address'): + mgmt_url_input = prop_dict.get('ip_address') + mgmt_info = yaml.safe_load( + mea_show_out['mgmt_url']) + self.assertEqual(mgmt_url_input, mgmt_info['VDU1']) + + # Verify anti spoofing settings + stack_id = 
mea_show_out['instance_id'] + template_dict = input_dict['topology_template']['node_templates'] + for field in template_dict.keys(): + prop_dict = template_dict[field]['properties'] + if prop_dict.get('anti_spoofing_protection'): + self.verify_antispoofing_in_stack(stack_id=stack_id, + resource_name=field) + + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, + evt_constants.PENDING_CREATE, cnt=2) + self.verify_mea_crud_events( + mea_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE) + return mead_id, mea_id + + def _test_delete_mea(self, mea_id): + # Delete mea_instance with mea_id + try: + self.client.delete_mea(mea_id) + except Exception: + assert False, "mea Delete failed" + + self.wait_until_mea_delete(mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE, + evt_constants.PENDING_DELETE, cnt=2) + + def _test_cleanup_mead(self, mead_id, mea_id): + # Delete mead_instance + self.addCleanup(self.client.delete_mead, mead_id) + self.addCleanup(self.wait_until_mea_delete, mea_id, + constants.MEA_CIRROS_DELETE_TIMEOUT) + + def _test_create_delete_mea_tosca(self, mead_file, mea_name, + template_source): + mead_id, mea_id = self._test_create_mea(mead_file, mea_name, + template_source) + servers = self.novaclient().servers.list() + vdus = [] + for server in servers: + vdus.append(server.name) + self.assertIn('test-vdu-block-storage', vdus) + + for server in servers: + if server.name == 'test-vdu-block-storage': + server_id = server.id + server_volumes = self.novaclient().volumes\ + .get_server_volumes(server_id) + self.assertTrue(len(server_volumes) > 0) + self._test_delete_mea(mea_id) + if template_source == "onboarded": + self._test_cleanup_mead(mead_id, mea_id) + + def test_create_delete_mea_tosca_from_mead(self): + self._test_create_delete_mea_tosca( + 'sample-tosca-mead-block-storage.yaml', + 'test_tosca_mea_with_cirros', + 'onboarded') diff --git 
class MeaTestToscaFloatingIp(base.BaseApmecTest):
    """Functional test: TOSCA template assigning a floating IP to a VDU."""

    def get_heat_stack_resource(self, stack_id, resource_name):
        """Fetch one Heat stack resource and return it as a plain dict."""
        resource = self.h_client.resources.get(stack_id=stack_id,
                                               resource_name=resource_name)
        return resource.to_dict()

    def connect_public_and_private_nw_with_router(self):
        """Wire the 'public' and 'net_mgmt' networks together via a router.

        Creates a router with a gateway on the public network and an
        interface on the management subnet; all resources are removed
        again through addCleanup handlers.
        """
        neutron = self.neutronclient()

        # Resolve the public/private network ids by name.
        public_id = None
        private_id = None
        for net in neutron.list_networks()['networks']:
            if net['name'] == 'public':
                public_id = net['id']
            if net['name'] == 'net_mgmt':
                private_id = net['id']
            if public_id and private_id:
                break
        self.assertIsNotNone(public_id)
        self.assertIsNotNone(private_id)

        # Resolve the management subnet on the private network.
        subnet_id = None
        for subnet in neutron.list_subnets()['subnets']:
            if (subnet['network_id'] == private_id
                    and subnet['name'] == 'subnet_mgmt'):
                subnet_id = subnet['id']
                break
        self.assertIsNotNone(subnet_id)

        router_id = neutron.create_router(
            {'router': {'name': 'fip_test_router'}})['router']['id']
        self.assertIsNotNone(router_id)
        self.addCleanup(self.neutronclient().delete_router, router_id)

        gateway_id = neutron.add_gateway_router(
            router_id, {'network_id': public_id})['router']['id']
        self.assertIsNotNone(gateway_id)
        self.addCleanup(self.neutronclient().remove_gateway_router,
                        router_id)

        interface_id = neutron.add_interface_router(
            router_id, {'subnet_id': subnet_id})['id']
        self.assertIsNotNone(interface_id)
        self.addCleanup(self.neutronclient().remove_interface_router,
                        router_id, {'subnet_id': subnet_id})

    def test_assign_floatingip_to_vdu(self):
        template = yaml.safe_load(
            read_file('sample_tosca_assign_floatingip_to_vdu.yaml'))
        body = {'mea': {'mead_template': template,
                        'name': 'Assign Floating IP to VDU'}}
        self.connect_public_and_private_nw_with_router()
        mea_id = self.client.create_mea(body=body)['mea']['id']
        # Cleanups run LIFO: delete first, then wait for the deletion.
        self.addCleanup(self.wait_until_mea_delete, mea_id,
                        constants.MEA_CIRROS_DELETE_TIMEOUT)
        self.addCleanup(self.client.delete_mea, mea_id)
        self.wait_until_mea_active(mea_id,
                                   constants.MEA_CIRROS_CREATE_TIMEOUT,
                                   constants.ACTIVE_SLEEP_TIME)
        mea = self.client.show_mea(mea_id)['mea']
        self.assertIsNotNone(mea['mgmt_url'])

        # The floating IP (FIP1) must exist and be bound to port CP1.
        stack_id = mea['instance_id']
        fip = self.get_heat_stack_resource(stack_id, 'FIP1')
        self.assertIsNotNone(fip['attributes']['floating_ip_address'])
        cp = self.get_heat_stack_resource(stack_id, 'CP1')
        self.assertEqual(fip['attributes']['port_id'],
                         cp['attributes']['id'])
class MeaTestToscaMultipleVDU(base.BaseApmecTest):
    """Functional test: MEA lifecycle for a TOSCA template with many VDUs."""

    def test_create_delete_tosca_mea_with_multiple_vdus(self):
        tosca_yaml = read_file('sample-tosca-mead-multi-vdu.yaml')
        mead_body = {'mead': {'name': 'sample-tosca-mead-multi-vdu',
                              'attributes': {
                                  'mead': yaml.safe_load(tosca_yaml)}}}

        # On-board the descriptor, then instantiate it.
        mead_instance = self.client.create_mead(body=mead_body)
        self.assertIsNotNone(mead_instance)
        mead_id = mead_instance['mead']['id']
        mea_instance = self.client.create_mea(
            body={'mea': {'mead_id': mead_id,
                          'name': "test_tosca_mea_with_multiple_vdus"}})
        mea_id = mea_instance['mea']['id']

        self.wait_until_mea_active(mea_id,
                                   constants.MEA_CIRROS_CREATE_TIMEOUT,
                                   constants.ACTIVE_SLEEP_TIME)
        self.assertEqual('ACTIVE',
                         self.client.show_mea(mea_id)['mea']['status'])
        self.validate_mea_instance(mead_instance, mea_instance)

        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_CREATE,
                                    evt_constants.PENDING_CREATE, cnt=2)
        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_CREATE,
                                    evt_constants.ACTIVE)

        # Every VDU in the template must appear in mgmt_url with a valid
        # IPv4 management address.
        mgmt_url = self.client.show_mea(mea_id)['mea']['mgmt_url']
        self.assertIsNotNone(mgmt_url)
        mgmt_dict = yaml.safe_load(str(mgmt_url))

        parsed = yaml.safe_load(tosca_yaml)
        toscautils.updateimports(parsed)
        tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
                                             yaml_dict_tpl=parsed)
        vdus = toscautils.findvdus(tosca)
        self.assertEqual(len(vdus), len(mgmt_dict.keys()))
        for vdu in vdus:
            self.assertIsNotNone(mgmt_dict[vdu.name])
            self.assertEqual(True, utils.is_valid_ipv4(mgmt_dict[vdu.name]))

        # Tear down the MEA synchronously; the MEAD goes via cleanup.
        try:
            self.client.delete_mea(mea_id)
        except Exception:
            assert False, "mea Delete of test_mea_with_multiple_vdus failed"

        self.wait_until_mea_delete(mea_id,
                                   constants.MEA_CIRROS_DELETE_TIMEOUT)
        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE,
                                    evt_constants.PENDING_DELETE, cnt=2)

        self.addCleanup(self.client.delete_mead, mead_id)
class MeaTestToscaScale(base.BaseApmecTest):
    """Functional test for scale-out / scale-in of a TOSCA MEA."""

    @unittest.skip("Skip and wait for releasing Heat Translator")
    def test_mea_tosca_scale(self):
        toscal = read_file('sample-tosca-scale-all.yaml')
        mead_name = 'test_tosca_mea_scale_all'

        # On-board the descriptor carrying the scaling policy.
        mead_instance = self.client.create_mead(
            body={'mead': {'name': mead_name,
                           'attributes': {'mead': toscal}}})
        self.assertIsNotNone(mead_instance)

        # Instantiate it.
        mead_id = mead_instance['mead']['id']
        mea_instance = self.client.create_mea(
            body={'mea': {'mead_id': mead_id,
                          'name': 'test_tosca_mea_scale_all'}})
        self.validate_mea_instance(mead_instance, mea_instance)
        mea_id = mea_instance['mea']['id']

        # TODO(kanagaraj-manickam) once load-balancer support is enabled,
        # update this logic to validate the scaling
        def _wait_for_count(expected):
            self.wait_until_mea_active(mea_id,
                                       constants.MEA_CIRROS_CREATE_TIMEOUT,
                                       constants.ACTIVE_SLEEP_TIME)
            mea = self.client.show_mea(mea_id)['mea']
            # mgmt_url looks like {"VDU1": ["10.0.0.14", "10.0.0.5"]}
            self.assertEqual(expected,
                             len(json.loads(mea['mgmt_url'])['VDU1']))

        _wait_for_count(2)

        # Nested stack resources are visible while the MEA is ACTIVE.
        resource_names = [
            detail['name']
            for detail in self.client.list_mea_resources(mea_id)['resources']]
        self.assertIn('VDU1', resource_names)
        self.assertIn('CP1', resource_names)
        self.assertIn('SP1_group', resource_names)

        def _scale(direction, expected):
            self.client.scale_mea(
                mea_id, {"scale": {'type': direction, 'policy': 'SP1'}})
            _wait_for_count(expected)

        # Scale out, then back in, sitting out the cooldown window first.
        time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
        _scale('out', 3)
        time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
        _scale('in', 2)

        # SCALE OUT produces PENDING_SCALE_OUT then ACTIVE; SCALE IN
        # produces PENDING_SCALE_IN then ACTIVE — two ACTIVE events total.
        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_SCALE,
                                    evt_constants.ACTIVE, cnt=2)
        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_SCALE,
                                    evt_constants.PENDING_SCALE_OUT, cnt=1)
        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_SCALE,
                                    evt_constants.PENDING_SCALE_IN, cnt=1)

        # Delete mea_instance with mea_id
        try:
            self.client.delete_mea(mea_id)
        except Exception:
            assert False, "mea Delete failed"

        # Delete mead_instance
        self.addCleanup(self.client.delete_mead, mead_id)
        self.addCleanup(self.wait_until_mea_delete, mea_id,
                        constants.MEA_CIRROS_DELETE_TIMEOUT)
class MeaTestToscaMEAC(base.BaseApmecTest):
    """Functional test for a MEA carrying a MEAC software deployment."""

    @unittest.skip("Until BUG 1673012")
    def test_create_delete_tosca_meac(self):
        input_yaml = read_file('sample_tosca_meac.yaml')
        tosca_dict = yaml.safe_load(input_yaml)

        # Point the MEAC create hook at the on-disk install script.
        samples_dir = os.path.abspath(os.path.join(
            os.path.dirname(__file__), "../../etc/samples"))
        node_templates = tosca_dict['topology_template']['node_templates']
        node_templates['firewall_meac']['interfaces']['Standard'][
            'create'] = samples_dir + '/install_meac.sh'

        # On-board the descriptor.
        mead_instance = self.client.create_mead(
            body={'mead': {'name': 'sample-tosca-meac',
                           'attributes': {'mead': tosca_dict}}})
        self.assertIsNotNone(mead_instance)

        # Instantiate it.
        mead_id = mead_instance['mead']['id']
        mea_instance = self.client.create_mea(
            body={'mea': {'mead_id': mead_id, 'name': "test_tosca_meac"}})

        mea_id = mea_instance['mea']['id']
        self.wait_until_mea_active(mea_id,
                                   constants.MEAC_CREATE_TIMEOUT,
                                   constants.ACTIVE_SLEEP_TIME)
        self.assertEqual('ACTIVE',
                         self.client.show_mea(mea_id)['mea']['status'])
        self.validate_mea_instance(mead_instance, mea_instance)

        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_CREATE,
                                    evt_constants.PENDING_CREATE, cnt=2)
        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_CREATE,
                                    evt_constants.ACTIVE)

        # Each VDU must expose a valid IPv4 management address.
        mgmt_url = self.client.show_mea(mea_id)['mea']['mgmt_url']
        self.assertIsNotNone(mgmt_url)
        mgmt_dict = yaml.safe_load(str(mgmt_url))

        input_dict = yaml.safe_load(input_yaml)
        toscautils.updateimports(input_dict)
        tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
                                             yaml_dict_tpl=input_dict)
        vdus = toscautils.findvdus(tosca)
        self.assertEqual(len(vdus), len(mgmt_dict.keys()))
        for vdu in vdus:
            self.assertIsNotNone(mgmt_dict[vdu.name])
            self.assertEqual(True, utils.is_valid_ipv4(mgmt_dict[vdu.name]))

        # The SoftwareDeployment resource must have completed successfully.
        heat_stack_id = self.client.show_mea(mea_id)['mea']['instance_id']
        for resource in self.h_client.resources.list(
                stack_id=heat_stack_id):
            resource = resource.to_dict()
            if resource['resource_type'] == SOFTWARE_DEPLOYMENT:
                self.assertEqual('CREATE_COMPLETE',
                                 resource['resource_status'])
                break

        # Delete mea_instance with mea_id
        try:
            self.client.delete_mea(mea_id)
        except Exception:
            assert False, "mea Delete of test_mea_with_multiple_vdus failed"

        self.wait_until_mea_delete(mea_id,
                                   constants.MEA_CIRROS_DELETE_TIMEOUT)
        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE,
                                    evt_constants.PENDING_DELETE, cnt=2)

        # Delete mead_instance
        self.addCleanup(self.client.delete_mead, mead_id)
class MeadTestCreate(base.BaseApmecTest):
    """CRUD functional tests for TOSCA MEA descriptors (MEADs)."""

    def _test_create_list_delete_tosca_mead(self, tosca_mead_file, mead_name):
        """On-board a MEAD from a template file, list it, then delete it."""
        tosca_dict = yaml.safe_load(read_file(tosca_mead_file))
        body = {'mead': {'name': mead_name,
                         'attributes': {'mead': tosca_dict}}}
        mead_instance = self.client.create_mead(body=body)
        self.assertIsNotNone(mead_instance)

        self.assertIsNotNone(self.client.list_meads().get('meads'),
                             "List of meads are Empty after Creation")

        mead_id = mead_instance['mead']['id']
        self.verify_mead_events(mead_id, evt_constants.RES_EVT_CREATE,
                                evt_constants.RES_EVT_ONBOARDED)

        try:
            self.client.delete_mead(mead_id)
        except Exception:
            assert False, "mead Delete failed"
        self.verify_mead_events(mead_id, evt_constants.RES_EVT_DELETE,
                                evt_constants.RES_EVT_NA_STATE)

    def test_tosca_mead(self):
        self._test_create_list_delete_tosca_mead(
            'sample-tosca-mead.yaml',
            'sample-tosca-mead-template')

    def test_tosca_large_mead(self):
        self._test_create_list_delete_tosca_mead(
            'sample-tosca-mead-large-template.yaml',
            'sample-tosca-mead-large-template')

    def test_tosca_re_create_delete_mead(self):
        # Re-creating under the same name must work once the first copy
        # has been deleted.
        self._test_create_list_delete_tosca_mead('sample-tosca-mead.yaml',
                                                 'test_mead')
        time.sleep(1)
        self._test_create_list_delete_tosca_mead('sample-tosca-mead.yaml',
                                                 'test_mead')
class MesdTestCreate(base.BaseApmecTest):
    """Functional tests for MES descriptor (MESD) and MES lifecycle."""

    def _test_create_tosca_mead(self, tosca_mead_file, mead_name):
        """On-board a MEAD from a template file and return its id."""
        tosca_dict = yaml.safe_load(read_file(tosca_mead_file))
        mead_instance = self.client.create_mead(
            body={'mead': {'name': mead_name,
                           'attributes': {'mead': tosca_dict}}})
        self.assertEqual(mead_instance['mead']['name'], mead_name)
        self.assertIsNotNone(mead_instance)

        self.assertIsNotNone(self.client.list_meads().get('meads'),
                             "List of meads are Empty after Creation")
        return mead_instance['mead']['id']

    def _test_create_mesd(self, tosca_mesd_file, mesd_name):
        """On-board a MESD from a template file and return its id."""
        tosca_dict = yaml.safe_load(read_file(tosca_mesd_file))
        mesd_instance = self.client.create_mesd(
            body={'mesd': {'name': mesd_name,
                           'attributes': {'mesd': tosca_dict}}})
        self.assertIsNotNone(mesd_instance)
        return mesd_instance['mesd']['id']

    def _test_delete_mesd(self, mesd_id):
        try:
            self.client.delete_mesd(mesd_id)
        except Exception:
            assert False, "mesd Delete failed"

    def _test_delete_mead(self, mead_id, timeout=constants.MES_DELETE_TIMEOUT):
        """Delete a MEAD, retrying while a MES still references it."""
        start_time = int(time.time())
        while True:
            try:
                self.client.delete_mead(mead_id)
            except exceptions.Conflict:
                # Still in use by a MES being torn down; retry shortly.
                time.sleep(2)
            except Exception:
                assert False, "mead Delete failed"
            else:
                break
            if (int(time.time()) - start_time) > timeout:
                assert False, "mead still in use"
        self.verify_mead_events(mead_id, evt_constants.RES_EVT_DELETE,
                                evt_constants.RES_EVT_NA_STATE)

    def _wait_until_mes_status(self, mes_id, target_status, timeout,
                               sleep_interval):
        """Poll until the MES reaches target_status or timeout elapses."""
        start_time = int(time.time())
        while True:
            status = self.client.show_mes(mes_id)['mes']['status']
            if status == target_status:
                break
            if (int(time.time()) - start_time) > timeout:
                break
            time.sleep(sleep_interval)

        self.assertEqual(status, target_status,
                         "mes %(mes_id)s with status %(status)s is"
                         " expected to be %(target)s" %
                         {"mes_id": mes_id, "status": status,
                          "target": target_status})

    def _wait_until_mes_delete(self, mes_id, timeout):
        """Poll until show_mes fails (deleted) or the state goes bad."""
        start_time = int(time.time())
        while True:
            try:
                mes_result = self.client.show_mes(mes_id)
                time.sleep(2)
            except Exception:
                # The MES is gone — deletion finished.
                return
            status = mes_result['mes']['status']
            if (status != 'PENDING_DELETE' or
                    (int(time.time()) - start_time) > timeout):
                raise Exception("Failed with status: %s" % status)

    def _test_create_delete_mes(self, mesd_file, mes_name,
                                template_source='onboarded'):
        """Create a MES (from an on-boarded or inline MESD) and delete it."""
        mead1_id = self._test_create_tosca_mead('test-mes-mead1.yaml',
                                                'test-mes-mead1')
        mead2_id = self._test_create_tosca_mead('test-mes-mead2.yaml',
                                                'test-mes-mead2')

        attributes = {"param_values": {"mesd": {"vl2_name": "net0",
                                                "vl1_name": "net_mgmt"}}}

        if template_source == 'onboarded':
            mesd_id = self._test_create_mesd(mesd_file, 'test-mes-mesd')
            mes_arg = {'mes': {'mesd_id': mesd_id,
                               'name': mes_name,
                               'attributes': attributes}}
            mes_id = self.client.create_mes(body=mes_arg)['mes']['id']

        if template_source == 'inline':
            template = yaml.safe_load(read_file(mesd_file))
            mes_arg = {'mes': {'name': mes_name,
                               'attributes': attributes,
                               'mesd_template': template}}
            mes_id = self.client.create_mes(body=mes_arg)['mes']['id']

        self._wait_until_mes_status(mes_id, 'ACTIVE',
                                    constants.MES_CREATE_TIMEOUT,
                                    constants.ACTIVE_SLEEP_TIME)
        self.assertIsNotNone(self.client.show_mes(mes_id)['mes']['mgmt_urls'])

        try:
            self.client.delete_mes(mes_id)
        except Exception as e:
            print("Exception:", e)
            assert False, "mes Delete failed"
        if template_source == 'onboarded':
            self._wait_until_mes_delete(mes_id, constants.NS_DELETE_TIMEOUT)
            self._test_delete_mesd(mesd_id)
        self._test_delete_mead(mead1_id)
        self._test_delete_mead(mead2_id)

    def test_create_delete_mesd(self):
        mead1_id = self._test_create_tosca_mead('test-mesd-mead1.yaml',
                                                'test-mesd-mead1')
        mead2_id = self._test_create_tosca_mead('test-mesd-mead2.yaml',
                                                'test-mesd-mead2')
        mesd_id = self._test_create_mesd('test-mesd.yaml', 'test-mesd')
        self._test_delete_mesd(mesd_id)
        self._test_delete_mead(mead1_id)
        self._test_delete_mead(mead2_id)

    def test_create_delete_network_service(self):
        self._test_create_delete_mes('test-mes-mesd.yaml',
                                     'test-mes-onboarded',
                                     template_source='onboarded')
        time.sleep(1)
        self._test_create_delete_mes('test-mes-mesd.yaml',
                                     'test-mes-inline',
                                     template_source='inline')
class VimTestCreate(base.BaseApmecTest):
    """CRUD functional tests for VIM registration."""

    def _test_create_delete_vim(self, vim_file, name, description, vim_type,
                                version=None):
        """Register, read, update, duplicate-check and delete a VIM."""
        data, vim_arg = self._generate_vim_data(
            'local-vim.yaml', name, description, vim_type, version)
        # updated args
        new_name = "fake %s" % name
        new_desc = "fake %s" % description
        update_vim_arg = {'vim': {'name': new_name,
                                  'description': new_desc}}

        # Register vim
        vim_res = self.client.create_vim(vim_arg)
        vim_obj = vim_res['vim']
        vim_id = vim_obj['id']
        self.verify_vim(vim_obj, data, name, description, version)
        self.verify_vim_events(vim_id, evt_constants.RES_EVT_CREATE)

        # Read vim
        vim_show_res = self.client.show_vim(vim_id)
        self.verify_vim(vim_show_res['vim'], data, name, description, version)

        # Update vim
        vim_update = self.client.update_vim(vim_id, update_vim_arg)
        vim_obj = vim_update['vim']
        self.verify_vim(vim_obj, data, new_name, new_desc, version)
        self.verify_vim_events(vim_id, evt_constants.RES_EVT_UPDATE)

        # With the updated name above, create another VIM with the
        # same name and check for Duplicate name exception.
        vim_arg['vim']['name'] = update_vim_arg['vim']['name']
        msg = "vim already exist with given ['tenant_id', 'name', "\
              "'deleted_at']"
        err_msg = None
        try:
            self.client.create_vim(vim_arg)
        except Exception as err:
            # NOTE(review): assumes apmecclient exceptions expose
            # .message — confirm against the client library.
            err_msg = err.message
        # Fail explicitly if the duplicate create unexpectedly succeeded
        # (err_msg would still be None), matching test_duplicate_vim.
        self.assertEqual(err_msg, msg)

        # Since there already exists a DEFAULT VIM, Verify that a update
        # to is_default to TRUE for another VIM raises an exception.
        update_arg = {'vim': {'is_default': True}}
        msg = "Default VIM already exists."
        self.assertRaisesRegex(exceptions.InternalServerError, msg,
                               self.client.update_vim,
                               vim_id, update_arg)

        # Delete vim
        try:
            self.client.delete_vim(vim_id)
        except Exception:
            self.assertFalse(True, "Failed to delete vim %s" % vim_id)
        self.verify_vim_events(vim_id, evt_constants.RES_EVT_DELETE)

    def verify_vim(self, vim_instance, config_data, name, description,
                   version):
        """Check a VIM API response against the source config data."""
        expected_regions = ['RegionOne']
        self.assertIsNotNone(vim_instance)
        self.assertEqual(description, vim_instance['description'])
        self.assertEqual(name, vim_instance['name'])
        self.assertIsNotNone(vim_instance['tenant_id'])
        self.assertIsNotNone(vim_instance['id'])
        self.assertEqual(config_data['username'],
                         vim_instance['auth_cred']['username'])
        # The API must mask the password in responses.
        self.assertEqual(SECRET_PASSWORD,
                         vim_instance['auth_cred']['password'])
        self.assertEqual(expected_regions,
                         vim_instance['placement_attr']['regions'])
        if version:
            # Dispatch to verify_vim_v2 / verify_vim_v3.
            method_name = 'verify_vim_' + version
            getattr(self, method_name)(vim_instance, config_data)

    def verify_vim_events(self, vim_id, evt_type, tstamp=None, cnt=1):
        """Assert exactly *cnt* events of *evt_type* exist for the VIM."""
        params = {'resource_id': vim_id,
                  'resource_type': evt_constants.RES_TYPE_VIM,
                  'event_type': evt_type}
        if tstamp:
            params['timestamp'] = tstamp

        vim_evt_list = self.client.list_vim_events(**params)

        self.assertIsNotNone(vim_evt_list['vim_events'],
                             "List of VIM events are Empty")
        self.assertEqual(cnt, len(vim_evt_list['vim_events']))

    def verify_vim_v2(self, vim_instance, config_data):
        # Keystone v2 stores the project under 'tenant_name'.
        self.assertEqual(config_data['project_name'],
                         vim_instance['auth_cred']['tenant_name'])

    def verify_vim_v3(self, vim_instance, config_data):
        # Keystone v3 stores the project under 'project_name'.
        self.assertEqual(config_data['project_name'],
                         vim_instance['auth_cred']['project_name'])

    def test_create_delete_local_vim(self):
        name = 'Default vim'
        description = 'Local vim description'
        vim_type = 'openstack'
        ks_version = 'v3'
        self._test_create_delete_vim('local-vim.yaml', name, description,
                                     vim_type, ks_version)

    def _generate_vim_data(self, vim_file, name, description, vim_type,
                           version=None):
        """Build (config_data, create_vim request body) from a config file.

        When *version* is given, auth_url is normalized to carry the
        keystone version suffix ('/v2.0' for v2, '/v3' otherwise).
        """
        data = yaml.safe_load(read_file(vim_file))
        password = data['password']
        username = data['username']
        project_name = data['project_name']
        auth_url = data['auth_url']
        if version:
            # Append the suffix only when it is genuinely missing.
            # BUG FIX: the previous check used `or` between the two
            # endswith() tests, which is always true (a URL cannot end
            # with both forms), so the suffix was appended even when
            # already present (e.g. ".../v3" became ".../v3/v3").
            if 'v2' == version:
                if not (auth_url.endswith("/v2.0") or
                        auth_url.endswith("/v2.0/")):
                    auth_url += "/v2.0"
            elif not (auth_url.endswith("/v3") or
                      auth_url.endswith("/v3/")):
                auth_url += "/v3"
        domain_name = data.get('domain_name', None)
        vim_arg = {'vim': {'name': name, 'description': description,
                           'type': vim_type,
                           'auth_url': auth_url,
                           'auth_cred': {'username': username,
                                         'password': password,
                                         'user_domain_name': domain_name},
                           'vim_project': {'name': project_name,
                                           'project_domain_name':
                                           domain_name},
                           'is_default': False}}
        return data, vim_arg

    def test_re_create_delete_local_vim(self):
        name = 'test_vim'
        description = 'Test vim description'
        vim_type = 'openstack'
        ks_version = 'v3'
        self._test_create_delete_vim('local-vim.yaml', name, description,
                                     vim_type, ks_version)
        time.sleep(1)
        self._test_create_delete_vim('local-vim.yaml', name, description,
                                     vim_type, ks_version)

    def test_duplicate_vim(self):
        name = 'test_duplicate_vim'
        description = 'Test duplicate vim description'
        vim_type = 'openstack'
        version = 'v3'
        data, vim_arg = self._generate_vim_data(
            'local-vim.yaml', name, description, vim_type, version)

        # Register vim
        vim_res = self.client.create_vim(vim_arg)
        vim_obj = vim_res['vim']
        vim_id = vim_obj['id']

        # Read vim
        vim_show_res = self.client.show_vim(vim_id)
        self.verify_vim(vim_show_res['vim'], data, name, description, version)

        # A second create with identical args must be rejected.
        msg = "vim already exist with given ['tenant_id', 'name', "\
              "'deleted_at']"
        err_msg = None
        try:
            self.client.create_vim(vim_arg)
        except Exception as err:
            err_msg = err.message
        self.assertEqual(err_msg, msg)
def exception_handler(exc_info):
    """Exception handler enabling post-mortem debugging.

    A class extending testtools.TestCase can add this handler in setUp():

        self.addOnException(post_mortem_debug.exception_handler)

    When an exception occurs, the user is dropped into a pdb session in
    the execution environment of the failure. Frames belonging to the
    testing framework are hidden so the session starts at the assertion
    call rather than the framework code that raised the failure.
    """
    exc_type, exc_value, tb = exc_info
    ignored = get_ignored_traceback(tb)
    if ignored:
        tb = FilteredTraceback(tb, ignored)
    traceback.print_exception(exc_type, exc_value, tb)
    pdb.post_mortem(tb)


def get_ignored_traceback(tb):
    """Return the first traceback of an ignored trailing chain, or None.

    A traceback is "ignored" when its frame's globals define the
    __unittest marker variable (the criterion used by
    unittest.TestResult._is_relevant_tb_level). Only a chain of ignored
    tracebacks at the *tail* of the list counts; ignored frames followed
    by relevant ones are kept.
    """
    # Flatten the traceback chain into a list.
    chain = []
    while tb:
        chain.append(tb)
        tb = tb.tb_next

    # Walk backwards while frames are ignorable; the last one accepted
    # is the head of the trailing ignored chain.
    first_ignored = None
    for entry in reversed(chain):
        if '__unittest' not in entry.tb_frame.f_globals:
            break
        first_ignored = entry
    return first_ignored


class FilteredTraceback(object):
    """Wraps a traceback to hide a trailing chain of unwanted frames."""

    def __init__(self, tb, filtered_traceback):
        """Constructor.

        :param tb: The start of the traceback chain to filter.
        :param filtered_traceback: The first traceback of a trailing
            chain that is to be filtered.
        """
        self._tb = tb
        self._filtered_traceback = filtered_traceback
        # Mirror the read-only attributes pdb/traceback expect.
        self.tb_lasti = tb.tb_lasti
        self.tb_lineno = tb.tb_lineno
        self.tb_frame = tb.tb_frame

    @property
    def tb_next(self):
        # Stop the chain just before the filtered tail.
        successor = self._tb.tb_next
        if successor and successor != self._filtered_traceback:
            return FilteredTraceback(successor, self._filtered_traceback)
        return None
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""setup_mock_calls and verify_mock_calls are convenient methods +to setup a sequence of mock calls. + +expected_calls_and_values is a list of (expected_call, return_value): + + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port", + self.BR_NAME, pname], root_helper=self.root_helper), + None), + (mock.call(["ovs-vsctl", self.TO, "set", "Interface", + pname, "type=gre"], root_helper=self.root_helper), + None), + .... + ] + +* expected_call should be mock.call(expected_arg, ....) +* return_value is passed to side_effect of a mocked call. + A return value or an exception can be specified. +""" + + +def setup_mock_calls(mocked_call, expected_calls_and_values): + return_values = [call[1] for call in expected_calls_and_values] + mocked_call.side_effect = return_values + + +def verify_mock_calls(mocked_call, expected_calls_and_values): + expected_calls = [call[0] for call in expected_calls_and_values] + mocked_call.assert_has_calls(expected_calls) diff --git a/apmec/tests/unit/__init__.py b/apmec/tests/unit/__init__.py new file mode 100644 index 0000000..5f0b0ab --- /dev/null +++ b/apmec/tests/unit/__init__.py @@ -0,0 +1,24 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from oslo_config import cfg + + +reldir = os.path.join(os.path.dirname(__file__), '..', '..', '..') +absdir = os.path.abspath(reldir) +cfg.CONF.state_path = absdir +cfg.CONF.use_stderr = False diff --git a/apmec/tests/unit/_test_rootwrap_exec.py b/apmec/tests/unit/_test_rootwrap_exec.py new file mode 100644 index 0000000..3e96af7 --- /dev/null +++ b/apmec/tests/unit/_test_rootwrap_exec.py @@ -0,0 +1,82 @@ +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import fixtures +from oslo_log import log as logging + +from apmec.agent.linux import utils +from apmec.tests import base + + +LOG = logging.getLogger(__name__) + + +class RootwrapTestExec(base.BaseTestCase): + """Simple unit test to test the basic rootwrap mechanism + + Essentially hello-world. Just run a command as root and check that + it actually *did* run as root, and generated the right output. + + NB that this is named _test_rootwrap so as not to get run by default + from scripts like tox. 
That's because it actually executes a sudo'ed + command, and that won't work in the automated test environment, at + least as it stands today. To run this, rename it to + test_rootwrap.py, or run it by hand. + """ + + def setUp(self): + super(RootwrapTestExec, self).setUp() + self.cwd = os.getcwd() + "/../../.." + # stuff a stupid bash script into /tmp, so that the next + # method can execute it. + self.test_file = self.useFixture( + fixtures.TempDir()).join("rootwrap-test.sh") + with open(self.test_file, 'w') as f: + f.write('#!/bin/bash\n') + f.write('ID=`id | sed \'s/uid=//\' | sed \'s/(.*//\' `\n') + f.write("echo $ID $1\ +\" Now is the time for all good men to come \ +to the aid of their party.\"\n") + # we need a temporary conf file, pointing into pwd for the filter + # specs. there's probably a better way to do this, but I couldn't + # figure it out. 08/15/12 -- jrd + self.conf_file = self.useFixture( + fixtures.TempDir()).join("rootwrap.conf") + with open(self.conf_file, 'w') as f: + f.write("# temporary conf file for rootwrap-test, " + + "generated by test_rootwrap.py\n") + f.write("[DEFAULT]\n") + f.write("filters_path=" + self.cwd + + "/apmec/tests/etc/rootwrap.d/") + # now set the root helper to sudo our rootwrap script, + # with the new conf + self.root_helper = "sudo " + self.cwd + "/bin/apmec-rootwrap " + self.root_helper += self.conf_file + + def runTest(self): + try: + result = utils.execute(["bash", self.test_file, 'arg'], + self.root_helper) + self.assertEqual("0 arg Now is the time for all good men to \ +come to the aid of their party.", result) + except Exception: + LOG.exception("Losing in rootwrap test") + + def tearDown(self): + os.remove(self.test_file) + os.remove(self.conf_file) + super(RootwrapTestExec, self).tearDown() diff --git a/apmec/tests/unit/base.py b/apmec/tests/unit/base.py new file mode 100644 index 0000000..44a6183 --- /dev/null +++ b/apmec/tests/unit/base.py @@ -0,0 +1,33 @@ +# Copyright 2016 Brocade Communications System, 
Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import mock +from oslo_config import cfg +from oslo_config import fixture as config_fixture +from oslotest import base + +CONF = cfg.CONF + + +class TestCase(base.BaseTestCase): + + def setUp(self): + super(TestCase, self).setUp() + self.config_fixture = self.useFixture(config_fixture.Config(CONF)) + + def _mock(self, target, new=mock.DEFAULT): + patcher = mock.patch(target, new) + return patcher.start() diff --git a/apmec/tests/unit/database_stubs.py b/apmec/tests/unit/database_stubs.py new file mode 100644 index 0000000..d890c9c --- /dev/null +++ b/apmec/tests/unit/database_stubs.py @@ -0,0 +1,184 @@ +# Copyright 2011, Cisco Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""stubs.py provides interface methods for the database test cases""" + +from oslo_log import log as logging + +from apmec.db import api as db + +LOG = logging.getLogger(__name__) + + +class ApmecDB(object): + """Class conisting of methods to call Apmec db methods.""" + def get_all_networks(self, tenant_id): + """Get all networks.""" + nets = [] + try: + for net in db.network_list(tenant_id): + LOG.debug("Getting network: %s", net.uuid) + net_dict = {} + net_dict["tenant_id"] = net.tenant_id + net_dict["id"] = str(net.uuid) + net_dict["name"] = net.name + nets.append(net_dict) + except Exception as exc: + LOG.error("Failed to get all networks: %s", str(exc)) + return nets + + def get_network(self, network_id): + """Get a network.""" + net = [] + try: + for net in db.network_get(network_id): + LOG.debug("Getting network: %s", net.uuid) + net_dict = {} + net_dict["tenant_id"] = net.tenant_id + net_dict["id"] = str(net.uuid) + net_dict["name"] = net.name + net.append(net_dict) + except Exception as exc: + LOG.error("Failed to get network: %s", str(exc)) + return net + + def create_network(self, tenant_id, net_name): + """Create a network.""" + net_dict = {} + try: + res = db.network_create(tenant_id, net_name) + LOG.debug("Created network: %s", res.uuid) + net_dict["tenant_id"] = res.tenant_id + net_dict["id"] = str(res.uuid) + net_dict["name"] = res.name + return net_dict + except Exception as exc: + LOG.error("Failed to create network: %s", str(exc)) + + def delete_network(self, net_id): + """Delete a network.""" + try: + net = db.network_destroy(net_id) + LOG.debug("Deleted network: %s", net.uuid) + net_dict = {} + net_dict["id"] = str(net.uuid) + return net_dict + except Exception as exc: + LOG.error("Failed to delete network: %s", str(exc)) + + def update_network(self, tenant_id, net_id, param_data): + """Rename a network.""" + try: + net = db.network_update(net_id, tenant_id, **param_data) + LOG.debug("Updated network: %s", net.uuid) + net_dict = {} + 
net_dict["id"] = str(net.uuid) + net_dict["name"] = net.name + return net_dict + except Exception as exc: + LOG.error("Failed to update network: %s", str(exc)) + + def get_all_ports(self, net_id): + """Get all ports.""" + ports = [] + try: + for port in db.port_list(net_id): + LOG.debug("Getting port: %s", port.uuid) + port_dict = {} + port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + ports.append(port_dict) + return ports + except Exception as exc: + LOG.error("Failed to get all ports: %s", str(exc)) + + def get_port(self, net_id, port_id): + """Get a port.""" + port_list = [] + port = db.port_get(port_id, net_id) + try: + LOG.debug("Getting port: %s", port.uuid) + port_dict = {} + port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + port_list.append(port_dict) + return port_list + except Exception as exc: + LOG.error("Failed to get port: %s", str(exc)) + + def create_port(self, net_id): + """Add a port.""" + port_dict = {} + try: + port = db.port_create(net_id) + LOG.debug("Creating port %s", port.uuid) + port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + return port_dict + except Exception as exc: + LOG.error("Failed to create port: %s", str(exc)) + + def delete_port(self, net_id, port_id): + """Delete a port.""" + try: + port = db.port_destroy(port_id, net_id) + LOG.debug("Deleted port %s", port.uuid) + port_dict = {} + port_dict["id"] = str(port.uuid) + return port_dict + except Exception as exc: + LOG.error("Failed to delete port: %s", str(exc)) + + def update_port(self, net_id, port_id, **kwargs): + """Update a port.""" + try: + port = db.port_update(port_id, net_id, **kwargs) + LOG.debug("Updated port %s", port.uuid) + port_dict = {} + 
port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + return port_dict + except Exception as exc: + LOG.error("Failed to update port state: %s", str(exc)) + + def plug_interface(self, net_id, port_id, int_id): + """Plug interface to a port.""" + try: + port = db.port_set_attachment(port_id, net_id, int_id) + LOG.debug("Attached interface to port %s", port.uuid) + port_dict = {} + port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + return port_dict + except Exception as exc: + LOG.error("Failed to plug interface: %s", str(exc)) + + def unplug_interface(self, net_id, port_id): + """Unplug interface to a port.""" + try: + db.port_unset_attachment(port_id, net_id) + LOG.debug("Detached interface from port %s", port_id) + except Exception as exc: + LOG.error("Failed to unplug interface: %s", str(exc)) diff --git a/apmec/tests/unit/db/__init__.py b/apmec/tests/unit/db/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/tests/unit/db/base.py b/apmec/tests/unit/db/base.py new file mode 100644 index 0000000..de327b5 --- /dev/null +++ b/apmec/tests/unit/db/base.py @@ -0,0 +1,51 @@ +# Copyright 2015 Brocade Communications System, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import fixtures

from apmec.common import config
from apmec.db import api as db_api
from apmec.db import model_base
from apmec.tests.unit import base


class SqlFixture(fixtures.Fixture):
    """Fixture that creates the schema once and wipes rows after each test."""

    # Class-level flag: the model tables are created only once per process.
    _TABLES_ESTABLISHED = False

    def setUp(self):
        super(SqlFixture, self).setUp()
        db_engine = db_api.get_engine()

        # Lazily build all registered model tables on first use.
        if not SqlFixture._TABLES_ESTABLISHED:
            model_base.BASE.metadata.create_all(db_engine)
            SqlFixture._TABLES_ESTABLISHED = True

        def _wipe_rows():
            # Delete children before parents by walking the dependency-
            # sorted table list in reverse, inside a single transaction.
            with db_engine.begin() as connection:
                for tbl in reversed(model_base.BASE.metadata.sorted_tables):
                    connection.execute(tbl.delete())

        self.addCleanup(_wipe_rows)


class SqlTestCase(base.TestCase):
    """Base test case wiring up DB defaults and the SQL fixture."""

    def setUp(self):
        config.set_db_defaults()
        super(SqlTestCase, self).setUp()
        self.useFixture(SqlFixture())
+ +import codecs +from datetime import datetime +import os +import yaml + + +DUMMY_mes_2_NAME = 'dummy_mes_2' + + +def _get_template(name): + filename = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "../mem/infra_drivers/openstack/data/", name) + f = codecs.open(filename, encoding='utf-8', errors='strict') + return f.read() + +tosca_mead_openwrt = _get_template('test_tosca_openwrt.yaml') +config_data = _get_template('config_data.yaml') +update_config_data = _get_template('update_config_data.yaml') +mead_scale_tosca_template = _get_template('tosca_scale.yaml') +mead_alarm_respawn_tosca_template = _get_template( + 'test_tosca_mead_alarm_respawn.yaml') +mead_alarm_scale_tosca_template = _get_template( + 'test_tosca_mead_alarm_scale.yaml') +mead_alarm_multi_actions_tosca_template = _get_template( + 'test_tosca_mead_alarm_multi_actions.yaml') +mesd_tosca_template = yaml.safe_load(_get_template('tosca_mesd_template.yaml')) + + +def get_dummy_mead_obj(): + return {u'mead': {u'service_types': [{u'service_type': u'mead'}], + 'name': 'dummy_mead', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + u'attributes': {u'mead': yaml.safe_load( + tosca_mead_openwrt)}, + 'description': 'dummy_mead_description', + 'template_source': 'onboarded', + u'auth': {u'tenantName': u'admin', u'passwordCredentials': { + u'username': u'admin', u'password': u'devstack'}}}} + + +def get_dummy_mead_obj_inline(): + return {u'mead': {u'service_types': [{u'service_type': u'mead'}], + 'name': 'tmpl-koeak4tqgoqo8cr4-dummy_inline_mea', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + u'attributes': {u'mead': yaml.safe_load( + tosca_mead_openwrt)}, + 'template_source': 'inline', + u'auth': {u'tenantName': u'admin', u'passwordCredentials': { + u'username': u'admin', u'password': u'devstack'}}}} + + +def get_dummy_inline_mea_obj(): + return {'mea': {'description': 'dummy_inline_mea_description', + 'mead_template': yaml.safe_load(tosca_mead_openwrt), + 'vim_id': 
u'6261579e-d6f3-49ad-8bc3-a9cb974778ff', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'name': 'dummy_inline_mea', + 'attributes': {}, + 'mead_id': None}} + + +def get_dummy_mea_obj(): + return {'mea': {'description': 'dummy_mea_description', + 'mead_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e', + 'vim_id': u'6261579e-d6f3-49ad-8bc3-a9cb974778ff', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'name': 'dummy_mea', + 'deleted_at': datetime.min, + 'attributes': {}, + 'mead_template': None}} + + +def get_dummy_mea_config_obj(): + return {'mea': {u'attributes': {u'config': {'vdus': {'vdu1': { + 'config': {'firewall': 'dummy_firewall_values'}}}}}}} + + +def get_dummy_device_obj(): + return {'status': 'PENDING_CREATE', 'instance_id': None, 'name': + u'test_openwrt', 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'mead_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e', + 'mead': { + 'service_types': [{'service_type': u'mead', + 'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}], + 'description': u'OpenWRT with services', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'mgmt_driver': u'openwrt', + 'attributes': {u'mead': tosca_mead_openwrt}, + 'id': u'fb048660-dc1b-4f0f-bd89-b023666650ec', + 'name': u'openwrt_services'}, + 'mgmt_url': None, 'service_context': [], + 'attributes': {u'param_values': u''}, + 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123', + 'description': u'OpenWRT with services'} + + +def get_dummy_mea_config_attr(): + return {'status': 'PENDING_CREATE', 'instance_id': None, 'name': + u'test_openwrt', 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'mead_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e', + 'mead': {'service_types': [{'service_type': u'mead', + 'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}], + 'description': u'OpenWRT with services', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'mgmt_driver': u'openwrt', + 'attributes': {u'mead': tosca_mead_openwrt}, + 'id': u'fb048660-dc1b-4f0f-bd89-b023666650ec', 'name': + 
u'openwrt_services'}, 'mgmt_url': None, 'service_context': [], + 'attributes': {u'config': config_data}, + 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123', + 'description': u'OpenWRT with services'} + + +def get_dummy_mea_update_config(): + return {'mea': {'attributes': {'config': update_config_data}}} + + +def get_vim_obj(): + return {'vim': {'type': 'openstack', 'auth_url': + 'http://localhost:5000', 'vim_project': {'name': + 'test_project'}, 'auth_cred': {'username': 'test_user', + 'password': + 'test_password'}, + 'name': 'VIM0', + 'tenant_id': 'test-project'}} + + +def get_vim_auth_obj(): + return {'username': 'test_user', + 'password': 'test_password', + 'project_id': None, + 'project_name': 'test_project', + 'auth_url': 'http://localhost:5000/v3', + 'user_domain_name': 'default', + 'project_domain_name': 'default'} + +def get_dummy_mesd_obj(): + return {'mesd': {'description': 'dummy_mesd_description', + 'name': 'dummy_MESD', + 'tenant_id': u'8819a1542a5948b68f94d4be0fd50496', + 'attributes': {u'mesd': mesd_tosca_template}, + 'template_source': 'onboarded'}} + + +def get_dummy_mesd_obj_inline(): + return {'mesd': {'description': 'dummy_mesd_description_inline', + 'name': 'dummy_MESD_inline', + 'tenant_id': u'8819a1542a5948b68f94d4be0fd50496', + 'attributes': {u'mesd': mesd_tosca_template}, + 'template_source': 'inline'}} + + +def get_dummy_mes_obj(): + return {'mes': {'description': 'dummy_mes_description', + 'id': u'ba6bf017-f6f7-45f1-a280-57b073bf78ea', + 'mesd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e', + 'vim_id': u'6261579e-d6f3-49ad-8bc3-a9cb974778ff', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'name': 'dummy_mes', + 'attributes': { + 'param_values': {'mesd': {'vl1_name': 'net_mgmt', + 'vl2_name': 'net0'}}}}} + + +def get_dummy_mes_obj_inline(): + return {'mes': {'description': 'dummy_mes_description_inline', + 'id': u'ff35e3f0-0a11-4071-bce6-279fdf1c8bf9', + 'vim_id': u'6261579e-d6f3-49ad-8bc3-a9cb974778ff', + 'tenant_id': 
u'ad7ebc56538745a08ef7c5e97f8bd437', + 'name': 'dummy_mes_inline', + 'attributes': { + 'param_values': {'mesd': {'vl1_name': 'net_mgmt', + 'vl2_name': 'net0'}}}, + 'mesd_template': mesd_tosca_template}} + + +def get_dummy_mes_obj_2(): + return {'mes': {'description': 'dummy_mes_description', + 'id': u'ba6bf017-f6f7-45f1-a280-57b073bf78ea', + 'mesd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e', + 'vim_id': u'6261579e-d6f3-49ad-8bc3-a9cb974778ff', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'name': DUMMY_mes_2_NAME, + 'attributes': { + 'param_values': {'mesd': {'vl1_name': 'net_mgmt', + 'vl2_name': 'net0'}}}}} diff --git a/apmec/tests/unit/extension_stubs.py b/apmec/tests/unit/extension_stubs.py new file mode 100644 index 0000000..ae0d373 --- /dev/null +++ b/apmec/tests/unit/extension_stubs.py @@ -0,0 +1,77 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +from apmec.api import extensions +from apmec import wsgi + + +class StubExtension(object): + + def __init__(self, alias="stub_extension"): + self.alias = alias + + def get_name(self): + return "Stub Extension" + + def get_alias(self): + return self.alias + + def get_description(self): + return "" + + def get_namespace(self): + return "" + + def get_updated(self): + return "" + + +class StubPlugin(object): + + def __init__(self, supported_extensions=None): + self.supported_extension_aliases = supported_extensions \ + if supported_extensions else [] + + +class ExtensionExpectingPluginInterface(StubExtension): + """Expect plugin to implement all methods in StubPluginInterface. + + This extension expects plugin to implement all the methods defined + in StubPluginInterface. + """ + + def get_plugin_interface(self): + return StubPluginInterface + + +class StubPluginInterface(extensions.PluginInterface): + + @abc.abstractmethod + def get_foo(self, bar=None): + pass + + +class StubBaseAppController(wsgi.Controller): + + def index(self, request): + return "base app index" + + def show(self, request, id): + return {'fort': 'knox'} + + def update(self, request, id): + return {'uneditable': 'original_value'} diff --git a/apmec/tests/unit/extensions/__init__.py b/apmec/tests/unit/extensions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/tests/unit/extensions/extendedattribute.py b/apmec/tests/unit/extensions/extendedattribute.py new file mode 100644 index 0000000..8c7bd83 --- /dev/null +++ b/apmec/tests/unit/extensions/extendedattribute.py @@ -0,0 +1,54 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from apmec.api import extensions + +EXTENDED_ATTRIBUTE = 'extended_attribute' +EXTENDED_ATTRIBUTES_2_0 = { + 'ext_test_resources': { + EXTENDED_ATTRIBUTE: {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'default': None, 'is_visible': True}, + } +} + + +class Extendedattribute(extensions.ExtensionDescriptor): + """Extension class supporting extended attribute for router.""" + + @classmethod + def get_name(cls): + return "Extended Extension Attributes" + + @classmethod + def get_alias(cls): + return "extended-ext-attr" + + @classmethod + def get_description(cls): + return "Provides extended_attr attribute to router" + + @classmethod + def get_namespace(cls): + return "" + + @classmethod + def get_updated(cls): + return "2013-02-05T00:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/apmec/tests/unit/extensions/extensionattribute.py b/apmec/tests/unit/extensions/extensionattribute.py new file mode 100644 index 0000000..e55dd0e --- /dev/null +++ b/apmec/tests/unit/extensions/extensionattribute.py @@ -0,0 +1,102 @@ +# Copyright 2013 VMware, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from apmec.api import extensions +from apmec.api.v1 import base +from apmec import manager + + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'ext_test_resources': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + } +} + + +class Extensionattribute(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Extension Test Resource" + + @classmethod + def get_alias(cls): + return "ext-obj-test" + + @classmethod + def get_description(cls): + return "Extension Test Resource" + + @classmethod + def get_namespace(cls): + return "" + + @classmethod + def get_updated(cls): + return "2013-02-05T10:00:00-00:00" + + def update_attributes_map(self, attributes): + super(Extensionattribute, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + plugin = manager.ApmecManager.get_plugin() + resource_name = 'ext_test_resource' + collection_name = resource_name + "s" + params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict()) + + controller = base.create_resource(collection_name, + resource_name, + plugin, params, + member_actions={}) + + ex = extensions.ResourceExtension(collection_name, 
+ controller, + member_actions={}) + exts.append(ex) + + return exts + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +class ExtensionObjectTestPluginBase(object): + + @abc.abstractmethod + def create_ext_test_resource(self, context, router): + pass + + @abc.abstractmethod + def get_ext_test_resource(self, context, id, fields=None): + pass diff --git a/apmec/tests/unit/extensions/foxinsocks.py b/apmec/tests/unit/extensions/foxinsocks.py new file mode 100644 index 0000000..6547e55 --- /dev/null +++ b/apmec/tests/unit/extensions/foxinsocks.py @@ -0,0 +1,109 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from oslo_serialization import jsonutils + +from apmec.api import extensions +from apmec import wsgi + + +class FoxInSocksController(wsgi.Controller): + + def index(self, request): + return "Try to say this Mr. Knox, sir..." 
+ + +class FoxInSocksPluginInterface(extensions.PluginInterface): + + @abc.abstractmethod + def method_to_support_foxnsox_extension(self): + pass + + +class Foxinsocks(object): + + def __init__(self): + pass + + def get_plugin_interface(self): + return FoxInSocksPluginInterface + + def get_name(self): + return "Fox In Socks" + + def get_alias(self): + return "FOXNSOX" + + def get_description(self): + return "The Fox In Socks Extension" + + def get_namespace(self): + return "http://www.fox.in.socks/api/ext/pie/v1.0" + + def get_updated(self): + return "2011-01-22T13:25:27-06:00" + + def get_resources(self): + resources = [] + resource = extensions.ResourceExtension('foxnsocks', + FoxInSocksController()) + resources.append(resource) + return resources + + def get_actions(self): + return [extensions.ActionExtension('dummy_resources', + 'FOXNSOX:add_tweedle', + self._add_tweedle_handler), + extensions.ActionExtension('dummy_resources', + 'FOXNSOX:delete_tweedle', + self._delete_tweedle_handler)] + + def get_request_extensions(self): + request_exts = [] + + def _goose_handler(req, res): + # NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + data = jsonutils.loads(res.body) + data['FOXNSOX:googoose'] = req.GET.get('chewing') + res.body = jsonutils.dumps(data) + return res + + req_ext1 = extensions.RequestExtension('GET', '/dummy_resources/:(id)', + _goose_handler) + request_exts.append(req_ext1) + + def _bands_handler(req, res): + # NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + data = jsonutils.loads(res.body) + data['FOXNSOX:big_bands'] = 'Pig Bands!' 
+ res.body = jsonutils.dumps(data) + return res + + req_ext2 = extensions.RequestExtension('GET', '/dummy_resources/:(id)', + _bands_handler) + request_exts.append(req_ext2) + return request_exts + + def _add_tweedle_handler(self, input_dict, req, id): + return "Tweedle {0} Added.".format( + input_dict['FOXNSOX:add_tweedle']['name']) + + def _delete_tweedle_handler(self, input_dict, req, id): + return "Tweedle {0} Deleted.".format( + input_dict['FOXNSOX:delete_tweedle']['name']) diff --git a/apmec/tests/unit/extensions/v2attributes.py b/apmec/tests/unit/extensions/v2attributes.py new file mode 100644 index 0000000..ab40f26 --- /dev/null +++ b/apmec/tests/unit/extensions/v2attributes.py @@ -0,0 +1,48 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +EXTENDED_ATTRIBUTES_2_0 = { + 'networks': { + 'v2attrs:something': {'allow_post': False, + 'allow_put': False, + 'is_visible': True}, + 'v2attrs:something_else': {'allow_post': True, + 'allow_put': False, + 'is_visible': False}, + } +} + + +class V2attributes(object): + def get_name(self): + return "V2 Extended Attributes Example" + + def get_alias(self): + return "v2attrs" + + def get_description(self): + return "Demonstrates extended attributes on V2 core resources" + + def get_namespace(self): + return "http://docs.openstack.org/ext/examples/v2attributes/api/v1.0" + + def get_updated(self): + return "2012-07-18T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/apmec/tests/unit/mem/__init__.py b/apmec/tests/unit/mem/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/tests/unit/mem/infra_drivers/__init__.py b/apmec/tests/unit/mem/infra_drivers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/__init__.py b/apmec/tests/unit/mem/infra_drivers/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/config_data.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/config_data.yaml new file mode 100644 index 0000000..5468f9c --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/config_data.yaml @@ -0,0 +1,12 @@ +vdus: + vdu1: + config: + firewall: | + package firewall + + config defaults + option syn_flood '1' + option input 'ACCEPT' + option output 'ACCEPT' + option forward 'REJECT' + diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_alarm_scale_custom.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_alarm_scale_custom.yaml new file mode 100644 index 0000000..2a55e02 --- /dev/null +++ 
b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_alarm_scale_custom.yaml @@ -0,0 +1,26 @@ + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] +resources: + VDU1: + type: OS::Nova::Server + properties: + availability_zone: nova + user_data_format: SOFTWARE_CONFIG + config_drive: false + networks: + - port: { get_resource: CP1 } + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + metadata: {metering.mea: SG1} + VL1: + type: OS::Neutron::Net + CP1: + type: OS::Neutron::Port + properties: + network: net_mgmt + port_security_enabled: false +heat_template_version: 2013-05-23 +description: Apmec Scaling template \ No newline at end of file diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor.yaml new file mode 100644 index 0000000..f5112d6 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor.yaml @@ -0,0 +1,32 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: OpenWRT + networks: + - port: + get_resource: CP1 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 + VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 10 + ram: 512 + vcpus: 2 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_and_capabilities.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_and_capabilities.yaml new file mode 100644 index 0000000..1d03ab7 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_and_capabilities.yaml @@ -0,0 +1,26 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +outputs: + mgmt_ip-VDU1: + value: + get_attr: 
[CP1, fixed_ips, 0, ip_address] + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: m1.nano + image: OpenWRT + networks: + - port: + get_resource: CP1 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_defaults.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_defaults.yaml new file mode 100644 index 0000000..b33bf16 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_defaults.yaml @@ -0,0 +1,32 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: OpenWRT + networks: + - port: + get_resource: CP1 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 + VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 10 + ram: 512 + vcpus: 1 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_no_units.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_no_units.yaml new file mode 100644 index 0000000..817854c --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_flavor_no_units.yaml @@ -0,0 +1,33 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: OpenWRT + networks: + - port: + get_resource: CP1 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 + + 
VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 2 + ram: 512 + vcpus: 2 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_image_after_processed_image.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_image_after_processed_image.yaml new file mode 100644 index 0000000..19d9e52 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_image_after_processed_image.yaml @@ -0,0 +1,16 @@ +heat_template_version: 2013-05-23 +description: OpenWRT with services +outputs: {} +resources: + VDU1: + properties: + config_drive: true + flavor: m1.tiny + image: {get_resource: VDU1_image} + VDU1_image: + type: OS::Glance::Image + properties: + location: http://URL/v1/openwrt.qcow2 + container_format: bare + disk_format: raw + diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_image_before_processed_image.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_image_before_processed_image.yaml new file mode 100644 index 0000000..f0dd37c --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_image_before_processed_image.yaml @@ -0,0 +1,9 @@ +heat_template_version: 2013-05-23 +description: OpenWRT with services +outputs: {} +resources: + VDU1: + properties: + config_drive: true + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt.yaml new file mode 100644 index 0000000..03a809c --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt.yaml @@ -0,0 +1,26 @@ +description: OpenWRT with services +heat_template_version: 2013-05-23 +outputs: + mgmt_ip-vdu1: + description: management ip address + value: + get_attr: [vdu1-net_mgmt-port, fixed_ips, 0, ip_address] +resources: + vdu1: + properties: + availability_zone: nova + config_drive: true + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + metadata: {param0: key0, param1: key1} + 
networks: + - port: {get_resource: vdu1-net_mgmt-port} + - {network: net0} + - {network: net1} + type: OS::Nova::Server + vdu1-net_mgmt-port: + properties: + fixed_ips: [] + network: net_mgmt + port_security_enabled: false + type: OS::Neutron::Port diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt_ipparams.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt_ipparams.yaml new file mode 100644 index 0000000..ac9a4a4 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt_ipparams.yaml @@ -0,0 +1,41 @@ +description: Parameterized MEA descriptor for IP addresses +heat_template_version: 2013-05-23 +outputs: + mgmt_ip-vdu1: + description: management ip address + value: + get_attr: [vdu1-net_mgmt-port, fixed_ips, 0, ip_address] +resources: + vdu1: + properties: + availability_zone: nova + config_drive: true + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + metadata: {param0: key0, param1: key1} + networks: + - port: {get_resource: vdu1-net_mgmt-port} + - port: {get_resource: vdu1-net0-port} + - port: {get_resource: vdu1-net1-port} + type: OS::Nova::Server + vdu1-net0-port: + properties: + fixed_ips: + - {ip_address: 10.10.0.98} + network: net0 + port_security_enabled: false + type: OS::Neutron::Port + vdu1-net1-port: + properties: + fixed_ips: + - {ip_address: 10.10.1.98} + network: net1 + port_security_enabled: false + type: OS::Neutron::Port + vdu1-net_mgmt-port: + properties: + fixed_ips: + - {ip_address: 192.168.120.98} + network: net_mgmt + port_security_enabled: false + type: OS::Neutron::Port diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt_params.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt_params.yaml new file mode 100644 index 0000000..6c56fe4 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_openwrt_params.yaml @@ -0,0 +1,35 @@ +description: Parameterized MEA descriptor +heat_template_version: 2013-05-23 +outputs: + 
mgmt_ip-vdu1: + description: management ip address + value: + get_attr: [vdu1-net_mgmt-port, fixed_ips, 0, ip_address] +resources: + vdu1: + properties: + availability_zone: nova + config_drive: true + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + key_name: keyName + metadata: {param0: key0, param1: key1} + networks: + - port: {get_resource: vdu1-net_mgmt-port} + - {network: net0} + - {network: net1} + user_data: '#!/bin/sh + + echo "my hostname is `hostname`" > /tmp/hostname + + df -h > /home/cirros/diskinfo + + ' + user_data_format: RAW + type: OS::Nova::Server + vdu1-net_mgmt-port: + properties: + fixed_ips: [] + network: net_mgmt + port_security_enabled: false + type: OS::Neutron::Port diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_scale_custom.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_scale_custom.yaml new file mode 100644 index 0000000..a4561aa --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_scale_custom.yaml @@ -0,0 +1,25 @@ + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] +resources: + VDU1: + type: OS::Nova::Server + properties: + availability_zone: nova + user_data_format: SOFTWARE_CONFIG + config_drive: false + networks: + - port: { get_resource: CP1 } + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + VL1: + type: OS::Neutron::Net + CP1: + type: OS::Neutron::Port + properties: + network: net_mgmt + port_security_enabled: false +heat_template_version: 2013-05-23 +description: Apmec Scaling template \ No newline at end of file diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_scale_main.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_scale_main.yaml new file mode 100644 index 0000000..96049ee --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_scale_main.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2013-05-23 +description: 'sample-tosca-mead-scaling + +' + +parameters: {} +outputs: {} +resources: + 
SP1_group: + properties: + desired_capacity: 2 + max_size: 3 + min_size: 1 + cooldown: 60 + resource: {type: SP1_res.yaml} + type: OS::Heat::AutoScalingGroup + SP1_scale_in: + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: {get_resource: SP1_group} + cooldown: 60 + scaling_adjustment: -1 + type: OS::Heat::ScalingPolicy + SP1_scale_out: + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: {get_resource: SP1_group} + cooldown: 60 + scaling_adjustment: 1 + type: OS::Heat::ScalingPolicy diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_metadata.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_metadata.yaml new file mode 100644 index 0000000..452512a --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_metadata.yaml @@ -0,0 +1,41 @@ +heat_template_version: 2013-05-23 +description: 'An exception will be raised when having the mismatched metadata +(metadata is described in monitoring policy but unavailable in VDU properties). 
+' + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] +parameters: {} +resources: + VDU1: + properties: + availability_zone: nova + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: cirros-0.3.5-x86_64-disk + networks: + - port: {get_resource: CP1} + user_data_format: SOFTWARE_CONFIG + type: OS::Nova::Server + CP1: + properties: {network: net_mgmt, port_security_enabled: false} + type: OS::Neutron::Port + VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 1 + ram: 512 + vcpus: 2 + vdu_hcpu_usage_respawning: + type: OS::Aodh::Alarm + properties: + description: utilization greater_than 50% + meter_name: cpu_util + threshold: 50 + period: 60 + statistic: avg + evaluation_periods: 1 + comparison_operator: gt + 'matching_metadata': {'metadata.user_metadata.mea': 'VDU1'} diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_respawn.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_respawn.yaml new file mode 100644 index 0000000..d61494a --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_respawn.yaml @@ -0,0 +1,42 @@ +heat_template_version: 2013-05-23 +description: 'Demo example + +' + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] +parameters: {} +resources: + VDU1: + properties: + availability_zone: nova + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: cirros-0.3.5-x86_64-disk + networks: + - port: {get_resource: CP1} + user_data_format: SOFTWARE_CONFIG + metadata: {'metering.mea': 'VDU1'} + type: OS::Nova::Server + CP1: + properties: {network: net_mgmt, port_security_enabled: false} + type: OS::Neutron::Port + VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 1 + ram: 512 + vcpus: 2 + vdu_hcpu_usage_respawning: + type: OS::Aodh::Alarm + properties: + description: utilization greater_than 50% + meter_name: cpu_util + threshold: 50 + period: 60 + statistic: avg + 
evaluation_periods: 1 + comparison_operator: gt + 'matching_metadata': {'metadata.user_metadata.mea': 'VDU1'} diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_scale.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_scale.yaml new file mode 100644 index 0000000..0d1ed30 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_alarm_scale.yaml @@ -0,0 +1,53 @@ +heat_template_version: 2013-05-23 +description: 'sample-tosca-mead-scaling + +' + +parameters: {} +outputs: {} +resources: + SP1_group: + properties: + cooldown: 60 + desired_capacity: 2 + max_size: 3 + min_size: 1 + resource: {type: SP1_res.yaml} + type: OS::Heat::AutoScalingGroup + SP1_scale_in: + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: {get_resource: SP1_group} + cooldown: 60 + scaling_adjustment: -1 + type: OS::Heat::ScalingPolicy + SP1_scale_out: + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: {get_resource: SP1_group} + cooldown: 60 + scaling_adjustment: 1 + type: OS::Heat::ScalingPolicy + + vdu_hcpu_usage_scaling_out: + type: OS::Aodh::Alarm + properties: + description: utilization greater_than 50% + meter_name: cpu_util + statistic: avg + period: 600 + evaluation_periods: 1 + threshold: 50 + matching_metadata: {'metadata.user_metadata.mea': SG1} + comparison_operator: gt + vdu_lcpu_usage_scaling_in: + type: OS::Aodh::Alarm + properties: + description: utilization less_than 10% + meter_name: cpu_util + statistic: avg + period: 600 + evaluation_periods: 1 + threshold: 10 + matching_metadata: {'metadata.user_metadata.mea': SG1} + comparison_operator: lt diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_allowed_address_pairs.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_allowed_address_pairs.yaml new file mode 100644 index 0000000..312de13 --- /dev/null +++ 
b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_allowed_address_pairs.yaml @@ -0,0 +1,88 @@ +heat_template_version: 2013-05-23 + +description: > + VIP Template + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP2, fixed_ips, 0, ip_address] + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: m1.nano + image: {get_resource: VDU1_image} + networks: + - port: + get_resource: CP1 + - port: + get_resource: CP2 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + fixed_ips: + - ip_address: 10.10.1.11 + allowed_address_pairs: + - ip_address: 10.10.1.12 + network: existing_network_1 + port_security_enabled: true + security_groups: + - default + + VCP1: + type: OS::Neutron::Port + properties: + fixed_ips: + - ip_address: 10.10.1.12 + network: existing_network_1 + port_security_enabled: true + security_groups: + - default + + CP2: + type: OS::Neutron::Port + properties: + fixed_ips: + - ip_address: 10.10.2.21 + allowed_address_pairs: + - ip_address: 10.10.2.22 + - ip_address: 10.10.2.23 + mac_address: fe:1a:29:d9:36:45 + # NOTE(review): duplicate 'mac_address' key removed - YAML mapping keys must be unique + network: existing_network_2 + port_security_enabled: true + security_groups: + - default + + VCP2: + type: OS::Neutron::Port + properties: + fixed_ips: + - ip_address: 10.10.2.22 + network: existing_network_2 + port_security_enabled: true + security_groups: + - default + + VCP3: + type: OS::Neutron::Port + properties: + fixed_ips: + - ip_address: 10.10.2.23 + network: existing_network_2 + port_security_enabled: true + security_groups: + - default + + VDU1_image: + type: OS::Glance::Image + properties: + container_format: bare + disk_format: raw + location: http://URL/vRouterMEA.qcow2 + name: vm_image diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_count.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_count.yaml new file mode 100644 index
0000000..100f340 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_count.yaml @@ -0,0 +1,22 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: OpenWRT + user_data_format: SOFTWARE_CONFIG + VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 10 + ram: 4096 + vcpus: 8 + extra_specs: {'hw:cpu_policy': 'dedicated', 'hw:mem_page_size': 'any', 'hw:cpu_sockets': 2, 'hw:cpu_threads': 2, 'hw:numa_nodes': 2, 'hw:cpu_cores': 2, 'hw:cpu_threads_policy': 'avoid'} +outputs: {} diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_nodes.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_nodes.yaml new file mode 100644 index 0000000..b921622 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_nodes.yaml @@ -0,0 +1,22 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: OpenWRT + user_data_format: SOFTWARE_CONFIG + VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 40 + ram: 4096 + vcpus: 6 + extra_specs: {'hw:cpu_policy': 'dedicated', 'hw:mem_page_size': 'any', 'hw:cpu_sockets': 2, 'hw:cpu_threads': 2, 'hw:numa_mem.1': 3072, 'hw:numa_mem.0': 1024, 'hw:numa_cpus.0': '0,1', 'hw:numa_cpus.1': '2,3,4,5', 'hw:cpu_cores': 2, 'hw:cpu_threads_policy': 'avoid', 'hw:numa_nodes': 2} +outputs: {} diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_cpu_allocations.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_cpu_allocations.yaml new file mode 100644 index 0000000..4d593b5 --- /dev/null +++ 
b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_cpu_allocations.yaml @@ -0,0 +1,22 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: OpenWRT + user_data_format: SOFTWARE_CONFIG + VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 40 + ram: 4096 + vcpus: 6 + extra_specs: {'hw:cpu_policy': 'dedicated', 'hw:cpu_sockets': 2, 'hw:cpu_threads': 2, 'hw:cpu_cores': 2, 'hw:cpu_threads_policy': 'avoid'} +outputs: {} diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_huge_pages.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_huge_pages.yaml new file mode 100644 index 0000000..e22afe4 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_huge_pages.yaml @@ -0,0 +1,22 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: OpenWRT + user_data_format: SOFTWARE_CONFIG + VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 40 + ram: 4096 + vcpus: 6 + extra_specs: {'hw:mem_page_size': 'any'} +outputs: {} diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes.yaml new file mode 100644 index 0000000..a89ed8c --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes.yaml @@ -0,0 +1,22 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: OpenWRT + user_data_format: SOFTWARE_CONFIG + VDU1_flavor: 
+ type: OS::Nova::Flavor + properties: + disk: 40 + ram: 4096 + vcpus: 6 + extra_specs: { 'hw:numa_mem.1': 3072, 'hw:numa_mem.0': 1024, 'hw:numa_cpus.0': '0,1', 'hw:numa_cpus.1': '2,3,4,5', 'hw:numa_nodes': 2} +outputs: {} diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes_count.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes_count.yaml new file mode 100644 index 0000000..e2e657e --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes_count.yaml @@ -0,0 +1,22 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: {get_resource: VDU1_flavor} + image: OpenWRT + user_data_format: SOFTWARE_CONFIG + VDU1_flavor: + type: OS::Nova::Flavor + properties: + disk: 40 + ram: 4096 + vcpus: 6 + extra_specs: {'hw:numa_nodes': 2 } +outputs: {} diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_generic_vnfd_params.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_generic_vnfd_params.yaml new file mode 100644 index 0000000..317ddb7 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_generic_vnfd_params.yaml @@ -0,0 +1,45 @@ +description: > + Generic VDU with parameterized image and flavor + +heat_template_version: 2013-05-23 +outputs: + mgmt_ip-VDU1: + value: {get_attr: [CP1, fixed_ips, 0, ip_address]} +parameters: + flavor: + constraints: + - allowed_values: [m1.tiny, m1.small, m1.medium, m1.large, m1.xlarge] + default: m1.large + description: Flavor name for the server + type: string + image: + default: cirros + description: Image name for the server + type: string +resources: + CP1: + properties: + network: net_mgmt + port_security_enabled: False + type: OS::Neutron::Port + CP2: + properties: + network: pkt_in + port_security_enabled: False + type: OS::Neutron::Port
+ CP3: + properties: + network: pkt_out + port_security_enabled: False + type: OS::Neutron::Port + VDU1: + properties: + config_drive: False + flavor: { get_param: flavor } + image: { get_param: image } + networks: + - port: { get_resource: CP1 } + - port: { get_resource: CP2 } + - port: { get_resource: CP3 } + user_data_format: SOFTWARE_CONFIG + type: OS::Nova::Server diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_image.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_image.yaml new file mode 100644 index 0000000..6e6ddff --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_image.yaml @@ -0,0 +1,34 @@ +heat_template_version: 2013-05-23 + +description: > + OpenWRT with services + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: m1.nano + image: {get_resource: VDU1_image} + networks: + - port: + get_resource: CP1 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 + + VDU1_image: + type: OS::Glance::Image + properties: + container_format: bare + disk_format: raw + location: http://URL/vRouterMEA.qcow2 + name: vm_image diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_mac_ip.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_mac_ip.yaml new file mode 100644 index 0000000..935bb9c --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_mac_ip.yaml @@ -0,0 +1,37 @@ +heat_template_version: 2013-05-23 + +description: > + SecurityGroup Template + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: m1.nano + image: {get_resource: VDU1_image} + networks: + - port: + get_resource: CP1 + user_data_format: 
SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 + port_security_enabled: true + mac_address: fe:1a:29:d9:36:43 + fixed_ips: + - ip_address: 10.10.1.12 + VDU1_image: + type: OS::Glance::Image + properties: + container_format: bare + disk_format: raw + location: http://URL/vRouterMEA.qcow2 + name: vm_image diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_mgmt_sriov.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_mgmt_sriov.yaml new file mode 100644 index 0000000..3451f31 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_mgmt_sriov.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2013-05-23 + +description: > + SRIOV and management port example + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] +parameters: {} +resources: + CP1: + properties: {'binding:vnic_type': direct, network: net-mgmt} + type: OS::Neutron::Port + CP2: + properties: {network: net0} + type: OS::Neutron::Port + VDU1: + properties: + availability_zone: nova + config_drive: false + flavor: numa-sriov + image: OpenWRT + user_data_format: SOFTWARE_CONFIG + networks: + - port: {get_resource: CP1} + - port: {get_resource: CP2} + type: OS::Nova::Server diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_monitoring_multi_vdu.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_monitoring_multi_vdu.yaml new file mode 100644 index 0000000..3a668b3 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_monitoring_multi_vdu.yaml @@ -0,0 +1,43 @@ +heat_template_version: 2013-05-23 +description: 'Monitoring for multiple VDUs + +' +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] + mgmt_ip-VDU2: + value: + get_attr: [CP2, fixed_ips, 0, ip_address] + +parameters: {} +resources: + VDU1: + properties: + availability_zone: nova + config_drive: false + flavor: m1.tiny + image: 
cirros-0.3.5-x86_64-disk + networks: + - port: {get_resource: CP1} + user_data_format: SOFTWARE_CONFIG + type: OS::Nova::Server + + CP1: + properties: {network: net_mgmt, port_security_enabled: false} + type: OS::Neutron::Port + + VDU2: + properties: + availability_zone: nova + config_drive: false + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + networks: + - port: {get_resource: CP2} + user_data_format: SOFTWARE_CONFIG + type: OS::Nova::Server + + CP2: + properties: {network: net_mgmt, port_security_enabled: false} + type: OS::Neutron::Port diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt.yaml new file mode 100644 index 0000000..75dbdb0 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt.yaml @@ -0,0 +1,25 @@ +heat_template_version: 2013-05-23 +description: 'OpenWRT with services + + ' +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: m1.tiny + image: OpenWRT + networks: + - port: + get_resource: CP1 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 + port_security_enabled: false +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt_kilo.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt_kilo.yaml new file mode 100644 index 0000000..7d42cfa --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt_kilo.yaml @@ -0,0 +1,25 @@ +heat_template_version: 2013-05-23 +description: 'OpenWRT with services + + ' +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: m1.tiny + image: OpenWRT + networks: + - port: + get_resource: CP1 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + 
properties: + network: existing_network_1 + value_specs: {port_security_enabled: false} +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt_userdata.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt_userdata.yaml new file mode 100644 index 0000000..b10c0d9 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_openwrt_userdata.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2013-05-23 +description: 'OpenWRT with services + + ' +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: m1.tiny + image: OpenWRT + networks: + - port: + get_resource: CP1 + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + df -h > /home/openwrt/diskinfo + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 + port_security_enabled: false +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_security_groups.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_security_groups.yaml new file mode 100644 index 0000000..c59d41b --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_security_groups.yaml @@ -0,0 +1,38 @@ +heat_template_version: 2013-05-23 + +description: > + SecurityGroup Template + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] + +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: m1.nano + image: {get_resource: VDU1_image} + networks: + - port: + get_resource: CP1 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 + port_security_enabled: true + security_groups: + - default + - test_secgrp + + VDU1_image: + type: 
OS::Glance::Image + properties: + container_format: bare + disk_format: raw + location: http://URL/vRouterMEA.qcow2 + name: vm_image diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_sriov.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_sriov.yaml new file mode 100644 index 0000000..d10c8fc --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_sriov.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2013-05-23 + +description: > + SRIOV example + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] +parameters: {} +resources: + CP1: + properties: {name: sriov, network: net-mgmt} + type: OS::Neutron::Port + CP2: + properties: {'binding:vnic_type': direct, network: sr3010} + type: OS::Neutron::Port + VDU1: + properties: + availability_zone: nova + config_drive: false + flavor: numa-sriov + image: OpenWRT + user_data_format: SOFTWARE_CONFIG + networks: + - port: {get_resource: CP1} + - port: {get_resource: CP2} + type: OS::Nova::Server diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_vnfc.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_vnfc.yaml new file mode 100644 index 0000000..1bb38e9 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_vnfc.yaml @@ -0,0 +1,36 @@ +heat_template_version: 2013-05-23 +parameters: {} +resources: + VDU1: + type: OS::Nova::Server + properties: + config_drive: false + flavor: m1.small + image: Fedora + networks: + - port: + get_resource: CP1 + user_data_format: SOFTWARE_CONFIG + CP1: + type: OS::Neutron::Port + properties: + network: existing_network_1 + port_security_enabled: false + firewall_meac_create_config: + type: OS::Heat::SoftwareConfig + properties: + config: 'echo "Test case for Apmec";' + group: script + firewall_meac_create_deploy: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: firewall_meac_create_config} + server: {get_resource: VDU1} + 
depends_on: + - VDU1 + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] + diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_vnic_normal.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_vnic_normal.yaml new file mode 100644 index 0000000..cf9eb92 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/hot_tosca_vnic_normal.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2013-05-23 + +description: > + VNIC Normal Port example + +outputs: + mgmt_ip-VDU1: + value: + get_attr: [CP1, fixed_ips, 0, ip_address] +parameters: {} +resources: + CP1: + properties: {network: net-mgmt} + type: OS::Neutron::Port + CP2: + properties: {'binding:vnic_type': normal, network: net0} + type: OS::Neutron::Port + VDU1: + properties: + availability_zone: nova + config_drive: false + flavor: m1.small + image: OpenWRT + user_data_format: SOFTWARE_CONFIG + networks: + - port: {get_resource: CP1} + - port: {get_resource: CP2} + type: OS::Nova::Server diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_allowed_address_pairs.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_allowed_address_pairs.yaml new file mode 100644 index 0000000..9331861 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_allowed_address_pairs.yaml @@ -0,0 +1,105 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: VIP Template + +metadata: + template_name: vipTemplate + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + artifacts: + vm_image: + type: tosca.artifacts.Deployment.Image.VM + file: http://URL/vRouterMEA.qcow2 + properties: + flavor: m1.nano + mgmt_driver: noop + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + ip_address: 10.10.1.11 + anti_spoofing_protection: true + 
allowed_address_pairs: + - ip_address: 10.10.1.12 + security_groups: + - default + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + VCP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + ip_address: 10.10.1.12 + anti_spoofing_protection: true + security_groups: + - default + requirements: + - virtualLink: + node: VL1 + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + mac_address: fe:1a:29:d9:36:45 + ip_address: 10.10.2.21 + anti_spoofing_protection: true + allowed_address_pairs: + - ip_address: 10.10.2.22 + - ip_address: 10.10.2.23 + mac_address: fe:1a:29:d9:36:45 + security_groups: + - default + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + VCP2: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + ip_address: 10.10.2.22 + anti_spoofing_protection: true + security_groups: + - default + requirements: + - virtualLink: + node: VL2 + VCP3: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + ip_address: 10.10.2.23 + anti_spoofing_protection: true + security_groups: + - default + requirements: + - virtualLink: + node: VL2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_2 + vendor: Apmec diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor.yaml new file mode 100644 index 0000000..f3c0dec --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor.yaml @@ -0,0 +1,44 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 2 + disk_size: 10 GB + mem_size: 
512 MB + properties: + image: OpenWRT + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: ACME diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_and_capabilities.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_and_capabilities.yaml new file mode 100644 index 0000000..6cae5a3 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_and_capabilities.yaml @@ -0,0 +1,44 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 2 + disk_size: 10 GB + mem_size: 512 MB + properties: + image: OpenWRT + mgmt_driver: openwrt + flavor: m1.nano + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: ACME diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_defaults.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_defaults.yaml new file mode 100644 index 0000000..486c041 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_defaults.yaml @@ -0,0 +1,41 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + 
template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + properties: + image: OpenWRT + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: ACME diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_no_units.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_no_units.yaml new file mode 100644 index 0000000..63c2dc4 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_flavor_no_units.yaml @@ -0,0 +1,43 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 2 + disk_size: 2048 + mem_size: 512 + properties: + image: OpenWRT + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: ACME diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_image.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_image.yaml new file mode 100644 index 0000000..5be51b6 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_image.yaml @@ -0,0 +1,42 @@ +tosca_definitions_version: 
tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + artifacts: + vm_image: + type: tosca.artifacts.Deployment.Image.VM + file: http://URL/vRouterMEA.qcow2 + properties: + flavor: m1.nano + mgmt_driver: noop + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: ACME diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mac_ip.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mac_ip.yaml new file mode 100644 index 0000000..277b688 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mac_ip.yaml @@ -0,0 +1,45 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: SecurityGroup Template + +metadata: + template_name: SecurityGroup + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + artifacts: + vm_image: + type: tosca.artifacts.Deployment.Image.VM + file: http://URL/vRouterMEA.qcow2 + properties: + flavor: m1.nano + mgmt_driver: noop + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: true + mac_address: fe:1a:29:d9:36:43 + ip_address: 10.10.1.12 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: ACME diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_meac.yaml 
b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_meac.yaml new file mode 100644 index 0000000..5f13b82 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_meac.yaml @@ -0,0 +1,39 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +metadata: + template_name: sample-tosca-mead-for-meac + +topology_template: + node_templates: + firewall_meac: + type: tosca.nodes.mec.MEAC.Apmec + requirements: + - host: VDU1 + interfaces: + Standard: + create: install_meac.sh + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: Fedora + flavor: m1.small + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_multi_actions.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_multi_actions.yaml new file mode 100644 index 0000000..425181f --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_multi_actions.yaml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: VDU1} + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + 
network_name: net_mgmt + vendor: Apmec + + policies: + - vdu1_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + mon_policy_multi_actions: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: average + comparison_operator: gt + metadata: VDU1 + actions: [respawn, log] diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_respawn.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_respawn.yaml new file mode 100644 index 0000000..7090eb1 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_respawn.yaml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: VDU1} + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - vdu1_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_respawning: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: average + comparison_operator: gt + metadata: VDU1 + action: [respawn] diff --git 
a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_scale.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_scale.yaml new file mode 100644 index 0000000..d21f8d3 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_mead_alarm_scale.yaml @@ -0,0 +1,67 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: SG1} + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - SP1: + type: tosca.policies.apmec.Scaling + properties: + increment: 1 + cooldown: 120 + min_instances: 1 + max_instances: 3 + default_instances: 2 + targets: [VDU1] + + - vdu1_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_scaling_out: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: average + comparison_operator: gt + metadata: SG1 + action: [SP1] diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_openwrt.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_openwrt.yaml new file mode 100644 index 0000000..0a4e40a --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_openwrt.yaml @@ -0,0 +1,44 @@ +tosca_definitions_version: 
tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: OpenWRT + flavor: m1.tiny + config: | + param0: key1 + param1: key2 + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: ACME + diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_openwrt_userdata.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_openwrt_userdata.yaml new file mode 100644 index 0000000..0385051 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_openwrt_userdata.yaml @@ -0,0 +1,49 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: OpenWRT + flavor: m1.tiny + config: | + param0: key1 + param1: key2 + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + df -h > /home/openwrt/diskinfo + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: ACME + diff --git 
a/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_security_groups.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_security_groups.yaml new file mode 100644 index 0000000..84ec89c --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/test_tosca_security_groups.yaml @@ -0,0 +1,46 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: SecurityGroup Template + +metadata: + template_name: SecurityGroup + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + artifacts: + vm_image: + type: tosca.artifacts.Deployment.Image.VM + file: http://URL/vRouterMEA.qcow2 + properties: + flavor: m1.nano + mgmt_driver: noop + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: true + security_groups: + - default + - test_secgrp + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: existing_network_1 + vendor: ACME diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_metadata.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_metadata.yaml new file mode 100644 index 0000000..af90732 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_metadata.yaml @@ -0,0 +1,60 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +description: > + An exception will be raised when having the mismatched metadata + (metadata is described in monitoring policy but unavailable in + VDU properties). 
+ +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - vdu1_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_respawning: + event_type: + type: tosca.events.resource.utilization + implementation: Ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 60 + evaluations: 1 + method: average + comparison_operator: gt + metadata: VDU1 + action: '' diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_respawn.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_respawn.yaml new file mode 100644 index 0000000..7320426 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_respawn.yaml @@ -0,0 +1,58 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: VDU1} + + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: 
tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - vdu1_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_respawning: + event_type: + type: tosca.events.resource.utilization + implementation: Ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 60 + evaluations: 1 + method: average + comparison_operator: gt + metadata: VDU1 + action: '' diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_scale.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_scale.yaml new file mode 100644 index 0000000..d5a295b --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_alarm_scale.yaml @@ -0,0 +1,78 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: sample-tosca-mead-scaling + +metadata: + template_name: sample-tosca-mead-scaling + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + flavor: m1.tiny + metadata: {metering.mea: SG1} + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - SP1: + type: tosca.policies.apmec.Scaling + targets: [VDU1] + properties: + increment: 1 + cooldown: 60 + min_instances: 1 + max_instances: 3 + default_instances: 2 + + - vdu_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_scaling_out: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: average 
+ comparison_operator: gt + metadata: SG1 + action: [SP1] + + vdu_lcpu_usage_scaling_in: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 10 + constraint: utilization less_than 10% + period: 600 + evaluations: 1 + method: average + comparison_operator: lt + metadata: SG1 + action: [SP1] diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_block_storage.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_block_storage.yaml new file mode 100644 index 0000000..498bc41 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_block_storage.yaml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VB1: + type: tosca.nodes.BlockStorage.Apmec + properties: + size: 1 GB + image: cirros-0.3.5-x86_64-disk + + CB1: + type: tosca.nodes.BlockStorageAttachment + properties: + location: /dev/vdb + requirements: + - virtualBinding: + node: VDU1 + - virtualAttachment: + node: VB1 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_all_numa_count.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_all_numa_count.yaml new file mode 100644 index 0000000..b683162 --- /dev/null +++ 
b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_all_numa_count.yaml @@ -0,0 +1,36 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 8 + disk_size: 10 GB + mem_size: 4096 MB + mem_page_size: any + numa_node_count: 2 + cpu_allocation: + cpu_affinity: dedicated + thread_allocation: avoid + socket_count: 2 + thread_count: 2 + core_count: 2 + properties: + image: OpenWRT + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_all_numa_nodes.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_all_numa_nodes.yaml new file mode 100644 index 0000000..ab16750 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_all_numa_nodes.yaml @@ -0,0 +1,44 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 6 + disk_size: 40 GB + mem_size: 4096 MB + mem_page_size: any + numa_nodes: + node0: + id: 0 + vcpus: [0, 1] + mem_size: 1024 + node1: + id: 1 + vcpus: [2,3,4,5] + mem_size: 3072 + cpu_allocation: + cpu_affinity: dedicated + thread_allocation: avoid + socket_count: 2 + thread_count: 2 + core_count: 2 + properties: + image: OpenWRT + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_cpu_allocations.yaml 
b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_cpu_allocations.yaml new file mode 100644 index 0000000..8cf2830 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_cpu_allocations.yaml @@ -0,0 +1,34 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 6 + disk_size: 40 GB + mem_size: 4096 MB + cpu_allocation: + cpu_affinity: dedicated + thread_allocation: avoid + socket_count: 2 + thread_count: 2 + core_count: 2 + properties: + image: OpenWRT + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_huge_pages.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_huge_pages.yaml new file mode 100644 index 0000000..2e94cea --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_huge_pages.yaml @@ -0,0 +1,29 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 6 + disk_size: 40 GB + mem_size: 4096 MB + mem_page_size: any + properties: + image: OpenWRT + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_numa_nodes.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_numa_nodes.yaml new file mode 100644 index 0000000..7911dcb --- /dev/null +++ 
b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_numa_nodes.yaml @@ -0,0 +1,37 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 6 + disk_size: 40 GB + mem_size: 4096 MB + numa_nodes: + node0: + id: 0 + vcpus: [0, 1] + mem_size: 1024 + node1: + id: 1 + vcpus: [2,3,4,5] + mem_size: 3072 + properties: + image: OpenWRT + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_numa_nodes_count.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_numa_nodes_count.yaml new file mode 100644 index 0000000..f70e688 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_flavor_numa_nodes_count.yaml @@ -0,0 +1,38 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 6 + disk_size: 40 GB + mem_size: 4096 MB + numa_nodes: + node0: + id: 0 + vcpus: [0, 1] + mem_size: 1024 + node1: + id: 1 + vcpus: [2,3,4,5] + mem_size: 3072 + numa_node_count: 2 + properties: + image: OpenWRT + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_generic_mead_params.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_generic_mead_params.yaml new file mode 100644 index 0000000..40c0d9c --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_generic_mead_params.yaml @@ -0,0 +1,86 
@@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Generic VDU with parameterized image and flavor + +metadata: + template_name: OpenWRT + +topology_template: + + inputs: + flavor: + type: string + description: Flavor name for the server + constraints: + - valid_values: [ m1.tiny, m1.small, m1.medium, m1.large, m1.large ] + default: m1.tiny + + image: + type: string + description: Image name for the server + default: OpenWRT + + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: { get_input: image } + flavor: { get_input: flavor } + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: pkt_in + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: pkt_out + vendor: Apmec + diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_mgmt_sriov.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_mgmt_sriov.yaml new file mode 100644 index 0000000..c4a17ac --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_mgmt_sriov.yaml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: SRIOV and management port example + +metadata: + template_name: sample-SRIOV-mead + +topology_template: + 
node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: OpenWRT + flavor: numa-sriov + availability_zone: nova + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + type: sriov + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net-mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_monitoring_multi_vdu.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_monitoring_multi_vdu.yaml new file mode 100644 index 0000000..67f5c70 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_monitoring_multi_vdu.yaml @@ -0,0 +1,74 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Monitoring for multiple VDUs + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: 
key2 + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_nsd_template.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_nsd_template.yaml new file mode 100644 index 0000000..d97b2d9 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_nsd_template.yaml @@ -0,0 +1,38 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +imports: + - MEA1 + - MEA2 + +topology_template: + inputs: + vl1_name: + type: string + description: name of VL1 virtuallink + default: net_mgmt + vl2_name: + type: string + description: name of VL2 virtuallink + default: net0 + + node_templates: + MEA1: + type: tosca.nodes.mec.MEA1 + requirements: + - virtualLink1: VL1 + - virtualLink2: VL2 + + MEA2: + type: tosca.nodes.mec.MEA2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: {get_input: vl1_name} + vendor: apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: {get_input: vl2_name} + vendor: apmec + diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_scale.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_scale.yaml new file mode 100644 index 0000000..02ba990 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_scale.yaml @@ -0,0 +1,45 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: sample-tosca-mead-scaling + +metadata: + template_name: sample-tosca-mead-scaling + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + 
availability_zone: nova + flavor: m1.tiny + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - SP1: + type: tosca.policies.apmec.Scaling + targets: [VDU1] + properties: + increment: 1 + cooldown: 60 + min_instances: 1 + max_instances: 3 + default_instances: 2 + diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_sriov.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_sriov.yaml new file mode 100644 index 0000000..7bb5e33 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_sriov.yaml @@ -0,0 +1,59 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: SRIOV example + +metadata: + template_name: sample-SRIOV-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: OpenWRT + flavor: numa-sriov + availability_zone: nova + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + name: sriov + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + type: sriov + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net-mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: sr3010 + vendor: Apmec diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_vnic_port.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_vnic_port.yaml new file mode 100644 index 0000000..fd257f2 --- /dev/null +++ 
b/apmec/tests/unit/mem/infra_drivers/openstack/data/tosca_vnic_port.yaml @@ -0,0 +1,58 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: VNIC Normal Port example + +metadata: + template_name: sample-vnic-normal-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: OpenWRT + flavor: m1.small + availability_zone: nova + mgmt_driver: openwrt + monitoring_policy: + name: ping + actions: + failure: respawn + parameters: + count: 3 + interval: 10 + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + type: vnic + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net-mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/data/update_config_data.yaml b/apmec/tests/unit/mem/infra_drivers/openstack/data/update_config_data.yaml new file mode 100644 index 0000000..5d95165 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/data/update_config_data.yaml @@ -0,0 +1,11 @@ +vdus: + vdu1: + config: + firewall: | + package firewall + + config defaults + option syn_flood '3' + option input 'ACCEPT' + option output 'ACCEPT' + option forward 'ACCEPT' diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/test_openstack.py b/apmec/tests/unit/mem/infra_drivers/openstack/test_openstack.py new file mode 100644 index 0000000..dc1c245 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/test_openstack.py @@ -0,0 +1,463 @@ +# Copyright 2015 Brocade Communications System, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import codecs +import json +import mock +import os +import yaml + +from apmec import context +from apmec.extensions import mem +from apmec.tests.unit import base +from apmec.tests.unit.db import utils +from apmec.mem.infra_drivers.openstack import openstack + + +class FakeHeatClient(mock.Mock): + + class Stack(mock.Mock): + stack_status = 'CREATE_COMPLETE' + outputs = [{u'output_value': u'192.168.120.31', u'description': + u'management ip address', u'output_key': u'mgmt_ip-vdu1'}] + + def create(self, *args, **kwargs): + return {'stack': {'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}} + + def get(self, id): + return self.Stack() + + +def _get_template(name): + filename = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "data/", name) + f = codecs.open(filename, encoding='utf-8', errors='strict') + return f.read() + + +class TestOpenStack(base.TestCase): + hot_template = _get_template('hot_openwrt.yaml') + hot_param_template = _get_template('hot_openwrt_params.yaml') + hot_ipparam_template = _get_template('hot_openwrt_ipparams.yaml') + tosca_mead_openwrt = _get_template('test_tosca_openwrt.yaml') + config_data = _get_template('config_data.yaml') + + def setUp(self): + super(TestOpenStack, self).setUp() + self.context = context.get_admin_context() + self.infra_driver = openstack.OpenStack() + self._mock_heat_client() + self.addCleanup(mock.patch.stopall) + + def _mock_heat_client(self): + self.heat_client = 
mock.Mock(wraps=FakeHeatClient()) + fake_heat_client = mock.Mock() + fake_heat_client.return_value = self.heat_client + self._mock( + 'apmec.mem.infra_drivers.openstack.heat_client.HeatClient', + fake_heat_client) + + def _mock(self, target, new=mock.DEFAULT): + patcher = mock.patch(target, new) + return patcher.start() + + def _get_mead(self, template): + return {'mead': {'attributes': {'mead': template}}} + + def _get_expected_mead(self, template): + return {'attributes': {'mead': template}, + 'description': 'OpenWRT with services', + 'mgmt_driver': 'openwrt', 'name': 'OpenWRT', + 'service_types': [{'service_type': 'mead', + 'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}], + 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437', + 'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec'} + + def _get_expected_fields(self): + return {'stack_name': + 'apmec.mem.infra_drivers.openstack.openstack_OpenStack' + '-eb84260e-5ff7-4332-b032-50a14d6c1123', 'template': + self.hot_template} + + def _get_expected_fields_user_data(self): + return {'stack_name': + 'apmec.mem.infra_drivers.openstack.openstack_OpenStack' + '-18685f68-2b2a-4185-8566-74f54e548811', + 'template': self.hot_param_template} + + def _get_expected_fields_ipaddr_data(self): + return {'stack_name': + 'apmec.mem.infra_drivers.openstack.openstack_OpenStack' + '-d1337add-d5a1-4fd4-9447-bb9243c8460b', + 'template': self.hot_ipparam_template} + + def _get_expected_mea_wait_obj(self, param_values=''): + return {'status': 'PENDING_CREATE', + 'instance_id': None, + 'name': u'test_openwrt', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'mead_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e', + 'mead': { + 'service_types': [{ + 'service_type': u'mead', + 'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}], + 'description': u'OpenWRT with services', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'mgmt_driver': u'openwrt', + 'attributes': {u'mead': self.tosca_mead_openwrt}, + 'id': u'fb048660-dc1b-4f0f-bd89-b023666650ec', + 'name': 
u'OpenWRT'}, + 'mgmt_url': '{"vdu1": "192.168.120.31"}', + 'service_context': [], + 'attributes': {u'param_values': param_values}, + 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123', + 'description': u'OpenWRT with services'} + + def _get_expected_mea_update_obj(self): + return {'status': 'PENDING_CREATE', 'instance_id': None, 'name': + u'test_openwrt', 'tenant_id': + u'ad7ebc56538745a08ef7c5e97f8bd437', 'mead_id': + u'eb094833-995e-49f0-a047-dfb56aaf7c4e', 'mead': { + 'service_types': [{'service_type': u'mead', 'id': + u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}], 'description': + u'OpenWRT with services', 'tenant_id': + u'ad7ebc56538745a08ef7c5e97f8bd437', 'mgmt_driver': u'openwrt', + 'attributes': {u'mead': self.tosca_mead_openwrt}, + 'id': u'fb048660-dc1b-4f0f-bd89-b023666650ec', 'name': + u'openwrt_services'}, 'mgmt_url': None, 'service_context': [], + 'attributes': {'config': utils.update_config_data}, + 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123', 'description': + u'OpenWRT with services'} + + def _get_expected_active_mea(self): + return {'status': 'ACTIVE', + 'instance_id': None, + 'name': u'test_openwrt', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'mead_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e', + 'mead': { + 'service_types': [{ + 'service_type': u'mead', + 'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}], + 'description': u'OpenWRT with services', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437', + 'mgmt_driver': u'openwrt', + 'infra_driver': u'heat', + 'attributes': {u'mead': self.tosca_mead_openwrt}, + 'id': u'fb048660-dc1b-4f0f-bd89-b023666650ec', + 'name': u'openwrt_services'}, + 'mgmt_url': '{"vdu1": "192.168.120.31"}', + 'service_context': [], + 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123', + 'description': u'OpenWRT with services'} + + def test_delete(self): + mea_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738' + self.infra_driver.delete(plugin=None, context=self.context, + mea_id=mea_id, + auth_attr=utils.get_vim_auth_obj()) + 
self.heat_client.delete.assert_called_once_with(mea_id) + + def test_update(self): + mea_obj = utils.get_dummy_mea_config_attr() + mea_config_obj = utils.get_dummy_mea_update_config() + expected_mea_update = self._get_expected_mea_update_obj() + mea_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738' + self.infra_driver.update(plugin=None, context=self.context, + mea_id=mea_id, mea_dict=mea_obj, + mea=mea_config_obj, + auth_attr=utils.get_vim_auth_obj()) + expected_mea_update['attributes']['config'] = yaml.safe_load( + expected_mea_update['attributes']['config']) + mea_obj['attributes']['config'] = yaml.safe_load(mea_obj['attributes'][ + 'config']) + self.assertEqual(expected_mea_update, mea_obj) + + def _get_expected_fields_tosca(self, template): + return {'stack_name': + 'apmec.mem.infra_drivers.openstack.openstack_OpenStack' + '-eb84260e' + '-5ff7-4332-b032-50a14d6c1123', + 'template': _get_template(template)} + + def _get_expected_tosca_mea(self, + tosca_tpl_name, + hot_tpl_name, + param_values='', + is_monitor=True, + multi_vdus=False): + tosca_tpl = _get_template(tosca_tpl_name) + exp_tmpl = self._get_expected_mead(tosca_tpl) + tosca_hw_dict = yaml.safe_load(_get_template(hot_tpl_name)) + dvc = { + 'mead': exp_tmpl, + 'description': u'OpenWRT with services', + 'attributes': { + 'heat_template': tosca_hw_dict, + 'param_values': param_values + }, + 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123', + 'instance_id': None, + 'mgmt_url': None, + 'name': u'test_openwrt', + 'service_context': [], + 'status': 'PENDING_CREATE', + 'mead_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e', + 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437' + } + # Add monitoring attributes for those yaml, which are having it + if is_monitor: + if multi_vdus: + dvc['attributes'].update( + {'monitoring_policy': '{"vdus": {"VDU1": {"ping": ' + '{"name": "ping", "actions": ' + '{"failure": "respawn"}, ' + '"parameters": {"count": 3, ' + '"interval": 10}, ' + '"monitoring_params": ' + '{"count": 3, "interval": 
10}}}, ' + '"VDU2": {"ping": {"name": "ping", ' + '"actions": {"failure": "respawn"}, ' + '"parameters": {"count": 3, ' + '"interval": 10}, ' + '"monitoring_params": {"count": 3, ' + '"interval": 10}}}}}'}) + else: + dvc['attributes'].update( + {'monitoring_policy': '{"vdus": {"VDU1": {"ping": ' + '{"name": "ping", "actions": ' + '{"failure": "respawn"}, ' + '"parameters": {"count": 3, ' + '"interval": 10}, ' + '"monitoring_params": ' + '{"count": 3, ' + '"interval": 10}}}}}'}) + + return dvc + + def _get_dummy_tosca_mea(self, template, input_params=''): + + tosca_template = _get_template(template) + mea = utils.get_dummy_device_obj() + dtemplate = self._get_expected_mead(tosca_template) + + mea['mead'] = dtemplate + mea['attributes'] = {} + mea['attributes']['param_values'] = input_params + return mea + + def _test_assert_equal_for_tosca_templates(self, + tosca_tpl_name, + hot_tpl_name, + input_params='', + files=None, + is_monitor=True, + multi_vdus=False): + mea = self._get_dummy_tosca_mea(tosca_tpl_name, input_params) + expected_result = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738' + expected_fields = self._get_expected_fields_tosca(hot_tpl_name) + expected_mea = self._get_expected_tosca_mea(tosca_tpl_name, + hot_tpl_name, + input_params, + is_monitor, + multi_vdus) + result = self.infra_driver.create(plugin=None, context=self.context, + mea=mea, + auth_attr=utils.get_vim_auth_obj()) + actual_fields = self.heat_client.create.call_args[0][0] + actual_fields["template"] = yaml.safe_load(actual_fields["template"]) + expected_fields["template"] = \ + yaml.safe_load(expected_fields["template"]) + + if files: + for k, v in actual_fields["files"].items(): + actual_fields["files"][k] = yaml.safe_load(v) + + expected_fields["files"] = {} + for k, v in files.items(): + expected_fields["files"][k] = yaml.safe_load(_get_template(v)) + + self.assertEqual(expected_fields, actual_fields) + mea["attributes"]["heat_template"] = yaml.safe_load( + mea["attributes"]["heat_template"]) + 
self.heat_client.create.assert_called_once_with(expected_fields) + self.assertEqual(expected_result, result) + + if files: + expected_fields["files"] = {} + for k, v in files.items(): + expected_mea["attributes"][k] = yaml.safe_load( + _get_template(v)) + mea["attributes"][k] = yaml.safe_load( + mea["attributes"][k]) + expected_mea["attributes"]['scaling_group_names'] = { + 'SP1': 'SP1_group'} + mea["attributes"]['scaling_group_names'] = json.loads( + mea["attributes"]['scaling_group_names'] + ) + self.assertEqual(expected_mea, mea) + + def test_create_tosca(self): + # self.skipTest("Not ready yet") + self._test_assert_equal_for_tosca_templates('test_tosca_openwrt.yaml', + 'hot_tosca_openwrt.yaml') + + def test_create_tosca_with_userdata(self): + self._test_assert_equal_for_tosca_templates( + 'test_tosca_openwrt_userdata.yaml', + 'hot_tosca_openwrt_userdata.yaml') + + def test_create_tosca_with_new_flavor(self): + self._test_assert_equal_for_tosca_templates('test_tosca_flavor.yaml', + 'hot_flavor.yaml') + + def test_create_tosca_with_new_flavor_with_defaults(self): + self._test_assert_equal_for_tosca_templates( + 'test_tosca_flavor_defaults.yaml', + 'hot_flavor_defaults.yaml') + + def test_create_tosca_with_flavor_and_capabilities(self): + self._test_assert_equal_for_tosca_templates( + 'test_tosca_flavor_and_capabilities.yaml', + 'hot_flavor_and_capabilities.yaml') + + def test_create_tosca_with_flavor_no_units(self): + self._test_assert_equal_for_tosca_templates( + 'test_tosca_flavor_no_units.yaml', + 'hot_flavor_no_units.yaml') + + def test_create_tosca_with_flavor_extra_specs_all_numa_count(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_flavor_all_numa_count.yaml', + 'hot_tosca_flavor_all_numa_count.yaml') + + def test_create_tosca_with_flavor_extra_specs_all_numa_nodes(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_flavor_all_numa_nodes.yaml', + 'hot_tosca_flavor_all_numa_nodes.yaml') + + def 
test_create_tosca_with_flavor_extra_specs_numa_node_count_trumps(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_flavor_numa_nodes_count.yaml', + 'hot_tosca_flavor_numa_nodes_count.yaml') + + def test_create_tosca_with_flavor_extra_specs_huge_pages(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_flavor_huge_pages.yaml', + 'hot_tosca_flavor_huge_pages.yaml') + + def test_create_tosca_with_flavor_extra_specs_cpu_allocations(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_flavor_cpu_allocations.yaml', + 'hot_tosca_flavor_cpu_allocations.yaml') + + def test_create_tosca_with_flavor_extra_specs_numa_nodes(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_flavor_numa_nodes.yaml', + 'hot_tosca_flavor_numa_nodes.yaml') + + def test_create_tosca_with_new_image(self): + self._test_assert_equal_for_tosca_templates('test_tosca_image.yaml', + 'hot_tosca_image.yaml') + + def test_create_tosca_sriov(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_sriov.yaml', + 'hot_tosca_sriov.yaml' + ) + + def test_create_tosca_vnic_normal(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_vnic_port.yaml', + 'hot_tosca_vnic_normal.yaml' + ) + + def test_create_tosca_mgmt_sriov_port(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_mgmt_sriov.yaml', + 'hot_tosca_mgmt_sriov.yaml' + ) + + def test_tosca_params(self): + input_params = 'image: cirros\nflavor: m1.large' + self._test_assert_equal_for_tosca_templates( + 'tosca_generic_mead_params.yaml', + 'hot_tosca_generic_mead_params.yaml', + input_params + ) + + def test_create_tosca_scale(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_scale.yaml', + 'hot_scale_main.yaml', + files={'SP1_res.yaml': 'hot_scale_custom.yaml'}, + is_monitor=False + ) + + def test_get_resource_info(self): + mea_obj = self._get_expected_active_mea() + self.assertRaises(mem.InfraDriverUnreachable, + self.infra_driver.get_resource_info, + plugin=None, 
context=self.context, mea_info=mea_obj, + auth_attr=utils.get_vim_auth_obj(), + region_name=None) + + def test_create_port_with_security_groups(self): + self._test_assert_equal_for_tosca_templates( + 'test_tosca_security_groups.yaml', + 'hot_tosca_security_groups.yaml' + ) + + def test_create_port_with_allowed_address_pairs(self): + self._test_assert_equal_for_tosca_templates( + 'test_tosca_allowed_address_pairs.yaml', + 'hot_tosca_allowed_address_pairs.yaml' + ) + + def test_create_port_with_mac_and_ip(self): + self._test_assert_equal_for_tosca_templates( + 'test_tosca_mac_ip.yaml', + 'hot_tosca_mac_ip.yaml' + ) + + def test_create_tosca_alarm_respawn(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_alarm_respawn.yaml', + 'hot_tosca_alarm_respawn.yaml', + is_monitor=False + ) + + def test_create_tosca_alarm_scale(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_alarm_scale.yaml', + 'hot_tosca_alarm_scale.yaml', + files={'SP1_res.yaml': 'hot_alarm_scale_custom.yaml'}, + is_monitor=False + ) + + def test_create_tosca_with_alarm_monitoring_not_matched(self): + self.assertRaises(mem.MetadataNotMatched, + self._test_assert_equal_for_tosca_templates, + 'tosca_alarm_metadata.yaml', + 'hot_tosca_alarm_metadata.yaml', + is_monitor=False + ) + + def test_create_tosca_monitoring_multi_vdus(self): + self._test_assert_equal_for_tosca_templates( + 'tosca_monitoring_multi_vdu.yaml', + 'hot_tosca_monitoring_multi_vdu.yaml', + multi_vdus=True + ) diff --git a/apmec/tests/unit/mem/infra_drivers/openstack/test_openstack_driver.py b/apmec/tests/unit/mem/infra_drivers/openstack/test_openstack_driver.py new file mode 100644 index 0000000..7316cc3 --- /dev/null +++ b/apmec/tests/unit/mem/infra_drivers/openstack/test_openstack_driver.py @@ -0,0 +1,41 @@ +# Copyright 2017 99cloud, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from apmec.extensions import mem +from apmec.tests.unit import base +from apmec.mem.infra_drivers.openstack import openstack + + +class TestOpenStack(base.TestCase): + + @mock.patch("apmec.mem.infra_drivers.openstack.heat_client.HeatClient") + def test_create_wait_with_heat_connection_exception(self, mocked_hc): + stack = {"stack_status", "CREATE_IN_PROGRESS"} + mocked_hc.get.side_effect = [stack, Exception("any stuff")] + openstack_driver = openstack.OpenStack() + self.assertRaises(mem.MEACreateWaitFailed, + openstack_driver.create_wait, + None, None, {}, 'mea_id', None) + + @mock.patch("apmec.mem.infra_drivers.openstack.heat_client.HeatClient") + def test_delete_wait_with_heat_connection_exception(self, mocked_hc): + stack = {"stack_status", "DELETE_IN_PROGRESS"} + mocked_hc.get.side_effect = [stack, Exception("any stuff")] + openstack_driver = openstack.OpenStack() + self.assertRaises(mem.MEADeleteWaitFailed, + openstack_driver.delete_wait, + None, None, 'mea_id', None, None) diff --git a/apmec/tests/unit/mem/monitor_drivers/__init__.py b/apmec/tests/unit/mem/monitor_drivers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/tests/unit/mem/monitor_drivers/http_ping/__init__.py b/apmec/tests/unit/mem/monitor_drivers/http_ping/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apmec/tests/unit/mem/monitor_drivers/http_ping/test_http_ping.py b/apmec/tests/unit/mem/monitor_drivers/http_ping/test_http_ping.py new file mode 100644 index 0000000..8fc8591 --- /dev/null +++ 
b/apmec/tests/unit/mem/monitor_drivers/http_ping/test_http_ping.py @@ -0,0 +1,56 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +import six.moves.urllib.error as urlerr +import testtools + +from apmec.mem.monitor_drivers.http_ping import http_ping + + +class TestMEAMonitorHTTPPing(testtools.TestCase): + + def setUp(self): + super(TestMEAMonitorHTTPPing, self).setUp() + self.monitor_http_ping = http_ping.MEAMonitorHTTPPing() + + @mock.patch('six.moves.urllib.request.urlopen') + def test_monitor_call_for_success(self, mock_urlopen): + test_device = {} + test_kwargs = { + 'mgmt_ip': 'a.b.c.d' + } + self.monitor_http_ping.monitor_call(test_device, + test_kwargs) + mock_urlopen.assert_called_once_with('http://a.b.c.d:80', timeout=5) + + @mock.patch('six.moves.urllib.request.urlopen') + def test_monitor_call_for_failure(self, mock_urlopen): + mock_urlopen.side_effect = urlerr.URLError("MOCK Error") + test_device = {} + test_kwargs = { + 'mgmt_ip': 'a.b.c.d' + } + monitor_return = self.monitor_http_ping.monitor_call(test_device, + test_kwargs) + self.assertEqual('failure', monitor_return) + + def test_monitor_url(self): + test_device = { + 'monitor_url': 'a.b.c.d' + } + test_monitor_url = self.monitor_http_ping.monitor_url(mock.ANY, + mock.ANY, + test_device) + self.assertEqual('a.b.c.d', test_monitor_url) diff --git a/apmec/tests/unit/mem/monitor_drivers/ping/__init__.py b/apmec/tests/unit/mem/monitor_drivers/ping/__init__.py new file mode 100644 index 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
import testtools

from apmec.mem.monitor_drivers.ping import ping


class TestMEAMonitorPing(testtools.TestCase):
    """Unit tests for the ping-based MEA monitor driver."""

    def setUp(self):
        super(TestMEAMonitorPing, self).setUp()
        self.monitor_ping = ping.MEAMonitorPing()

    @mock.patch('apmec.agent.linux.utils.execute')
    def test_monitor_call_for_success(self, mock_utils_execute):
        # monitor_call must invoke the system ping exactly once with the
        # driver's default count/timeout/interval and the given mgmt_ip.
        test_device = {}
        test_kwargs = {
            'mgmt_ip': 'a.b.c.d'
        }
        mock_ping_cmd = ['ping',
                         '-c', 5,
                         '-W', 1,
                         '-i', '0.2',
                         'a.b.c.d']
        self.monitor_ping.monitor_call(test_device,
                                       test_kwargs)
        mock_utils_execute.assert_called_once_with(mock_ping_cmd,
                                                   check_exit_code=True)

    @mock.patch('apmec.agent.linux.utils.execute')
    def test_monitor_call_for_failure(self, mock_utils_execute):
        # A RuntimeError from the ping command must be reported as 'failure'.
        mock_utils_execute.side_effect = RuntimeError()
        test_device = {}
        test_kwargs = {
            'mgmt_ip': 'a.b.c.d'
        }
        monitor_return = self.monitor_ping.monitor_call(test_device,
                                                        test_kwargs)
        self.assertEqual('failure', monitor_return)

    def test_monitor_url(self):
        # monitor_url simply echoes the device's 'monitor_url' attribute.
        test_device = {
            'monitor_url': 'a.b.c.d'
        }
        test_monitor_url = self.monitor_ping.monitor_url(mock.ANY,
                                                         mock.ANY,
                                                         test_device)
        self.assertEqual('a.b.c.d', test_monitor_url)


# ---------------------------------------------------------------------------
# apmec/tests/unit/mem/test_monitor.py
# ---------------------------------------------------------------------------
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import json

import mock
from oslo_utils import timeutils
import testtools

from apmec.db.common_services import common_services_db_plugin
from apmec.plugins.common import constants
from apmec.mem import monitor

MOCK_DEVICE_ID = 'a737497c-761c-11e5-89c3-9cb6541d805d'
# Shared fixture describing one hosting MEA with a single ping-monitored
# VDU.  Tests must treat this as read-only (copy before mutating).
MOCK_MEA_DEVICE = {
    'id': MOCK_DEVICE_ID,
    'management_ip_addresses': {
        'vdu1': 'a.b.c.d'
    },
    'monitoring_policy': {
        'vdus': {
            'vdu1': {
                'ping': {
                    'actions': {
                        'failure': 'respawn'
                    },
                    'monitoring_params': {
                        'count': 1,
                        'monitoring_delay': 0,
                        'interval': 0,
                        'timeout': 2
                    }
                }
            }
        }
    },
    'boot_at': timeutils.utcnow(),
    'action_cb': mock.MagicMock()
}


class TestMEAMonitor(testtools.TestCase):
    """Unit tests for the MEAMonitor bookkeeping/dispatch logic."""

    def setUp(self):
        super(TestMEAMonitor, self).setUp()
        p = mock.patch('apmec.common.driver_manager.DriverManager')
        self.mock_monitor_manager = p.start()
        mock.patch('apmec.db.common_services.common_services_db_plugin.'
                   'CommonServicesPluginDb.create_event'
                   ).start()
        self._cos_db_plugin =\
            common_services_db_plugin.CommonServicesPluginDb()
        # FIX: the create_event patch above is started anonymously and was
        # never stopped (only p.stop was registered), leaking the patch into
        # later tests.  stopall() stops both active patches.
        self.addCleanup(mock.patch.stopall)

    def test_to_hosting_mea(self):
        test_device_dict = {
            'id': MOCK_DEVICE_ID,
            'mgmt_url': '{"vdu1": "a.b.c.d"}',
            'attributes': {
                'monitoring_policy': json.dumps(
                    MOCK_MEA_DEVICE['monitoring_policy'])
            }
        }
        action_cb = mock.MagicMock()
        expected_output = {
            'id': MOCK_DEVICE_ID,
            'action_cb': action_cb,
            'management_ip_addresses': {
                'vdu1': 'a.b.c.d'
            },
            'mea': test_device_dict,
            'monitoring_policy': MOCK_MEA_DEVICE['monitoring_policy']
        }
        output_dict = monitor.MEAMonitor.to_hosting_mea(test_device_dict,
                                                        action_cb)
        self.assertEqual(expected_output, output_dict)

    @mock.patch('apmec.mem.monitor.MEAMonitor.__run__')
    def test_add_hosting_mea(self, mock_monitor_run):
        test_device_dict = {
            'id': MOCK_DEVICE_ID,
            'mgmt_url': '{"vdu1": "a.b.c.d"}',
            'attributes': {
                'monitoring_policy': json.dumps(
                    MOCK_MEA_DEVICE['monitoring_policy'])
            },
            'status': 'ACTIVE'
        }
        action_cb = mock.MagicMock()
        test_boot_wait = 30
        test_memonitor = monitor.MEAMonitor(test_boot_wait)
        new_dict = test_memonitor.to_hosting_mea(test_device_dict, action_cb)
        test_memonitor.add_hosting_mea(new_dict)
        test_device_id = list(test_memonitor._hosting_meas.keys())[0]
        self.assertEqual(MOCK_DEVICE_ID, test_device_id)
        self._cos_db_plugin.create_event.assert_called_with(
            mock.ANY, res_id=mock.ANY, res_type=constants.RES_TYPE_MEA,
            res_state=mock.ANY, evt_type=constants.RES_EVT_MONITOR,
            tstamp=mock.ANY, details=mock.ANY)

    @mock.patch('apmec.mem.monitor.MEAMonitor.__run__')
    def test_run_monitor(self, mock_monitor_run):
        # FIX: copy the module-level fixture instead of mutating it in
        # place; the original aliased MOCK_MEA_DEVICE and added a 'mea'
        # key that leaked into every other test using the fixture.
        test_hosting_mea = dict(MOCK_MEA_DEVICE)
        test_hosting_mea['mea'] = {}
        test_boot_wait = 30
        mock_kwargs = {
            'count': 1,
            'monitoring_delay': 0,
            'interval': 0,
            'mgmt_ip': 'a.b.c.d',
            'timeout': 2
        }
        test_memonitor = monitor.MEAMonitor(test_boot_wait)
        self.mock_monitor_manager.invoke = mock.MagicMock()
        test_memonitor._monitor_manager = self.mock_monitor_manager
        test_memonitor.run_monitor(test_hosting_mea)
        self.mock_monitor_manager\
            .invoke.assert_called_once_with('ping', 'monitor_call', mea={},
                                            kwargs=mock_kwargs)


# ---------------------------------------------------------------------------
# apmec/tests/unit/mem/test_plugin.py
# ---------------------------------------------------------------------------
# Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime

import mock
from mock import patch
from oslo_utils import uuidutils
import yaml

from apmec import context
from apmec.db.common_services import common_services_db_plugin
from apmec.db.meo import meo_db
from apmec.db.mem import mem_db
from apmec.extensions import mem
from apmec.plugins.common import constants
from apmec.tests.unit.db import base as db_base
from apmec.tests.unit.db import utils
from apmec.mem import plugin


class FakeDriverManager(mock.Mock):
    """Driver-manager stand-in: fabricates ids/resources for invoke()."""

    def invoke(self, *args, **kwargs):
        if 'create' in args:
            return uuidutils.generate_uuid()

        if 'get_resource_info' in args:
            return {'resources': {'name': 'dummy_mea',
                                  'type': 'dummy',
                                  'id': uuidutils.generate_uuid()}}


class FakeMEAMonitor(mock.Mock):
    pass


class FakeGreenPool(mock.Mock):
    pass


class FakeVimClient(mock.Mock):
    pass


class TestMEMPlugin(db_base.SqlTestCase):
    """Tests for plugin.MEMPlugin against an in-memory SQL fixture.

    All external collaborators (VIM client, infra driver manager, eventlet
    pool, monitors) are replaced by mocks in setUp; the _insert_* helpers
    seed the database rows the plugin expects to find.
    """

    def setUp(self):
        super(TestMEMPlugin, self).setUp()
        # Stops every mock.patch started below (and by self._mock helpers).
        self.addCleanup(mock.patch.stopall)
        self.context = context.get_admin_context()
        self._mock_vim_client()
        self._stub_get_vim()
        self._mock_device_manager()
        self._mock_mea_monitor()
        self._mock_mea_alarm_monitor()
        self._mock_green_pool()
        self._insert_dummy_vim()
        self.mem_plugin = plugin.MEMPlugin()
        # Silence event bookkeeping; assertions inspect create_event calls.
        mock.patch('apmec.db.common_services.common_services_db_plugin.'
                   'CommonServicesPluginDb.create_event'
                   ).start()
        self._cos_db_plugin =\
            common_services_db_plugin.CommonServicesPluginDb()

    def _mock_device_manager(self):
        # Replace the infra DriverManager with FakeDriverManager; the
        # __contains__ stub makes any driver name look registered.
        self._device_manager = mock.Mock(wraps=FakeDriverManager())
        self._device_manager.__contains__ = mock.Mock(
            return_value=True)
        fake_device_manager = mock.Mock()
        fake_device_manager.return_value = self._device_manager
        self._mock(
            'apmec.common.driver_manager.DriverManager', fake_device_manager)

    def _mock_vim_client(self):
        # Replace VimClient so no real VIM lookup happens.
        self.vim_client = mock.Mock(wraps=FakeVimClient())
        fake_vim_client = mock.Mock()
        fake_vim_client.return_value = self.vim_client
        self._mock(
            'apmec.mem.vim_client.VimClient', fake_vim_client)

    def _stub_get_vim(self):
        # Canned VIM record returned by the mocked VimClient.get_vim().
        vim_obj = {'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                   'vim_name': 'fake_vim', 'vim_auth':
                   {'auth_url': 'http://localhost:5000', 'password':
                    'test_pw', 'username': 'test_user', 'project_name':
                    'test_project'}, 'vim_type': 'test_vim'}
        self.vim_client.get_vim.return_value = vim_obj

    def _mock_green_pool(self):
        # Replace eventlet.GreenPool so spawn_n calls can be asserted.
        self._pool = mock.Mock(wraps=FakeGreenPool())
        fake_green_pool = mock.Mock()
        fake_green_pool.return_value = self._pool
        self._mock(
            'eventlet.GreenPool', fake_green_pool)

    def _mock_mea_monitor(self):
        self._mea_monitor = mock.Mock(wraps=FakeMEAMonitor())
        fake_mea_monitor = mock.Mock()
        fake_mea_monitor.return_value = self._mea_monitor
        self._mock(
            'apmec.mem.monitor.MEAMonitor', fake_mea_monitor)

    def _mock_mea_alarm_monitor(self):
        self._mea_alarm_monitor = mock.Mock(wraps=FakeMEAMonitor())
        fake_mea_alarm_monitor = mock.Mock()
        fake_mea_alarm_monitor.return_value = self._mea_alarm_monitor
        self._mock(
            'apmec.mem.monitor.MEAAlarmMonitor', fake_mea_alarm_monitor)

    def _insert_dummy_device_template(self):
        # Seed an onboarded MEAD row referenced by the MEA fixtures below.
        session = self.context.session
        device_template = mem_db.MEAD(
            id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_template',
            description='fake_template_description',
            template_source='onboarded',
            deleted_at=datetime.min)
        session.add(device_template)
        session.flush()
        return device_template

    def _insert_dummy_device_template_inline(self):
        # Seed an inline-sourced MEAD row (create-MEA-from-template path).
        session = self.context.session
        device_template = mem_db.MEAD(
            id='d58bcc4e-d0cf-11e6-bf26-cec0c932ce01',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='tmpl-koeak4tqgoqo8cr4-dummy_inline_mea',
            description='inline_fake_template_description',
            deleted_at=datetime.min,
            template_source='inline')
        session.add(device_template)
        session.flush()
        return device_template

    def _insert_dummy_mead_attributes(self, template):
        # Attach a raw template body to the dummy MEAD under key 'mead'.
        session = self.context.session
        mead_attr = mem_db.MEADAttribute(
            id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
            mead_id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
            key='mead',
            value=template)
        session.add(mead_attr)
        session.flush()
        return mead_attr

    def _insert_dummy_device(self):
        # Seed an ACTIVE MEA bound to the dummy MEAD and dummy VIM.
        session = self.context.session
        device_db = mem_db.MEA(
            id='6261579e-d6f3-49ad-8bc3-a9cb974778fe',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_device',
            description='fake_device_description',
            instance_id='da85ea1a-4ec4-4201-bbb2-8d9249eca7ec',
            mead_id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
            vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            placement_attr={'region': 'RegionOne'},
            status='ACTIVE',
            deleted_at=datetime.min)
        session.add(device_db)
        session.flush()
        return device_db

    def _insert_scaling_attributes_mea(self):
        # Scaling-group mapping the scale tests expect on the MEA.
        session = self.context.session
        mea_attributes = mem_db.MEAAttribute(
            id='7800cb81-7ed1-4cf6-8387-746468522651',
            mea_id='6261579e-d6f3-49ad-8bc3-a9cb974778fe',
            key='scaling_group_names',
            value='{"SP1": "G1"}'
        )
        session.add(mea_attributes)
        session.flush()
        return mea_attributes

    def _insert_scaling_attributes_mead(self):
        # Scaling-capable TOSCA template body for the dummy MEAD.
        session = self.context.session
        mead_attributes = mem_db.MEADAttribute(
            id='7800cb81-7ed1-4cf6-8387-746468522650',
            mead_id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
            key='mead',
            value=utils.mead_scale_tosca_template
        )
        session.add(mead_attributes)
        session.flush()
        return mead_attributes

    def _insert_dummy_vim(self):
        # Seed the VIM (and its auth row) that _stub_get_vim points at.
        session = self.context.session
        vim_db = meo_db.Vim(
            id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_vim',
            description='fake_vim_description',
            type='test_vim',
            status='Active',
            deleted_at=datetime.min,
            placement_attr={'regions': ['RegionOne']})
        vim_auth_db = meo_db.VimAuth(
            vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            password='encrypted_pw',
            auth_url='http://localhost:5000',
            vim_project={'name': 'test_project'},
            auth_cred={'username': 'test_user', 'user_domain_id': 'default',
                       'project_domain_id': 'default'})
        session.add(vim_db)
        session.add(vim_auth_db)
        session.flush()

    @mock.patch('apmec.mem.plugin.toscautils.updateimports')
    @mock.patch('apmec.mem.plugin.ToscaTemplate')
    @mock.patch('apmec.mem.plugin.toscautils.get_mgmt_driver')
    def test_create_mead(self, mock_get_mgmt_driver, mock_tosca_template,
                         mock_update_imports):
        mock_get_mgmt_driver.return_value = 'dummy_mgmt_driver'
        mock_tosca_template.return_value = mock.ANY

        mead_obj = utils.get_dummy_mead_obj()
        result = self.mem_plugin.create_mead(self.context, mead_obj)
        self.assertIsNotNone(result)
        self.assertIn('id', result)
        self.assertEqual('dummy_mead', result['name'])
        self.assertEqual('dummy_mead_description', result['description'])
        self.assertEqual('dummy_mgmt_driver', result['mgmt_driver'])
        self.assertIn('service_types', result)
        self.assertIn('attributes', result)
        self.assertIn('created_at', result)
        self.assertIn('updated_at', result)
        self.assertIn('template_source', result)
        yaml_dict = yaml.safe_load(utils.tosca_mead_openwrt)
        mock_tosca_template.assert_called_once_with(
            a_file=False, yaml_dict_tpl=yaml_dict)
        mock_get_mgmt_driver.assert_called_once_with(mock.ANY)
        mock_update_imports.assert_called_once_with(yaml_dict)
        self._cos_db_plugin.create_event.assert_called_once_with(
            self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
            res_state=constants.RES_EVT_ONBOARDED,
            res_type=constants.RES_TYPE_MEAD, tstamp=mock.ANY)

    def test_create_mead_no_service_types(self):
        mead_obj = utils.get_dummy_mead_obj()
        mead_obj['mead'].pop('service_types')
        self.assertRaises(mem.ServiceTypesNotSpecified,
                          self.mem_plugin.create_mead,
                          self.context, mead_obj)

    def test_create_mea_with_mead(self):
        self._insert_dummy_device_template()
        mea_obj = utils.get_dummy_mea_obj()
        result = self.mem_plugin.create_mea(self.context, mea_obj)
        self.assertIsNotNone(result)
        self.assertIn('id', result)
        self.assertIn('instance_id', result)
        self.assertIn('status', result)
        self.assertIn('attributes', result)
        self.assertIn('mgmt_url', result)
        self.assertIn('created_at', result)
        self.assertIn('updated_at', result)
        self._device_manager.invoke.assert_called_with('test_vim',
                                                       'create',
                                                       plugin=mock.ANY,
                                                       context=mock.ANY,
                                                       mea=mock.ANY,
                                                       auth_attr=mock.ANY)
        self._pool.spawn_n.assert_called_once_with(mock.ANY)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_MEA,
            tstamp=mock.ANY, details=mock.ANY)

    @mock.patch('apmec.mem.plugin.MEMPlugin.create_mead')
    def test_create_mea_from_template(self, mock_create_mead):
        self._insert_dummy_device_template_inline()
        mock_create_mead.return_value = {'id':
                                         'd58bcc4e-d0cf-11e6-bf26-cec0c932ce01'}
        mea_obj = utils.get_dummy_inline_mea_obj()
        result = self.mem_plugin.create_mea(self.context, mea_obj)
        self.assertIsNotNone(result)
        self.assertIn('id', result)
        self.assertIn('instance_id', result)
        self.assertIn('status', result)
        self.assertIn('attributes', result)
        self.assertIn('mgmt_url', result)
        self.assertIn('created_at', result)
        self.assertIn('updated_at', result)
        mock_create_mead.assert_called_once_with(mock.ANY, mock.ANY)
        self._device_manager.invoke.assert_called_with('test_vim',
                                                       'create',
                                                       plugin=mock.ANY,
                                                       context=mock.ANY,
                                                       mea=mock.ANY,
                                                       auth_attr=mock.ANY)
        self._pool.spawn_n.assert_called_once_with(mock.ANY)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, evt_type=constants.RES_EVT_CREATE,
            res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_MEA,
            tstamp=mock.ANY, details=mock.ANY)

    def test_show_mea_details_mea_inactive(self):
        self._insert_dummy_device_template()
        mea_obj = utils.get_dummy_mea_obj()
        result = self.mem_plugin.create_mea(self.context, mea_obj)
        self.assertRaises(mem.MEAInactive, self.mem_plugin.get_mea_resources,
                          self.context, result['id'])

    def test_show_mea_details_mea_active(self):
        self._insert_dummy_device_template()
        active_mea = self._insert_dummy_device()
        resources = self.mem_plugin.get_mea_resources(self.context,
                                                      active_mea['id'])[0]
        self.assertIn('name', resources)
        self.assertIn('type', resources)
        self.assertIn('id', resources)

    def test_delete_mea(self):
        self._insert_dummy_device_template()
        dummy_device_obj = self._insert_dummy_device()
        self.mem_plugin.delete_mea(self.context, dummy_device_obj[
            'id'])
        self._device_manager.invoke.assert_called_with('test_vim', 'delete',
                                                       plugin=mock.ANY,
                                                       context=mock.ANY,
                                                       mea_id=mock.ANY,
                                                       auth_attr=mock.ANY,
                                                       region_name=mock.ANY)
        self._mea_monitor.delete_hosting_mea.assert_called_with(mock.ANY)
        self._pool.spawn_n.assert_called_once_with(mock.ANY, mock.ANY,
                                                   mock.ANY, mock.ANY,
                                                   mock.ANY)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, evt_type=constants.RES_EVT_DELETE, res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_MEA,
            tstamp=mock.ANY, details=mock.ANY)

    def test_update_mea(self):
        self._insert_dummy_device_template()
        dummy_device_obj = self._insert_dummy_device()
        mea_config_obj = utils.get_dummy_mea_config_obj()
        result = self.mem_plugin.update_mea(self.context, dummy_device_obj[
            'id'], mea_config_obj)
        self.assertIsNotNone(result)
        self.assertEqual(dummy_device_obj['id'], result['id'])
        self.assertIn('instance_id', result)
        self.assertIn('status', result)
        self.assertIn('attributes', result)
        self.assertIn('mgmt_url', result)
        self.assertIn('updated_at', result)
        self._pool.spawn_n.assert_called_once_with(mock.ANY, mock.ANY,
                                                   mock.ANY, mock.ANY,
                                                   mock.ANY)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_MEA,
            tstamp=mock.ANY)

    def _get_dummy_scaling_policy(self, type):
        # Build the scale request body; `type` is 'in' or 'out'.
        mea_scale = {}
        mea_scale['scale'] = {}
        mea_scale['scale']['type'] = type
        mea_scale['scale']['policy'] = 'SP1'
        return mea_scale

    def _test_scale_mea(self, type, scale_state):
        # Common driver for the scale-in / scale-out tests below.
        # create mead
        self._insert_dummy_device_template()
        self._insert_scaling_attributes_mead()

        # create mea
        dummy_device_obj = self._insert_dummy_device()
        self._insert_scaling_attributes_mea()

        # scale mea
        mea_scale = self._get_dummy_scaling_policy(type)
        self.mem_plugin.create_mea_scale(
            self.context,
            dummy_device_obj['id'],
            mea_scale)

        # validate
        self._device_manager.invoke.assert_called_once_with(
            mock.ANY,
            'scale',
            plugin=mock.ANY,
            context=mock.ANY,
            auth_attr=mock.ANY,
            policy=mock.ANY,
            region_name=mock.ANY
        )

        self._pool.spawn_n.assert_called_once_with(mock.ANY)

        self._cos_db_plugin.create_event.assert_called_with(
            self.context,
            evt_type=constants.RES_EVT_SCALE,
            res_id='6261579e-d6f3-49ad-8bc3-a9cb974778fe',
            res_state=scale_state,
            res_type=constants.RES_TYPE_MEA,
            tstamp=mock.ANY)

    def test_scale_mea_out(self):
        self._test_scale_mea('out', constants.PENDING_SCALE_OUT)

    def test_scale_mea_in(self):
        self._test_scale_mea('in', constants.PENDING_SCALE_IN)

    def _get_dummy_active_mea(self, mead_template):
        # ACTIVE MEA dict carrying the given MEAD template body.
        dummy_mea = utils.get_dummy_device_obj()
        dummy_mea['mead']['attributes']['mead'] = mead_template
        dummy_mea['status'] = 'ACTIVE'
        dummy_mea['instance_id'] = '4c00108e-c69d-4624-842d-389c77311c1d'
        dummy_mea['vim_id'] = '437ac8ef-a8fb-4b6e-8d8a-a5e86a376e8b'
        return dummy_mea

    def _test_create_mea_trigger(self, policy_name, action_value):
        # Common driver for the alarm-trigger tests: the trigger request is
        # expected to be echoed back once the alarm monitor accepts it.
        mea_id = "6261579e-d6f3-49ad-8bc3-a9cb974778fe"
        trigger_request = {"trigger": {"action_name": action_value, "params": {
            "credential": "026kll6n", "data": {"current": "alarm",
                                               'alarm_id':
                                         "b7fa9ffd-0a4f-4165-954b-5a8d0672a35f"}},
            "policy_name": policy_name}}
        expected_result = {"action_name": action_value, "params": {
            "credential": "026kll6n", "data": {"current": "alarm",
            "alarm_id": "b7fa9ffd-0a4f-4165-954b-5a8d0672a35f"}},
            "policy_name": policy_name}
        self._mea_alarm_monitor.process_alarm_for_mea.return_value = True
        trigger_result = self.mem_plugin.create_mea_trigger(
            self.context, mea_id, trigger_request)
        self.assertEqual(expected_result, trigger_result)

    @patch('apmec.db.mem.mem_db.MEMPluginDb.get_mea')
    def test_create_mea_trigger_respawn(self, mock_get_mea):
        dummy_mea = self._get_dummy_active_mea(
            utils.mead_alarm_respawn_tosca_template)
        mock_get_mea.return_value = dummy_mea
        self._test_create_mea_trigger(policy_name="vdu_hcpu_usage_respawning",
                                      action_value="respawn")

    @patch('apmec.db.mem.mem_db.MEMPluginDb.get_mea')
    def test_create_mea_trigger_scale(self, mock_get_mea):
        dummy_mea = self._get_dummy_active_mea(
            utils.mead_alarm_scale_tosca_template)
        mock_get_mea.return_value = dummy_mea
        self._test_create_mea_trigger(policy_name="vdu_hcpu_usage_scaling_out",
                                      action_value="SP1-out")

    @patch('apmec.db.mem.mem_db.MEMPluginDb.get_mea')
    def test_create_mea_trigger_multi_actions(self, mock_get_mea):
        dummy_mea = self._get_dummy_active_mea(
            utils.mead_alarm_multi_actions_tosca_template)
        mock_get_mea.return_value = dummy_mea
        self._test_create_mea_trigger(policy_name="mon_policy_multi_actions",
                                      action_value="respawn&log")

    @patch('apmec.db.mem.mem_db.MEMPluginDb.get_mea')
    def test_get_mea_policies(self, mock_get_mea):
        mea_id = "6261579e-d6f3-49ad-8bc3-a9cb974778fe"
        dummy_mea = self._get_dummy_active_mea(
            utils.mead_alarm_respawn_tosca_template)
        mock_get_mea.return_value = dummy_mea
        policies = self.mem_plugin.get_mea_policies(self.context, mea_id,
            filters={'name': 'vdu1_cpu_usage_monitoring_policy'})
        self.assertEqual(1, len(policies))


# ---------------------------------------------------------------------------
# apmec/tests/unit/mem/test_vim_client.py
# ---------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock

from sqlalchemy.orm import exc as orm_exc

from apmec.extensions import meo
from apmec import manager
from apmec.tests.unit import base
from apmec.mem import vim_client


class TestVIMClient(base.TestCase):
    """Unit tests for VimClient's default-VIM resolution."""

    def setUp(self):
        super(TestVIMClient, self).setUp()
        self.vim_info = {'id': 'aaaa', 'name': 'VIM0',
                         'auth_cred': {'password': '****'}, 'type': 'test_vim'}

    def test_get_vim_without_defined_default_vim(self):
        # When no vim id is supplied and the MEO plugin raises
        # NoResultFound, get_vim must surface VimDefaultNotDefined.
        client = vim_client.VimClient()
        fake_meo_plugin = mock.Mock()
        fake_meo_plugin.get_default_vim.side_effect = orm_exc.NoResultFound()
        fake_service_plugins = mock.Mock()
        fake_service_plugins.get.return_value = fake_meo_plugin
        manager_patch = mock.patch.object(
            manager.ApmecManager, 'get_service_plugins',
            return_value=fake_service_plugins)
        with manager_patch:
            self.assertRaises(meo.VimDefaultNotDefined,
                              client.get_vim, None)


# ---------------------------------------------------------------------------
# apmec/tests/unit/mem/tosca/test_utils.py
# ---------------------------------------------------------------------------
# Copyright 2016 - Nokia
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import codecs
import os

import testtools
import yaml

from apmec.catalogs.tosca import utils as toscautils
from toscaparser import tosca_template
from translator.hot import tosca_translator


def _get_template(name):
    """Read a data-file template (UTF-8) relative to this test module."""
    filename = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "../infra_drivers/openstack/data/", name)
    # FIX: close the file deterministically instead of leaking the handle.
    with codecs.open(filename, encoding='utf-8', errors='strict') as f:
        return f.read()


class TestToscaUtils(testtools.TestCase):
    """Unit tests for the TOSCA helper utilities in catalogs.tosca.utils."""

    # Parsed once at class-definition time and shared by every test.
    tosca_openwrt = _get_template('test_tosca_openwrt.yaml')
    mead_dict = yaml.safe_load(tosca_openwrt)
    toscautils.updateimports(mead_dict)
    tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
                                         yaml_dict_tpl=mead_dict)
    tosca_flavor = _get_template('test_tosca_flavor.yaml')

    def setUp(self):
        super(TestToscaUtils, self).setUp()

    def test_updateimport(self):
        # FIX: toscautils lives in apmec/catalogs/tosca, and its bundled
        # definition files are at apmec/catalogs/tosca/lib/ (see
        # apmec_defs.yaml / apmec_mec_defs.yaml); the previous expectation
        # of './apmec/tosca/lib/' pointed at a non-existent path.
        importspath = os.path.abspath('./apmec/catalogs/tosca/lib/')
        file1 = importspath + '/apmec_defs.yaml'
        file2 = importspath + '/apmec_mec_defs.yaml'
        expected_imports = [file1, file2]
        self.assertEqual(expected_imports, self.mead_dict['imports'])

    def test_get_mgmt_driver(self):
        expected_mgmt_driver = 'openwrt'
        mgmt_driver = toscautils.get_mgmt_driver(self.tosca)
        self.assertEqual(expected_mgmt_driver, mgmt_driver)

    def test_get_vdu_monitoring(self):
        expected_monitoring = {'vdus': {'VDU1': {'ping': {
                               'actions':
                               {'failure': 'respawn'},
                               'name': 'ping',
                               'parameters': {'count': 3,
                                              'interval': 10},
                               'monitoring_params': {'count': 3,
                                                     'interval': 10}}}}}
        monitoring = toscautils.get_vdu_monitoring(self.tosca)
        self.assertEqual(expected_monitoring, monitoring)

    def test_get_mgmt_ports(self):
        expected_mgmt_ports = {'mgmt_ip-VDU1': 'CP1'}
        mgmt_ports = toscautils.get_mgmt_ports(self.tosca)
        self.assertEqual(expected_mgmt_ports, mgmt_ports)

    def test_post_process_template(self):
        tosca2 = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
                                              yaml_dict_tpl=self.mead_dict)
        toscautils.post_process_template(tosca2)
        invalidNodes = 0
        deletedProperties = 0
        convertedProperties = 0
        # FIX: the deleted/converted property checks previously ran AFTER
        # the loop and therefore only inspected the last node template;
        # every node must be free of removed/converted properties.
        for nt in tosca2.nodetemplates:
            if (nt.type_definition.is_derived_from(toscautils.MONITORING) or
                    nt.type_definition.is_derived_from(toscautils.FAILURE) or
                    nt.type_definition.is_derived_from(toscautils.PLACEMENT)):
                invalidNodes += 1

            if nt.type in toscautils.delpropmap.keys():
                for prop in toscautils.delpropmap[nt.type]:
                    for p in nt.get_properties_objects():
                        if prop == p.name:
                            deletedProperties += 1

            if nt.type in toscautils.convert_prop:
                for prop in toscautils.convert_prop[nt.type].keys():
                    for p in nt.get_properties_objects():
                        if prop == p.name:
                            convertedProperties += 1

        self.assertEqual(0, invalidNodes)
        self.assertEqual(0, deletedProperties)
        self.assertEqual(0, convertedProperties)

    def test_post_process_heat_template(self):
        tosca1 = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
                                              yaml_dict_tpl=self.mead_dict)
        toscautils.post_process_template(tosca1)
        translator = tosca_translator.TOSCATranslator(tosca1, {})
        heat_template_yaml = translator.translate()
        expected_heat_tpl = _get_template('hot_tosca_openwrt.yaml')
        mgmt_ports = toscautils.get_mgmt_ports(self.tosca)
        heat_tpl = toscautils.post_process_heat_template(
            heat_template_yaml, mgmt_ports, {}, {}, {})

        heatdict = yaml.safe_load(heat_tpl)
        expecteddict = yaml.safe_load(expected_heat_tpl)
        self.assertEqual(expecteddict, heatdict)

    def test_findvdus(self):
        vdus = toscautils.findvdus(self.tosca)

        self.assertEqual(1, len(vdus))

        for vdu in vdus:
            # Idiom: assertTrue instead of assertEqual(True, ...).
            self.assertTrue(vdu.type_definition.is_derived_from(
                toscautils.APMECVDU))

    def test_get_flavor_dict(self):
        mead_dict = yaml.safe_load(self.tosca_flavor)
        toscautils.updateimports(mead_dict)
        tosca = tosca_template.ToscaTemplate(a_file=False,
                                             yaml_dict_tpl=mead_dict)
        expected_flavor_dict = {
            "VDU1": {
                "vcpus": 2,
                "disk": 10,
                "ram": 512
            }
        }
        actual_flavor_dict = toscautils.get_flavor_dict(tosca)
        self.assertEqual(expected_flavor_dict, actual_flavor_dict)

    def test_add_resources_tpl_for_flavor(self):
        dummy_heat_dict = yaml.safe_load(_get_template(
            'hot_flavor_and_capabilities.yaml'))
        expected_dict = yaml.safe_load(_get_template('hot_flavor.yaml'))
        dummy_heat_res = {
            "flavor": {
                "VDU1": {
                    "vcpus": 2,
                    "ram": 512,
                    "disk": 10
                }
            }
        }
        toscautils.add_resources_tpl(dummy_heat_dict, dummy_heat_res)
        self.assertEqual(expected_dict, dummy_heat_dict)

    def test_get_flavor_dict_extra_specs_all_numa_count(self):
        tosca_fes_all_numa_count = _get_template(
            'tosca_flavor_all_numa_count.yaml')
        mead_dict = yaml.safe_load(tosca_fes_all_numa_count)
        toscautils.updateimports(mead_dict)
        tosca = tosca_template.ToscaTemplate(a_file=False,
                                             yaml_dict_tpl=mead_dict)
        expected_flavor_dict = {
            "VDU1": {
                "vcpus": 8,
                "disk": 10,
                "ram": 4096,
                "extra_specs": {
                    'hw:cpu_policy': 'dedicated', 'hw:mem_page_size': 'any',
                    'hw:cpu_sockets': 2, 'hw:cpu_threads': 2,
                    'hw:numa_nodes': 2, 'hw:cpu_cores': 2,
                    'hw:cpu_threads_policy': 'avoid'
                }
            }
        }
        actual_flavor_dict = toscautils.get_flavor_dict(tosca)
        self.assertEqual(expected_flavor_dict, actual_flavor_dict)

    def test_apmec_conf_heat_extra_specs_all_numa_count(self):
        tosca_fes_all_numa_count = _get_template(
            'tosca_flavor_all_numa_count.yaml')
        mead_dict = yaml.safe_load(tosca_fes_all_numa_count)
        toscautils.updateimports(mead_dict)
        tosca = tosca_template.ToscaTemplate(a_file=False,
                                             yaml_dict_tpl=mead_dict)
        expected_flavor_dict = {
            "VDU1": {
                "vcpus": 8,
                "disk": 10,
                "ram": 4096,
                "extra_specs": {
                    'hw:cpu_policy': 'dedicated', 'hw:mem_page_size': 'any',
                    'hw:cpu_sockets': 2, 'hw:cpu_threads': 2,
                    'hw:numa_nodes': 2, 'hw:cpu_cores': 2,
                    'hw:cpu_threads_policy': 'avoid',
                    'aggregate_instance_extra_specs:mec': 'true'
                }
            }
        }
        actual_flavor_dict = toscautils.get_flavor_dict(
            tosca, {"aggregate_instance_extra_specs:mec": "true"})
        self.assertEqual(expected_flavor_dict, actual_flavor_dict)

    def test_add_resources_tpl_for_image(self):
        dummy_heat_dict = yaml.safe_load(_get_template(
            'hot_image_before_processed_image.yaml'))
        expected_dict = yaml.safe_load(_get_template(
            'hot_image_after_processed_image.yaml'))
        dummy_heat_res = {
            "image": {
                "VDU1": {
                    "location": "http://URL/v1/openwrt.qcow2",
                    "container_format": "bare",
                    "disk_format": "raw"
                }
            }
        }
        toscautils.add_resources_tpl(dummy_heat_dict, dummy_heat_res)
        self.assertEqual(expected_dict, dummy_heat_dict)

    def test_convert_unsupported_res_prop_kilo_ver(self):
        unsupported_res_prop_dict = {'OS::Neutron::Port': {
            'port_security_enabled': 'value_specs', }, }
        dummy_heat_dict = yaml.safe_load(_get_template(
            'hot_tosca_openwrt.yaml'))
        expected_heat_dict = yaml.safe_load(_get_template(
            'hot_tosca_openwrt_kilo.yaml'))
        toscautils.convert_unsupported_res_prop(dummy_heat_dict,
                                                unsupported_res_prop_dict)
        self.assertEqual(expected_heat_dict, dummy_heat_dict)

    def test_check_for_substitution_mappings(self):
        tosca_sb_map = _get_template('../../../../../etc/samples/test-mesd-'
                                     'mead1.yaml')
        param = {'substitution_mappings': {
                 'VL2': {'type': 'tosca.nodes.mec.VL', 'properties': {
                         'network_name': 'net0', 'vendor': 'apmec'}},
                 'VL1': {'type': 'tosca.nodes.mec.VL', 'properties': {
                         'network_name': 'net_mgmt', 'vendor': 'apmec'}},
                 'requirements': {'virtualLink2': 'VL2',
                                  'virtualLink1': 'VL1'}}}
        template = yaml.safe_load(tosca_sb_map)
        toscautils.updateimports(template)
        toscautils.check_for_substitution_mappings(template, param)
        self.assertNotIn('substitution_mappings', param)

    def test_get_block_storage_details(self):
        tosca_vol = _get_template('tosca_block_storage.yaml')
        mead_dict = yaml.safe_load(tosca_vol)
        expected_dict = {
            'volumes': {
                'VB1': {
                    'image': 'cirros-0.3.5-x86_64-disk',
                    'size': '1'
                }
            },
            'volume_attachments': {
                'CB1': {
                    'instance_uuid': {'get_resource': 'VDU1'},
                    'mountpoint': '/dev/vdb',
                    'volume_id': {'get_resource': 'VB1'}}
            }
        }
        volume_details = toscautils.get_block_storage_details(mead_dict)
        self.assertEqual(expected_dict, volume_details)


# ---------------------------------------------------------------------------
# apmec/tests/unit/meo/drivers/vim/test_openstack_driver.py
# ---------------------------------------------------------------------------
# Copyright 2016 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import exceptions
import mock
from oslo_config import cfg

from apmec.extensions import meo
from apmec.meo.drivers.vim import openstack_driver
from apmec.tests.unit import base
from apmec.tests.unit.db import utils

# Options the driver reads from the [keystone_authtoken] section.
OPTS = [cfg.StrOpt('user_domain_id',
                   default='default',
                   help='User Domain Id'),
        cfg.StrOpt('project_domain_id',
                   default='default',
                   help='Project Domain Id'),
        cfg.StrOpt('auth_url',
                   default='http://localhost:5000/v3',
                   help='Keystone endpoint')]

cfg.CONF.register_opts(OPTS, 'keystone_authtoken')
CONF = cfg.CONF


class FakeKeystone(mock.Mock):
    """Recording stand-in for the apmec keystone wrapper."""
    pass


class FakeNeutronClient(mock.Mock):
    """Recording stand-in for the neutron client used in lookups."""
    pass


class FakeKeymgrAPI(mock.Mock):
    """Recording stand-in for the barbican key-manager API."""
    pass


class mock_dict(dict):
    """Dict whose keys are also readable/writable as attributes.

    Lets fixtures mimic API objects (e.g. keystone regions/endpoints)
    that expose their fields as attributes.
    """

    def __getattr__(self, item):
        return self.get(item)

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__


class TestOpenstack_Driver(base.TestCase):
    """Unit tests for the OpenStack VIM driver (register/deregister/
    credential encoding/resource lookup) with keystone, barbican and
    file I/O mocked out."""

    def setUp(self):
        super(TestOpenstack_Driver, self).setUp()
        self._mock_keystone()
        self.keystone.create_key_dir.return_value = 'test_keys'
        self.config_fixture.config(group='vim_keys', openstack='/tmp/')
        self.config_fixture.config(group='vim_keys', use_barbican=False)
        self.openstack_driver = openstack_driver.OpenStack_Driver()
        self.vim_obj = self.get_vim_obj()
        self.auth_obj = utils.get_vim_auth_obj()
        self.addCleanup(mock.patch.stopall)
        self._mock_keymgr()

    def _mock_keystone(self):
        """Replace apmec.mem.keystone.Keystone with a recording fake."""
        self.keystone = mock.Mock(wraps=FakeKeystone())
        fake_keystone = mock.Mock()
        fake_keystone.return_value = self.keystone
        self._mock(
            'apmec.mem.keystone.Keystone', fake_keystone)

    def _mock_keymgr(self):
        """Replace the barbican key manager with a recording fake."""
        self.keymgr = mock.Mock(wraps=FakeKeymgrAPI())
        fake_keymgr = mock.Mock()
        fake_keymgr.return_value = self.keymgr
        self._mock(
            'apmec.keymgr.barbican_key_manager.BarbicanKeyManager',
            fake_keymgr)

    def get_vim_obj(self):
        """Return a VIM dict whose credentials use a local fernet key."""
        return {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                'type': 'openstack',
                'auth_url': 'http://localhost:5000',
                'auth_cred': {'username': 'test_user',
                              'password': 'test_password',
                              'user_domain_name': 'default',
                              'auth_url': 'http://localhost:5000'},
                'name': 'VIM0',
                'vim_project': {'name': 'test_project',
                                'project_domain_name': 'default'}}

    def get_vim_obj_barbican(self):
        """Return a VIM dict whose credentials are stored in barbican."""
        return {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                'type': 'openstack',
                'auth_url': 'http://localhost:5000',
                'auth_cred': {'username': 'test_user',
                              'password': 'test_password',
                              'user_domain_name': 'default',
                              'key_type': 'barbican_key',
                              'secret_uuid': 'fake-secret-uuid',
                              'auth_url': 'http://localhost:5000'},
                'name': 'VIM0',
                'vim_project': {'name': 'test_project',
                                'project_domain_name': 'default'}}

    def test_register_keystone_v3(self):
        regions = [mock_dict({'id': 'RegionOne'})]
        attrs = {'regions.list.return_value': regions}
        keystone_version = 'v3'
        mock_ks_client = mock.Mock(version=keystone_version, **attrs)
        self.keystone.get_version.return_value = keystone_version
        self._test_register_vim(self.vim_obj, mock_ks_client)
        mock_ks_client.regions.list.assert_called_once_with()
        self.keystone.initialize_client.assert_called_once_with(
            version=keystone_version, **self.auth_obj)

    def test_register_keystone_v2(self):
        services_list = [mock_dict({'type': 'orchestration',
                                    'id': 'test_id'})]
        endpoints_regions = mock_dict({'region': 'RegionOne'})
        endpoints_list = [mock_dict({'service_id': 'test_id',
                                     'regions': endpoints_regions})]
        attrs = {'endpoints.list.return_value': endpoints_list,
                 'services.list.return_value': services_list}
        keystone_version = 'v2.0'
        mock_ks_client = mock.Mock(version='v2.0', **attrs)
        self.keystone.get_version.return_value = keystone_version
        # v2.0 auth uses tenant_* keys instead of project_*/domain keys.
        auth_obj = {'tenant_name': 'test_project', 'username': 'test_user',
                    'password': 'test_password',
                    'auth_url': 'http://localhost:5000/v2.0',
                    'tenant_id': None}
        self._test_register_vim(self.vim_obj, mock_ks_client)
        self.keystone.initialize_client.assert_called_once_with(
            version=keystone_version, **auth_obj)

    def _test_register_vim(self, vim_obj, mock_ks_client):
        """Drive register_vim() with file I/O and fernet key mocked out."""
        self.keystone.initialize_client.return_value = mock_ks_client
        fernet_attrs = {'encrypt.return_value': 'encrypted_password'}
        mock_fernet_obj = mock.Mock(**fernet_attrs)
        mock_fernet_key = 'test_fernet_key'
        self.keystone.create_fernet_key.return_value = (mock_fernet_key,
                                                        mock_fernet_obj)
        file_mock = mock.mock_open()
        with mock.patch('six.moves.builtins.open', file_mock, create=True):
            self.openstack_driver.register_vim(None, vim_obj)
        mock_fernet_obj.encrypt.assert_called_once_with(mock.ANY)
        # The fernet key itself is what gets persisted to disk.
        file_mock().write.assert_called_once_with('test_fernet_key')

    @mock.patch('apmec.meo.drivers.vim.openstack_driver.os.remove')
    @mock.patch('apmec.meo.drivers.vim.openstack_driver.os.path'
                '.join')
    def test_deregister_vim(self, mock_os_path, mock_os_remove):
        vim_obj = self.get_vim_obj()
        vim_id = 'my_id'
        vim_obj['id'] = vim_id
        file_path = CONF.vim_keys.openstack + '/' + vim_id
        mock_os_path.return_value = file_path
        self.openstack_driver.deregister_vim(None, vim_obj)
        mock_os_remove.assert_called_once_with(file_path)

    def test_deregister_vim_barbican(self):
        self.keymgr.delete.return_value = None
        vim_obj = self.get_vim_obj_barbican()
        self.openstack_driver.deregister_vim(None, vim_obj)
        self.keymgr.delete.assert_called_once_with(
            None, 'fake-secret-uuid')

    def test_encode_vim_auth_barbican(self):
        self.config_fixture.config(group='vim_keys',
                                   use_barbican=True)
        fernet_attrs = {'encrypt.return_value': 'encrypted_password'}
        mock_fernet_obj = mock.Mock(**fernet_attrs)
        mock_fernet_key = 'test_fernet_key'
        self.keymgr.store.return_value = 'fake-secret-uuid'
        self.keystone.create_fernet_key.return_value = (mock_fernet_key,
                                                        mock_fernet_obj)

        vim_obj = self.get_vim_obj()
        self.openstack_driver.encode_vim_auth(
            None, vim_obj['id'], vim_obj['auth_cred'])

        self.keymgr.store.assert_called_once_with(
            None, 'test_fernet_key')
        mock_fernet_obj.encrypt.assert_called_once_with(mock.ANY)
        # assertEqual(expected, observed): expected value goes first so
        # failure messages read correctly.
        self.assertEqual('barbican_key',
                         vim_obj['auth_cred']['key_type'])
        self.assertEqual('fake-secret-uuid',
                         vim_obj['auth_cred']['secret_uuid'])

    def test_register_vim_invalid_auth(self):
        attrs = {'regions.list.side_effect': exceptions.Unauthorized}
        self._test_register_vim_auth(attrs)

    def test_register_vim_missing_auth(self):
        attrs = {'regions.list.side_effect': exceptions.BadRequest}
        self._test_register_vim_auth(attrs)

    def _test_register_vim_auth(self, attrs):
        """Auth failures during region listing surface as
        VimUnauthorizedException."""
        keystone_version = 'v3'
        mock_ks_client = mock.Mock(version=keystone_version, **attrs)
        self.keystone.get_version.return_value = keystone_version
        self.keystone.initialize_client.return_value = mock_ks_client
        self.assertRaises(meo.VimUnauthorizedException,
                          self.openstack_driver.register_vim,
                          None,
                          self.vim_obj)
        mock_ks_client.regions.list.assert_called_once_with()
        self.keystone.initialize_client.assert_called_once_with(
            version=keystone_version, **self.auth_obj)

    def test_get_vim_resource_id(self):
        resource_type = 'network'
        resource_name = 'net0'
        fake_networks = {'networks': [{'id': 'fake-uuid', 'name': 'net0'}]}
        fake_neutron_client = FakeNeutronClient()
        fake_neutron_client.list_networks.return_value = fake_networks
        self.openstack_driver._get_client = mock.Mock(
            return_value=fake_neutron_client)

        self.openstack_driver.get_vim_resource_id(
            self.vim_obj, resource_type, resource_name)

        self.openstack_driver._get_client.assert_called_once_with(
            self.vim_obj, mock.ANY)
        fake_neutron_client.list_networks.assert_called_once_with(
            **{'name': 'net0'})

    def test_get_vim_resource_id_name_not_unique(self):
        resource_type = 'network'
        resource_name = 'net0'
        fake_networks = {'networks': [{'id': 'fake-uuid-1', 'name': 'net0'},
                                      {'id': 'fake-uuid-2', 'name': 'net0'}]}
        fake_neutron_client = FakeNeutronClient()
        fake_neutron_client.list_networks.return_value = fake_networks
        self.openstack_driver._get_client = mock.Mock(
            return_value=fake_neutron_client)

        self.assertRaises(meo.VimGetResourceNameNotUnique,
                          self.openstack_driver.get_vim_resource_id,
                          self.vim_obj, resource_type, resource_name)

    def test_get_vim_resource_id_name_not_exist(self):
        resource_type = 'network'
        resource_name = 'net0'
        fake_networks = {'networks': []}
        fake_neutron_client = FakeNeutronClient()
        fake_neutron_client.list_networks.return_value = fake_networks
        self.openstack_driver._get_client = mock.Mock(
            return_value=fake_neutron_client)

        self.assertRaises(meo.VimGetResourceNotFoundException,
                          self.openstack_driver.get_vim_resource_id,
                          self.vim_obj, resource_type, resource_name)
#     You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from oslo_utils import uuidutils

from apmec import context
from apmec.meo.drivers.vim import openstack_driver
from apmec.tests.unit import base
from apmec.tests.unit.db import utils


class FakeNeutronClient(mock.Mock):
    """In-memory fake of the networking-sfc neutron client.

    Created flow classifiers, port pairs, port pair groups and port
    chains are kept in dicts keyed by freshly generated UUIDs so the
    show_* lookups behave like the real client's name lookups.
    """

    def __init__(self):
        super(FakeNeutronClient, self).__init__()
        self.__fc_dict = {}
        self.__pp_dict = {}
        self.__ppg_dict = {}
        self.__chain_dict = {}

    def flow_classifier_create(self, fc_create_dict):
        fc_id = uuidutils.generate_uuid()
        self.__fc_dict[fc_id] = fc_create_dict
        return fc_id

    def show_flow_classifier(self, fc_dict):
        # Name-based lookup; mirrors the real client's {'id': ...} shape.
        fc_name = fc_dict['name']
        for fc_id, fc in self.__fc_dict.items():
            if fc_name == fc['name']:
                return {'id': fc_id}
        return None

    def flow_classifier_update(self, fc_id, fc_update_dict):
        if fc_id not in self.__fc_dict:
            return None
        self.__fc_dict[fc_id] = fc_update_dict
        return fc_update_dict

    def flow_classifier_delete(self, fc_id):
        if fc_id not in self.__fc_dict:
            raise ValueError('fc not found')
        self.__fc_dict.pop(fc_id)

    def port_pair_create(self, port_pair):
        pp_id = uuidutils.generate_uuid()
        self.__pp_dict[pp_id] = port_pair
        return pp_id

    def show_port_pair(self, port_pair_dict):
        input_pp_name = port_pair_dict['name']
        for pp_id, port_pair in self.__pp_dict.items():
            if port_pair['name'] == input_pp_name:
                return {'id': pp_id}
        return None

    def port_pair_group_create(self, port_pair_group):
        ppg_id = uuidutils.generate_uuid()
        self.__ppg_dict[ppg_id] = port_pair_group
        return ppg_id

    def show_port_pair_group(self, port_pair_group_dict):
        input_ppg_name = port_pair_group_dict['name']
        for ppg_id, port_pair_group in self.__ppg_dict.items():
            if port_pair_group['name'] == input_ppg_name:
                return {'id': ppg_id}
        return None

    def port_chain_create(self, port_chain):
        chain_id = uuidutils.generate_uuid()
        self.__chain_dict[chain_id] = port_chain
        return chain_id

    def show_port_chain(self, port_chain_dict):
        input_chain_name = port_chain_dict['name']
        for chain_id, port_chain in self.__chain_dict.items():
            if port_chain['name'] == input_chain_name:
                return {'id': chain_id}
        return None

    def port_chain_delete(self, chain_id):
        if chain_id not in self.__chain_dict:
            raise ValueError('port chain delete failed')
        self.__chain_dict.pop(chain_id)


class TestChainSFC(base.TestCase):
    """SFC chain/flow-classifier CRUD tests against the fake client."""

    def setUp(self):
        super(TestChainSFC, self).setUp()
        self.context = context.get_admin_context()
        self.sfc_driver = openstack_driver.OpenStack_Driver()
        self._mock_neutron_client()
        self.addCleanup(mock.patch.stopall)

    def _mock_neutron_client(self):
        """Route the driver's NeutronClient to the in-memory fake."""
        self.neutron_client = mock.Mock(wraps=FakeNeutronClient())
        fake_neutron_client = mock.Mock()
        fake_neutron_client.return_value = self.neutron_client
        self._mock(
            'apmec.meo.drivers.vim.openstack_driver.'
            'NeutronClient',
            fake_neutron_client)

    def _mock(self, target, new=mock.DEFAULT):
        patcher = mock.patch(target, new)
        return patcher.start()

    def test_create_flow_classifier(self):
        flow_classifier = {'name': 'fake_fc',
                           'source_port_range': '2005-2010',
                           'ip_proto': 6,
                           'destination_port_range': '80-180'}
        result = self.sfc_driver.create_flow_classifier(
            name='fake_ffg', fc=flow_classifier,
            auth_attr=utils.get_vim_auth_obj())
        self.assertIsNotNone(result)

    def test_update_flow_classifier(self):
        flow_classifier = {'name': 'next_fake_fc',
                           'description': 'fake flow-classifier',
                           'source_port_range': '2005-2010',
                           'ip_proto': 6,
                           'destination_port_range': '80-180'}
        fc_id = self.sfc_driver.create_flow_classifier(
            name='fake_ffg', fc=flow_classifier,
            auth_attr=utils.get_vim_auth_obj())

        self.assertIsNotNone(fc_id)

        flow_classifier['description'] = 'next fake flow-classifier'

        result = self.sfc_driver.update_flow_classifier(
            fc_id=fc_id,
            fc=flow_classifier,
            auth_attr=utils.get_vim_auth_obj())
        self.assertIsNotNone(result)

    def test_delete_flow_classifier(self):
        flow_classifier = {'name': 'another_fake_fc',
                           'description': 'another flow-classifier',
                           'source_port_range': '1999-2005',
                           'ip_proto': 6,
                           'destination_port_range': '80-100'}
        fc_id = self.sfc_driver.create_flow_classifier(
            name='fake_ffg', fc=flow_classifier,
            auth_attr=utils.get_vim_auth_obj())

        self.assertIsNotNone(fc_id)

        # Call directly: a raised exception must fail the test. The
        # previous try/except wrapper swallowed any error and asserted
        # True, so the test could never fail.
        self.sfc_driver.delete_flow_classifier(
            fc_id=fc_id, auth_attr=utils.get_vim_auth_obj())

    def test_create_chain(self):
        auth_attr = utils.get_vim_auth_obj()
        flow_classifier = {'name': 'test_create_chain_fc',
                           'description': 'fc for testing create chain',
                           'source_port_range': '1997-2008',
                           'ip_proto': 6,
                           'destination_port_range': '80-100'}
        fc_id = self.sfc_driver.create_flow_classifier(
            name='fake_ffg', fc=flow_classifier, auth_attr=auth_attr)

        self.assertIsNotNone(fc_id)

        mea_1 = {'name': 'test_create_chain_mea_1',
                 'connection_points': [uuidutils.generate_uuid(),
                                       uuidutils.generate_uuid()]}
        mea_2 = {'name': 'test_create_chain_mea_2',
                 'connection_points': [uuidutils.generate_uuid(),
                                       uuidutils.generate_uuid()]}
        mea_3 = {'name': 'test_create_chain_mea_3',
                 'connection_points': [uuidutils.generate_uuid(),
                                       uuidutils.generate_uuid()]}
        meas = [mea_1, mea_2, mea_3]

        result = self.sfc_driver.create_chain(name='fake_ffg',
                                              fc_id=fc_id,
                                              meas=meas,
                                              auth_attr=auth_attr)

        self.assertIsNotNone(result)

    def test_delete_chain(self):
        auth_attr = utils.get_vim_auth_obj()
        flow_classifier = {'name': 'test_delete_chain_fc',
                           'description': 'fc for testing delete chain',
                           'source_port_range': '1000-2000',
                           'ip_proto': 6,
                           'destination_port_range': '80-180'}
        fc_id = self.sfc_driver.create_flow_classifier(
            name='fake_ffg', fc=flow_classifier, auth_attr=auth_attr)

        self.assertIsNotNone(fc_id)

        mea_1 = {'name': 'test_delete_chain_mea_1',
                 'connection_points': [uuidutils.generate_uuid(),
                                       uuidutils.generate_uuid()]}
        mea_2 = {'name': 'test_delete_chain_mea_2',
                 'connection_points': [uuidutils.generate_uuid(),
                                       uuidutils.generate_uuid()]}
        mea_3 = {'name': 'test_delete_chain_mea_3',
                 'connection_points': [uuidutils.generate_uuid(),
                                       uuidutils.generate_uuid()]}
        meas = [mea_1, mea_2, mea_3]

        chain_id = self.sfc_driver.create_chain(name='fake_ffg',
                                                fc_id=fc_id,
                                                meas=meas,
                                                auth_attr=auth_attr)

        self.assertIsNotNone(chain_id)

        # Same fix as test_delete_flow_classifier: let failures surface.
        self.sfc_driver.delete_chain(chain_id, auth_attr=auth_attr)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from apmec import context
from apmec.meo.drivers.workflow import workflow_generator
from apmec.tests.unit import base


def get_dummy_mes():
    """Dummy MES (MEC service) payload plus per-MEAD instance details."""
    return {u'mes': {'description': '',
                     'tenant_id': u'a81900a92bda40588c52699e1873a92f',
                     'vim_id': u'96025dd5-ca16-49f3-9823-958eb04260c4',
                     'mea_ids': '', u'attributes': {},
                     u'mesd_id': u'b8587afb-6099-4f56-abce-572c62e3d61d',
                     u'name': u'test_create_mes'},
            'mead_details': {
                u'mea1': {'instances': ['MEA1'],
                          'id': u'dec09ed4-f355-4ec8-a00b-8548f6575a80'},
                u'mea2': {'instances': ['MEA2'],
                          'id': u'9f8f2af7-6407-4f79-a6fe-302c56172231'}},
            'placement_attr': {}}


def get_dummy_param():
    """Dummy substitution-mapping parameters keyed by MEA/MESD."""
    return {u'mea1': {'substitution_mappings': {
                u'VL1b8587afb-60': {
                    'type': 'tosca.nodes.mec.VL',
                    'properties': {'network_name': u'net_mgmt',
                                   'vendor': 'apmec'}},
                'requirements': {'virtualLink2': u'VL2b8587afb-60',
                                 'virtualLink1': u'VL1b8587afb-60'},
                u'VL2b8587afb-60': {
                    'type': 'tosca.nodes.mec.VL',
                    'properties': {'network_name': u'net0',
                                   'vendor': 'apmec'}}}},
            u'mesd': {u'vl2_name': u'net0', u'vl1_name': u'net_mgmt'}}


def get_dummy_create_workflow():
    """Expected Mistral workflow definition for the 'create' action.

    Note: the mgmt_url publish values for the wait tasks intentionally
    keep a leading space (' <% task(...)') to match the generator's
    current output exactly.
    """
    return {'std.create_mea_dummy': {
                'input': ['mea'],
                'tasks': {
                    'wait_mea_active_MEA2': {
                        'action': 'apmec.show_mea mea=<% $.mea_id_MEA2 %>',
                        'retry': {'count': 10, 'delay': 10,
                                  'continue-on': '<% $.status_MEA2 = '
                                                 '"PENDING_CREATE" %>',
                                  'break-on': '<% $.status_MEA2 = '
                                              '"ERROR" %>'},
                        'publish': {
                            'status_MEA2': '<% task(wait_mea_active_MEA2).'
                                           'result.mea.status %>',
                            'mgmt_url_MEA2': ' <% task(wait_mea_active_MEA2).'
                                             'result.mea.mgmt_url %>'},
                        'on-success': [{
                            'delete_mea_MEA2': '<% $.status_MEA2='
                                               '"ERROR" %>'}]},
                    'create_mea_MEA2': {
                        'action': 'apmec.create_mea body=<% $.mea.MEA2 %>',
                        'input': {'body': '<% $.mea.MEA2 %>'},
                        'publish': {
                            'status_MEA2': '<% task(create_mea_MEA2).'
                                           'result.mea.status %>',
                            'vim_id_MEA2': '<% task(create_mea_MEA2).'
                                           'result.mea.vim_id %>',
                            'mgmt_url_MEA2': '<% task(create_mea_MEA2).'
                                             'result.mea.mgmt_url %>',
                            'mea_id_MEA2': '<% task(create_mea_MEA2)'
                                           '.result.mea.id %>'},
                        'on-success': ['wait_mea_active_MEA2']},
                    'create_mea_MEA1': {
                        'action': 'apmec.create_mea body=<% $.mea.MEA1 %>',
                        'input': {'body': '<% $.mea.MEA1 %>'},
                        'publish': {
                            'status_MEA1': '<% task(create_mea_MEA1).'
                                           'result.mea.status %>',
                            'mea_id_MEA1': '<% task(create_mea_MEA1).'
                                           'result.mea.id %>',
                            'mgmt_url_MEA1': '<% task(create_mea_MEA1).'
                                             'result.mea.mgmt_url %>',
                            'vim_id_MEA1': '<% task(create_mea_MEA1).'
                                           'result.mea.vim_id %>'},
                        'on-success': ['wait_mea_active_MEA1']},
                    'wait_mea_active_MEA1': {
                        'action': 'apmec.show_mea mea=<% $.mea_id_MEA1 %>',
                        'retry': {'count': 10, 'delay': 10,
                                  'continue-on': '<% $.status_MEA1 = '
                                                 '"PENDING_CREATE" %>',
                                  'break-on': '<% $.status_MEA1 = '
                                              '"ERROR" %>'},
                        'publish': {
                            'status_MEA1': '<% task(wait_mea_active_MEA1).'
                                           'result.mea.status %>',
                            'mgmt_url_MEA1': ' <% task(wait_mea_active_MEA1).'
                                             'result.mea.mgmt_url %>'},
                        'on-success': [{
                            'delete_mea_MEA1': '<% $.status_MEA1='
                                               '"ERROR" %>'}]},
                    'delete_mea_MEA1': {
                        'action': 'apmec.delete_mea mea=<% '
                                  '$.mea_id_MEA1%>'},
                    'delete_mea_MEA2': {
                        'action': 'apmec.delete_mea mea=<% '
                                  '$.mea_id_MEA2%>'}},
                'type': 'direct',
                'output': {
                    'status_MEA1': '<% $.status_MEA1 %>',
                    'status_MEA2': '<% $.status_MEA2 %>',
                    'mgmt_url_MEA2': '<% $.mgmt_url_MEA2 %>',
                    'mgmt_url_MEA1': '<% $.mgmt_url_MEA1 %>',
                    'vim_id_MEA2': '<% $.vim_id_MEA2 %>',
                    'mea_id_MEA1': '<% $.mea_id_MEA1 %>',
                    'mea_id_MEA2': '<% $.mea_id_MEA2 %>',
                    'vim_id_MEA1': '<% $.vim_id_MEA1 %>'}},
            'version': '2.0'}


def dummy_delete_mes_obj():
    # mea_ids is a *string* representation of a dict, matching what the
    # plugin stores in the DB attribute.
    return {'mea_ids': u"{'MEA1': '5de5eca6-3e21-4bbd-a9d7-86458de75f0c'}"}


def get_dummy_delete_workflow():
    """Expected Mistral workflow definition for the 'delete' action."""
    return {'version': '2.0',
            'std.delete_mea_dummy': {
                'input': ['mea_id_MEA1'],
                'tasks': {'delete_mea_MEA1': {
                    'action': 'apmec.delete_mea mea=<% $.mea_id_MEA1%>'}},
                'type': 'direct'}}


class FakeMistral(object):
    """Placeholder mistral client; the generator never calls it here."""

    def __init__(self):
        pass


class FakeMEOPlugin(object):
    """Minimal plugin shim that owns a WorkflowGenerator instance."""

    def __init__(self, context, client, resource, action):
        self.context = context
        self.client = client
        self.wg = workflow_generator.WorkflowGenerator(resource, action)

    def prepare_workflow(self, **kwargs):
        self.wg.task(**kwargs)


class TestWorkflowGenerator(base.TestCase):
    def setUp(self):
        super(TestWorkflowGenerator, self).setUp()
        self.mistral_client = FakeMistral()

    def test_prepare_workflow_create(self):
        fPlugin = FakeMEOPlugin(context, self.mistral_client,
                                resource='mea', action='create')
        fPlugin.prepare_workflow(mes=get_dummy_mes(), params=get_dummy_param())
        # list(values()) instead of a manual key-indexing comprehension.
        wf_def_values = list(fPlugin.wg.definition.values())
        self.assertIn(get_dummy_create_workflow()['std.create_mea_dummy'],
                      wf_def_values)
        self.assertEqual(get_dummy_create_workflow()['version'],
                         fPlugin.wg.definition['version'])

    def test_prepare_workflow_delete(self):
        fPlugin = FakeMEOPlugin(context, self.mistral_client,
                                resource='mea', action='delete')
        fPlugin.prepare_workflow(mes=dummy_delete_mes_obj())
        wf_def_values = list(fPlugin.wg.definition.values())
        self.assertIn(get_dummy_delete_workflow()['std.delete_mea_dummy'],
                      wf_def_values)
        self.assertEqual(get_dummy_delete_workflow()['version'],
                         fPlugin.wg.definition['version'])
import codecs
from datetime import datetime
import os

import mock
from oslo_utils import uuidutils

from apmec import context
from apmec.db.common_services import common_services_db_plugin
from apmec.db.meo import meo_db
from apmec.extensions import meo
from apmec.meo import meo_plugin
from apmec.plugins.common import constants
from apmec.tests.unit.db import base as db_base
from apmec.tests.unit.db import utils

# The API masks stored passwords with this marker.
SECRET_PASSWORD = '***'
DUMMY_mes_2 = 'ba6bf017-f6f7-45f1-a280-57b073bf78ef'


def dummy_get_vim(*args, **kwargs):
    """Return a minimal openstack VIM object for _get_vim_from_mea."""
    vim_obj = dict()
    vim_obj['auth_cred'] = utils.get_vim_auth_obj()
    vim_obj['type'] = 'openstack'
    return vim_obj


def _get_template(name):
    """Read a sample template from etc/samples relative to this file."""
    filename = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            '../../etc/samples/' + str(name)))
    f = codecs.open(filename, encoding='utf-8', errors='strict')
    return f.read()


class FakeDriverManager(mock.Mock):
    """Fake DriverManager whose invoke() mimics driver side effects."""

    def invoke(self, *args, **kwargs):
        if any(x in ['create', 'create_chain', 'create_flow_classifier'] for
               x in args):
            return uuidutils.generate_uuid()
        elif 'execute_workflow' in args:
            mock_execution = mock.Mock()
            mock_execution.id.return_value = \
                "ba6bf017-f6f7-45f1-a280-57b073bf78ea"
            return mock_execution
        elif 'prepare_and_create_workflow' in args:
            # Use .get() so invocations that omit these kwargs fall
            # through instead of raising KeyError.
            action = kwargs.get('action')
            wf_kwargs = kwargs.get('kwargs') or {}
            if (action == 'delete' and
                    DUMMY_mes_2 == wf_kwargs.get('mes', {}).get('id')):
                raise meo.NoTasksException()
            elif (action == 'create' and
                    utils.DUMMY_mes_2_NAME == wf_kwargs.get(
                        'mes', {}).get('mes', {}).get('name')):
                raise meo.NoTasksException()


def get_by_name():
    return False


def get_by_id():
    return False


def dummy_get_vim_auth(*args, **kwargs):
    """Resolved VIM auth payload as returned by the VIM lookup helper."""
    return {'vim_auth': {u'username': u'admin', 'password': 'devstack',
                         u'project_name': u'mec', u'user_id': u'',
                         u'user_domain_name': u'Default',
                         u'auth_url': u'http://10.0.4.207/identity/v3',
                         u'project_id': u'',
                         u'project_domain_name': u'Default'},
            'vim_id': u'96025dd5-ca16-49f3-9823-958eb04260c4',
            'vim_type': u'openstack', 'vim_name': u'VIM0'}


class FakeClient(mock.Mock):
    def __init__(self, auth):
        pass


class FakeMEMPlugin(mock.Mock):
    """Fake MEM plugin serving canned MEAD/MEA records by id/name."""

    def __init__(self):
        super(FakeMEMPlugin, self).__init__()
        self.mea1_mead_id = 'eb094833-995e-49f0-a047-dfb56aaf7c4e'
        self.mea1_mea_id = '91e32c20-6d1f-47a4-9ba7-08f5e5effe07'
        self.mea3_mead_id = 'e4015e9f-1ef2-49fb-adb6-070791ad3c45'
        # NOTE(review): mea2_mead_id duplicates mea3_mead_id and is never
        # read; get_mead('MEA2') returns mea3_mead_id -- confirm intent.
        self.mea2_mead_id = 'e4015e9f-1ef2-49fb-adb6-070791ad3c45'
        self.mea3_mea_id = '7168062e-9fa1-4203-8cb7-f5c99ff3ee1b'
        self.mea3_update_mea_id = '10f66bc5-b2f1-45b7-a7cd-6dd6ad0017f5'

        self.cp11_id = 'd18c8bae-898a-4932-bff8-d5eac981a9c9'
        self.cp12_id = 'c8906342-3e30-4b2a-9401-a251a7a9b5dd'
        self.cp32_id = '3d1bd2a2-bf0e-44d1-87af-a2c6b2cad3ed'
        self.cp32_update_id = '064c0d99-5a61-4711-9597-2a44dc5da14b'

    def get_mead(self, *args, **kwargs):
        if 'MEA1' in args:
            return {'id': self.mea1_mead_id,
                    'name': 'MEA1',
                    'attributes': {'mead': _get_template(
                        'test-mesd-mead1.yaml')}}
        elif 'MEA2' in args:
            return {'id': self.mea3_mead_id,
                    'name': 'MEA2',
                    'attributes': {'mead': _get_template(
                        'test-mesd-mead2.yaml')}}

    def get_meads(self, *args, **kwargs):
        if {'name': ['MEA1']} in args:
            return [{'id': self.mea1_mead_id}]
        elif {'name': ['MEA3']} in args:
            return [{'id': self.mea3_mead_id}]
        else:
            return []

    def get_meas(self, *args, **kwargs):
        if {'mead_id': [self.mea1_mead_id]} in args:
            return [{'id': self.mea1_mea_id}]
        elif {'mead_id': [self.mea3_mead_id]} in args:
            return [{'id': self.mea3_mea_id}]
        else:
            return None

    def get_mea(self, *args, **kwargs):
        if self.mea1_mea_id in args:
            return self.get_dummy_mea1()
        elif self.mea3_mea_id in args:
            return self.get_dummy_mea3()
        elif self.mea3_update_mea_id in args:
            return self.get_dummy_mea3_update()

    def get_mea_resources(self, *args, **kwargs):
        if self.mea1_mea_id in args:
            return self.get_dummy_mea1_details()
        elif self.mea3_mea_id in args:
            return self.get_dummy_mea3_details()
        elif self.mea3_update_mea_id in args:
            return self.get_dummy_mea3_update_details()

    def get_dummy_mea1_details(self):
        return [{'name': 'CP11', 'id': self.cp11_id},
                {'name': 'CP12', 'id': self.cp12_id}]

    def get_dummy_mea3_details(self):
        return [{'name': 'CP32', 'id': self.cp32_id}]

    def get_dummy_mea3_update_details(self):
        return [{'name': 'CP32', 'id': self.cp32_update_id}]

    def get_dummy_mea1(self):
        return {'description': 'dummy_mea_description',
                'mead_id': self.mea1_mead_id,
                'vim_id': u'6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
                'name': 'dummy_mea1',
                'attributes': {}}

    def get_dummy_mea3(self):
        return {'description': 'dummy_mea_description',
                'mead_id': self.mea3_mead_id,
                'vim_id': u'6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
                'name': 'dummy_mea2',
                'attributes': {}}

    def get_dummy_mea3_update(self):
        return {'description': 'dummy_mea_description',
                'mead_id': self.mea3_mead_id,
                'vim_id': u'6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
                'name': 'dummy_mea_update',
                'attributes': {}}


class TestMeoPlugin(db_base.SqlTestCase):
    """VIM create/delete/update behaviour of MeoPlugin with the driver
    manager and event plugin mocked out."""

    def setUp(self):
        super(TestMeoPlugin, self).setUp()
        self.addCleanup(mock.patch.stopall)
        self.context = context.get_admin_context()
        self._mock_driver_manager()
        mock.patch('apmec.meo.meo_plugin.MeoPlugin._get_vim_from_mea',
                   side_effect=dummy_get_vim).start()
        self.meo_plugin = meo_plugin.MeoPlugin()
        mock.patch('apmec.db.common_services.common_services_db_plugin.'
                   'CommonServicesPluginDb.create_event'
                   ).start()
        self._cos_db_plugin = \
            common_services_db_plugin.CommonServicesPluginDb()

    def _mock_driver_manager(self):
        self._driver_manager = mock.Mock(wraps=FakeDriverManager())
        self._driver_manager.__contains__ = mock.Mock(
            return_value=True)
        fake_driver_manager = mock.Mock()
        fake_driver_manager.return_value = self._driver_manager
        self._mock(
            'apmec.common.driver_manager.DriverManager', fake_driver_manager)

    def _insert_dummy_vim(self):
        """Seed a VIM row whose credentials use a local fernet key."""
        session = self.context.session
        vim_db = meo_db.Vim(
            id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_vim',
            description='fake_vim_description',
            type='openstack',
            status='Active',
            deleted_at=datetime.min,
            placement_attr={'regions': ['RegionOne']})
        vim_auth_db = meo_db.VimAuth(
            vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            password='encrypted_pw',
            auth_url='http://localhost:5000',
            vim_project={'name': 'test_project'},
            auth_cred={'username': 'test_user', 'user_domain_id': 'default',
                       'project_domain_id': 'default',
                       'key_type': 'fernet_key'})
        session.add(vim_db)
        session.add(vim_auth_db)
        session.flush()

    def _insert_dummy_vim_barbican(self):
        """Seed a VIM row whose credentials are stored in barbican."""
        session = self.context.session
        vim_db = meo_db.Vim(
            id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_vim',
            description='fake_vim_description',
            type='openstack',
            status='Active',
            deleted_at=datetime.min,
            placement_attr={'regions': ['RegionOne']})
        vim_auth_db = meo_db.VimAuth(
            vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            password='encrypted_pw',
            auth_url='http://localhost:5000',
            vim_project={'name': 'test_project'},
            auth_cred={'username': 'test_user', 'user_domain_id': 'default',
                       'project_domain_id': 'default',
                       'key_type': 'barbican_key',
                       'secret_uuid': 'fake-secret-uuid'})
        session.add(vim_db)
        session.add(vim_auth_db)
        session.flush()

    def test_create_vim(self):
        vim_dict = utils.get_vim_obj()
        vim_type = 'openstack'
        res = self.meo_plugin.create_vim(self.context, vim_dict)
        self._cos_db_plugin.create_event.assert_any_call(
            self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
            tstamp=mock.ANY)
        self._driver_manager.invoke.assert_any_call(
            vim_type, 'register_vim',
            context=self.context, vim_obj=vim_dict['vim'])
        self.assertIsNotNone(res)
        # The stored password must never be echoed back.
        self.assertEqual(SECRET_PASSWORD, res['auth_cred']['password'])
        self.assertIn('id', res)
        self.assertIn('placement_attr', res)
        self.assertIn('created_at', res)
        self.assertIn('updated_at', res)

    def test_delete_vim(self):
        self._insert_dummy_vim()
        vim_type = u'openstack'
        vim_id = '6261579e-d6f3-49ad-8bc3-a9cb974778ff'
        vim_obj = self.meo_plugin._get_vim(self.context, vim_id)
        self.meo_plugin.delete_vim(self.context, vim_id)
        self._driver_manager.invoke.assert_called_once_with(
            vim_type, 'deregister_vim',
            context=self.context,
            vim_obj=vim_obj)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, evt_type=constants.RES_EVT_DELETE, res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
            tstamp=mock.ANY)

    def test_update_vim(self):
        vim_dict = {'vim': {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                            'vim_project': {'name': 'new_project'},
                            'auth_cred': {'username': 'new_user',
                                          'password': 'new_password'}}}
        vim_type = u'openstack'
        vim_auth_username = vim_dict['vim']['auth_cred']['username']
        vim_project = vim_dict['vim']['vim_project']
        self._insert_dummy_vim()
        res = self.meo_plugin.update_vim(self.context, vim_dict['vim']['id'],
                                         vim_dict)
        vim_obj = self.meo_plugin._get_vim(
            self.context, vim_dict['vim']['id'])
        # updated_at varies per run; neutralize it before the mock compare.
        vim_obj['updated_at'] = None
        self._driver_manager.invoke.assert_called_with(
            vim_type, 'register_vim',
            context=self.context,
            vim_obj=vim_obj)
        self.assertIsNotNone(res)
        self.assertIn('id', res)
        self.assertIn('placement_attr', res)
        self.assertEqual(vim_project, res['vim_project'])
        self.assertEqual(vim_auth_username, res['auth_cred']['username'])
        self.assertEqual(SECRET_PASSWORD, res['auth_cred']['password'])
        self.assertIn('updated_at', res)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
            tstamp=mock.ANY)

    def test_update_vim_barbican(self):
        vim_dict = {'vim': {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                            'vim_project': {'name': 'new_project'},
                            'auth_cred': {'username': 'new_user',
                                          'password': 'new_password'}}}
        vim_type = u'openstack'
        vim_auth_username = vim_dict['vim']['auth_cred']['username']
        vim_project = vim_dict['vim']['vim_project']
        self._insert_dummy_vim_barbican()
        old_vim_obj = self.meo_plugin._get_vim(
            self.context, vim_dict['vim']['id'])
        res = self.meo_plugin.update_vim(self.context, vim_dict['vim']['id'],
                                         vim_dict)
        vim_obj = self.meo_plugin._get_vim(
            self.context, vim_dict['vim']['id'])
        vim_obj['updated_at'] = None
        # Updating barbican-backed credentials must delete the old secret.
        self._driver_manager.invoke.assert_called_with(
            vim_type, 'delete_vim_auth',
            context=self.context,
            vim_id=vim_obj['id'],
            auth=old_vim_obj['auth_cred'])
        self.assertIsNotNone(res)
        self.assertIn('id', res)
        self.assertIn('placement_attr', res)
        self.assertEqual(vim_project, res['vim_project'])
        self.assertEqual(vim_auth_username, res['auth_cred']['username'])
        self.assertEqual(SECRET_PASSWORD, res['auth_cred']['password'])
        self.assertIn('updated_at', res)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
            tstamp=mock.ANY)
# --- new file: apmec/tests/unit/test_alarm_receiver.py ---
# Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from webob import Request

from apmec.alarm_receiver import AlarmReceiver
from apmec.tests.unit import base


class TestAlarmReceiver(base.TestCase):
    # Exercises AlarmReceiver URL parsing and request rewriting for alarm
    # webhook callbacks.

    def setUp(self):
        '''url:

        http://apmec:9896/v1.0/meas/mea-uuid/mon-policy-name/
        action-name/8ef785
        '''
        super(TestAlarmReceiver, self).setUp()
        self.alarmrc = AlarmReceiver(None)
        # Keys are ordered fragments of the alarm callback URL; '05_key' is
        # a placeholder credential segment.
        self.alarm_url = {
            '00_base': 'http://apmec:9896/v1.0',
            '01_url_base': '/meas/mea-uuid/',
            '02_mea_id': 'mea-uuid',
            '03_monitoring_policy_name': 'mon-policy-name',
            '04_action_name': 'action-name',
            '05_key': 'KEY'
        }
        self.mea_id = 'mea-uuid'
        self.ordered_url = self._generate_alarm_url()

    def _generate_alarm_url(self):
        # Concrete sample of the URL documented in setUp().
        return 'http://apmec:9896/v1.0/meas/mea-uuid/mon-policy-name/'\
               'action-name/8ef785'

    def test_handle_url(self):
        # handle_url must split the callback URL into the /meas/<id>/ prefix
        # and positional path segments (mea id, policy, action at p[3..5]).
        prefix_url, p, params = self.alarmrc.handle_url(self.ordered_url)
        self.assertEqual(self.alarm_url['01_url_base'], prefix_url)
        self.assertEqual(self.alarm_url['02_mea_id'], p[3])
        self.assertEqual(self.alarm_url['03_monitoring_policy_name'], p[4])
        self.assertEqual(self.alarm_url['04_action_name'], p[5])

    @mock.patch('apmec.mem.monitor_drivers.token.Token.create_token')
    def test_process_request(self, mock_token):
        # process_request must rewrite the POST into a 'triggers' API call,
        # leaving a non-empty body.
        req = Request.blank(self.ordered_url)
        req.method = 'POST'
        self.alarmrc.process_request(req)
        self.assertIsNotNone(req.body)
        self.assertIn('triggers', req.environ['PATH_INFO'])

# --- new file: apmec/tests/unit/test_api_api_common.py ---
# Copyright (c) 2013 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from testtools import matchers
from webob import exc

from apmec.api import api_common as common
from apmec.tests import base


class FakeController(common.ApmecController):
    # Minimal concrete controller so _prepare_request_body can be tested.
    _resource_name = 'fake'


class APICommonTestCase(base.BaseTestCase):
    # Tests ApmecController._prepare_request_body: required-param checking
    # and default filling.

    def setUp(self):
        super(APICommonTestCase, self).setUp()
        self.controller = FakeController(None)

    def test_prepare_request_body(self):
        # Optional params absent from the body get their default-value.
        body = {
            'fake': {
                'name': 'terminator',
                'model': 'T-800',
            }
        }
        params = [
            {'param-name': 'name',
             'required': True},
            {'param-name': 'model',
             'required': True},
            {'param-name': 'quote',
             'required': False,
             'default-value': "i'll be back"},
        ]
        expect = {
            'fake': {
                'name': 'terminator',
                'model': 'T-800',
                'quote': "i'll be back",
            }
        }
        actual = self.controller._prepare_request_body(body, params)
        self.assertThat(expect, matchers.Equals(actual))

    def test_prepare_request_body_none(self):
        # A None body is treated as empty and populated from defaults.
        body = None
        params = [
            {'param-name': 'quote',
             'required': False,
             'default-value': "I'll be back"},
        ]
        expect = {
            'fake': {
                'quote': "I'll be back",
            }
        }
        actual = self.controller._prepare_request_body(body, params)
        self.assertThat(expect, matchers.Equals(actual))

    def test_prepare_request_body_keyerror(self):
        # A body keyed by the wrong resource name raises HTTPBadRequest.
        body = {'t2': {}}
        params = []
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller._prepare_request_body,
                          body,
                          params)

    def test_prepare_request_param_value_none(self):
        # A required param explicitly set to None raises HTTPBadRequest.
        body = {
            'fake': {
                'name': None,
            }
        }
        params = [
            {'param-name': 'name',
             'required': True},
        ]
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller._prepare_request_body,
                          body,
                          params)

# --- new file: apmec/tests/unit/test_api_v2.py ---
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Module prologue of apmec/tests/unit/test_api_v2.py: imports, URL helper,
# base fixtures, and the APIv2TestCase query-string tests (fields, filters,
# pagination, sorting).

import os

import mock
from oslo_config import cfg
from oslo_policy import policy as common_policy
from oslo_utils import uuidutils
import six
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
import webtest

from apmec.api import api_common
from apmec.api import extensions
from apmec.api.v1 import attributes
from apmec.api.v1 import base as v2_base
from apmec.api.v1 import router
from apmec.common import exceptions as n_exc
from apmec import context
from apmec import manager
from apmec import policy
from apmec.tests import base
from apmec.tests import fake_notifier
from apmec.tests.unit import testlib_api


ROOTDIR = os.path.dirname(os.path.dirname(__file__))
EXTDIR = os.path.join(ROOTDIR, 'unit/extensions')

_uuid = uuidutils.generate_uuid


def _get_path(resource, id=None, action=None, fmt=None):
    # Build "/<resource>[/<id>][/<action>][.<fmt>]" request paths for tests.
    path = '/%s' % resource

    if id is not None:
        path = path + '/%s' % id

    if action is not None:
        path = path + '/%s' % action

    if fmt is not None:
        path = path + '.%s' % fmt

    return path


class ResourceIndexTestCase(base.BaseTestCase):
    def test_index_json(self):
        # The Index app must list each resource with collection, name and a
        # self link pointing at the collection URL.
        index = webtest.TestApp(router.Index({'foo': 'bar'}))
        res = index.get('')

        self.assertIn('resources', res.json)
        self.assertEqual(1, len(res.json['resources']))

        resource = res.json['resources'][0]
        self.assertIn('collection', resource)
        self.assertEqual('bar', resource['collection'])

        self.assertIn('name', resource)
        self.assertEqual('foo', resource['name'])

        self.assertIn('links', resource)
        self.assertEqual(1, len(resource['links']))

        link = resource['links'][0]
        self.assertIn('href', link)
        self.assertEqual('http://localhost/bar', link['href'])
        self.assertIn('rel', link)
        self.assertEqual('self', link['rel'])


class APIv2TestBase(base.BaseTestCase):
    # Common fixture: a mocked core plugin with native pagination/sorting
    # behind a webtest-wrapped APIRouter.
    def setUp(self):
        super(APIv2TestBase, self).setUp()
        # NOTE(review): the whole fixture is skipped upstream; these tests
        # are inert until the plugin base is ready.
        self.skip("Not ready yet")
        plugin = 'apmec.apmec_plugin_base_v2.ApmecPluginBaseV2'
        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None
        # Create the default configurations
        self.config_parse()
        # Update the plugin
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override('allow_pagination', True)
        cfg.CONF.set_override('allow_sorting', True)
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        instance = self.plugin.return_value
        instance._ApmecPluginBaseV2__native_pagination_support = True
        instance._ApmecPluginBaseV2__native_sorting_support = True

        api = router.APIRouter()
        self.api = webtest.TestApp(api)


class _ArgMatcher(object):
    """An adapter to assist mock assertions, used to custom compare."""

    def __init__(self, cmp, obj):
        self.cmp = cmp
        self.obj = obj

    def __eq__(self, other):
        return self.cmp(self.obj, other)

    def __ne__(self, other):
        return not self.__eq__(other)


def _list_cmp(l1, l2):
    # Order-insensitive list comparison used with _ArgMatcher.
    return set(l1) == set(l2)


class APIv2TestCase(APIv2TestBase):
    def _do_field_list(self, resource, base_fields):
        # The API always adds policy-required and primary-key attributes to
        # any user-requested field list.
        attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource]
        policy_attrs = [name for (name, info) in attr_info.items()
                        if info.get('required_by_policy') or
                        info.get('primary_key')]
        fields = base_fields
        fields.extend(policy_attrs)
        return fields

    def _get_collection_kwargs(self, skipargs=None, **kwargs):
        # Expected kwargs of plugin.get_<collection>: every list argument
        # defaults to mock.ANY unless overridden or skipped.
        args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
                     'page_reverse']
        if skipargs is None:
            skipargs = []
        args_dict = dict((arg, mock.ANY)
                         for arg in set(args_list) - set(skipargs))
        args_dict.update(kwargs)
        return args_dict

    def test_fields(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'fields': 'foo'})
        fields = self._do_field_list('networks', ['foo'])
        kwargs = self._get_collection_kwargs(fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_fields_multiple(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        fields = self._do_field_list('networks', ['foo', 'bar'])
        self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
        kwargs = self._get_collection_kwargs(fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_fields_multiple_with_empty(self):
        # Empty strings in a multi-value 'fields' parameter are dropped.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        fields = self._do_field_list('networks', ['foo'])
        self.api.get(_get_path('networks'), {'fields': ['foo', '']})
        kwargs = self._get_collection_kwargs(fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_fields_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'fields': ''})
        kwargs = self._get_collection_kwargs(fields=[])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_fields_multiple_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'fields': ['', '']})
        kwargs = self._get_collection_kwargs(fields=[])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters(self):
        # Scalar query parameters become single-element filter lists.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'name': 'bar'})
        filters = {'name': ['bar']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'name': ''})
        filters = {}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_multiple_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'name': ['', '']})
        filters = {}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_multiple_with_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'name': ['bar', '']})
        filters = {'name': ['bar']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_multiple_values(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
        filters = {'name': ['bar', 'bar2']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_multiple(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'name': 'bar',
                                             'tenant_id': 'bar2'})
        filters = {'name': ['bar'], 'tenant_id': ['bar2']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_with_fields(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
        filters = {'name': ['bar']}
        fields = self._do_field_list('networks', ['foo'])
        kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_with_convert_to(self):
        # String filter values are converted per the attribute map
        # (admin_state_up: 'true' -> True).
        instance = self.plugin.return_value
        instance.get_ports.return_value = []

        self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
        filters = {'admin_state_up': [True]}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_with_convert_list_to(self):
        # key=value list filters are converted into nested filter dicts.
        instance = self.plugin.return_value
        instance.get_ports.return_value = []

        self.api.get(_get_path('ports'),
                     {'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
        filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'),
                     {'limit': '10'})
        kwargs = self._get_collection_kwargs(limit=10)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_great_than_max_limit(self):
        # Requested limits above pagination_max_limit are clamped.
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'),
                     {'limit': '1001'})
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_zero(self):
        # limit=0 falls back to the configured maximum.
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'), {'limit': '0'})
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_unspecific(self):
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_negative_value(self):
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        res = self.api.get(_get_path('networks'), {'limit': -1},
                           expect_errors=True)
        self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_limit_with_non_integer(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        res = self.api.get(_get_path('networks'),
                           {'limit': 'abc'}, expect_errors=True)
        self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_limit_with_infinite_pagination_max_limit(self):
        # Non-numeric or negative max-limit settings disable the cap.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_override('pagination_max_limit', 'Infinite')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_negative_pagination_max_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_default('pagination_max_limit', '-1')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_non_integer_pagination_max_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_default('pagination_max_limit', 'abc')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_marker(self):
        cfg.CONF.set_override('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        marker = _uuid()
        self.api.get(_get_path('networks'),
                     {'marker': marker})
        kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_page_reverse(self):
        calls = []
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'True'})
        kwargs = self._get_collection_kwargs(page_reverse=True)
        calls.append(mock.call.get_networks(mock.ANY, **kwargs))
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'False'})
        kwargs = self._get_collection_kwargs(page_reverse=False)
        calls.append(mock.call.get_networks(mock.ANY, **kwargs))
        # NOTE(review): the second call is recorded in `calls` but never
        # asserted — looks like a missing assertion upstream; confirm intent.

    def test_page_reverse_with_non_bool(self):
        # An unparseable page_reverse value defaults to False.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'),
                     {'page_reverse': 'abc'})
        kwargs = self._get_collection_kwargs(page_reverse=False)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_page_reverse_with_unspecific(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(page_reverse=False)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_sort(self):
        # sort_dir 'desc'/'asc' maps to (key, False/True); the primary key
        # 'id' is appended ascending when not requested.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'),
                     {'sort_key': ['name', 'admin_state_up'],
                      'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', True)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_sort_with_primary_key(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        self.api.get(_get_path('networks'),
                     {'sort_key': ['name', 'admin_state_up', 'id'],
                      'sort_dir': ['desc', 'asc', 'desc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', False)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_sort_without_direction(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
                           expect_errors=True)
        self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_sort_with_invalid_attribute(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        res = self.api.get(_get_path('networks'),
                           {'sort_key': 'abc',
                            'sort_dir': 'asc'},
                           expect_errors=True)
        self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_sort_with_invalid_dirs(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []

        res = self.api.get(_get_path('networks'),
                           {'sort_key': 'name',
                            'sort_dir': 'abc'},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_emulated_sort(self):
        # Without native support the router sorts/paginates itself, so the
        # plugin is called without sorts/limit/marker/page_reverse.
        instance = self.plugin.return_value
        instance._ApmecPluginBaseV2__native_pagination_support = False
        instance._ApmecPluginBaseV2__native_sorting_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
                                        'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(
            skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_emulated_sort_without_sort_field(self):
        # Emulated sorting must fetch the sort keys even when the caller's
        # field list omits them.
        instance = self.plugin.return_value
        instance._ApmecPluginBaseV2__native_pagination_support = False
        instance._ApmecPluginBaseV2__native_sorting_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
                                        'sort_dir': ['desc', 'asc'],
                                        'fields': ['subnets']})
        kwargs = self._get_collection_kwargs(
            skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
            fields=_ArgMatcher(_list_cmp, ['name',
                                           'status',
                                           'id',
                                           'subnets',
                                           'shared',
                                           'tenant_id']))
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_emulated_pagination(self):
        instance = self.plugin.return_value
        instance._ApmecPluginBaseV2__native_pagination_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'limit': 10,
                                        'marker': 'foo',
                                        'page_reverse': False})
        kwargs = self._get_collection_kwargs(skipargs=['limit',
                                                       'marker',
                                                       'page_reverse'])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_native_pagination_without_native_sorting(self):
        # Native pagination without native sorting is an invalid combination.
        instance = self.plugin.return_value
        instance._ApmecPluginBaseV2__native_sorting_support = False
        self.assertRaises(n_exc.Invalid, router.APIRouter)

    def test_native_pagination_without_allow_sorting(self):
        cfg.CONF.set_override('allow_sorting', False)
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'),
                {'sort_key': ['name', 'admin_state_up'],
                 'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', True)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)


# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
+class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase): + def setUp(self): + super(JSONV2TestCase, self).setUp() + self.skip("Not ready yet") + + def _test_list(self, req_tenant_id, real_tenant_id): + env = {} + if req_tenant_id: + env = {'apmec.context': context.Context('', req_tenant_id)} + input_dict = {'id': uuidutils.generate_uuid(), + 'name': 'net1', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': real_tenant_id, + 'shared': False, + 'subnets': []} + return_value = [input_dict] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + + res = self.api.get(_get_path('networks', + fmt=self.fmt), extra_environ=env) + res = self.deserialize(res) + self.assertIn('networks', res) + if not req_tenant_id or req_tenant_id == real_tenant_id: + # expect full list returned + self.assertEqual(1, len(res['networks'])) + output_dict = res['networks'][0] + input_dict['shared'] = False + self.assertEqual(len(input_dict), len(output_dict)) + for k, v in input_dict.items(): + self.assertEqual(v, output_dict[k]) + else: + # expect no results + self.assertEqual(0, len(res['networks'])) + + def test_list_noauth(self): + self._test_list(None, _uuid()) + + def test_list_keystone(self): + tenant_id = _uuid() + self._test_list(tenant_id, tenant_id) + + def test_list_keystone_bad(self): + tenant_id = _uuid() + self._test_list(tenant_id + "bad", tenant_id) + + def test_list_pagination(self): + id1 = str(_uuid()) + id2 = str(_uuid()) + input_dict1 = {'id': id1, + 'name': 'net1', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': '', + 'shared': False, + 'subnets': []} + input_dict2 = {'id': id2, + 'name': 'net2', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': '', + 'shared': False, + 'subnets': []} + return_value = [input_dict1, input_dict2] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': [str(_uuid())], + 'sort_key': 
['name'], + 'sort_dir': ['asc']} + res = self.api.get(_get_path('networks'), + params=params).json + + self.assertEqual(2, len(res['networks'])) + self.assertEqual(sorted([id1, id2]), + sorted([res['networks'][0]['id'], + res['networks'][1]['id']])) + + self.assertIn('networks_links', res) + next_links = [] + previous_links = [] + for r in res['networks_links']: + if r['rel'] == 'next': + next_links.append(r) + if r['rel'] == 'previous': + previous_links.append(r) + self.assertEqual(1, len(next_links)) + self.assertEqual(1, len(previous_links)) + + url = urlparse.urlparse(next_links[0]['href']) + self.assertEqual(_get_path('networks'), url.path) + params['marker'] = [id2] + self.assertEqual(params, urlparse.parse_qs(url.query)) + + url = urlparse.urlparse(previous_links[0]['href']) + self.assertEqual(_get_path('networks'), url.path) + params['marker'] = [id1] + params['page_reverse'] = ['True'] + self.assertEqual(params, urlparse.parse_qs(url.query)) + + def test_list_pagination_with_last_page(self): + id = str(_uuid()) + input_dict = {'id': id, + 'name': 'net1', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': '', + 'shared': False, + 'subnets': []} + return_value = [input_dict] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': str(_uuid())} + res = self.api.get(_get_path('networks'), + params=params).json + + self.assertEqual(1, len(res['networks'])) + self.assertEqual(id, res['networks'][0]['id']) + + self.assertIn('networks_links', res) + previous_links = [] + for r in res['networks_links']: + self.assertNotEqual(r['rel'], 'next') + if r['rel'] == 'previous': + previous_links.append(r) + self.assertEqual(1, len(previous_links)) + + url = urlparse.urlparse(previous_links[0]['href']) + self.assertEqual(_get_path('networks'), url.path) + expect_params = params.copy() + expect_params['marker'] = [id] + expect_params['page_reverse'] = ['True'] + 
self.assertEqual(expect_params, urlparse.parse_qs(url.query)) + + def test_list_pagination_with_empty_page(self): + return_value = [] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': str(_uuid())} + res = self.api.get(_get_path('networks'), + params=params).json + + self.assertEqual([], res['networks']) + + previous_links = [] + if 'networks_links' in res: + for r in res['networks_links']: + self.assertNotEqual(r['rel'], 'next') + if r['rel'] == 'previous': + previous_links.append(r) + self.assertEqual(1, len(previous_links)) + + url = urlparse.urlparse(previous_links[0]['href']) + self.assertEqual(_get_path('networks'), url.path) + expect_params = params.copy() + del expect_params['marker'] + expect_params['page_reverse'] = ['True'] + self.assertEqual(expect_params, urlparse.parse_qs(url.query)) + + def test_list_pagination_reverse_with_last_page(self): + id = str(_uuid()) + input_dict = {'id': id, + 'name': 'net1', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': '', + 'shared': False, + 'subnets': []} + return_value = [input_dict] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': [str(_uuid())], + 'page_reverse': ['True']} + res = self.api.get(_get_path('networks'), + params=params).json + + self.assertEqual(1, len(res['networks'])) + self.assertEqual(id, res['networks'][0]['id']) + + self.assertIn('networks_links', res) + next_links = [] + for r in res['networks_links']: + self.assertNotEqual(r['rel'], 'previous') + if r['rel'] == 'next': + next_links.append(r) + self.assertEqual(1, len(next_links)) + + url = urlparse.urlparse(next_links[0]['href']) + self.assertEqual(_get_path('networks'), url.path) + expected_params = params.copy() + del expected_params['page_reverse'] + expected_params['marker'] = [id] + self.assertEqual(expected_params, + urlparse.parse_qs(url.query)) + + def 
test_list_pagination_reverse_with_empty_page(self): + return_value = [] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': [str(_uuid())], + 'page_reverse': ['True']} + res = self.api.get(_get_path('networks'), + params=params).json + self.assertEqual([], res['networks']) + + next_links = [] + if 'networks_links' in res: + for r in res['networks_links']: + self.assertNotEqual(r['rel'], 'previous') + if r['rel'] == 'next': + next_links.append(r) + self.assertEqual(1, len(next_links)) + + url = urlparse.urlparse(next_links[0]['href']) + self.assertEqual(_get_path('networks'), url.path) + expect_params = params.copy() + del expect_params['marker'] + del expect_params['page_reverse'] + self.assertEqual(expect_params, urlparse.parse_qs(url.query)) + + def test_create(self): + net_id = _uuid() + data = {'network': {'name': 'net1', 'admin_state_up': True, + 'tenant_id': _uuid()}} + return_value = {'subnets': [], 'status': "ACTIVE", + 'id': net_id} + return_value.update(data['network'].copy()) + + instance = self.plugin.return_value + instance.create_network.return_value = return_value + instance.get_networks_count.return_value = 0 + + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt) + self.assertEqual(exc.HTTPCreated.code, res.status_int) + res = self.deserialize(res) + self.assertIn('network', res) + net = res['network'] + self.assertEqual(net_id, net['id']) + self.assertEqual("ACTIVE", net['status']) + + def test_create_use_defaults(self): + net_id = _uuid() + initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} + full_input = {'network': {'admin_state_up': True, + 'shared': False}} + full_input['network'].update(initial_input['network']) + + return_value = {'id': net_id, 'status': "ACTIVE"} + return_value.update(full_input['network']) + + instance = self.plugin.return_value + 
instance.create_network.return_value = return_value + instance.get_networks_count.return_value = 0 + + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(initial_input), + content_type='application/' + self.fmt) + instance.create_network.assert_called_with(mock.ANY, + network=full_input) + self.assertEqual(exc.HTTPCreated.code, res.status_int) + res = self.deserialize(res) + self.assertIn('network', res) + net = res['network'] + self.assertEqual(net_id, net['id']) + self.assertEqual(True, net['admin_state_up']) + self.assertEqual("ACTIVE", net['status']) + + def test_create_no_keystone_env(self): + data = {'name': 'net1'} + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(exc.HTTPBadRequest.code, res.status_int) + + def test_create_with_keystone_env(self): + tenant_id = _uuid() + net_id = _uuid() + env = {'apmec.context': context.Context('', tenant_id)} + # tenant_id should be fetched from env + initial_input = {'network': {'name': 'net1'}} + full_input = {'network': {'admin_state_up': True, + 'shared': False, 'tenant_id': tenant_id}} + full_input['network'].update(initial_input['network']) + + return_value = {'id': net_id, 'status': "ACTIVE"} + return_value.update(full_input['network']) + + instance = self.plugin.return_value + instance.create_network.return_value = return_value + instance.get_networks_count.return_value = 0 + + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(initial_input), + content_type='application/' + self.fmt, + extra_environ=env) + + instance.create_network.assert_called_with(mock.ANY, + network=full_input) + self.assertEqual(exc.HTTPCreated.code, res.status_int) + + def test_create_bad_keystone_tenant(self): + tenant_id = _uuid() + data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} + env = {'apmec.context': context.Context('', tenant_id + "bad")} + res = 
self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True, + extra_environ=env) + self.assertEqual(exc.HTTPBadRequest.code, res.status_int) + + def test_create_no_body(self): + data = {'whoa': None} + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(exc.HTTPBadRequest.code, res.status_int) + + def test_create_no_resource(self): + data = {} + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(exc.HTTPBadRequest.code, res.status_int) + + def test_create_missing_attr(self): + data = {'port': {'what': 'who', 'tenant_id': _uuid()}} + res = self.api.post(_get_path('ports', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(400, res.status_int) + + def test_create_readonly_attr(self): + data = {'network': {'name': 'net1', 'tenant_id': _uuid(), + 'status': "ACTIVE"}} + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(400, res.status_int) + + def test_create_bulk(self): + data = {'networks': [{'name': 'net1', + 'admin_state_up': True, + 'tenant_id': _uuid()}, + {'name': 'net2', + 'admin_state_up': True, + 'tenant_id': _uuid()}]} + + def side_effect(context, network): + net = network.copy() + net['network'].update({'subnets': []}) + return net['network'] + + instance = self.plugin.return_value + instance.create_network.side_effect = side_effect + instance.get_networks_count.return_value = 0 + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt) + self.assertEqual(exc.HTTPCreated.code, res.status_int) + + def 
test_create_bulk_no_networks(self): + data = {'networks': []} + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(exc.HTTPBadRequest.code, res.status_int) + + def test_create_bulk_missing_attr(self): + data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]} + res = self.api.post(_get_path('ports', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(400, res.status_int) + + def test_create_bulk_partial_body(self): + data = {'ports': [{'device_id': 'device_1', + 'tenant_id': _uuid()}, + {'tenant_id': _uuid()}]} + res = self.api.post(_get_path('ports', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(400, res.status_int) + + def test_create_attr_not_specified(self): + net_id = _uuid() + tenant_id = _uuid() + device_id = _uuid() + initial_input = {'port': {'name': '', 'network_id': net_id, + 'tenant_id': tenant_id, + 'device_id': device_id, + 'admin_state_up': True}} + full_input = {'port': {'admin_state_up': True, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, + 'device_owner': ''}} + full_input['port'].update(initial_input['port']) + return_value = {'id': _uuid(), 'status': 'ACTIVE', + 'admin_state_up': True, + 'mac_address': 'ca:fe:de:ad:be:ef', + 'device_id': device_id, + 'device_owner': ''} + return_value.update(initial_input['port']) + + instance = self.plugin.return_value + instance.get_network.return_value = {'tenant_id': + six.text_type(tenant_id)} + instance.get_ports_count.return_value = 1 + instance.create_port.return_value = return_value + res = self.api.post(_get_path('ports', fmt=self.fmt), + self.serialize(initial_input), + content_type='application/' + self.fmt) + instance.create_port.assert_called_with(mock.ANY, port=full_input) + 
self.assertEqual(exc.HTTPCreated.code, res.status_int) + res = self.deserialize(res) + self.assertIn('port', res) + port = res['port'] + self.assertEqual(net_id, port['network_id']) + self.assertEqual('ca:fe:de:ad:be:ef', port['mac_address']) + + def test_create_return_extra_attr(self): + net_id = _uuid() + data = {'network': {'name': 'net1', 'admin_state_up': True, + 'tenant_id': _uuid()}} + return_value = {'subnets': [], 'status': "ACTIVE", + 'id': net_id, 'v2attrs:something': "123"} + return_value.update(data['network'].copy()) + + instance = self.plugin.return_value + instance.create_network.return_value = return_value + instance.get_networks_count.return_value = 0 + + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt) + self.assertEqual(exc.HTTPCreated.code, res.status_int) + res = self.deserialize(res) + self.assertIn('network', res) + net = res['network'] + self.assertEqual(net_id, net['id']) + self.assertEqual("ACTIVE", net['status']) + self.assertNotIn('v2attrs:something', net) + + def test_fields(self): + return_value = {'name': 'net1', 'admin_state_up': True, + 'subnets': []} + + instance = self.plugin.return_value + instance.get_network.return_value = return_value + + self.api.get(_get_path('networks', + id=uuidutils.generate_uuid(), + fmt=self.fmt)) + + def _test_delete(self, req_tenant_id, real_tenant_id, expected_code, + expect_errors=False): + env = {} + if req_tenant_id: + env = {'apmec.context': context.Context('', req_tenant_id)} + instance = self.plugin.return_value + instance.get_network.return_value = {'tenant_id': real_tenant_id, + 'shared': False} + instance.delete_network.return_value = None + + res = self.api.delete(_get_path('networks', + id=uuidutils.generate_uuid(), + fmt=self.fmt), + extra_environ=env, + expect_errors=expect_errors) + self.assertEqual(expected_code, res.status_int) + + def test_delete_noauth(self): + self._test_delete(None, _uuid(), 
exc.HTTPNoContent.code) + + def test_delete_keystone(self): + tenant_id = _uuid() + self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code) + + def test_delete_keystone_bad_tenant(self): + tenant_id = _uuid() + self._test_delete(tenant_id + "bad", tenant_id, + exc.HTTPNotFound.code, expect_errors=True) + + def _test_get(self, req_tenant_id, real_tenant_id, expected_code, + expect_errors=False): + env = {} + shared = False + if req_tenant_id: + env = {'apmec.context': context.Context('', req_tenant_id)} + if req_tenant_id.endswith('another'): + shared = True + env['apmec.context'].roles = ['tenant_admin'] + + data = {'tenant_id': real_tenant_id, 'shared': shared} + instance = self.plugin.return_value + instance.get_network.return_value = data + + res = self.api.get(_get_path('networks', + id=uuidutils.generate_uuid(), + fmt=self.fmt), + extra_environ=env, + expect_errors=expect_errors) + self.assertEqual(expected_code, res.status_int) + return res + + def test_get_noauth(self): + self._test_get(None, _uuid(), 200) + + def test_get_keystone(self): + tenant_id = _uuid() + self._test_get(tenant_id, tenant_id, 200) + + def test_get_keystone_bad_tenant(self): + tenant_id = _uuid() + self._test_get(tenant_id + "bad", tenant_id, + exc.HTTPNotFound.code, expect_errors=True) + + def test_get_keystone_shared_network(self): + tenant_id = _uuid() + self._test_get(tenant_id + "another", tenant_id, 200) + + def test_get_keystone_strip_admin_only_attribute(self): + tenant_id = _uuid() + # Inject rule in policy engine + policy.init() + common_policy._rules['get_network:name'] = common_policy.parse_rule( + "rule:admin_only") + res = self._test_get(tenant_id, tenant_id, 200) + res = self.deserialize(res) + try: + self.assertNotIn('name', res['network']) + finally: + del common_policy._rules['get_network:name'] + + def _test_update(self, req_tenant_id, real_tenant_id, expected_code, + expect_errors=False): + env = {} + if req_tenant_id: + env = {'apmec.context': 
context.Context('', req_tenant_id)} + # leave out 'name' field intentionally + data = {'network': {'admin_state_up': True}} + return_value = {'subnets': []} + return_value.update(data['network'].copy()) + + instance = self.plugin.return_value + instance.get_network.return_value = {'tenant_id': real_tenant_id, + 'shared': False} + instance.update_network.return_value = return_value + + res = self.api.put(_get_path('networks', + id=uuidutils.generate_uuid(), + fmt=self.fmt), + self.serialize(data), + extra_environ=env, + expect_errors=expect_errors) + # Ensure id attribute is included in fields returned by GET call + # in update procedure. + self.assertEqual(1, instance.get_network.call_count) + self.assertIn('id', instance.get_network.call_args[1]['fields']) + self.assertEqual(expected_code, res.status_int) + + def test_update_noauth(self): + self._test_update(None, _uuid(), 200) + + def test_update_keystone(self): + tenant_id = _uuid() + self._test_update(tenant_id, tenant_id, 200) + + def test_update_keystone_bad_tenant(self): + tenant_id = _uuid() + self._test_update(tenant_id + "bad", tenant_id, + exc.HTTPNotFound.code, expect_errors=True) + + def test_update_readonly_field(self): + data = {'network': {'status': "NANANA"}} + res = self.api.put(_get_path('networks', id=_uuid()), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(400, res.status_int) + + def test_invalid_attribute_field(self): + data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}} + res = self.api.put(_get_path('networks', id=_uuid()), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, 400) + + +class SubresourceTest(base.BaseTestCase): + def setUp(self): + super(SubresourceTest, self).setUp() + self.skip("Not ready yet") + + plugin = 'apmec.tests.unit.test_api_v2.TestSubresourcePlugin' + extensions.PluginAwareExtensionManager._instance = None + + # Save 
the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in (attributes.RESOURCE_ATTRIBUTE_MAP).items(): + self.saved_attr_map[resource] = attrs.copy() + + self.config_parse() + self.setup_coreplugin(plugin) + + self._plugin_patcher = mock.patch(plugin, autospec=True) + self.plugin = self._plugin_patcher.start() + + router.SUB_RESOURCES['dummy'] = { + 'collection_name': 'dummies', + 'parent': {'collection_name': 'networks', + 'member_name': 'network'} + } + attributes.RESOURCE_ATTRIBUTE_MAP['dummies'] = { + 'foo': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': '', 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True} + } + api = router.APIRouter() + self.api = webtest.TestApp(api) + + def tearDown(self): + router.SUB_RESOURCES = {} + # Restore the global RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + super(SubresourceTest, self).tearDown() + + def test_index_sub_resource(self): + instance = self.plugin.return_value + + self.api.get('/networks/id1/dummies') + instance.get_network_dummies.assert_called_once_with(mock.ANY, + filters=mock.ANY, + fields=mock.ANY, + network_id='id1') + + def test_show_sub_resource(self): + instance = self.plugin.return_value + + dummy_id = _uuid() + self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id)) + instance.get_network_dummy.assert_called_once_with(mock.ANY, + dummy_id, + network_id='id1', + fields=mock.ANY) + + def test_create_sub_resource(self): + instance = self.plugin.return_value + + body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}} + self.api.post_json('/networks/id1/dummies', body) + instance.create_network_dummy.assert_called_once_with(mock.ANY, + network_id='id1', + dummy=body) + + def test_update_sub_resource(self): + instance = self.plugin.return_value + + dummy_id = _uuid() + body = 
{'dummy': {'foo': 'bar'}} + self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id), + body) + instance.update_network_dummy.assert_called_once_with(mock.ANY, + dummy_id, + network_id='id1', + dummy=body) + + def test_delete_sub_resource(self): + instance = self.plugin.return_value + + dummy_id = _uuid() + self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id)) + instance.delete_network_dummy.assert_called_once_with(mock.ANY, + dummy_id, + network_id='id1') + + +class V2Views(base.BaseTestCase): + + def setUp(self): + super(V2Views, self).setUp() + self.skip("Not ready yet") + + def _view(self, keys, collection, resource): + data = dict((key, 'value') for key in keys) + data['fake'] = 'value' + attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection] + controller = v2_base.Controller(None, collection, resource, attr_info) + res = controller._view(context.get_admin_context(), data) + self.assertNotIn('fake', res) + for key in keys: + self.assertIn(key, res) + + def test_network(self): + keys = ('id', 'name', 'subnets', 'admin_state_up', 'status', + 'tenant_id') + self._view(keys, 'networks', 'network') + + def test_port(self): + keys = ('id', 'network_id', 'mac_address', 'fixed_ips', + 'device_id', 'admin_state_up', 'tenant_id', 'status') + self._view(keys, 'ports', 'port') + + def test_subnet(self): + keys = ('id', 'network_id', 'tenant_id', 'gateway_ip', + 'ip_version', 'cidr', 'enable_dhcp') + self._view(keys, 'subnets', 'subnet') + + +class NotificationTest(APIv2TestBase): + + def setUp(self): + super(NotificationTest, self).setUp() + self.skip("Not ready yet") + fake_notifier.reset() + + def _resource_op_notifier(self, opname, resource, expected_errors=False): + initial_input = {resource: {'name': 'myname'}} + instance = self.plugin.return_value + instance.get_networks.return_value = initial_input + instance.get_networks_count.return_value = 0 + expected_code = exc.HTTPCreated.code + if opname == 'create': + 
initial_input[resource]['tenant_id'] = _uuid() + res = self.api.post_json( + _get_path('networks'), + initial_input, expect_errors=expected_errors) + if opname == 'update': + res = self.api.put_json( + _get_path('networks', id=_uuid()), + initial_input, expect_errors=expected_errors) + expected_code = exc.HTTPOk.code + if opname == 'delete': + initial_input[resource]['tenant_id'] = _uuid() + res = self.api.delete( + _get_path('networks', id=_uuid()), + expect_errors=expected_errors) + expected_code = exc.HTTPNoContent.code + + expected_events = ('.'.join([resource, opname, "start"]), + '.'.join([resource, opname, "end"])) + self.assertEqual(len(expected_events), + len(fake_notifier.NOTIFICATIONS)) + for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events): + self.assertEqual('INFO', msg['priority']) + self.assertEqual(event, msg['event_type']) + + self.assertEqual(expected_code, res.status_int) + + def test_network_create_notifer(self): + self._resource_op_notifier('create', 'network') + + def test_network_delete_notifer(self): + self._resource_op_notifier('delete', 'network') + + def test_network_update_notifer(self): + self._resource_op_notifier('update', 'network') + + +class ExtensionTestCase(base.BaseTestCase): + def setUp(self): + super(ExtensionTestCase, self).setUp() + self.skip("Not ready yet") + plugin = 'apmec.apmec_plugin_base_v2.ApmecPluginBaseV2' + + # Ensure existing ExtensionManager is not used + extensions.PluginAwareExtensionManager._instance = None + + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in (attributes.RESOURCE_ATTRIBUTE_MAP).items(): + self.saved_attr_map[resource] = attrs.copy() + + # Create the default configurations + self.config_parse() + + # Update the plugin and extensions path + self.setup_coreplugin(plugin) + cfg.CONF.set_override('api_extensions_path', EXTDIR) + + self._plugin_patcher = mock.patch(plugin, autospec=True) + self.plugin = self._plugin_patcher.start() + + # 
Instantiate mock plugin and enable the V2attributes extension + manager.ApmecManager.get_plugin().supported_extension_aliases = ( + ["v2attrs"]) + + api = router.APIRouter() + self.api = webtest.TestApp(api) + + def tearDown(self): + super(ExtensionTestCase, self).tearDown() + self.api = None + self.plugin = None + # Restore the global RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def test_extended_create(self): + net_id = _uuid() + initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(), + 'v2attrs:something_else': "abc"}} + data = {'network': {'admin_state_up': True, 'shared': False}} + data['network'].update(initial_input['network']) + + return_value = {'subnets': [], 'status': "ACTIVE", + 'id': net_id, + 'v2attrs:something': "123"} + return_value.update(data['network'].copy()) + + instance = self.plugin.return_value + instance.create_network.return_value = return_value + instance.get_networks_count.return_value = 0 + + res = self.api.post_json(_get_path('networks'), initial_input) + + instance.create_network.assert_called_with(mock.ANY, + network=data) + self.assertEqual(exc.HTTPCreated.code, res.status_int) + self.assertIn('network', res.json) + net = res.json['network'] + self.assertEqual(net_id, net['id']) + self.assertEqual("ACTIVE", net['status']) + self.assertEqual("123", net['v2attrs:something']) + self.assertNotIn('v2attrs:something_else', net) + + +class TestSubresourcePlugin(object): + def get_network_dummies(self, context, network_id, + filters=None, fields=None): + return [] + + def get_network_dummy(self, context, id, network_id, + fields=None): + return {} + + def create_network_dummy(self, context, network_id, dummy): + return {} + + def update_network_dummy(self, context, id, network_id, dummy): + return {} + + def delete_network_dummy(self, context, id, network_id): + return + + +class ListArgsTestCase(base.BaseTestCase): + def test_list_args(self): + path = '/?fields=4&foo=3&fields=2&bar=1' + 
request = webob.Request.blank(path) + expect_val = ['2', '4'] + actual_val = api_common.list_args(request, 'fields') + self.assertEqual(expect_val, sorted(actual_val)) + + def test_list_args_with_empty(self): + path = '/?foo=4&bar=3&baz=2&qux=1' + request = webob.Request.blank(path) + self.assertEqual([], api_common.list_args(request, 'fields')) + + +class FiltersTestCase(base.BaseTestCase): + def setUp(self): + super(FiltersTestCase, self).setUp() + self.skip("Not ready yet") + + def test_all_skip_args(self): + path = '/?fields=4&fields=3&fields=2&fields=1' + request = webob.Request.blank(path) + self.assertEqual({}, api_common.get_filters(request, None, + ["fields"])) + + def test_blank_values(self): + path = '/?foo=&bar=&baz=&qux=' + request = webob.Request.blank(path) + self.assertEqual({}, api_common.get_filters(request, {})) + + def test_no_attr_info(self): + path = '/?foo=4&bar=3&baz=2&qux=1' + request = webob.Request.blank(path) + expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} + actual_val = api_common.get_filters(request, {}) + self.assertEqual(expect_val, actual_val) + + def test_attr_info_without_conversion(self): + path = '/?foo=4&bar=3&baz=2&qux=1' + request = webob.Request.blank(path) + attr_info = {'foo': {'key': 'val'}} + expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} + actual_val = api_common.get_filters(request, attr_info) + self.assertEqual(expect_val, actual_val) + + def test_attr_info_with_convert_list_to(self): + path = '/?foo=key=4&bar=3&foo=key=2&qux=1' + request = webob.Request.blank(path) + attr_info = { + 'foo': { + 'convert_list_to': attributes.convert_kvp_list_to_dict, + } + } + expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']} + actual_val = api_common.get_filters(request, attr_info) + self.assertEqual(expect_val, actual_val) + + def test_attr_info_with_convert_to(self): + path = '/?foo=4&bar=3&baz=2&qux=1' + request = webob.Request.blank(path) + attr_info = {'foo': 
{'convert_to': attributes.convert_to_int}} + expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} + actual_val = api_common.get_filters(request, attr_info) + self.assertEqual(expect_val, actual_val) + + +class CreateResourceTestCase(base.BaseTestCase): + def test_resource_creation(self): + resource = v2_base.create_resource('fakes', 'fake', None, {}) + self.assertIsInstance(resource, webob.dec.wsgify) diff --git a/apmec/tests/unit/test_api_v2_extension.py b/apmec/tests/unit/test_api_v2_extension.py new file mode 100644 index 0000000..71bb019 --- /dev/null +++ b/apmec/tests/unit/test_api_v2_extension.py @@ -0,0 +1,114 @@ +# Copyright 2014 Intel Corporation. +# All Rights Reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from oslo_config import cfg +from oslo_utils import uuidutils +from webob import exc +import webtest + +from apmec.api import extensions +from apmec.api.v1 import attributes +from apmec.tests.unit import test_api_v2 +from apmec.tests.unit import test_extensions +from apmec.tests.unit import testlib_api + + +class ExtensionTestCase(testlib_api.WebTestCase): + def _resotre_attr_map(self): + attributes.RESOURCE_ATTRIBUTE_MAP = self._saved_attr_map + + def _setUpExtension(self, plugin, service_type, + resource_attribute_map, extension_class, + resource_prefix, plural_mappings=None, + translate_resource_name=False, + allow_pagination=False, allow_sorting=False, + supported_extension_aliases=None, + ): + + self._resource_prefix = resource_prefix + self._plural_mappings = plural_mappings or {} + self._translate_resource_name = translate_resource_name + + # Ensure existing ExtensionManager is not used + extensions.PluginAwareExtensionManager._instance = None + + # Save the global RESOURCE_ATTRIBUTE_MAP + self._saved_attr_map = attributes.RESOURCE_ATTRIBUTE_MAP.copy() + # Restore the global RESOURCE_ATTRIBUTE_MAP + self.addCleanup(self._resotre_attr_map) + + # Create the default configurations + self.config_parse() + + # just stubbing core plugin with plugin + self.setup_coreplugin(plugin) + cfg.CONF.set_override('core_plugin', plugin) + if service_type: + cfg.CONF.set_override('service_plugins', [plugin]) + + self._plugin_patcher = mock.patch(plugin, autospec=True) + self.plugin = self._plugin_patcher.start() + instance = self.plugin.return_value + if service_type: + instance.get_plugin_type.return_value = service_type + if supported_extension_aliases is not None: + instance.supported_extension_aliases = supported_extension_aliases + if allow_pagination: + cfg.CONF.set_override('allow_pagination', True) + # instance.__native_pagination_support = True + native_pagination_attr_name = ("_%s__native_pagination_support" + % instance.__class__.__name__) + 
setattr(instance, native_pagination_attr_name, True) + if allow_sorting: + cfg.CONF.set_override('allow_sorting', True) + # instance.__native_sorting_support = True + native_sorting_attr_name = ("_%s__native_sorting_support" + % instance.__class__.__name__) + setattr(instance, native_sorting_attr_name, True) + + class ExtensionTestExtensionManager(object): + def get_resources(self): + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + resource_attribute_map) + return extension_class.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + ext_mgr = ExtensionTestExtensionManager() + self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) + self.api = webtest.TestApp(self.ext_mdw) + + def _test_entity_delete(self, entity): + """Does the entity deletion based on naming convention.""" + entity_id = uuidutils.generate_uuid() + path = self._resource_prefix + '/' if self._resource_prefix else '' + path += self._plural_mappings.get(entity, entity + 's') + if self._translate_resource_name: + path = path.replace('_', '-') + res = self.api.delete( + test_api_v2._get_path(path, id=entity_id, fmt=self.fmt)) + delete_entity = getattr(self.plugin.return_value, "delete_" + entity) + delete_entity.assert_called_with(mock.ANY, entity_id) + self.assertEqual(exc.HTTPNoContent.code, res.status_int) diff --git a/apmec/tests/unit/test_api_v2_resource.py b/apmec/tests/unit/test_api_v2_resource.py new file mode 100644 index 0000000..dc0b49e --- /dev/null +++ b/apmec/tests/unit/test_api_v2_resource.py @@ -0,0 +1,318 @@ +# Copyright (c) 2012 Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +import oslo_i18n +from webob import exc +import webtest + +from apmec.api.v1 import resource as wsgi_resource +from apmec.common import exceptions as n_exc +from apmec import context +from apmec.tests import base +from apmec import wsgi + + +class RequestTestCase(base.BaseTestCase): + def setUp(self): + super(RequestTestCase, self).setUp() + self.req = wsgi_resource.Request({'foo': 'bar'}) + + def test_content_type_missing(self): + request = wsgi.Request.blank('/tests/123', method='POST') + request.body = b"" + self.assertIsNone(request.get_content_type()) + + def test_content_type_with_charset(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/json; charset=UTF-8" + result = request.get_content_type() + self.assertEqual("application/json", result) + + def test_content_type_from_accept(self): + content_type = 'application/json' + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = content_type + result = request.best_match_content_type() + self.assertEqual(result, content_type) + + def test_content_type_from_accept_best(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual("application/json", result) + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = ("application/json; q=0.3, ") + result = request.best_match_content_type() + self.assertEqual("application/json", result) + + def test_content_type_from_query_extension(self): + request = 
wsgi.Request.blank('/tests/123.json') + result = request.best_match_content_type() + self.assertEqual("application/json", result) + + request = wsgi.Request.blank('/tests/123.json') + result = request.best_match_content_type() + self.assertEqual("application/json", result) + + request = wsgi.Request.blank('/tests/123.invalid') + result = request.best_match_content_type() + self.assertEqual("application/json", result) + + def test_content_type_accept_and_query_extension(self): + request = wsgi.Request.blank('/tests/123.json') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual("application/json", result) + + def test_content_type_accept_default(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.best_match_content_type() + self.assertEqual("application/json", result) + + def test_context_with_apmec_context(self): + self.skip("Not ready yet") + ctxt = context.Context('fake_user', 'fake_tenant') + self.req.environ['apmec.context'] = ctxt + self.assertEqual(ctxt, self.req.context) + + def test_context_without_apmec_context(self): + self.assertTrue(self.req.context.is_admin) + + def test_best_match_language(self): + # Test that we are actually invoking language negotiation by webop + request = wsgi.Request.blank('/') + oslo_i18n.get_available_languages = mock.MagicMock() + oslo_i18n.get_available_languages.return_value = [ + 'known-language', 'es', 'zh'] + request.headers['Accept-Language'] = 'known-language' + language = request.best_match_language() + self.assertEqual('known-language', language) + + # If the Accept-Leader is an unknown language, missing or empty, + # the best match locale should be None + request.headers['Accept-Language'] = 'unknown-language' + language = request.best_match_language() + self.assertIsNone(language) + request.headers['Accept-Language'] = '' + language = request.best_match_language() + 
self.assertIsNone(language) + request.headers.pop('Accept-Language') + language = request.best_match_language() + self.assertIsNone(language) + + +class ResourceTestCase(base.BaseTestCase): + + def test_unmapped_apmec_error_with_json(self): + msg = u'\u7f51\u7edc' + + class TestException(n_exc.ApmecException): + message = msg + expected_res = {'body': { + 'ApmecError': { + 'type': 'TestException', + 'message': msg, + 'detail': ''}}} + controller = mock.MagicMock() + controller.test.side_effect = TestException() + + resource = webtest.TestApp(wsgi_resource.Resource(controller)) + + environ = {'wsgiorg.routing_args': (None, {'action': 'test', + 'format': 'json'})} + res = resource.get('', extra_environ=environ, expect_errors=True) + self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) + self.assertEqual(expected_res, + wsgi.JSONDeserializer().deserialize(res.body)) + + @mock.patch('oslo_i18n.translate') + def test_unmapped_apmec_error_localized(self, mock_translation): + msg_translation = 'Translated error' + mock_translation.return_value = msg_translation + msg = _('Unmapped error') + + class TestException(n_exc.ApmecException): + message = msg + + controller = mock.MagicMock() + controller.test.side_effect = TestException() + resource = webtest.TestApp(wsgi_resource.Resource(controller)) + + environ = {'wsgiorg.routing_args': (None, {'action': 'test', + 'format': 'json'})} + + res = resource.get('', extra_environ=environ, expect_errors=True) + self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) + self.assertIn(msg_translation, + str(wsgi.JSONDeserializer().deserialize(res.body))) + + def test_mapped_apmec_error_with_json(self): + msg = u'\u7f51\u7edc' + + class TestException(n_exc.ApmecException): + message = msg + expected_res = {'body': { + 'ApmecError': { + 'type': 'TestException', + 'message': msg, + 'detail': ''}}} + controller = mock.MagicMock() + controller.test.side_effect = TestException() + + faults = {TestException: 
exc.HTTPGatewayTimeout} + resource = webtest.TestApp(wsgi_resource.Resource(controller, + faults=faults)) + + environ = {'wsgiorg.routing_args': (None, {'action': 'test', + 'format': 'json'})} + res = resource.get('', extra_environ=environ, expect_errors=True) + self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int) + self.assertEqual(expected_res, + wsgi.JSONDeserializer().deserialize(res.body)) + + @mock.patch('oslo_i18n.translate') + def test_mapped_apmec_error_localized(self, mock_translation): + msg_translation = 'Translated error' + mock_translation.return_value = msg_translation + msg = _('Unmapped error') + + class TestException(n_exc.ApmecException): + message = msg + + controller = mock.MagicMock() + controller.test.side_effect = TestException() + faults = {TestException: exc.HTTPGatewayTimeout} + resource = webtest.TestApp(wsgi_resource.Resource(controller, + faults=faults)) + + environ = {'wsgiorg.routing_args': (None, {'action': 'test', + 'format': 'json'})} + + res = resource.get('', extra_environ=environ, expect_errors=True) + self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int) + self.assertIn(msg_translation, + str(wsgi.JSONDeserializer().deserialize(res.body))) + + @staticmethod + def _make_request_with_side_effect(side_effect): + controller = mock.MagicMock() + controller.test.side_effect = side_effect + + resource = webtest.TestApp(wsgi_resource.Resource(controller)) + + routing_args = {'action': 'test'} + environ = {'wsgiorg.routing_args': (None, routing_args)} + res = resource.get('', extra_environ=environ, expect_errors=True) + return res + + def test_http_error(self): + res = self._make_request_with_side_effect(exc.HTTPGatewayTimeout()) + # verify that the exception structure is the one expected + # by the python-apmecclient + self.assertEqual(exc.HTTPGatewayTimeout().explanation, + res.json['ApmecError']['message']) + self.assertEqual('HTTPGatewayTimeout', + res.json['ApmecError']['type']) + self.assertEqual('', 
res.json['ApmecError']['detail']) + self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int) + + def test_unhandled_error_with_json(self): + expected_res = {'body': {'ApmecError': + {'detail': '', + 'message': + _('Request Failed: internal server error' + ' while processing your request.'), + 'type': 'HTTPInternalServerError'}}} + controller = mock.MagicMock() + controller.test.side_effect = Exception() + + resource = webtest.TestApp(wsgi_resource.Resource(controller)) + + environ = {'wsgiorg.routing_args': (None, {'action': 'test', + 'format': 'json'})} + res = resource.get('', extra_environ=environ, expect_errors=True) + self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) + self.assertEqual(expected_res, + wsgi.JSONDeserializer().deserialize(res.body)) + + def test_status_200(self): + controller = mock.MagicMock() + controller.test = lambda request: {'foo': 'bar'} + + resource = webtest.TestApp(wsgi_resource.Resource(controller)) + + environ = {'wsgiorg.routing_args': (None, {'action': 'test'})} + res = resource.get('', extra_environ=environ) + self.assertEqual(200, res.status_int) + + def test_status_204(self): + controller = mock.MagicMock() + controller.test = lambda request: {'foo': 'bar'} + + resource = webtest.TestApp(wsgi_resource.Resource(controller)) + + environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})} + res = resource.delete('', extra_environ=environ) + self.assertEqual(204, res.status_int) + + def _test_error_log_level(self, map_webob_exc, expect_log_info=False, + use_fault_map=True): + class TestException(n_exc.ApmecException): + message = 'Test Exception' + + controller = mock.MagicMock() + controller.test.side_effect = TestException() + faults = {TestException: map_webob_exc} if use_fault_map else {} + resource = webtest.TestApp(wsgi_resource.Resource(controller, faults)) + environ = {'wsgiorg.routing_args': (None, {'action': 'test'})} + with mock.patch.object(wsgi_resource, 'LOG') as log: + res = resource.get('', 
extra_environ=environ, expect_errors=True) + self.assertEqual(map_webob_exc.code, res.status_int) + self.assertEqual(expect_log_info, log.info.called) + self.assertNotEqual(expect_log_info, log.exception.called) + + def test_4xx_error_logged_info_level(self): + self._test_error_log_level(exc.HTTPNotFound, expect_log_info=True) + + def test_non_4xx_error_logged_exception_level(self): + self._test_error_log_level(exc.HTTPServiceUnavailable, + expect_log_info=False) + + def test_unmapped_error_logged_exception_level(self): + self._test_error_log_level(exc.HTTPInternalServerError, + expect_log_info=False, use_fault_map=False) + + def test_no_route_args(self): + controller = mock.MagicMock() + + resource = webtest.TestApp(wsgi_resource.Resource(controller)) + + environ = {} + res = resource.get('', extra_environ=environ, expect_errors=True) + self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) + + def test_post_with_body(self): + controller = mock.MagicMock() + controller.test = lambda request, body: {'foo': 'bar'} + + resource = webtest.TestApp(wsgi_resource.Resource(controller)) + + environ = {'wsgiorg.routing_args': (None, {'action': 'test'})} + res = resource.post('', params='{"key": "val"}', + extra_environ=environ) + self.assertEqual(200, res.status_int) diff --git a/apmec/tests/unit/test_attributes.py b/apmec/tests/unit/test_attributes.py new file mode 100644 index 0000000..dc44fde --- /dev/null +++ b/apmec/tests/unit/test_attributes.py @@ -0,0 +1,801 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import testtools + +from apmec.api.v1 import attributes +from apmec.common import exceptions as n_exc +from apmec.tests import base + + +class TestAttributes(base.BaseTestCase): + + def _construct_dict_and_constraints(self): + """Constructs a test dictionary and a definition of constraints. + + :return: A (dictionary, constraint) tuple + """ + constraints = {'key1': {'type:values': ['val1', 'val2'], + 'required': True}, + 'key2': {'type:string': None, + 'required': False}, + 'key3': {'type:dict': {'k4': {'type:string': None, + 'required': True}}, + 'required': True}} + + dictionary = {'key1': 'val1', + 'key2': 'a string value', + 'key3': {'k4': 'a string value'}} + + return dictionary, constraints + + def test_is_attr_set(self): + data = attributes.ATTR_NOT_SPECIFIED + self.assertFalse(attributes.is_attr_set(data)) + + data = None + self.assertFalse(attributes.is_attr_set(data)) + + data = "I'm set" + self.assertTrue(attributes.is_attr_set(data)) + + def test_validate_values(self): + msg = attributes._validate_values(4, [4, 6]) + self.assertIsNone(msg) + + msg = attributes._validate_values(4, (4, 6)) + self.assertIsNone(msg) + + msg = attributes._validate_values(7, [4, 6]) + self.assertEqual("'7' is not in [4, 6]", msg) + + msg = attributes._validate_values(7, (4, 6)) + self.assertEqual("'7' is not in (4, 6)", msg) + + def test_validate_not_empty_string(self): + msg = attributes._validate_not_empty_string(' ', None) + self.assertEqual(u"' ' Blank strings are not permitted", msg) + + def test_validate_not_empty_string_or_none(self): + msg = 
attributes._validate_not_empty_string_or_none(' ', None) + self.assertEqual(u"' ' Blank strings are not permitted", msg) + + msg = attributes._validate_not_empty_string_or_none(None, None) + self.assertIsNone(msg) + + def test_validate_string_or_none(self): + msg = attributes._validate_not_empty_string_or_none('test', None) + self.assertIsNone(msg) + + msg = attributes._validate_not_empty_string_or_none(None, None) + self.assertIsNone(msg) + + def test_validate_string(self): + msg = attributes._validate_string(None, None) + self.assertEqual("'None' is not a valid string", msg) + + # 0 == len(data) == max_len + msg = attributes._validate_string("", 0) + self.assertIsNone(msg) + + # 0 == len(data) < max_len + msg = attributes._validate_string("", 9) + self.assertIsNone(msg) + + # 0 < len(data) < max_len + msg = attributes._validate_string("123456789", 10) + self.assertIsNone(msg) + + # 0 < len(data) == max_len + msg = attributes._validate_string("123456789", 9) + self.assertIsNone(msg) + + # 0 < max_len < len(data) + msg = attributes._validate_string("1234567890", 9) + self.assertEqual("'1234567890' exceeds maximum length of 9", msg) + + msg = attributes._validate_string("123456789", None) + self.assertIsNone(msg) + + def test_validate_no_whitespace(self): + data = 'no_white_space' + result = attributes._validate_no_whitespace(data) + self.assertEqual(data, result) + + self.assertRaises(n_exc.InvalidInput, + attributes._validate_no_whitespace, + 'i have whitespace') + + self.assertRaises(n_exc.InvalidInput, + attributes._validate_no_whitespace, + 'i\thave\twhitespace') + + def test_validate_range(self): + msg = attributes._validate_range(1, [1, 9]) + self.assertIsNone(msg) + + msg = attributes._validate_range(5, [1, 9]) + self.assertIsNone(msg) + + msg = attributes._validate_range(9, [1, 9]) + self.assertIsNone(msg) + + msg = attributes._validate_range(1, (1, 9)) + self.assertIsNone(msg) + + msg = attributes._validate_range(5, (1, 9)) + self.assertIsNone(msg) + + msg 
= attributes._validate_range(9, (1, 9)) + self.assertIsNone(msg) + + msg = attributes._validate_range(0, [1, 9]) + self.assertEqual("'0' is too small - must be at least '1'", msg) + + msg = attributes._validate_range(10, (1, 9)) + self.assertEqual("'10' is too large - must be no larger than '9'", + msg) + + msg = attributes._validate_range("bogus", (1, 9)) + self.assertEqual("'bogus' is not an integer", msg) + + msg = attributes._validate_range(10, (attributes.UNLIMITED, + attributes.UNLIMITED)) + self.assertIsNone(msg) + + msg = attributes._validate_range(10, (1, attributes.UNLIMITED)) + self.assertIsNone(msg) + + msg = attributes._validate_range(1, (attributes.UNLIMITED, 9)) + self.assertIsNone(msg) + + msg = attributes._validate_range(-1, (0, attributes.UNLIMITED)) + self.assertEqual("'-1' is too small - must be at least '0'", msg) + + msg = attributes._validate_range(10, (attributes.UNLIMITED, 9)) + self.assertEqual("'10' is too large - must be no larger than '9'", msg) + + def _test_validate_mac_address(self, validator, allow_none=False): + mac_addr = "ff:16:3e:4f:00:00" + msg = validator(mac_addr) + self.assertIsNone(msg) + + mac_addr = "ffa:16:3e:4f:00:00" + msg = validator(mac_addr) + err_msg = "'%s' is not a valid MAC address" + self.assertEqual(err_msg % mac_addr, msg) + + mac_addr = "123" + msg = validator(mac_addr) + self.assertEqual(err_msg % mac_addr, msg) + + mac_addr = None + msg = validator(mac_addr) + if allow_none: + self.assertIsNone(msg) + else: + self.assertEqual(err_msg % mac_addr, msg) + + def test_validate_mac_address(self): + self._test_validate_mac_address(attributes._validate_mac_address) + + def test_validate_mac_address_or_none(self): + self._test_validate_mac_address( + attributes._validate_mac_address_or_none, allow_none=True) + + def test_validate_ip_address(self): + ip_addr = '1.1.1.1' + msg = attributes._validate_ip_address(ip_addr) + self.assertIsNone(msg) + + ip_addr = '1111.1.1.1' + msg = 
attributes._validate_ip_address(ip_addr) + self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) + + ip_addr = '1.1.1.1 has whitespace' + msg = attributes._validate_ip_address(ip_addr) + self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) + + ip_addr = '111.1.1.1\twhitespace' + msg = attributes._validate_ip_address(ip_addr) + self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) + + ip_addr = '111.1.1.1\nwhitespace' + msg = attributes._validate_ip_address(ip_addr) + self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) + + def test_validate_ip_pools(self): + pools = [[{'end': '10.0.0.254'}], + [{'start': '10.0.0.254'}], + [{'start': '1000.0.0.254', + 'end': '1.1.1.1'}], + [{'start': '10.0.0.2', 'end': '10.0.0.254', + 'forza': 'juve'}], + [{'start': '10.0.0.2', 'end': '10.0.0.254'}, + {'end': '10.0.0.254'}], + [None], + None] + for pool in pools: + msg = attributes._validate_ip_pools(pool) + self.assertIsNotNone(msg) + + pools = [[{'end': '10.0.0.254', 'start': '10.0.0.2'}, + {'start': '11.0.0.2', 'end': '11.1.1.1'}], + [{'start': '11.0.0.2', 'end': '11.0.0.100'}]] + for pool in pools: + msg = attributes._validate_ip_pools(pool) + self.assertIsNone(msg) + + def test_validate_fixed_ips(self): + fixed_ips = [ + {'data': [{'subnet_id': '00000000-ffff-ffff-ffff-000000000000', + 'ip_address': '1111.1.1.1'}], + 'error_msg': "'1111.1.1.1' is not a valid IP address"}, + {'data': [{'subnet_id': 'invalid', + 'ip_address': '1.1.1.1'}], + 'error_msg': "'invalid' is not a valid UUID"}, + {'data': None, + 'error_msg': "Invalid data format for fixed IP: 'None'"}, + {'data': "1.1.1.1", + 'error_msg': "Invalid data format for fixed IP: '1.1.1.1'"}, + {'data': ['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1'], + 'error_msg': "Invalid data format for fixed IP: " + "'00000000-ffff-ffff-ffff-000000000000'"}, + {'data': [['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1']], + 'error_msg': "Invalid data format for fixed IP: " + 
"'['00000000-ffff-ffff-ffff-000000000000', " + "'1.1.1.1']'"}, + {'data': [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.1'}, + {'subnet_id': '00000000-ffff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.1'}], + 'error_msg': "Duplicate IP address '1.1.1.1'"}] + for fixed in fixed_ips: + msg = attributes._validate_fixed_ips(fixed['data']) + self.assertEqual(fixed['error_msg'], msg) + + fixed_ips = [[{'subnet_id': '00000000-ffff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.1'}], + [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.1'}, + {'subnet_id': '00000000-ffff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.2'}]] + for fixed in fixed_ips: + msg = attributes._validate_fixed_ips(fixed) + self.assertIsNone(msg) + + def test_validate_nameservers(self): + mes_pools = [['1.1.1.2', '1.1.1.2'], + ['www.hostname.com', 'www.hostname.com'], + ['77.hostname.com'], + ['1000.0.0.1'], + None] + + for mes in mes_pools: + msg = attributes._validate_nameservers(mes, None) + self.assertIsNotNone(msg) + + mes_pools = [['100.0.0.2'], + ['www.hostname.com'], + ['www.great.marathons.to.travel'], + ['valid'], + ['www.internal.hostname.com']] + + for mes in mes_pools: + msg = attributes._validate_nameservers(mes, None) + self.assertIsNone(msg) + + def test_validate_hostroutes(self): + hostroute_pools = [[{'destination': '100.0.0.0/24'}], + [{'nexthop': '10.0.2.20'}], + [{'nexthop': '10.0.2.20', + 'forza': 'juve', + 'destination': '100.0.0.0/8'}], + [{'nexthop': '1110.0.2.20', + 'destination': '100.0.0.0/8'}], + [{'nexthop': '10.0.2.20', + 'destination': '100.0.0.0'}], + [{'nexthop': '10.0.2.20', + 'destination': '100.0.0.0/8'}, + {'nexthop': '10.0.2.20', + 'destination': '100.0.0.0/8'}], + [None], + None] + for host_routes in hostroute_pools: + msg = attributes._validate_hostroutes(host_routes, None) + self.assertIsNotNone(msg) + + hostroute_pools = [[{'destination': '100.0.0.0/24', + 'nexthop': '10.0.2.20'}], + [{'nexthop': 
'10.0.2.20', + 'destination': '100.0.0.0/8'}, + {'nexthop': '10.0.2.20', + 'destination': '101.0.0.0/8'}]] + for host_routes in hostroute_pools: + msg = attributes._validate_hostroutes(host_routes, None) + self.assertIsNone(msg) + + def test_validate_ip_address_or_none(self): + ip_addr = None + msg = attributes._validate_ip_address_or_none(ip_addr) + self.assertIsNone(msg) + + ip_addr = '1.1.1.1' + msg = attributes._validate_ip_address_or_none(ip_addr) + self.assertIsNone(msg) + + ip_addr = '1111.1.1.1' + msg = attributes._validate_ip_address_or_none(ip_addr) + self.assertEqual("'%s' is not a valid IP address" % ip_addr, + msg) + + def test_hostname_pattern(self): + data = '@openstack' + msg = attributes._validate_regex(data, attributes.HOSTNAME_PATTERN) + self.assertIsNotNone(msg) + + data = 'www.openstack.org' + msg = attributes._validate_regex(data, attributes.HOSTNAME_PATTERN) + self.assertIsNone(msg) + + def test_uuid_pattern(self): + data = 'garbage' + msg = attributes._validate_regex(data, attributes.UUID_PATTERN) + self.assertIsNotNone(msg) + + data = '00000000-ffff-ffff-ffff-000000000000' + msg = attributes._validate_regex(data, attributes.UUID_PATTERN) + self.assertIsNone(msg) + + def test_mac_pattern(self): + # Valid - 3 octets + base_mac = "fa:16:3e:00:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNone(msg) + + # Valid - 4 octets + base_mac = "fa:16:3e:4f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNone(msg) + + # Invalid - not unicast + base_mac = "01:16:3e:4f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "a:16:3e:4f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "ffa:16:3e:4f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + 
self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "01163e4f0000" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "01-16-3e-4f-00-00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "00:16:3:f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "12:3:4:5:67:89ab" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + def _test_validate_subnet(self, validator, allow_none=False): + # Valid - IPv4 + cidr = "10.0.2.0/24" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - IPv6 without final octets + cidr = "fe80::/24" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - IPv6 with final octets + cidr = "fe80::/24" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - uncompressed ipv6 address + cidr = "fe80:0:0:0:0:0:0:0/128" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - ipv6 address with multiple consecutive zero + cidr = "2001:0db8:0:0:1::1/128" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - ipv6 address with multiple consecutive zero + cidr = "2001:0db8::1:0:0:1/128" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - ipv6 address with multiple consecutive zero + cidr = "2001::0:1:0:0:1100/120" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - abbreviated ipv4 address + cidr = "10/24" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Invalid - IPv4 missing mask + cidr = "10.0.2.0" + msg = validator(cidr, None) + error = _("'%(data)s' isn't a recognized IP subnet cidr," + " '%(cidr)s' is recommended") % {"data": cidr, + "cidr": "10.0.2.0/32"} + self.assertEqual(error, msg) + + # 
Valid - IPv4 with non-zero masked bits is ok + for i in range(1, 255): + cidr = "192.168.1.%s/24" % i + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Invalid - IPv6 without final octets, missing mask + cidr = "fe80::" + msg = validator(cidr, None) + error = _("'%(data)s' isn't a recognized IP subnet cidr," + " '%(cidr)s' is recommended") % {"data": cidr, + "cidr": "fe80::/128"} + self.assertEqual(error, msg) + + # Invalid - IPv6 with final octets, missing mask + cidr = "fe80::0" + msg = validator(cidr, None) + error = _("'%(data)s' isn't a recognized IP subnet cidr," + " '%(cidr)s' is recommended") % {"data": cidr, + "cidr": "fe80::/128"} + self.assertEqual(error, msg) + + # Invalid - Address format error + cidr = 'invalid' + msg = validator(cidr, None) + error = "'%s' is not a valid IP subnet" % cidr + self.assertEqual(error, msg) + + cidr = None + msg = validator(cidr, None) + if allow_none: + self.assertIsNone(msg) + else: + error = "'%s' is not a valid IP subnet" % cidr + self.assertEqual(error, msg) + + def test_validate_subnet(self): + self._test_validate_subnet(attributes._validate_subnet) + + def test_validate_subnet_or_none(self): + self._test_validate_subnet(attributes._validate_subnet_or_none, + allow_none=True) + + def _test_validate_regex(self, validator, allow_none=False): + pattern = '[hc]at' + + data = None + msg = validator(data, pattern) + if allow_none: + self.assertIsNone(msg) + else: + self.assertEqual("'None' is not a valid input", msg) + + data = 'bat' + msg = validator(data, pattern) + self.assertEqual("'%s' is not a valid input" % data, msg) + + data = 'hat' + msg = validator(data, pattern) + self.assertIsNone(msg) + + data = 'cat' + msg = validator(data, pattern) + self.assertIsNone(msg) + + def test_validate_regex(self): + self._test_validate_regex(attributes._validate_regex) + + def test_validate_regex_or_none(self): + self._test_validate_regex(attributes._validate_regex_or_none, + allow_none=True) + + def 
test_validate_uuid(self): + msg = attributes._validate_uuid('garbage') + self.assertEqual("'garbage' is not a valid UUID", msg) + + msg = attributes._validate_uuid('00000000-ffff-ffff-ffff-000000000000') + self.assertIsNone(msg) + + def test_validate_uuid_list(self): + # check not a list + uuids = [None, + 123, + 'e5069610-744b-42a7-8bd8-ceac1a229cd4', + '12345678123456781234567812345678', + {'uuid': 'e5069610-744b-42a7-8bd8-ceac1a229cd4'}] + for uuid in uuids: + msg = attributes._validate_uuid_list(uuid) + error = "'%s' is not a list" % uuid + self.assertEqual(error, msg) + + # check invalid uuid in a list + invalid_uuid_lists = [[None], + [123], + [123, 'e5069610-744b-42a7-8bd8-ceac1a229cd4'], + ['123', '12345678123456781234567812345678'], + ['t5069610-744b-42a7-8bd8-ceac1a229cd4'], + ['e5069610-744b-42a7-8bd8-ceac1a229cd44'], + ['e50696100-744b-42a7-8bd8-ceac1a229cd4'], + ['e5069610-744bb-42a7-8bd8-ceac1a229cd4']] + for uuid_list in invalid_uuid_lists: + msg = attributes._validate_uuid_list(uuid_list) + error = "'%s' is not a valid UUID" % uuid_list[0] + self.assertEqual(error, msg) + + # check duplicate items in a list + duplicate_uuids = ['e5069610-744b-42a7-8bd8-ceac1a229cd4', + 'f3eeab00-8367-4524-b662-55e64d4cacb5', + 'e5069610-744b-42a7-8bd8-ceac1a229cd4'] + msg = attributes._validate_uuid_list(duplicate_uuids) + error = ("Duplicate items in the list: " + "'%s'" % ', '.join(duplicate_uuids)) + self.assertEqual(error, msg) + + # check valid uuid lists + valid_uuid_lists = [['e5069610-744b-42a7-8bd8-ceac1a229cd4'], + ['f3eeab00-8367-4524-b662-55e64d4cacb5'], + ['e5069610-744b-42a7-8bd8-ceac1a229cd4', + 'f3eeab00-8367-4524-b662-55e64d4cacb5']] + for uuid_list in valid_uuid_lists: + msg = attributes._validate_uuid_list(uuid_list) + self.assertIsNone(msg) + + def test_validate_dict_type(self): + for value in (None, True, '1', []): + self.assertEqual("'%s' is not a dictionary" % value, + attributes._validate_dict(value)) + + def 
test_validate_dict_without_constraints(self): + msg = attributes._validate_dict({}) + self.assertIsNone(msg) + + # Validate a dictionary without constraints. + msg = attributes._validate_dict({'key': 'value'}) + self.assertIsNone(msg) + + def test_validate_a_valid_dict_with_constraints(self): + dictionary, constraints = self._construct_dict_and_constraints() + + msg = attributes._validate_dict(dictionary, constraints) + self.assertIsNone(msg, 'Validation of a valid dictionary failed.') + + def test_validate_dict_with_invalid_validator(self): + dictionary, constraints = self._construct_dict_and_constraints() + + constraints['key1'] = {'type:unsupported': None, 'required': True} + msg = attributes._validate_dict(dictionary, constraints) + self.assertEqual("Validator 'type:unsupported' does not exist.", + msg) + + def test_validate_dict_not_required_keys(self): + dictionary, constraints = self._construct_dict_and_constraints() + + del dictionary['key2'] + msg = attributes._validate_dict(dictionary, constraints) + self.assertIsNone(msg, 'Field that was not required by the specs was' + 'required by the validator.') + + def test_validate_dict_required_keys(self): + dictionary, constraints = self._construct_dict_and_constraints() + + del dictionary['key1'] + msg = attributes._validate_dict(dictionary, constraints) + self.assertIn('Expected keys:', msg) + + def test_validate_dict_wrong_values(self): + dictionary, constraints = self._construct_dict_and_constraints() + + dictionary['key1'] = 'UNSUPPORTED' + msg = attributes._validate_dict(dictionary, constraints) + self.assertIsNotNone(msg) + + def test_validate_dict_convert_boolean(self): + dictionary, constraints = self._construct_dict_and_constraints() + + constraints['key_bool'] = { + 'type:boolean': None, + 'required': False, + 'convert_to': attributes.convert_to_boolean} + dictionary['key_bool'] = 'true' + msg = attributes._validate_dict(dictionary, constraints) + self.assertIsNone(msg) + # Explicitly comparing with 
literal 'True' as assertTrue + # succeeds also for 'true' + self.assertTrue(dictionary['key_bool']) + + def test_subdictionary(self): + dictionary, constraints = self._construct_dict_and_constraints() + + del dictionary['key3']['k4'] + dictionary['key3']['k5'] = 'a string value' + msg = attributes._validate_dict(dictionary, constraints) + self.assertIn('Expected keys:', msg) + + def test_validate_dict_or_none(self): + dictionary, constraints = self._construct_dict_and_constraints() + + # Check whether None is a valid value. + msg = attributes._validate_dict_or_none(None, constraints) + self.assertIsNone(msg, 'Validation of a None dictionary failed.') + + # Check validation of a regular dictionary. + msg = attributes._validate_dict_or_none(dictionary, constraints) + self.assertIsNone(msg, 'Validation of a valid dictionary failed.') + + def test_validate_dict_or_empty(self): + dictionary, constraints = self._construct_dict_and_constraints() + + # Check whether an empty dictionary is valid. + msg = attributes._validate_dict_or_empty({}, constraints) + self.assertIsNone(msg, 'Validation of a None dictionary failed.') + + # Check validation of a regular dictionary. 
+ msg = attributes._validate_dict_or_none(dictionary, constraints) + self.assertIsNone(msg, 'Validation of a valid dictionary failed.') + self.assertIsNone(msg, 'Validation of a valid dictionary failed.') + + def test_validate_non_negative(self): + for value in (-1, '-2'): + self.assertEqual("'%s' should be non-negative" % value, + attributes._validate_non_negative(value)) + + for value in (0, 1, '2', True, False): + msg = attributes._validate_non_negative(value) + self.assertIsNone(msg) + + +class TestConvertToBoolean(base.BaseTestCase): + + def test_convert_to_boolean_bool(self): + self.assertTrue(attributes.convert_to_boolean(True)) + self.assertFalse(attributes.convert_to_boolean(False)) + + def test_convert_to_boolean_int(self): + self.assertFalse(attributes.convert_to_boolean(0)) + self.assertTrue(attributes.convert_to_boolean(1)) + self.assertRaises(n_exc.InvalidInput, + attributes.convert_to_boolean, + 7) + + def test_convert_to_boolean_str(self): + self.assertTrue(attributes.convert_to_boolean('True')) + self.assertTrue(attributes.convert_to_boolean('true')) + self.assertFalse(attributes.convert_to_boolean('False')) + self.assertFalse(attributes.convert_to_boolean('false')) + self.assertFalse(attributes.convert_to_boolean('0')) + self.assertTrue(attributes.convert_to_boolean('1')) + self.assertRaises(n_exc.InvalidInput, + attributes.convert_to_boolean, + '7') + + +class TestConvertToInt(base.BaseTestCase): + + def test_convert_to_int_int(self): + self.assertEqual(-1, attributes.convert_to_int(-1)) + self.assertEqual(0, attributes.convert_to_int(0)) + self.assertEqual(1, attributes.convert_to_int(1)) + + def test_convert_to_int_str(self): + self.assertEqual(4, attributes.convert_to_int('4')) + self.assertEqual(6, attributes.convert_to_int('6')) + self.assertRaises(n_exc.InvalidInput, + attributes.convert_to_int, + 'garbage') + + def test_convert_to_int_none(self): + self.assertRaises(n_exc.InvalidInput, + attributes.convert_to_int, + None) + + def 
class TestConvertKvp(base.BaseTestCase):
    """Tests for the key=value-pair conversion helpers in attributes."""

    def test_convert_kvp_list_to_dict_succeeds_for_missing_values(self):
        """Entries without '=' do not contribute to the result."""
        self.assertEqual({}, attributes.convert_kvp_list_to_dict(['True']))

    def test_convert_kvp_list_to_dict_succeeds_for_multiple_values(self):
        """Repeated keys collect their distinct values into one list."""
        self.skip("Not ready yet")
        converted = attributes.convert_kvp_list_to_dict(
            ['a=b', 'a=c', 'a=c', 'b=a'])
        self.assertEqual({'a': ['c', 'b'], 'b': ['a']}, converted)

    def test_convert_kvp_list_to_dict_succeeds_for_values(self):
        """Distinct keys each map to a single-element value list."""
        converted = attributes.convert_kvp_list_to_dict(['a=b', 'c=d'])
        self.assertEqual({'a': ['b'], 'c': ['d']}, converted)

    def test_convert_kvp_str_to_list_fails_for_missing_key(self):
        """A pair with an empty key ('=a') is invalid input."""
        with testtools.ExpectedException(n_exc.InvalidInput):
            attributes.convert_kvp_str_to_list('=a')

    def test_convert_kvp_str_to_list_fails_for_missing_equals(self):
        """A bare token with no '=' at all is invalid input."""
        with testtools.ExpectedException(n_exc.InvalidInput):
            attributes.convert_kvp_str_to_list('a')

    def test_convert_kvp_str_to_list_succeeds_for_one_equals(self):
        """'a=' splits into the key and an empty value."""
        self.assertEqual(['a', ''], attributes.convert_kvp_str_to_list('a='))

    def test_convert_kvp_str_to_list_succeeds_for_two_equals(self):
        """Only the first '=' separates key from value."""
        self.assertEqual(['a', 'a=a'],
                         attributes.convert_kvp_str_to_list('a=a=a'))
attributes.convert_to_list(item)) + + def test_convert_to_list_iterable(self): + for item in ([None], [1, 2, 3], (1, 2, 3), set([1, 2, 3]), ['foo']): + self.assertEqual(list(item), attributes.convert_to_list(item)) + + def test_convert_to_list_non_iterable(self): + for item in (True, False, 1, 1.2, object()): + self.assertEqual([item], attributes.convert_to_list(item)) diff --git a/apmec/tests/unit/test_auth.py b/apmec/tests/unit/test_auth.py new file mode 100644 index 0000000..cd41ab1 --- /dev/null +++ b/apmec/tests/unit/test_auth.py @@ -0,0 +1,100 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
    def setUp(self):
        """Wire an ApmecKeystoneContext middleware around a capturing app."""
        super(ApmecKeystoneContextTestCase, self).setUp()
        # NOTE(review): skipping here aborts setUp, so every test in this
        # class is skipped and the fixture below is never built — remove
        # this line once the middleware is ready to test. Also confirm the
        # base class provides skip(); unittest's canonical name is skipTest().
        self.skip("Not ready yet")

        # WSGI app placed behind the middleware; it records the context
        # object the middleware injected into the request environ.
        @webob.dec.wsgify
        def fake_app(req):
            self.context = req.environ['apmec.context']
            return webob.Response()

        self.context = None
        self.middleware = auth.ApmecKeystoneContext(fake_app)
        # Blank request with a dummy keystone token; individual tests add
        # the X_PROJECT_ID / X_USER_ID / X_ROLES headers they need.
        self.request = webob.Request.blank('/')
        self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
'testtenantid' + self.request.headers['X_USER_ID'] = 'testuserid' + self.request.headers['X_ROLES'] = ('role1, role2 , role3,role4,role5,' + 'AdMiN') + response = self.request.get_response(self.middleware) + self.assertEqual('200 OK', response.status) + self.assertEqual(['role1', 'role2', 'role3', + 'role4', 'role5', 'AdMiN'], + self.context.roles) + self.assertEqual(True, self.context.is_admin) + + def test_with_user_tenant_name(self): + self.request.headers['X_PROJECT_ID'] = 'testtenantid' + self.request.headers['X_USER_ID'] = 'testuserid' + self.request.headers['X_PROJECT_NAME'] = 'testtenantname' + self.request.headers['X_USER_NAME'] = 'testusername' + response = self.request.get_response(self.middleware) + self.assertEqual('200 OK', response.status) + self.assertEqual('testuserid', self.context.user_id) + self.assertEqual('testusername', self.context.user_name) + self.assertEqual('testtenantid', self.context.tenant_id) + self.assertEqual('testtenantname', self.context.tenant_name) + + def test_request_id_extracted_from_env(self): + req_id = 'dummy-request-id' + self.request.headers['X_PROJECT_ID'] = 'testtenantid' + self.request.headers['X_USER_ID'] = 'testuserid' + self.request.environ[request_id.ENV_REQUEST_ID] = req_id + self.request.get_response(self.middleware) + self.assertEqual(req_id, self.context.request_id) diff --git a/apmec/tests/unit/test_common_log.py b/apmec/tests/unit/test_common_log.py new file mode 100644 index 0000000..5c59c5d --- /dev/null +++ b/apmec/tests/unit/test_common_log.py @@ -0,0 +1,80 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from apmec.common import log as call_log +from apmec.tests import base + + +MODULE_NAME = 'apmec.tests.unit.test_common_log' + + +class TargetKlass(object): + + @call_log.log + def test_method(self, arg1, arg2, *args, **kwargs): + pass + + +class TestCallLog(base.BaseTestCase): + def setUp(self): + super(TestCallLog, self).setUp() + self.skip("Not ready yet") + self.klass = TargetKlass() + self.expected_format = ('%(class_name)s method %(method_name)s ' + 'called with arguments %(args)s %(kwargs)s') + self.expected_data = {'class_name': MODULE_NAME + '.TargetKlass', + 'method_name': 'test_method', + 'args': (), + 'kwargs': {}} + + def test_call_log_all_args(self): + self.expected_data['args'] = (10, 20) + with mock.patch.object(call_log.LOG, 'debug') as log_debug: + self.klass.test_method(10, 20) + log_debug.assert_called_once_with(self.expected_format, + self.expected_data) + + def test_call_log_all_kwargs(self): + self.expected_data['kwargs'] = {'arg1': 10, 'arg2': 20} + with mock.patch.object(call_log.LOG, 'debug') as log_debug: + self.klass.test_method(arg1=10, arg2=20) + log_debug.assert_called_once_with(self.expected_format, + self.expected_data) + + def test_call_log_known_args_unknown_args_kwargs(self): + self.expected_data['args'] = (10, 20, 30) + self.expected_data['kwargs'] = {'arg4': 40} + with mock.patch.object(call_log.LOG, 'debug') as log_debug: + self.klass.test_method(10, 20, 30, arg4=40) + log_debug.assert_called_once_with(self.expected_format, + self.expected_data) + + def 
test_call_log_known_args_kwargs_unknown_kwargs(self):
+        self.expected_data['args'] = (10,)
+        self.expected_data['kwargs'] = {'arg2': 20, 'arg3': 30, 'arg4': 40}
+        with mock.patch.object(call_log.LOG, 'debug') as log_debug:
+            self.klass.test_method(10, arg2=20, arg3=30, arg4=40)
+            log_debug.assert_called_once_with(self.expected_format,
+                                              self.expected_data)
+
+    def test_call_log_password_mask_args_kwargs(self):
+        auth_cred = {'userame': 'demo', 'password': 'changeit'}
+        self.expected_data['kwargs'] = {'password': '***'}
+        self.expected_data['args'] = ({'userame': 'demo', 'password': '***'},)
+        with mock.patch.object(call_log.LOG, 'debug') as log_debug:
+            self.klass.test_method(auth_cred, password='guessme')
+            log_debug.assert_called_once_with(self.expected_format,
+                                              self.expected_data)
diff --git a/apmec/tests/unit/test_common_services_plugin.py b/apmec/tests/unit/test_common_services_plugin.py
new file mode 100644
index 0000000..979d9ec
--- /dev/null
+++ b/apmec/tests/unit/test_common_services_plugin.py
@@ -0,0 +1,160 @@
+# Copyright 2016 Brocade Communications System, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +import mock + +from oslo_utils import timeutils + +from apmec import context +from apmec.db.common_services import common_services_db_plugin +from apmec.extensions import common_services +from apmec.plugins.common_services import common_services_plugin +from apmec.tests.unit.db import base as db_base + + +class TestCommonServicesPlugin(db_base.SqlTestCase): + def setUp(self): + super(TestCommonServicesPlugin, self).setUp() + self.addCleanup(mock.patch.stopall) + self.context = context.get_admin_context() + self.event_db_plugin =\ + common_services_db_plugin.CommonServicesPluginDb() + self.coreutil_plugin = common_services_plugin.CommonServicesPlugin() + + def _get_dummy_event_obj(self): + return { + 'resource_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff', + 'resource_state': 'ACTIVE', + 'resource_type': 'MEA', + 'event_details': '', + 'event_type': 'scale_up', + 'timestamp': timeutils.parse_strtime('2016-07-20T05:43:52.765172') + } + + def test_create_event(self): + evt_obj = self._get_dummy_event_obj() + result = self.event_db_plugin.create_event(self.context, + evt_obj['resource_id'], + evt_obj['resource_type'], + evt_obj['resource_state'], + evt_obj['event_type'], + evt_obj['timestamp'], + evt_obj['event_details']) + self.assertIsNotNone(result) + self.assertIn('id', result) + self.assertIn('resource_id', result) + self.assertIn('resource_state', result) + self.assertIn('resource_type', result) + self.assertIn('event_type', result) + self.assertIn('event_details', result) + self.assertIn('timestamp', result) + + def test_event_not_found(self): + self.assertRaises(common_services.EventNotFoundException, + self.coreutil_plugin.get_event, self.context, '99') + + def test_InvalidModelInputExceptionNotThrown(self): + evt_obj = self._get_dummy_event_obj() + result = self.event_db_plugin.create_event(self.context, + evt_obj['resource_id'], + evt_obj['resource_type'], + evt_obj['resource_state'], + evt_obj['event_type'], + evt_obj['timestamp'], + 
evt_obj['event_details'])
+        try:
+            self.coreutil_plugin.get_event(self.context, str(result['id']))
+        except common_services.InvalidModelException:
+            self.fail("get_event unexpectedly raised InvalidModelException")
+        except Exception:
+            pass
+
+    def test_get_event_by_id(self):
+        evt_obj = self._get_dummy_event_obj()
+        evt_created = self.event_db_plugin.create_event(
+            self.context, evt_obj['resource_id'],
+            evt_obj['resource_type'],
+            evt_obj['resource_state'],
+            evt_obj['event_type'],
+            evt_obj['timestamp'],
+            evt_obj['event_details'])
+        self.assertIsNotNone(evt_created)
+        evt_get = self.coreutil_plugin.get_event(self.context,
+                                                 evt_created['id'])
+        self.assertEqual(evt_created['resource_id'], evt_get['resource_id'])
+        self.assertEqual(evt_created['resource_state'],
+                         evt_get['resource_state'])
+        self.assertEqual(evt_created['resource_type'],
+                         evt_get['resource_type'])
+        self.assertEqual(evt_created['event_type'], evt_get['event_type'])
+        self.assertEqual(evt_created['event_details'],
+                         evt_get['event_details'])
+        self.assertEqual(evt_created['timestamp'], evt_get['timestamp'])
+
+    def test_get_events(self):
+        evt_obj = self._get_dummy_event_obj()
+        self.event_db_plugin.create_event(self.context,
+                                          evt_obj['resource_id'],
+                                          evt_obj['resource_type'],
+                                          evt_obj['resource_state'],
+                                          evt_obj['event_type'],
+                                          evt_obj['timestamp'],
+                                          evt_obj['event_details'])
+        result = self.coreutil_plugin.get_events(self.context)
+        self.assertTrue(len(result))
+
+    def test_get_events_filtered_invalid_id(self):
+        evt_obj = self._get_dummy_event_obj()
+        self.event_db_plugin.create_event(self.context,
+                                          evt_obj['resource_id'],
+                                          evt_obj['resource_type'],
+                                          evt_obj['resource_state'],
+                                          evt_obj['event_type'],
+                                          evt_obj['timestamp'],
+                                          evt_obj['event_details'])
+        result = self.coreutil_plugin.get_events(self.context, {'id': 'xyz'})
+        self.assertFalse(len(result))
+
+    def test_get_events_filtered_valid_id(self):
+        evt_obj = self._get_dummy_event_obj()
+        self.event_db_plugin.create_event(self.context, +
evt_obj['resource_id'], + evt_obj['resource_type'], + evt_obj['resource_state'], + evt_obj['event_type'], + evt_obj['timestamp'], + evt_obj['event_details']) + result = self.coreutil_plugin.get_events(self.context, {'id': '1'}) + self.assertTrue(len(result)) + + def test_get_events_valid_fields(self): + evt_obj = self._get_dummy_event_obj() + self.event_db_plugin.create_event(self.context, + evt_obj['resource_id'], + evt_obj['resource_type'], + evt_obj['resource_state'], + evt_obj['event_type'], + evt_obj['timestamp'], + evt_obj['event_details']) + result = self.coreutil_plugin.get_events(self.context, {'id': '1'}, + ['id', 'event_type']) + self.assertTrue(len(result)) + self.assertIn('id', result[0]) + self.assertNotIn('resource_id', result[0]) + self.assertNotIn('resource_state', result[0]) + self.assertNotIn('resource_type', result[0]) + self.assertIn('event_type', result[0]) + self.assertNotIn('event_details', result[0]) + self.assertNotIn('timestamp', result[0]) diff --git a/apmec/tests/unit/test_common_utils.py b/apmec/tests/unit/test_common_utils.py new file mode 100644 index 0000000..85ac8f5 --- /dev/null +++ b/apmec/tests/unit/test_common_utils.py @@ -0,0 +1,38 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import testtools + +from apmec.common import utils +from apmec.tests import base + + +class TestDict2Tuples(base.BaseTestCase): + def test_dict(self): + input_dict = {'foo': 'bar', '42': 'baz', 'aaa': 'zzz'} + expected = (('42', 'baz'), ('aaa', 'zzz'), ('foo', 'bar')) + output_tuple = utils.dict2tuple(input_dict) + self.assertEqual(expected, output_tuple) + + +class TestChangeMemory(testtools.TestCase): + def test_change_memory_from_mb_to_gb(self): + actual_val = utils.change_memory_unit("1024 mb", "GB") + expected_val = 1 + self.assertEqual(expected_val, actual_val) + + def test_change_memory_from_gb_to_mb(self): + actual_val = utils.change_memory_unit("1 GB", "MB") + expected_val = 1024 + self.assertEqual(expected_val, actual_val) diff --git a/apmec/tests/unit/test_config.py b/apmec/tests/unit/test_config.py new file mode 100644 index 0000000..1123783 --- /dev/null +++ b/apmec/tests/unit/test_config.py @@ -0,0 +1,47 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import mock +from oslo_config import cfg + +from apmec.common import config # noqa +from apmec.tests import base + + +class ConfigurationTest(base.BaseTestCase): + + def test_defaults(self): + self.assertEqual('0.0.0.0', cfg.CONF.bind_host) + self.assertEqual(9896, cfg.CONF.bind_port) + self.assertEqual('api-paste.ini', cfg.CONF.api_paste_config) + self.assertEqual('', cfg.CONF.api_extensions_path) + self.assertEqual('policy.json', cfg.CONF.policy_file) + self.assertEqual('keystone', cfg.CONF.auth_strategy) + self.assertTrue(cfg.CONF.allow_bulk) + relative_dir = os.path.join(os.path.dirname(__file__), + '..', '..', '..') + absolute_dir = os.path.abspath(relative_dir) + self.assertEqual(absolute_dir, cfg.CONF.state_path) + self.assertEqual('apmec', cfg.CONF.control_exchange) + + def test_load_paste_app_not_found(self): + self.config(api_paste_config='no_such_file.conf') + with mock.patch.object(cfg.CONF, 'find_file', return_value=None) as ff: + e = self.assertRaises(cfg.ConfigFilesNotFoundError, + config.load_paste_app, 'app') + ff.assert_called_once_with('no_such_file.conf') + self.assertEqual(['no_such_file.conf'], e.config_files) diff --git a/apmec/tests/unit/test_db_migration.py b/apmec/tests/unit/test_db_migration.py new file mode 100644 index 0000000..fa4f065 --- /dev/null +++ b/apmec/tests/unit/test_db_migration.py @@ -0,0 +1,157 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +# @author Mark McClain (DreamHost) + +import sys + +import mock + +from apmec.db.migration import cli +from apmec.tests import base + + +class TestCli(base.BaseTestCase): + def setUp(self): + super(TestCli, self).setUp() + self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') + self.do_alembic_cmd = self.do_alembic_cmd_p.start() + self.mock_alembic_err = mock.patch('alembic.util.err').start() + self.mock_alembic_err.side_effect = SystemExit + + def _main_test_helper(self, argv, func_name, exp_args=(), exp_kwargs={}): + with mock.patch.object(sys, 'argv', argv): + cli.main() + self.do_alembic_cmd.assert_has_calls( + [mock.call(mock.ANY, func_name, *exp_args, **exp_kwargs)] + ) + + def test_stamp(self): + self._main_test_helper( + ['prog', 'stamp', 'foo'], + 'stamp', + ('foo',), + {'sql': False} + ) + + self._main_test_helper( + ['prog', 'stamp', 'foo', '--sql'], + 'stamp', + ('foo',), + {'sql': True} + ) + + def test_current(self): + self._main_test_helper(['prog', 'current'], 'current') + + def test_history(self): + self._main_test_helper(['prog', 'history'], 'history') + + def test_check_migration(self): + with mock.patch.object(cli, 'validate_head_file') as validate: + self._main_test_helper(['prog', 'check_migration'], 'branches') + validate.assert_called_once_with(mock.ANY) + + def test_database_sync_revision(self): + with mock.patch.object(cli, 'update_head_file') as update: + self._main_test_helper( + ['prog', 'revision', '--autogenerate', '-m', 'message'], + 'revision', + (), + {'message': 'message', 'sql': False, 'autogenerate': True} + ) + update.assert_called_once_with(mock.ANY) + + update.reset_mock() + self._main_test_helper( + ['prog', 'revision', '--sql', '-m', 'message'], + 'revision', + (), + {'message': 'message', 'sql': True, 'autogenerate': False} + ) + update.assert_called_once_with(mock.ANY) + + def test_upgrade(self): + 
self._main_test_helper( + ['prog', 'upgrade', '--sql', 'head'], + 'upgrade', + ('head',), + {'sql': True} + ) + + self._main_test_helper( + ['prog', 'upgrade', '--delta', '3'], + 'upgrade', + ('+3',), + {'sql': False} + ) + + def _test_validate_head_file_helper(self, heads, file_content=None): + with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: + fc.return_value.get_heads.return_value = heads + fc.return_value.get_current_head.return_value = heads[0] + with mock.patch('six.moves.builtins.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.read.return_value = file_content + + with mock.patch('os.path.isfile') as is_file: + is_file.return_value = file_content is not None + + if file_content in heads: + cli.validate_head_file(mock.sentinel.config) + else: + self.assertRaises( + SystemExit, + cli.validate_head_file, + mock.sentinel.config + ) + self.mock_alembic_err.assert_called_once_with(mock.ANY) + fc.assert_called_once_with(mock.sentinel.config) + + def test_validate_head_file_multiple_heads(self): + self._test_validate_head_file_helper(['a', 'b']) + + def test_validate_head_file_missing_file(self): + self._test_validate_head_file_helper(['a']) + + def test_validate_head_file_wrong_contents(self): + self._test_validate_head_file_helper(['a'], 'b') + + def test_validate_head_success(self): + self._test_validate_head_file_helper(['a'], 'a') + + def test_update_head_file_multiple_heads(self): + with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: + fc.return_value.get_heads.return_value = ['a', 'b'] + self.assertRaises( + SystemExit, + cli.update_head_file, + mock.sentinel.config + ) + self.mock_alembic_err.assert_called_once_with(mock.ANY) + fc.assert_called_once_with(mock.sentinel.config) + + def test_update_head_file_success(self): + with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: + fc.return_value.get_heads.return_value 
= ['a'] + fc.return_value.get_current_head.return_value = 'a' + with mock.patch('six.moves.builtins.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + + cli.update_head_file(mock.sentinel.config) + mock_open.return_value.write.assert_called_once_with('a') + fc.assert_called_once_with(mock.sentinel.config) diff --git a/apmec/tests/unit/test_db_purge_delete.py b/apmec/tests/unit/test_db_purge_delete.py new file mode 100644 index 0000000..729c749 --- /dev/null +++ b/apmec/tests/unit/test_db_purge_delete.py @@ -0,0 +1,81 @@ +# Copyright 2016 Brocade Communications System, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + + +from apmec.common import exceptions +from apmec import context +from apmec.db.migration import purge_tables +from apmec.tests.unit.db import base as db_base + + +class FakeConfig(mock.Mock): + pass + + +class TestDbPurgeDelete(db_base.SqlTestCase): + def setUp(self): + super(TestDbPurgeDelete, self).setUp() + self.addCleanup(mock.patch.stopall) + self.context = context.get_admin_context() + self._mock_config() + mock.patch('sqlalchemy.Table').start() + mock.patch('apmec.db.migration.purge_tables._purge_resource_tables' + ).start() + mock.patch('apmec.db.migration.purge_tables._purge_events_table', + ).start() + mock.patch('apmec.db.migration.purge_tables.' 
+ '_generate_associated_tables_map').start() + mock.patch('apmec.db.migration.purge_tables.get_engine').start() + + def _mock_config(self): + self.config = mock.Mock(wraps=FakeConfig()) + fake_config = mock.Mock() + fake_config.return_value = self.config + self._mock( + 'alembic.config.__init__', fake_config) + + def test_age_not_integer_input(self): + self.assertRaises(exceptions.InvalidInput, purge_tables.purge_deleted, + self.config, 'invalid', 'abc') + + def test_age_negative_integer_input(self): + self.assertRaises(exceptions.InvalidInput, purge_tables.purge_deleted, + self.config, 'invalid', '-90') + + def test_invalid_granularity_input(self): + self.assertRaises(exceptions.InvalidInput, purge_tables.purge_deleted, + self.config, 'mea', '90', 'decade') + + def test_purge_delete_call_mea(self): + purge_tables.purge_deleted(self.config, 'mea', '90', 'days') + purge_tables._purge_resource_tables.assert_called_once_with( + mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY) + + def test_purge_delete_call_mead(self): + purge_tables.purge_deleted(self.config, 'mead', '90', 'days') + purge_tables._purge_resource_tables.assert_called_once_with( + mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY) + + def test_purge_delete_call_vim(self): + purge_tables.purge_deleted(self.config, 'vims', '90', 'days') + purge_tables._purge_resource_tables.assert_called_once_with( + mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY) + + def test_purge_delete_call_events(self): + purge_tables.purge_deleted(self.config, 'events', '90', 'days') + purge_tables._purge_events_table.assert_called_once_with( + mock.ANY, mock.ANY, mock.ANY) diff --git a/apmec/tests/unit/test_extension_extended_attribute.py b/apmec/tests/unit/test_extension_extended_attribute.py new file mode 100644 index 0000000..4693916 --- /dev/null +++ b/apmec/tests/unit/test_extension_extended_attribute.py @@ -0,0 +1,117 @@ +# Copyright 2013 VMware, Inc +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit tests for extension extended attribute +""" + +import webob.exc as webexc + +import apmec +from apmec.api import extensions +from apmec.api.v1 import attributes +from apmec.common import config +from apmec import manager +from apmec.tests import base +from apmec.tests.unit.extensions import extendedattribute as extattr +from apmec.tests.unit import test_api_v2 +from apmec.tests.unit import testlib_api +from apmec import wsgi + +_uuid = test_api_v2._uuid +_get_path = test_api_v2._get_path +extensions_path = ':'.join(apmec.tests.unit.extensions.__path__) + + +class ExtensionExtendedAttributeTestCase(base.BaseTestCase): + def setUp(self): + super(ExtensionExtendedAttributeTestCase, self).setUp() + self.skip("Not ready yet") + plugin = ( + "apmec.tests.unit.test_extension_extended_attribute." 
+ "ExtensionExtendedAttributeTestPlugin" + ) + + # point config file to: apmec/tests/etc/apmec.conf.test + self.config_parse() + + self.setup_coreplugin(plugin) + + ext_mgr = extensions.ExtensionManager(extensions_path) + ext_mgr.extend_resources("1.0", {}) + extensions.ExtensionManager._instance = ext_mgr + + app = config.load_paste_app('extensions_test_app') + self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) + + self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1" + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in (attributes.RESOURCE_ATTRIBUTE_MAP).items(): + self.saved_attr_map[resource] = attrs.copy() + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + extattr.EXTENDED_ATTRIBUTES_2_0) + self.agentscheduler_dbMinxin = manager.ApmecManager.get_plugin() + self.addCleanup(self.restore_attribute_map) + + def restore_attribute_map(self): + # Restore the original RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def _do_request(self, method, path, data=None, params=None, action=None): + content_type = 'application/json' + body = None + if data is not None: # empty dict is valid + body = wsgi.Serializer().serialize(data, content_type) + + req = testlib_api.create_request( + path, body, content_type, + method, query_string=params) + res = req.get_response(self._api) + if res.status_code >= 400: + raise webexc.HTTPClientError(detail=res.body, code=res.status_code) + if res.status_code != webexc.HTTPNoContent.code: + return res.json + + def _ext_test_resource_create(self, attr=None): + data = { + "ext_test_resource": { + "tenant_id": self._tenant_id, + "name": "test", + extattr.EXTENDED_ATTRIBUTE: attr + } + } + + res = self._do_request('POST', _get_path('ext_test_resources'), data) + return 
res['ext_test_resource'] + + def test_ext_test_resource_create(self): + ext_test_resource = self._ext_test_resource_create() + attr = _uuid() + ext_test_resource = self._ext_test_resource_create(attr) + self.assertEqual(attr, ext_test_resource[extattr.EXTENDED_ATTRIBUTE]) + + def test_ext_test_resource_get(self): + attr = _uuid() + obj = self._ext_test_resource_create(attr) + obj_id = obj['id'] + res = self._do_request('GET', _get_path( + 'ext_test_resources/{0}'.format(obj_id))) + obj2 = res['ext_test_resource'] + self.assertEqual(attr, obj2[extattr.EXTENDED_ATTRIBUTE]) diff --git a/apmec/tests/unit/test_extensions.py b/apmec/tests/unit/test_extensions.py new file mode 100644 index 0000000..5b952a8 --- /dev/null +++ b/apmec/tests/unit/test_extensions.py @@ -0,0 +1,546 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +from oslo_log import log as logging +from oslo_serialization import jsonutils +import routes +import webob +import webtest + +from apmec.api import extensions +from apmec.common import config +from apmec.plugins.common import constants +from apmec.tests import base +from apmec.tests.unit import extension_stubs as ext_stubs +import apmec.tests.unit.extensions +from apmec.tests.unit import testlib_api +from apmec import wsgi + + +LOG = logging.getLogger(__name__) +extensions_path = ':'.join(apmec.tests.unit.extensions.__path__) + + +class ExtensionsTestApp(wsgi.Router): + + def __init__(self, options={}): + mapper = routes.Mapper() + controller = ext_stubs.StubBaseAppController() + mapper.resource("dummy_resource", "/dummy_resources", + controller=controller) + super(ExtensionsTestApp, self).__init__(mapper) + + +class PluginInterfaceTest(base.BaseTestCase): + def test_issubclass_hook(self): + class A(object): + def f(self): + pass + + class B(extensions.PluginInterface): + @abc.abstractmethod + def f(self): + pass + + self.assertTrue(issubclass(A, B)) + + def test_issubclass_hook_class_without_abstract_methods(self): + class A(object): + def f(self): + pass + + class B(extensions.PluginInterface): + def f(self): + pass + + self.assertFalse(issubclass(A, B)) + + def test_issubclass_hook_not_all_methods_implemented(self): + class A(object): + def f(self): + pass + + class B(extensions.PluginInterface): + @abc.abstractmethod + def f(self): + pass + + @abc.abstractmethod + def g(self): + pass + + self.assertFalse(issubclass(A, B)) + + +class ResourceExtensionTest(base.BaseTestCase): + + class ResourceExtensionController(wsgi.Controller): + + def index(self, request): + return "resource index" + + def show(self, request, id): + return {'data': {'id': id}} + + def notimplemented_function(self, request, id): + return webob.exc.HTTPNotImplemented() + + def custom_member_action(self, request, id): + return {'member_action': 'value'} + + def 
custom_collection_action(self, request, **kwargs): + return {'collection': 'value'} + + class DummySvcPlugin(wsgi.Controller): + def get_plugin_type(self): + return constants.DUMMY + + def index(self, request, **kwargs): + return "resource index" + + def custom_member_action(self, request, **kwargs): + return {'member_action': 'value'} + + def collection_action(self, request, **kwargs): + return {'collection': 'value'} + + def show(self, request, id): + return {'data': {'id': id}} + + def test_exceptions_notimplemented(self): + controller = self.ResourceExtensionController() + member = {'notimplemented_function': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + member_actions=member) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + # Ideally we would check for a 501 code here but webtest doesn't take + # anything that is below 200 or above 400 so we can't actually check + # it. It throws webtest.AppError instead. + try: + test_app.get("/tweedles/some_id/notimplemented_function") + # Shouldn't be reached + self.assertTrue(False) + except webtest.AppError as e: + self.assertIn('501', str(e)) + + def test_resource_can_be_added_as_extension(self): + res_ext = extensions.ResourceExtension( + 'tweedles', self.ResourceExtensionController()) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + index_response = test_app.get("/tweedles") + self.assertEqual(200, index_response.status_int) + self.assertEqual(b"resource index", index_response.body) + + show_response = test_app.get("/tweedles/25266") + self.assertEqual({'data': {'id': "25266"}}, show_response.json) + + def test_resource_gets_prefix_of_plugin(self): + class DummySvcPlugin(wsgi.Controller): + def index(self, request): + return "" + + def get_plugin_type(self): + return constants.DUMMY + + res_ext = extensions.ResourceExtension( + 'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc") + test_app = 
_setup_extensions_test_app(SimpleExtensionManager(res_ext)) + index_response = test_app.get("/dummy_svc/tweedles") + self.assertEqual(200, index_response.status_int) + + def test_resource_extension_with_custom_member_action(self): + controller = self.ResourceExtensionController() + member = {'custom_member_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + member_actions=member) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/tweedles/some_id/custom_member_action") + self.assertEqual(200, response.status_int) + self.assertEqual("value", + jsonutils.loads(response.body)['member_action']) + + def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self): + controller = self.DummySvcPlugin() + member = {'custom_member_action': "GET"} + collections = {'collection_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + path_prefix="/dummy_svc", + member_actions=member, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/dummy_svc/tweedles/1/custom_member_action") + self.assertEqual(200, response.status_int) + self.assertEqual("value", + jsonutils.loads(response.body)['member_action']) + + response = test_app.get("/dummy_svc/tweedles/collection_action") + self.assertEqual(200, response.status_int) + self.assertEqual("value", + jsonutils.loads(response.body)['collection']) + + def test_plugin_prefix_with_parent_resource(self): + controller = self.DummySvcPlugin() + parent = dict(member_name="tenant", + collection_name="tenants") + member = {'custom_member_action': "GET"} + collections = {'collection_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, parent, + path_prefix="/dummy_svc", + member_actions=member, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + index_response = 
test_app.get("/dummy_svc/tenants/1/tweedles") + self.assertEqual(200, index_response.status_int) + + response = test_app.get("/dummy_svc/tenants/1/" + "tweedles/1/custom_member_action") + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['member_action'], + "value") + + response = test_app.get("/dummy_svc/tenants/2/" + "tweedles/collection_action") + self.assertEqual(200, response.status_int) + self.assertEqual("value", + jsonutils.loads(response.body)['collection']) + + def test_resource_extension_for_get_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/tweedles/custom_collection_action") + self.assertEqual(200, response.status_int) + LOG.debug(jsonutils.loads(response.body)) + self.assertEqual("value", jsonutils.loads(response.body)['collection']) + + def test_resource_extension_for_put_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "PUT"} + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.put("/tweedles/custom_collection_action") + + self.assertEqual(200, response.status_int) + self.assertEqual('value', jsonutils.loads(response.body)['collection']) + + def test_resource_extension_for_post_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "POST"} + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = 
test_app.post("/tweedles/custom_collection_action") + + self.assertEqual(200, response.status_int) + self.assertEqual('value', jsonutils.loads(response.body)['collection']) + + def test_resource_extension_for_delete_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "DELETE"} + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.delete("/tweedles/custom_collection_action") + + self.assertEqual(200, response.status_int) + self.assertEqual('value', jsonutils.loads(response.body)['collection']) + + def test_resource_ext_for_formatted_req_on_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/tweedles/custom_collection_action.json") + + self.assertEqual(200, response.status_int) + self.assertEqual('value', jsonutils.loads(response.body)['collection']) + + def test_resource_ext_for_nested_resource_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "GET"} + parent = dict(collection_name='beetles', member_name='beetle') + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections, + parent=parent) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/beetles/beetle_id" + "/tweedles/custom_collection_action") + + self.assertEqual(200, response.status_int) + self.assertEqual('value', jsonutils.loads(response.body)['collection']) + + def test_resource_extension_with_custom_member_action_and_attr_map(self): + controller = 
self.ResourceExtensionController() + member = {'custom_member_action': "GET"} + params = { + 'tweedles': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': '', 'is_visible': True}, + } + } + res_ext = extensions.ResourceExtension('tweedles', controller, + member_actions=member, + attr_map=params) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/tweedles/some_id/custom_member_action") + self.assertEqual(200, response.status_int) + self.assertEqual('value', + jsonutils.loads(response.body)['member_action']) + + def test_returns_404_for_non_existent_extension(self): + test_app = _setup_extensions_test_app(SimpleExtensionManager(None)) + + response = test_app.get("/non_extistant_extension", status='*') + + self.assertEqual(404, response.status_int) + + +class ActionExtensionTest(base.BaseTestCase): + + def setUp(self): + super(ActionExtensionTest, self).setUp() + self.skip("Not ready yet") + self.extension_app = _setup_extensions_test_app() + + def test_extended_action_for_adding_extra_data(self): + action_name = 'FOXNSOX:add_tweedle' + action_params = dict(name='Beetle') + req_body = jsonutils.dumps({action_name: action_params}) + response = self.extension_app.post('/dummy_resources/1/action', + req_body, + content_type='application/json') + self.assertEqual("Tweedle Beetle Added.", response.body) + + def test_extended_action_for_deleting_extra_data(self): + action_name = 'FOXNSOX:delete_tweedle' + action_params = dict(name='Bailey') + req_body = jsonutils.dumps({action_name: action_params}) + response = self.extension_app.post("/dummy_resources/1/action", + req_body, + content_type='application/json') + self.assertEqual("Tweedle Bailey Deleted.", response.body) + + def test_returns_404_for_non_existent_action(self): + non_existent_action = 'blah_action' + 
action_params = dict(name="test") + req_body = jsonutils.dumps({non_existent_action: action_params}) + + response = self.extension_app.post("/dummy_resources/1/action", + req_body, + content_type='application/json', + status='*') + + self.assertEqual(404, response.status_int) + + def test_returns_404_for_non_existent_resource(self): + action_name = 'add_tweedle' + action_params = dict(name='Beetle') + req_body = jsonutils.dumps({action_name: action_params}) + + response = self.extension_app.post("/asdf/1/action", req_body, + content_type='application/json', + status='*') + self.assertEqual(404, response.status_int) + + +class RequestExtensionTest(base.BaseTestCase): + + def test_headers_can_be_extended(self): + def extend_headers(req, res): + assert req.headers['X-NEW-REQUEST-HEADER'] == "sox" + res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data" + return res + + app = self._setup_app_with_request_handler(extend_headers, 'GET') + response = app.get("/dummy_resources/1", + headers={'X-NEW-REQUEST-HEADER': "sox"}) + + self.assertEqual("response_header_data", + response.headers['X-NEW-RESPONSE-HEADER']) + + def test_extend_get_resource_response(self): + def extend_response_data(req, res): + data = jsonutils.loads(res.body) + data['FOXNSOX:extended_key'] = req.GET.get('extended_key') + res.body = jsonutils.dump_as_bytes(data) + return res + + app = self._setup_app_with_request_handler(extend_response_data, 'GET') + response = app.get("/dummy_resources/1?extended_key=extended_data") + + self.assertEqual(200, response.status_int) + response_data = jsonutils.loads(response.body) + self.assertEqual('extended_data', + response_data['FOXNSOX:extended_key']) + self.assertEqual('knox', response_data['fort']) + + def test_get_resources(self): + self.skip("Not ready yet") + app = _setup_extensions_test_app() + + response = app.get("/dummy_resources/1?chewing=newblue") + + response_data = jsonutils.loads(response.body) + self.assertEqual('newblue', 
response_data['FOXNSOX:googoose']) + self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands']) + + def test_edit_previously_uneditable_field(self): + + def _update_handler(req, res): + data = jsonutils.loads(res.body) + data['uneditable'] = req.params['uneditable'] + res.body = jsonutils.dump_as_bytes(data) + return res + + base_app = webtest.TestApp(setup_base_app(self)) + response = base_app.put("/dummy_resources/1", + {'uneditable': "new_value"}) + self.assertEqual("original_value", response.json['uneditable']) + + ext_app = self._setup_app_with_request_handler(_update_handler, + 'PUT') + ext_response = ext_app.put("/dummy_resources/1", + {'uneditable': "new_value"}) + self.assertEqual("new_value", ext_response.json['uneditable']) + + def _setup_app_with_request_handler(self, handler, verb): + req_ext = extensions.RequestExtension(verb, + '/dummy_resources/:(id)', + handler) + manager = SimpleExtensionManager(None, None, req_ext) + return _setup_extensions_test_app(manager) + + +class ExtensionManagerTest(base.BaseTestCase): + + def test_invalid_extensions_are_not_registered(self): + + class InvalidExtension(object): + """Invalid extension. + + This Extension doesn't implement extension methods : + get_name, get_description, get_namespace and get_updated + """ + def get_alias(self): + return "invalid_extension" + + ext_mgr = extensions.ExtensionManager('') + ext_mgr.add_extension(InvalidExtension()) + ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension")) + + self.assertIn('valid_extension', ext_mgr.extensions) + self.assertNotIn('invalid_extension', ext_mgr.extensions) + + +class ExtensionControllerTest(testlib_api.WebTestCase): + + def setUp(self): + super(ExtensionControllerTest, self).setUp() + self.skip("Not ready yet") + self.test_app = _setup_extensions_test_app() + + def test_index_gets_all_registered_extensions(self): + response = self.test_app.get("/extensions." 
+ self.fmt) + res_body = self.deserialize(response) + foxnsox = res_body["extensions"][0] + + self.assertEqual("FOXNSOX", foxnsox["alias"]) + self.assertEqual("http://www.fox.in.socks/api/ext/pie/v1.0", + foxnsox["namespace"]) + + def test_extension_can_be_accessed_by_alias(self): + response = self.test_app.get("/extensions/FOXNSOX." + self.fmt) + foxnsox_extension = self.deserialize(response) + foxnsox_extension = foxnsox_extension['extension'] + self.assertEqual("FOXNSOX", foxnsox_extension["alias"]) + self.assertEqual("http://www.fox.in.socks/api/ext/pie/v1.0", + foxnsox_extension["namespace"]) + + def test_show_returns_not_found_for_non_existent_extension(self): + response = self.test_app.get("/extensions/non_existent" + self.fmt, + status="*") + + self.assertEqual(404, response.status_int) + + +def app_factory(global_conf, **local_conf): + conf = global_conf.copy() + conf.update(local_conf) + return ExtensionsTestApp(conf) + + +def setup_base_app(test): + base.BaseTestCase.config_parse() + app = config.load_paste_app('extensions_test_app') + return app + + +def setup_extensions_middleware(extension_manager=None): + extension_manager = (extension_manager or + extensions.ExtensionManager(extensions_path)) + base.BaseTestCase.config_parse() + app = config.load_paste_app('extensions_test_app') + return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager) + + +def _setup_extensions_test_app(extension_manager=None): + return webtest.TestApp(setup_extensions_middleware(extension_manager)) + + +class SimpleExtensionManager(object): + + def __init__(self, resource_ext=None, action_ext=None, request_ext=None): + self.resource_ext = resource_ext + self.action_ext = action_ext + self.request_ext = request_ext + + def get_resources(self): + resource_exts = [] + if self.resource_ext: + resource_exts.append(self.resource_ext) + return resource_exts + + def get_actions(self): + action_exts = [] + if self.action_ext: + action_exts.append(self.action_ext) + return 
action_exts + + def get_request_extensions(self): + request_extensions = [] + if self.request_ext: + request_extensions.append(self.request_ext) + return request_extensions diff --git a/apmec/tests/unit/test_policy.py b/apmec/tests/unit/test_policy.py new file mode 100644 index 0000000..bb0655b --- /dev/null +++ b/apmec/tests/unit/test_policy.py @@ -0,0 +1,557 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test of Policy Engine For Apmec""" + +import fixtures +import mock +import six + +from oslo_policy import policy as common_policy +from oslo_serialization import jsonutils as json +from oslo_utils import importutils +from six.moves.urllib import request as urlrequest + +import apmec +from apmec.api.v1 import attributes +from apmec.common import exceptions +from apmec import context +from apmec import manager +from apmec import policy +from apmec.tests import base + + +class PolicyFileTestCase(base.BaseTestCase): + def setUp(self): + super(PolicyFileTestCase, self).setUp() + self.skipTest("Not ready yet") + policy.reset() + self.addCleanup(policy.reset) + self.context = context.Context('fake', 'fake', is_admin=False) + self.target = {} + self.tempdir = self.useFixture(fixtures.TempDir()) + + def test_modified_policy_reloads(self): + def fake_find_config_file(_1, _2): + return self.tempdir.join('policy') + + with mock.patch.object(apmec.common.utils, + 'find_config_file', + new=fake_find_config_file): + 
tmpfilename = fake_find_config_file(None, None) + action = "example:test" + with open(tmpfilename, "w") as policyfile: + policyfile.write("""{"example:test": ""}""") + policy.init() + policy.enforce(self.context, action, self.target) + with open(tmpfilename, "w") as policyfile: + policyfile.write("""{"example:test": "!"}""") + # NOTE(vish): reset stored policy cache so we don't have to + # sleep(1) + policy._POLICY_CACHE = {} + policy.init() + self.assertRaises(exceptions.PolicyNotAuthorized, + policy.enforce, + self.context, + action, + self.target) + + +class PolicyTestCase(base.BaseTestCase): + def setUp(self): + super(PolicyTestCase, self).setUp() + self.skipTest("Not ready yet") + policy.reset() + self.addCleanup(policy.reset) + # NOTE(vish): preload rules to circumvent reloading from file + policy.init() + rules = { + "true": '@', + "example:allowed": '@', + "example:denied": '!', + "example:get_http": "http:http://www.example.com", + "example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s", + "example:early_and_fail": "! 
and @", + "example:early_or_success": "@ or !", + "example:lowercase_admin": "role:admin or role:sysadmin", + "example:uppercase_admin": "role:ADMIN or role:sysadmin", + } + # NOTE(vish): then overload underlying rules + common_policy.set_rules(common_policy.Rules( + dict((k, common_policy.parse_rule(v)) + for k, v in rules.items()))) + self.context = context.Context('fake', 'fake', roles=['member']) + self.target = {} + + def test_enforce_nonexistent_action_throws(self): + action = "example:noexist" + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_enforce_bad_action_throws(self): + action = "example:denied" + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_check_bad_action_noraise(self): + action = "example:denied" + result = policy.check(self.context, action, self.target) + self.assertFalse(result) + + def test_check_non_existent_action(self): + action = "example:idonotexist" + result_1 = policy.check(self.context, action, self.target) + self.assertFalse(result_1) + result_2 = policy.check(self.context, action, self.target, + might_not_exist=True) + self.assertTrue(result_2) + + def test_enforce_good_action(self): + action = "example:allowed" + result = policy.enforce(self.context, action, self.target) + self.assertEqual(True, result) + + def test_enforce_http_true(self): + + def fakeurlopen(url, post_data): + return six.StringIO("True") + + with mock.patch.object(urlrequest, 'urlopen', new=fakeurlopen): + action = "example:get_http" + target = {} + result = policy.enforce(self.context, action, target) + self.assertEqual(True, result) + + def test_enforce_http_false(self): + + def fakeurlopen(url, post_data): + return six.StringIO("False") + + with mock.patch.object(urlrequest, 'urlopen', new=fakeurlopen): + action = "example:get_http" + target = {} + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + 
self.context, action, target) + + def test_templatized_enforcement(self): + target_mine = {'tenant_id': 'fake'} + target_not_mine = {'tenant_id': 'another'} + action = "example:my_file" + policy.enforce(self.context, action, target_mine) + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, target_not_mine) + + def test_early_AND_enforcement(self): + action = "example:early_and_fail" + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_early_OR_enforcement(self): + action = "example:early_or_success" + policy.enforce(self.context, action, self.target) + + def test_ignore_case_role_check(self): + lowercase_action = "example:lowercase_admin" + uppercase_action = "example:uppercase_admin" + # NOTE(dprince) we mix case in the Admin role here to ensure + # case is ignored + admin_context = context.Context('admin', 'fake', roles=['AdMiN']) + policy.enforce(admin_context, lowercase_action, self.target) + policy.enforce(admin_context, uppercase_action, self.target) + + +class DefaultPolicyTestCase(base.BaseTestCase): + + def setUp(self): + super(DefaultPolicyTestCase, self).setUp() + self.skipTest("Not ready yet") + policy.reset() + policy.init() + self.addCleanup(policy.reset) + + self.rules = { + "default": '', + "example:exist": '!', + } + + self._set_rules('default') + + self.context = context.Context('fake', 'fake') + + def _set_rules(self, default_rule): + rules = common_policy.Rules( + dict((k, common_policy.parse_rule(v)) + for k, v in self.rules.items()), default_rule) + common_policy.set_rules(rules) + + def test_policy_called(self): + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, "example:exist", {}) + + def test_not_found_policy_calls_default(self): + policy.enforce(self.context, "example:noexist", {}) + + def test_default_not_found(self): + self._set_rules("default_noexist") + 
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, "example:noexist", {}) + + +FAKE_RESOURCE_NAME = 'something' +FAKE_RESOURCE = {"%ss" % FAKE_RESOURCE_NAME: + {'attr': {'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': None, + 'enforce_policy': True, + 'validate': {'type:dict': + {'sub_attr_1': {'type:string': None}, + 'sub_attr_2': {'type:string': None}}} + }}} + + +class ApmecPolicyTestCase(base.BaseTestCase): + + def setUp(self): + super(ApmecPolicyTestCase, self).setUp() + self.skipTest("Not ready yet") + policy.reset() + policy.init() + self.addCleanup(policy.reset) + self.admin_only_legacy = "role:admin" + self.admin_or_owner_legacy = "role:admin or tenant_id:%(tenant_id)s" + # Add a Fake 'something' resource to RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCE) + self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { + "context_is_admin": "role:admin", + "admin_or_network_owner": "rule:context_is_admin or " + "tenant_id:%(network:tenant_id)s", + "admin_or_owner": ("rule:context_is_admin or " + "tenant_id:%(tenant_id)s"), + "admin_only": "rule:context_is_admin", + "regular_user": "role:user", + "shared": "field:networks:shared=True", + "external": "field:networks:router:external=True", + "default": '@', + + "create_network": "rule:admin_or_owner", + "create_network:shared": "rule:admin_only", + "update_network": '@', + "update_network:shared": "rule:admin_only", + + "get_network": "rule:admin_or_owner or " + "rule:shared or " + "rule:external", + "create_port:mac": "rule:admin_or_network_owner", + "create_something": "rule:admin_or_owner", + "create_something:attr": "rule:admin_or_owner", + "create_something:attr:sub_attr_1": "rule:admin_or_owner", + "create_something:attr:sub_attr_2": "rule:admin_only", + + "get_firewall_policy": "rule:admin_or_owner or " + "rule:shared", + "get_firewall_rule": "rule:admin_or_owner or " + "rule:shared" + }.items()) + + def 
fakepolicyinit(): + common_policy.set_rules(common_policy.Rules(self.rules)) + + def remove_fake_resource(): + del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME] + + self.patcher = mock.patch.object(apmec.policy, + 'init', + new=fakepolicyinit) + self.patcher.start() + self.addCleanup(remove_fake_resource) + self.context = context.Context('fake', 'fake', roles=['user']) + plugin_klass = importutils.import_class( + "apmec.db.db_base_plugin_v2.ApmecDbPluginV2") + self.manager_patcher = mock.patch('apmec.manager.ApmecManager') + fake_manager = self.manager_patcher.start() + fake_manager_instance = fake_manager.return_value + fake_manager_instance.plugin = plugin_klass() + + def _test_action_on_attr(self, context, action, attr, value, + exception=None): + action = "%s_network" % action + target = {'tenant_id': 'the_owner', attr: value} + if exception: + self.assertRaises(exception, policy.enforce, + context, action, target) + else: + result = policy.enforce(context, action, target) + self.assertEqual(True, result) + + def _test_nonadmin_action_on_attr(self, action, attr, value, + exception=None): + user_context = context.Context('', "user", roles=['user']) + self._test_action_on_attr(user_context, action, attr, + value, exception) + + def test_nonadmin_write_on_private_fails(self): + self._test_nonadmin_action_on_attr('create', 'shared', False, + exceptions.PolicyNotAuthorized) + + def test_nonadmin_read_on_private_fails(self): + self._test_nonadmin_action_on_attr('get', 'shared', False, + exceptions.PolicyNotAuthorized) + + def test_nonadmin_write_on_shared_fails(self): + self._test_nonadmin_action_on_attr('create', 'shared', True, + exceptions.PolicyNotAuthorized) + + def test_nonadmin_read_on_shared_succeeds(self): + self._test_nonadmin_action_on_attr('get', 'shared', True) + + def _test_enforce_adminonly_attribute(self, action): + admin_context = context.get_admin_context() + target = {'shared': True} + result = policy.enforce(admin_context, action, 
target) + self.assertEqual(True, result) + + def test_enforce_adminonly_attribute_create(self): + self._test_enforce_adminonly_attribute('create_network') + + def test_enforce_adminonly_attribute_update(self): + self._test_enforce_adminonly_attribute('update_network') + + def test_enforce_adminonly_attribute_no_context_is_admin_policy(self): + del self.rules[policy.ADMIN_CTX_POLICY] + self.rules['admin_only'] = common_policy.parse_rule( + self.admin_only_legacy) + self.rules['admin_or_owner'] = common_policy.parse_rule( + self.admin_or_owner_legacy) + self._test_enforce_adminonly_attribute('create_network') + + def test_enforce_adminonly_attribute_nonadminctx_returns_403(self): + action = "create_network" + target = {'shared': True, 'tenant_id': 'somebody_else'} + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, target) + + def test_enforce_adminonly_nonadminctx_no_ctx_is_admin_policy_403(self): + del self.rules[policy.ADMIN_CTX_POLICY] + self.rules['admin_only'] = common_policy.parse_rule( + self.admin_only_legacy) + self.rules['admin_or_owner'] = common_policy.parse_rule( + self.admin_or_owner_legacy) + action = "create_network" + target = {'shared': True, 'tenant_id': 'somebody_else'} + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, target) + + def _test_build_subattribute_match_rule(self, validate_value): + bk = FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] + FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = ( + validate_value) + action = "create_something" + target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} + self.assertFalse(policy._build_subattr_match_rule( + 'attr', + FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr'], + action, + target)) + FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk + + def test_build_subattribute_match_rule_empty_dict_validator(self): + self._test_build_subattribute_match_rule({}) + + def 
test_build_subattribute_match_rule_wrong_validation_info(self): + self._test_build_subattribute_match_rule( + {'type:dict': 'wrong_stuff'}) + + def test_enforce_subattribute(self): + action = "create_something" + target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} + result = policy.enforce(self.context, action, target, None) + self.assertEqual(True, result) + + def test_enforce_admin_only_subattribute(self): + action = "create_something" + target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', + 'sub_attr_2': 'y'}} + result = policy.enforce(context.get_admin_context(), + action, target, None) + self.assertEqual(True, result) + + def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self): + action = "create_something" + target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', + 'sub_attr_2': 'y'}} + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, target, None) + + def test_enforce_regularuser_on_read(self): + action = "get_network" + target = {'shared': True, 'tenant_id': 'somebody_else'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_firewall_policy_shared(self): + action = "get_firewall_policy" + target = {'shared': True, 'tenant_id': 'somebody_else'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_firewall_rule_shared(self): + action = "get_firewall_rule" + target = {'shared': True, 'tenant_id': 'somebody_else'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_tenant_id_check(self): + # Trigger a policy with rule admin_or_owner + action = "create_network" + target = {'tenant_id': 'fake'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_tenant_id_check_parent_resource(self): + + def fakegetnetwork(*args, **kwargs): + return {'tenant_id': 'fake'} + + action = "create_port:mac" + with 
mock.patch.object(manager.ApmecManager.get_instance().plugin, + 'get_network', new=fakegetnetwork): + target = {'network_id': 'whatever'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_plugin_failure(self): + + def fakegetnetwork(*args, **kwargs): + raise NotImplementedError('Blast!') + + # the policy check and plugin method we use in this test are irrelevant + # so long that we verify that, if *f* blows up, the behavior of the + # policy engine to propagate the exception is preserved + action = "create_port:mac" + with mock.patch.object(manager.ApmecManager.get_instance().plugin, + 'get_network', new=fakegetnetwork): + target = {'network_id': 'whatever'} + self.assertRaises(NotImplementedError, + policy.enforce, + self.context, + action, + target) + + def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self): + + def fakegetnetwork(*args, **kwargs): + return {'tenant_id': 'fake'} + + del self.rules['admin_or_network_owner'] + self.rules['admin_or_network_owner'] = common_policy.parse_rule( + "role:admin or tenant_id:%(network_tenant_id)s") + action = "create_port:mac" + with mock.patch.object(manager.ApmecManager.get_instance().plugin, + 'get_network', new=fakegetnetwork): + target = {'network_id': 'whatever'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_tenant_id_check_no_target_field_raises(self): + # Try and add a bad rule + self.assertRaises( + exceptions.PolicyInitError, + common_policy.parse_rule, + 'tenant_id:(wrong_stuff)') + + def _test_enforce_tenant_id_raises(self, bad_rule): + self.rules['admin_or_owner'] = common_policy.parse_rule(bad_rule) + # Trigger a policy with rule admin_or_owner + action = "create_network" + target = {'tenant_id': 'fake'} + policy.init() + self.assertRaises(exceptions.PolicyCheckError, + policy.enforce, + self.context, action, target) + + def test_enforce_tenant_id_check_malformed_target_field_raises(self): + 
self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s') + + def test_enforce_tenant_id_check_invalid_parent_resource_raises(self): + self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s') + + def test_get_roles_context_is_admin_rule_missing(self): + rules = dict((k, common_policy.parse_rule(v)) for k, v in { + "some_other_rule": "role:admin", + }.items()) + common_policy.set_rules(common_policy.Rules(rules)) + # 'admin' role is expected for bw compatibility + self.assertEqual(['admin'], policy.get_admin_roles()) + + def test_get_roles_with_role_check(self): + rules = dict((k, common_policy.parse_rule(v)) for k, v in { + policy.ADMIN_CTX_POLICY: "role:admin", + }.items()) + common_policy.set_rules(common_policy.Rules(rules)) + self.assertEqual(['admin'], policy.get_admin_roles()) + + def test_get_roles_with_rule_check(self): + rules = dict((k, common_policy.parse_rule(v)) for k, v in { + policy.ADMIN_CTX_POLICY: "rule:some_other_rule", + "some_other_rule": "role:admin", + }.items()) + common_policy.set_rules(common_policy.Rules(rules)) + self.assertEqual(['admin'], policy.get_admin_roles()) + + def test_get_roles_with_or_check(self): + self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { + policy.ADMIN_CTX_POLICY: "rule:rule1 or rule:rule2", + "rule1": "role:admin_1", + "rule2": "role:admin_2" + }.items()) + self.assertEqual(['admin_1', 'admin_2'], + policy.get_admin_roles()) + + def test_get_roles_with_other_rules(self): + self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { + policy.ADMIN_CTX_POLICY: "role:xxx or other:value", + }.items()) + self.assertEqual(['xxx'], policy.get_admin_roles()) + + def _test_set_rules_with_deprecated_policy(self, input_rules, + expected_rules): + policy._set_rules(json.dumps(input_rules)) + # verify deprecated policy has been removed + for pol in input_rules.keys(): + self.assertNotIn(pol, common_policy._rules) + # verify deprecated policy was correctly translated. 
Iterate + # over items for compatibility with unittest2 in python 2.6 + for rule in expected_rules: + self.assertIn(rule, common_policy._rules) + self.assertEqual(expected_rules[rule], + str(common_policy._rules[rule])) + + def test_set_rules_with_deprecated_view_policy(self): + self._test_set_rules_with_deprecated_policy( + {'extension:router:view': 'rule:admin_or_owner'}, + {'get_network:router:external': 'rule:admin_or_owner'}) + + def test_set_rules_with_deprecated_set_policy(self): + expected_policies = ['create_network:provider:network_type', + 'create_network:provider:physical_network', + 'create_network:provider:segmentation_id', + 'update_network:provider:network_type', + 'update_network:provider:physical_network', + 'update_network:provider:segmentation_id'] + self._test_set_rules_with_deprecated_policy( + {'extension:provider_network:set': 'rule:admin_only'}, + dict((policy, 'rule:admin_only') for policy in + expected_policies)) diff --git a/apmec/tests/unit/test_post_mortem_debug.py b/apmec/tests/unit/test_post_mortem_debug.py new file mode 100644 index 0000000..eaa9156 --- /dev/null +++ b/apmec/tests/unit/test_post_mortem_debug.py @@ -0,0 +1,99 @@ +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import sys

import mock
from six import moves

from apmec.tests import base
from apmec.tests import post_mortem_debug


class TestTesttoolsExceptionHandler(base.BaseTestCase):
    """Checks that the exception hook filters the traceback and enters pdb."""

    def test_exception_handler(self):
        # Trigger a real assertion failure so exc_info carries a live
        # traceback for the handler to process.
        try:
            self.assertTrue(False)
        except Exception:
            exc_info = sys.exc_info()
        with mock.patch('traceback.print_exception') as print_exc, \
                mock.patch('pdb.post_mortem') as post_mortem, \
                mock.patch.object(post_mortem_debug, 'get_ignored_traceback',
                                  return_value=mock.Mock()):
            post_mortem_debug.exception_handler(exc_info)

        # The handler swaps in a post_mortem_debug.FilteredTraceback, so only
        # the exception type and value can be matched exactly.
        print_exc.assert_called_once_with(exc_info[0], exc_info[1], mock.ANY)
        post_mortem.assert_called_once_with(mock.ANY)


class TestFilteredTraceback(base.BaseTestCase):
    """Checks the traceback proxy mirrors attributes but hides the tail."""

    def test_filter_traceback(self):
        outer, inner = mock.Mock(), mock.Mock()
        outer.tb_next = inner
        inner.tb_next = None
        filtered = post_mortem_debug.FilteredTraceback(outer, inner)
        # The wrapper must expose the wrapped frame's attributes unchanged...
        for attr_name in ('tb_lasti', 'tb_lineno', 'tb_frame'):
            self.assertEqual(getattr(outer, attr_name, None),
                             getattr(filtered, attr_name, None))
        # ...while cutting off the rest of the chain.
        self.assertIsNone(filtered.tb_next)


class TestGetIgnoredTraceback(base.BaseTestCase):
    """Exercises detection of the trailing run of unittest-internal frames."""

    def _test_get_ignored_traceback(self, ignored_bit_array, expected):
        # Build a mock traceback chain with one entry per bit.
        root_tb = mock.Mock()
        tracebacks = [root_tb]
        current = root_tb
        for _ in moves.xrange(len(ignored_bit_array) - 1):
            current.tb_next = mock.Mock()
            current = current.tb_next
            tracebacks.append(current)
        current.tb_next = None

        # Frames flagged in the bit array masquerade as unittest internals.
        for current, ignored in zip(tracebacks, ignored_bit_array):
            current.tb_frame.f_globals = ['__unittest'] if ignored else []

        actual = post_mortem_debug.get_ignored_traceback(root_tb)
        expected_tb = tracebacks[expected] if expected is not None else None
        self.assertEqual(expected_tb, actual)

    def test_no_ignored_tracebacks(self):
        self._test_get_ignored_traceback([0, 0, 0], None)

    def test_single_member_trailing_chain(self):
        self._test_get_ignored_traceback([0, 0, 1], 2)

    def test_two_member_trailing_chain(self):
        self._test_get_ignored_traceback([0, 1, 1], 1)

    def test_first_traceback_ignored(self):
        self._test_get_ignored_traceback([1, 0, 0], None)

    def test_middle_traceback_ignored(self):
        self._test_get_ignored_traceback([0, 1, 0], None)
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context as oslo_context
from testtools import matchers

from apmec import context
from apmec.tests import base


class TestApmecContext(base.BaseTestCase):
    """Unit tests for apmec.context.Context and the admin-context helpers."""

    def setUp(self):
        super(TestApmecContext, self).setUp()
        self.skip("Not ready yet")
        # Everything below only runs once the skip above is removed.
        self._db_api_session_patcher = mock.patch('apmec.db.api.get_session')
        self.db_api_session = self._db_api_session_patcher.start()

    def test_apmec_context_create(self):
        ctx = context.Context('user_id', 'tenant_id')
        self.assertThat(ctx.request_id, matchers.StartsWith('req-'))
        # Both the modern and the legacy attribute names carry the IDs.
        for attr in ('user_id', 'user'):
            self.assertEqual('user_id', getattr(ctx, attr))
        for attr in ('project_id', 'tenant_id', 'tenant'):
            self.assertEqual('tenant_id', getattr(ctx, attr))
        self.assertIsNone(ctx.user_name)
        self.assertIsNone(ctx.tenant_name)

    def test_apmec_context_create_logs_unknown_kwarg(self):
        with mock.patch.object(context.LOG, 'debug') as mock_log:
            context.Context('user_id', 'tenant_id', foo=None)
        self.assertEqual(1, mock_log.call_count)

    def test_apmec_context_create_with_name(self):
        ctx = context.Context('user_id', 'tenant_id',
                              tenant_name='tenant_name',
                              user_name='user_name')
        # Names are exposed verbatim ...
        self.assertEqual('user_name', ctx.user_name)
        self.assertEqual('tenant_name', ctx.tenant_name)
        # ... while user/tenant still carry the IDs, not the names.
        self.assertEqual('user_id', ctx.user)
        self.assertEqual('tenant_id', ctx.tenant)

    def test_apmec_context_create_with_request_id(self):
        ctx = context.Context('user_id', 'tenant_id', request_id='req_id_xxx')
        self.assertEqual('req_id_xxx', ctx.request_id)

    def test_apmec_context_to_dict(self):
        ctx = context.Context('user_id', 'tenant_id')
        as_dict = ctx.to_dict()
        self.assertEqual(ctx.request_id, as_dict['request_id'])
        for key in ('user_id', 'user'):
            self.assertEqual('user_id', as_dict[key])
        for key in ('project_id', 'tenant'):
            self.assertEqual('tenant_id', as_dict[key])
        for key in ('user_name', 'tenant_name', 'project_name'):
            self.assertIsNone(as_dict[key])

    def test_apmec_context_to_dict_with_name(self):
        ctx = context.Context('user_id', 'tenant_id',
                              tenant_name='tenant_name',
                              user_name='user_name')
        as_dict = ctx.to_dict()
        self.assertEqual('user_name', as_dict['user_name'])
        # tenant_name doubles as project_name in the serialized form.
        self.assertEqual('tenant_name', as_dict['tenant_name'])
        self.assertEqual('tenant_name', as_dict['project_name'])

    def test_apmec_context_admin_to_dict(self):
        self.db_api_session.return_value = 'fakesession'
        admin_ctx = context.get_admin_context()
        as_dict = admin_ctx.to_dict()
        self.assertIsNone(as_dict['user_id'])
        self.assertIsNone(as_dict['tenant_id'])
        # The lazily created DB session must exist on the context object but
        # never leak into the serialized form.
        self.assertIsNotNone(admin_ctx.session)
        self.assertNotIn('session', as_dict)

    def test_apmec_context_admin_without_session_to_dict(self):
        admin_ctx = context.get_admin_context_without_session()
        as_dict = admin_ctx.to_dict()
        self.assertIsNone(as_dict['user_id'])
        self.assertIsNone(as_dict['tenant_id'])
        self.assertFalse(hasattr(admin_ctx, 'session'))

    def test_apmec_context_with_load_roles_true(self):
        self.assertIn('admin', context.get_admin_context().roles)

    def test_apmec_context_with_load_roles_false(self):
        admin_ctx = context.get_admin_context(load_admin_roles=False)
        self.assertFalse(admin_ctx.roles)

    def test_apmec_context_elevated_retains_request_id(self):
        ctx = context.Context('user_id', 'tenant_id')
        self.assertFalse(ctx.is_admin)
        req_id_before = ctx.request_id

        elevated_ctx = ctx.elevated()
        self.assertTrue(elevated_ctx.is_admin)
        self.assertEqual(req_id_before, elevated_ctx.request_id)

    def test_apmec_context_overwrite(self):
        ctx1 = context.Context('user_id', 'tenant_id')
        self.assertEqual(oslo_context.get_current().request_id,
                         ctx1.request_id)

        # By default a new context takes over the thread-local store.
        ctx2 = context.Context('user_id', 'tenant_id')
        self.assertNotEqual(ctx2.request_id, ctx1.request_id)
        self.assertEqual(oslo_context.get_current().request_id,
                         ctx2.request_id)

        # With overwrite=False the thread-local store still points at ctx2.
        ctx3 = context.Context('user_id', 'tenant_id', overwrite=False)
        self.assertNotEqual(ctx3.request_id, ctx2.request_id)
        self.assertEqual(oslo_context.get_current().request_id,
                         ctx2.request_id)

    def test_apmec_context_get_admin_context_not_update_local_store(self):
        ctx = context.Context('user_id', 'tenant_id')
        req_id_before = oslo_context.get_current().request_id
        self.assertEqual(req_id_before, ctx.request_id)

        # get_admin_context() must not clobber the thread-local context.
        admin_ctx = context.get_admin_context()
        self.assertEqual(req_id_before,
                         oslo_context.get_current().request_id)
        self.assertNotEqual(req_id_before, admin_ctx.request_id)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import testtools
from toscaparser import tosca_template
from toscaparser.utils import yamlparser
from translator.hot import tosca_translator

from apmec.catalogs.tosca import utils
class TestSamples(testtools.TestCase):
    """Sample tosca validation.

    Helps to validate the tosca templates provided in the samples folder
    to make sure whether its valid YAML, valid TOSCA and
    possible to translate into HOT template.
    """

    def _get_list_of_sample(self, tosca_files):
        """Map sample file names to absolute paths under samples/.../mead/.

        :param tosca_files: list of sample file names (or a falsy value).
        :returns: list of absolute paths, or None when tosca_files is falsy
            or not a list (preserves the original best-effort contract).
        """
        if not tosca_files:
            return None
        # os.path.join instead of raw string concatenation keeps the path
        # construction portable; the resulting (unnormalized) path is the
        # same one the original '+'-built string produced.
        base_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            '..', '..', '..', 'samples', 'tosca-templates', 'mead')
        if isinstance(tosca_files, list):
            return [os.path.join(base_path, tosca_file)
                    for tosca_file in tosca_files]

    def _test_samples(self, files):
        """Parse, validate and translate each sample, failing with its name.

        Each stage (YAML parse, TOSCA validation, HOT translation) is run
        best-effort; a failure leaves the stage result None so the assert
        below reports which file and which stage broke.
        """
        if not files:
            return
        for sample in self._get_list_of_sample(files):
            with open(sample, 'r') as f:
                yaml_dict = None
                try:
                    yaml_dict = yamlparser.simple_ordered_parse(f.read())
                except Exception:
                    # Bare 'except:' would also swallow KeyboardInterrupt /
                    # SystemExit; Exception is the widest safe net here.
                    pass
                self.assertIsNotNone(
                    yaml_dict,
                    "Yaml parser failed to parse %s" % sample)

                utils.updateimports(yaml_dict)

                tosca = None
                try:
                    tosca = tosca_template.ToscaTemplate(
                        a_file=False,
                        yaml_dict_tpl=yaml_dict)
                except Exception:
                    pass
                self.assertIsNotNone(
                    tosca,
                    "Tosca parser failed to parse %s" % sample)

                utils.post_process_template(tosca)

                hot = None
                try:
                    hot = tosca_translator.TOSCATranslator(
                        tosca, {}).translate()
                except Exception:
                    pass
                self.assertIsNotNone(
                    hot,
                    "Heat-translator failed to translate %s" % sample)

    def test_scale_sample(self, tosca_file=['tosca-mead-scale.yaml']):
        self._test_samples(tosca_file)

    def test_alarm_sample(self, tosca_file=['tosca-mead-alarm-scale.yaml']):
        self._test_samples(tosca_file)

    def test_list_samples(self,
                          files=['tosca-mead-scale.yaml',
                                 'tosca-mead-alarm-scale.yaml']):
        self._test_samples(files)
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import socket + +import mock +from oslo_config import cfg +import six.moves.urllib.request as urllibrequest +import testtools +import webob +import webob.exc + +from apmec.common import exceptions as exception +from apmec.tests import base +from apmec import wsgi + +CONF = cfg.CONF + +TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', 'var')) + + +class TestWSGIServer(base.BaseTestCase): + """WSGI server tests.""" + + def test_start_random_port(self): + server = wsgi.Server("test_random_port") + server.start(None, 0, host="127.0.0.1") + self.assertNotEqual(0, server.port) + server.stop() + server.wait() + + @mock.patch('oslo_service.service.ProcessLauncher') + def test_start_multiple_workers(self, ProcessLauncher): + launcher = ProcessLauncher.return_value + + server = wsgi.Server("test_multiple_processes") + server.start(None, 0, host="127.0.0.1", workers=2) + launcher.running = True + launcher.launch_service.assert_called_once_with(server._server, + workers=2) + + server.stop() + self.assertFalse(launcher.running) + + server.wait() + launcher.wait.assert_called_once_with() + + def test_start_random_port_with_ipv6(self): + server = wsgi.Server("test_random_port") + server.start(None, 0, host="::1") + self.assertEqual("::1", server.host) + self.assertNotEqual(0, server.port) + server.stop() + server.wait() + + def test_ipv6_listen_called_with_scope(self): + self.skipTest("Not 
ready yet") + server = wsgi.Server("test_app") + + with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen: + with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr: + mock_get_addr.return_value = [ + (socket.AF_INET6, + socket.SOCK_STREAM, + socket.IPPROTO_TCP, + '', + ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2)) + ] + with mock.patch.object(server, 'pool') as mock_pool: + server.start(None, + 1234, + host="fe80::204:acff:fe96:da87%eth0") + + mock_get_addr.assert_called_once_with( + "fe80::204:acff:fe96:da87%eth0", + 1234, + socket.AF_UNSPEC, + socket.SOCK_STREAM + ) + + mock_listen.assert_called_once_with( + ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2), + family=socket.AF_INET6, + backlog=cfg.CONF.backlog + ) + + mock_pool.spawn.assert_has_calls([ + mock.call( + server._run, + None, + mock_listen.return_value) + ]) + + def test_app(self): + self.skipTest("Not ready yet") + greetings = 'Hello, World!!!' + + def hello_world(env, start_response): + if env['PATH_INFO'] != '/': + start_response('404 Not Found', + [('Content-Type', 'text/plain')]) + return ['Not Found\r\n'] + start_response('200 OK', [('Content-Type', 'text/plain')]) + return [greetings] + + server = wsgi.Server("test_app") + server.start(hello_world, 0, host="127.0.0.1") + + response = urllibrequest.urlopen('http://127.0.0.1:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() + + +class SerializerTest(base.BaseTestCase): + def test_serialize_unknown_content_type(self): + """Verify that exception InvalidContentType is raised.""" + input_dict = {'servers': {'test': 'pass'}} + content_type = 'application/unknown' + serializer = wsgi.Serializer() + + self.assertRaises( + exception.InvalidContentType, serializer.serialize, + input_dict, content_type) + + def test_get_deserialize_handler_unknown_content_type(self): + """Verify that exception InvalidContentType is raised.""" + content_type = 'application/unknown' + serializer = wsgi.Serializer() + + 
self.assertRaises( + exception.InvalidContentType, + serializer.get_deserialize_handler, content_type) + + def test_serialize_content_type_json(self): + """Test serialize with content type json.""" + input_data = {'servers': ['test=pass']} + content_type = 'application/json' + serializer = wsgi.Serializer() + result = serializer.serialize(input_data, content_type) + + self.assertEqual(b'{"servers": ["test=pass"]}', result) + + def test_deserialize_raise_bad_request(self): + """Test serialize verifies that exception is raises.""" + content_type = 'application/unknown' + data_string = 'test' + serializer = wsgi.Serializer() + + self.assertRaises( + webob.exc.HTTPBadRequest, + serializer.deserialize, data_string, content_type) + + def test_deserialize_json_content_type(self): + """Test Serializer.deserialize with content type json.""" + content_type = 'application/json' + data_string = '{"servers": ["test=pass"]}' + serializer = wsgi.Serializer() + result = serializer.deserialize(data_string, content_type) + + self.assertEqual({'body': {u'servers': [u'test=pass']}}, result) + + +class RequestDeserializerTest(testtools.TestCase): + def setUp(self): + super(RequestDeserializerTest, self).setUp() + + class JSONDeserializer(object): + def deserialize(self, data, action='default'): + return b'pew_json' + + self.body_deserializers = {'application/json': JSONDeserializer()} + self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) + + def test_get_deserializer(self): + """Test RequestDeserializer.get_body_deserializer.""" + expected_json_serializer = self.deserializer.get_body_deserializer( + 'application/json') + + self.assertEqual( + expected_json_serializer, + self.body_deserializers['application/json']) + + def test_get_expected_content_type(self): + """Test RequestDeserializer.get_expected_content_type.""" + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/json' + + self.assertEqual( + 'application/json', + 
self.deserializer.get_expected_content_type(request)) + + def test_get_action_args(self): + """Test RequestDeserializer.get_action_args.""" + env = { + 'wsgiorg.routing_args': [None, { + 'controller': None, + 'format': None, + 'action': 'update', + 'id': 12}]} + expected = {'action': 'update', 'id': 12} + + self.assertEqual( + expected, self.deserializer.get_action_args(env)) + + def test_deserialize(self): + """Test RequestDeserializer.deserialize.""" + with mock.patch.object( + self.deserializer, 'get_action_args') as mock_method: + mock_method.return_value = {'action': 'create'} + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/json' + deserialized = self.deserializer.deserialize(request) + expected = ('create', {}, 'application/json') + + self.assertEqual(expected, deserialized) + + def test_get_body_deserializer_unknown_content_type(self): + """Verify that exception InvalidContentType is raised.""" + content_type = 'application/unknown' + deserializer = wsgi.RequestDeserializer() + self.assertRaises( + exception.InvalidContentType, + deserializer.get_body_deserializer, content_type) + + +class ResponseSerializerTest(testtools.TestCase): + def setUp(self): + super(ResponseSerializerTest, self).setUp() + + class JSONSerializer(object): + def serialize(self, data, action='default'): + return b'pew_json' + + class HeadersSerializer(object): + def serialize(self, response, data, action): + response.status_int = 404 + + self.body_serializers = {'application/json': JSONSerializer()} + + self.serializer = wsgi.ResponseSerializer( + self.body_serializers, HeadersSerializer()) + + def test_serialize_unknown_content_type(self): + """Verify that exception InvalidContentType is raised.""" + self.assertRaises( + exception.InvalidContentType, + self.serializer.serialize, + {}, 'application/unknown') + + def test_get_body_serializer(self): + """Verify that exception InvalidContentType is raised.""" + self.assertRaises( + 
exception.InvalidContentType, + self.serializer.get_body_serializer, 'application/unknown') + + def test_get_serializer(self): + """Test ResponseSerializer.get_body_serializer.""" + content_type = 'application/json' + self.assertEqual( + self.serializer.get_body_serializer(content_type), + self.body_serializers[content_type]) + + def test_serialize_json_response(self): + response = self.serializer.serialize({}, 'application/json') + + self.assertEqual('application/json', response.headers['Content-Type']) + self.assertEqual(b'pew_json', response.body) + self.assertEqual(404, response.status_int) + + def test_serialize_response_None(self): + response = self.serializer.serialize( + None, 'application/json') + + self.assertEqual('application/json', response.headers['Content-Type']) + self.assertEqual(b'', response.body) + self.assertEqual(404, response.status_int) + + +class RequestTest(base.BaseTestCase): + + def test_content_type_missing(self): + request = wsgi.Request.blank('/tests/123', method='POST') + request.body = b"" + + self.assertIsNone(request.get_content_type()) + + def test_content_type_unsupported(self): + request = wsgi.Request.blank('/tests/123', method='POST') + request.headers["Content-Type"] = "text/html" + request.body = b"fake
" + + self.assertIsNone(request.get_content_type()) + + def test_content_type_with_charset(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/json; charset=UTF-8" + result = request.get_content_type() + + self.assertEqual("application/json", result) + + def test_content_type_with_given_content_types(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/new-type;" + + self.assertIsNone(request.get_content_type()) + + def test_content_type_from_accept(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + + self.assertEqual("application/json", result) + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + + self.assertEqual("application/json", result) + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + + self.assertEqual("application/json", result) + + def test_content_type_from_query_extension(self): + request = wsgi.Request.blank('/tests/123.json') + result = request.best_match_content_type() + + self.assertEqual("application/json", result) + + request = wsgi.Request.blank('/tests/123.invalid') + result = request.best_match_content_type() + + self.assertEqual("application/json", result) + + def test_content_type_accept_and_query_extension(self): + request = wsgi.Request.blank('/tests/123.json') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + + self.assertEqual("application/json", result) + + def test_content_type_accept_default(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.best_match_content_type() + + self.assertEqual("application/json", result) + + def 
test_content_type_accept_with_given_content_types(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/new_type" + result = request.best_match_content_type() + + self.assertEqual('application/json', result) + + +class ActionDispatcherTest(base.BaseTestCase): + def test_dispatch(self): + """Test ActionDispatcher.dispatch.""" + serializer = wsgi.ActionDispatcher() + serializer.create = lambda x: x + + self.assertEqual( + 'pants', + serializer.dispatch('pants', action='create')) + + def test_dispatch_action_None(self): + """Test ActionDispatcher.dispatch with none action.""" + serializer = wsgi.ActionDispatcher() + serializer.create = lambda x: x + ' pants' + serializer.default = lambda x: x + ' trousers' + + self.assertEqual( + 'Two trousers', + serializer.dispatch('Two', action=None)) + + def test_dispatch_default(self): + serializer = wsgi.ActionDispatcher() + serializer.create = lambda x: x + ' pants' + serializer.default = lambda x: x + ' trousers' + + self.assertEqual( + 'Two trousers', + serializer.dispatch('Two', action='update')) + + +class ResponseHeadersSerializerTest(base.BaseTestCase): + def test_default(self): + serializer = wsgi.ResponseHeaderSerializer() + response = webob.Response() + serializer.serialize(response, {'v': '123'}, 'fake') + + self.assertEqual(200, response.status_int) + + def test_custom(self): + class Serializer(wsgi.ResponseHeaderSerializer): + def update(self, response, data): + response.status_int = 404 + response.headers['X-Custom-Header'] = data['v'] + serializer = Serializer() + response = webob.Response() + serializer.serialize(response, {'v': '123'}, 'update') + + self.assertEqual(404, response.status_int) + self.assertEqual('123', response.headers['X-Custom-Header']) + + +class DictSerializerTest(base.BaseTestCase): + + def test_dispatch_default(self): + serializer = wsgi.DictSerializer() + self.assertEqual( + '', serializer.serialize({}, 'NonExistentAction')) + + +class 
JSONDictSerializerTest(base.BaseTestCase): + + def test_json(self): + input_dict = dict(servers=dict(a=(2, 3))) + expected_json = b'{"servers":{"a":[2,3]}}' + serializer = wsgi.JSONDictSerializer() + result = serializer.serialize(input_dict) + result = result.replace(b'\n', b'').replace(b' ', b'') + + self.assertEqual(expected_json, result) + + def test_json_with_utf8(self): + data = b'{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}' + as_dict = {'body': {'a': u'\u7f51\u7edc'}} + deserializer = wsgi.JSONDeserializer() + self.assertEqual(as_dict, + deserializer.deserialize(data)) + + def test_json_with_unicode(self): + data = b'{"a": "\u7f51\u7edc"}' + as_dict = {'body': {'a': u'\u7f51\u7edc'}} + deserializer = wsgi.JSONDeserializer() + self.assertEqual(as_dict, + deserializer.deserialize(data)) + + +class TextDeserializerTest(base.BaseTestCase): + + def test_dispatch_default(self): + deserializer = wsgi.TextDeserializer() + self.assertEqual( + {}, deserializer.deserialize({}, 'update')) + + +class JSONDeserializerTest(base.BaseTestCase): + def test_json(self): + data = """{"a": { + "a1": "1", + "a2": "2", + "bs": ["1", "2", "3", {"c": {"c1": "1"}}], + "d": {"e": "1"}, + "f": "1"}}""" + as_dict = { + 'body': { + 'a': { + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], + 'd': {'e': '1'}, + 'f': '1'}}} + deserializer = wsgi.JSONDeserializer() + self.assertEqual( + as_dict, deserializer.deserialize(data)) + + def test_default_raise_Malformed_Exception(self): + """Test JsonDeserializer.default. + + Test verifies JsonDeserializer.default raises exception + MalformedRequestBody correctly. 
+ """ + data_string = "" + deserializer = wsgi.JSONDeserializer() + + self.assertRaises( + exception.MalformedRequestBody, deserializer.default, data_string) + + def test_json_with_utf8(self): + data = b'{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}' + as_dict = {'body': {'a': u'\u7f51\u7edc'}} + deserializer = wsgi.JSONDeserializer() + self.assertEqual(as_dict, + deserializer.deserialize(data)) + + def test_json_with_unicode(self): + data = b'{"a": "\u7f51\u7edc"}' + as_dict = {'body': {'a': u'\u7f51\u7edc'}} + deserializer = wsgi.JSONDeserializer() + self.assertEqual(as_dict, + deserializer.deserialize(data)) + + +class RequestHeadersDeserializerTest(base.BaseTestCase): + + def test_default(self): + deserializer = wsgi.RequestHeadersDeserializer() + req = wsgi.Request.blank('/') + + self.assertEqual( + {}, deserializer.deserialize(req, 'nonExistent')) + + def test_custom(self): + class Deserializer(wsgi.RequestHeadersDeserializer): + def update(self, request): + return {'a': request.headers['X-Custom-Header']} + deserializer = Deserializer() + req = wsgi.Request.blank('/') + req.headers['X-Custom-Header'] = 'b' + self.assertEqual( + {'a': 'b'}, deserializer.deserialize(req, 'update')) + + +class ResourceTest(base.BaseTestCase): + def test_dispatch(self): + class Controller(object): + def index(self, request, index=None): + return index + + def my_fault_body_function(): + return 'off' + + resource = wsgi.Resource(Controller(), my_fault_body_function) + actual = resource.dispatch( + resource.controller, 'index', action_args={'index': 'off'}) + expected = 'off' + + self.assertEqual(expected, actual) + + def test_dispatch_unknown_controller_action(self): + class Controller(object): + def index(self, request, pants=None): + return pants + + def my_fault_body_function(): + return b'off' + + resource = wsgi.Resource(Controller(), my_fault_body_function) + self.assertRaises( + AttributeError, resource.dispatch, + resource.controller, 'create', {}) + + def 
test_malformed_request_body_throws_bad_request(self): + def my_fault_body_function(): + return b'off' + + resource = wsgi.Resource(None, my_fault_body_function) + request = wsgi.Request.blank( + "/", body=b"{mal:formed", method='POST', + headers={'Content-Type': "application/json"}) + + response = resource(request) + self.assertEqual(400, response.status_int) + + def test_wrong_content_type_throws_unsupported_media_type_error(self): + def my_fault_body_function(): + return b'off' + resource = wsgi.Resource(None, my_fault_body_function) + request = wsgi.Request.blank( + "/", body=b"{some:json}", method='POST', + headers={'Content-Type': "xxx"}) + + response = resource(request) + self.assertEqual(400, response.status_int) + + def test_wrong_content_type_server_error(self): + def my_fault_body_function(): + return b'off' + resource = wsgi.Resource(None, my_fault_body_function) + request = wsgi.Request.blank( + "/", method='POST', headers={'Content-Type': "unknow"}) + + response = resource(request) + self.assertEqual(500, response.status_int) + + def test_call_resource_class_bad_request(self): + class Controller(object): + def index(self, request, index=None): + return index + + def my_fault_body_function(): + return b'off' + + class FakeRequest(object): + def __init__(self): + self.url = 'http://where.no' + self.environ = 'environ' + self.body = b'body' + + def method(self): + pass + + def best_match_content_type(self): + return 'best_match_content_type' + + resource = wsgi.Resource(Controller(), my_fault_body_function) + request = FakeRequest() + result = resource(request) + self.assertEqual(400, result.status_int) + + def test_type_error(self): + class Controller(object): + def index(self, request, index=None): + return index + + def my_fault_body_function(): + return b'off' + resource = wsgi.Resource(Controller(), my_fault_body_function) + request = wsgi.Request.blank( + "/", method='POST', headers={'Content-Type': "json"}) + + response = resource.dispatch( + 
request, action='index', action_args='test') + self.assertEqual(400, response.status_int) + + def test_call_resource_class_internal_error(self): + class Controller(object): + def index(self, request, index=None): + return index + + def my_fault_body_function(): + return b'off' + + class FakeRequest(object): + def __init__(self): + self.url = 'http://where.no' + self.environ = 'environ' + self.body = b'{"Content-Type": "json"}' + + def method(self): + pass + + def best_match_content_type(self): + return 'application/json' + + resource = wsgi.Resource(Controller(), my_fault_body_function) + request = FakeRequest() + result = resource(request) + self.assertEqual(500, result.status_int) + + +class MiddlewareTest(base.BaseTestCase): + def test_process_response(self): + def application(environ, start_response): + response = 'Success' + return response + response = application('test', 'fake') + result = wsgi.Middleware(application).process_response(response) + self.assertEqual('Success', result) + + +class FaultTest(base.BaseTestCase): + def test_call_fault(self): + class MyException(object): + status_int = 415 + explanation = 'test' + + my_exceptions = MyException() + my_fault = wsgi.Fault(exception=my_exceptions) + request = wsgi.Request.blank( + "/", method='POST', headers={'Content-Type': "unknow"}) + response = my_fault(request) + self.assertEqual(415, response.status_int) + + +class TestWSGIServerWithSSL(base.BaseTestCase): + """WSGI server tests.""" + + def setUp(self): + super(TestWSGIServerWithSSL, self).setUp() + self.skip("Not ready yet") + + def test_app_using_ssl(self): + CONF.set_default('use_ssl', True) + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certificate.crt')) + CONF.set_default("ssl_key_file", + os.path.join(TEST_VAR_DIR, 'privatekey.key')) + + greetings = 'Hello, World!!!' 
+ + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = wsgi.Server("test_app") + server.start(hello_world, 0, host="127.0.0.1") + + response = urllibrequest.urlopen('https://127.0.0.1:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() + + def test_app_using_ssl_combined_cert_and_key(self): + CONF.set_default('use_ssl', True) + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certandkey.pem')) + + greetings = 'Hello, World!!!' + + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = wsgi.Server("test_app") + server.start(hello_world, 0, host="127.0.0.1") + + response = urllibrequest.urlopen('https://127.0.0.1:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() + + def test_app_using_ipv6_and_ssl(self): + CONF.set_default('use_ssl', True) + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certificate.crt')) + CONF.set_default("ssl_key_file", + os.path.join(TEST_VAR_DIR, 'privatekey.key')) + + greetings = 'Hello, World!!!' + + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = wsgi.Server("test_app") + server.start(hello_world, 0, host="::1") + + response = urllibrequest.urlopen('https://[::1]:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() diff --git a/apmec/tests/unit/testlib_api.py b/apmec/tests/unit/testlib_api.py new file mode 100644 index 0000000..b6dfe51 --- /dev/null +++ b/apmec/tests/unit/testlib_api.py @@ -0,0 +1,82 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testtools

from apmec.tests import base
from apmec import wsgi


class ExpectedException(testtools.ExpectedException):
    """ExpectedException that also records the caught exception instance."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        matched = super(ExpectedException, self).__exit__(exc_type,
                                                          exc_value,
                                                          traceback)
        if not matched:
            return False
        # Expose the matched exception for post-hoc inspection by the test.
        self.exception = exc_value
        return True


def create_request(path, body, content_type, method='GET',
                   query_string=None, context=None):
    """Build a wsgi.Request for tests, optionally carrying an auth context."""
    url = "%s?%s" % (path, query_string) if query_string else path
    req = wsgi.Request.blank(url)
    req.method = method
    # Replace the default headers wholesale, as the original did.
    req.headers = {}
    req.headers['Accept'] = content_type
    req.body = body
    if context:
        req.environ['apmec.context'] = context
    return req


class WebTestCase(base.BaseTestCase):
    """Base class for web API tests with (de)serialization helpers."""

    fmt = 'json'

    def setUp(self):
        super(WebTestCase, self).setUp()
        self._deserializers = {
            'application/json': wsgi.JSONDeserializer(),
        }

    def deserialize(self, response):
        content_type = 'application/%s' % self.fmt
        deserializer = self._deserializers[content_type]
        return deserializer.deserialize(response.body)['body']

    def serialize(self, data):
        content_type = 'application/%s' % self.fmt
        return wsgi.Serializer().serialize(data, content_type)


class SubDictMatch(object):
    """Equality helper matching any dict that is a superset of sub_dict."""

    def __init__(self, sub_dict):
        self.sub_dict = sub_dict

    def __eq__(self, super_dict):
        return all(pair in super_dict.items()
                   for pair in self.sub_dict.items())

    def __ne__(self, super_dict):
        return not self.__eq__(super_dict)
# --- apmec/tests/utils.py -------------------------------------------------
# (Apache 2.0 license header — see repository root.)

import os


def read_file(input_file):
    """Return the text of a sample file stored under tests/etc/samples/.

    :param input_file: file name (any object convertible to str) relative
        to the ``etc/samples`` directory next to this module
    :returns: the file contents as a string
    """
    base_dir = os.path.dirname(__file__)
    yaml_file = os.path.abspath(
        os.path.join(base_dir, 'etc/samples/' + str(input_file)))
    with open(yaml_file, 'r') as f:
        return f.read()


# --- apmec/version.py -----------------------------------------------------
# (Apache 2.0 license header — see repository root.)

import pbr.version

# Single source of truth for the package version; pbr derives it from
# git tags / PKG-INFO at build time.
version_info = pbr.version.VersionInfo('apmec')

# --- apmec/wsgi.py (continues below) --------------------------------------
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Utility methods for working with WSGI servers +""" +from __future__ import print_function + +import errno +import os +import socket +import ssl +import sys +import time + +import eventlet.wsgi +# eventlet.patcher.monkey_patch(all=False, socket=True, thread=True) +from oslo_config import cfg +import oslo_i18n as i18n +from oslo_log import log as logging +from oslo_serialization import jsonutils +from oslo_service import service as common_service +from oslo_service import systemd +from oslo_utils import encodeutils +from oslo_utils import excutils +import routes.middleware +import six +import webob.dec +import webob.exc + +from apmec.common import exceptions as exception +from apmec import context +from apmec.db import api + + +socket_opts = [ + cfg.IntOpt('backlog', + default=4096, + help=_("Number of backlog requests to configure " + "the socket with")), + cfg.IntOpt('tcp_keepidle', + default=600, + help=_("Sets the value of TCP_KEEPIDLE in seconds for each " + "server socket. 
Not supported on OS X.")), + cfg.IntOpt('retry_until_window', + default=30, + help=_("Number of seconds to keep retrying to listen")), + cfg.IntOpt('max_header_line', + default=16384, + help=_("Max header line to accommodate large tokens")), + cfg.BoolOpt('use_ssl', + default=False, + help=_('Enable SSL on the API server')), + cfg.StrOpt('ssl_ca_file', + help=_("CA certificate file to use to verify " + "connecting clients")), + cfg.StrOpt('ssl_cert_file', + help=_("Certificate file to use when starting " + "the server securely")), + cfg.StrOpt('ssl_key_file', + help=_("Private key file to use when starting " + "the server securely")), +] + +CONF = cfg.CONF +CONF.register_opts(socket_opts) + + +def config_opts(): + return [(None, socket_opts)] + +LOG = logging.getLogger(__name__) + + +def encode_body(body): + """Encode unicode body. + + WebOb requires to encode unicode body used to update response body. + """ + return encodeutils.to_utf8(body) + + +class WorkerService(common_service.ServiceBase): + """Wraps a worker to be handled by ProcessLauncher.""" + + def __init__(self, service, application): + self._service = service + self._application = application + self._server = None + + def start(self): + # We may have just forked from parent process. A quick disposal of the + # existing sql connections avoids producing 500 errors later when they + # are discovered to be broken. 
+ api.get_engine().pool.dispose() + self._server = self._service.pool.spawn(self._service._run, + self._application, + self._service._socket) + + def wait(self): + self._service.pool.waitall() + + def stop(self): + if isinstance(self._server, eventlet.greenthread.GreenThread): + self._server.kill() + self._server = None + + def reset(self): + pass + + +class Server(object): + """Server class to manage multiple WSGI sockets and applications.""" + + def __init__(self, name, threads=1000): + # Raise the default from 8192 to accommodate large tokens + eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line + self.pool = eventlet.GreenPool(threads) + self.name = name + self._launcher = None + self._server = None + + def _get_socket(self, host, port, backlog): + bind_addr = (host, port) + # TODO(dims): eventlet's green dns/socket module does not actually + # support IPv6 in getaddrinfo(). We need to get around this in the + # future or monitor upstream for a fix + try: + info = socket.getaddrinfo(bind_addr[0], + bind_addr[1], + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0] + family = info[0] + bind_addr = info[-1] + except Exception: + LOG.exception("Unable to listen on %(host)s:%(port)s", + {'host': host, 'port': port}) + sys.exit(1) + + if CONF.use_ssl: + if not os.path.exists(CONF.ssl_cert_file): + raise RuntimeError(_("Unable to find ssl_cert_file " + ": %s") % CONF.ssl_cert_file) + + # ssl_key_file is optional because the key may be embedded in the + # certificate file + if CONF.ssl_key_file and not os.path.exists(CONF.ssl_key_file): + raise RuntimeError(_("Unable to find " + "ssl_key_file : %s") % CONF.ssl_key_file) + + # ssl_ca_file is optional + if CONF.ssl_ca_file and not os.path.exists(CONF.ssl_ca_file): + raise RuntimeError(_("Unable to find ssl_ca_file " + ": %s") % CONF.ssl_ca_file) + + def wrap_ssl(sock): + ssl_kwargs = { + 'server_side': True, + 'certfile': CONF.ssl_cert_file, + 'keyfile': CONF.ssl_key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if 
CONF.ssl_ca_file: + ssl_kwargs['ca_certs'] = CONF.ssl_ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + return ssl.wrap_socket(sock, **ssl_kwargs) + + sock = None + retry_until = time.time() + CONF.retry_until_window + while not sock and time.time() < retry_until: + try: + sock = eventlet.listen(bind_addr, + backlog=backlog, + family=family) + if CONF.use_ssl: + sock = wrap_ssl(sock) + except socket.error as err: + with excutils.save_and_reraise_exception() as ctxt: + if err.errno == errno.EADDRINUSE: + ctxt.reraise = False + eventlet.sleep(0.1) + if not sock: + raise RuntimeError(_("Could not bind to %(host)s:%(port)s " + "after trying for %(time)d seconds") % + {'host': host, + 'port': port, + 'time': CONF.retry_until_window}) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + return sock + + def start(self, application, port, host='0.0.0.0', workers=0): + """Run a WSGI server with the given application.""" + self._host = host + self._port = port + backlog = CONF.backlog + + self._socket = self._get_socket(self._host, + self._port, + backlog=backlog) + if workers < 1: + # For the case where only one process is required. + self._server = self.pool.spawn(self._run, application, + self._socket) + systemd.notify_once() + else: + # Minimize the cost of checking for child exit by extending the + # wait interval past the default of 0.01s. 
+ self._launcher = common_service.ProcessLauncher(CONF, + wait_interval=1.0) + self._server = WorkerService(self, application) + self._launcher.launch_service(self._server, workers=workers) + + @property + def host(self): + return self._socket.getsockname()[0] if self._socket else self._host + + @property + def port(self): + return self._socket.getsockname()[1] if self._socket else self._port + + def stop(self): + if self._launcher: + # The process launcher does not support stop or kill. + self._launcher.running = False + else: + self._server.kill() + + def wait(self): + """Wait until all servers have completed running.""" + try: + if self._launcher: + self._launcher.wait() + else: + self.pool.waitall() + except KeyboardInterrupt: + pass + + def _run(self, application, socket): + """Start a WSGI server in a new green thread.""" + eventlet.wsgi.server(socket, application, custom_pool=self.pool, + log=LOG) + + +class Middleware(object): + """Base WSGI middleware wrapper. + + These classes require an application to be initialized that will be called + next. By default the middleware will simply call its wrapped app, or you + can override __call__ to customize its behavior. + """ + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [filter:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [filter:analytics] + redis_host = 127.0.0.1 + paste.filter_factory = nova.api.analytics:Analytics.factory + + which would result in a call to the `Analytics` class as + + import nova.api.analytics + analytics.Analytics(app_from_paste, redis_host='127.0.0.1') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. 
+ + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) + + +class Request(webob.Request): + + def best_match_content_type(self): + """Determine the most acceptable content-type. + + Based on: + 1) URI extension (.json) + 2) Content-type header + 3) Accept* headers + """ + # First lookup http request path + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + _format = parts[1] + if _format in ['json']: + return 'application/{0}'.format(_format) + + # Then look up content header + type_from_header = self.get_content_type() + if type_from_header: + return type_from_header + ctypes = ['application/json'] + + # Finally search in Accept-* headers + bm = self.accept.best_match(ctypes) + return bm or 'application/json' + + def get_content_type(self): + allowed_types = ("application/json") + if "Content-Type" not in self.headers: + LOG.debug("Missing Content-Type") + return None + _type = self.content_type + if _type in allowed_types: + return _type + return None + + def best_match_language(self): + """Determines best available locale from the Accept-Language header. + + :returns: the best language match or None if the 'Accept-Language' + header was not available in the request. 
+ """ + if not self.accept_language: + return None + all_languages = i18n.get_available_languages('apmec') + return self.accept_language.best_match(all_languages) + + @property + def context(self): + if 'apmec.context' not in self.environ: + self.environ['apmec.context'] = context.get_admin_context() + return self.environ['apmec.context'] + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() + + +class DictSerializer(ActionDispatcher): + """Default request body serialization.""" + + def serialize(self, data, action='default'): + return self.dispatch(data, action=action) + + def default(self, data): + return "" + + +class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization.""" + + def default(self, data): + def sanitizer(obj): + return six.text_type(obj) + return encode_body(jsonutils.dumps(data, default=sanitizer)) + + +class ResponseHeaderSerializer(ActionDispatcher): + """Default response headers serialization.""" + + def serialize(self, response, data, action): + self.dispatch(response, data, action=action) + + def default(self, response, data): + response.status_int = 200 + + +class ResponseSerializer(object): + """Encode the necessary pieces into a response object.""" + + def __init__(self, body_serializers=None, headers_serializer=None): + self.body_serializers = { + 'application/json': JSONDictSerializer(), + } + self.body_serializers.update(body_serializers or {}) + + self.headers_serializer = (headers_serializer or + ResponseHeaderSerializer()) + + def serialize(self, response_data, content_type, action='default'): + """Serialize a dict into a string and wrap in a wsgi.Request object. 
+ + :param response_data: dict produced by the Controller + :param content_type: expected mimetype of serialized response body + + """ + response = webob.Response() + self.serialize_headers(response, response_data, action) + self.serialize_body(response, response_data, content_type, action) + return response + + def serialize_headers(self, response, data, action): + self.headers_serializer.serialize(response, data, action) + + def serialize_body(self, response, data, content_type, action): + response.headers['Content-Type'] = content_type + if data is not None: + serializer = self.get_body_serializer(content_type) + response.body = serializer.serialize(data, action) + + def get_body_serializer(self, content_type): + try: + return self.body_serializers[content_type] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + +class TextDeserializer(ActionDispatcher): + """Default request body deserialization.""" + + def deserialize(self, datastring, action='default'): + return self.dispatch(datastring, action=action) + + def default(self, datastring): + return {} + + +class JSONDeserializer(TextDeserializer): + + def _from_json(self, datastring): + try: + return jsonutils.loads(datastring) + except ValueError: + msg = _("Cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + def default(self, datastring): + return {'body': self._from_json(datastring)} + + +class RequestHeadersDeserializer(ActionDispatcher): + """Default request headers deserializer.""" + + def deserialize(self, request, action): + return self.dispatch(request, action=action) + + def default(self, request): + return {} + + +class RequestDeserializer(object): + """Break up a Request object into more useful pieces.""" + + def __init__(self, body_deserializers=None, headers_deserializer=None): + self.body_deserializers = { + 'application/json': JSONDeserializer(), + } + self.body_deserializers.update(body_deserializers or {}) + + 
self.headers_deserializer = (headers_deserializer or + RequestHeadersDeserializer()) + + def deserialize(self, request): + """Extract necessary pieces of the request. + + :param request: Request object + :returns: tuple of expected controller action name, dictionary of + keyword arguments to pass to the controller, the expected + content type of the response + + """ + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + + action_args.update(self.deserialize_headers(request, action)) + action_args.update(self.deserialize_body(request, action)) + + accept = self.get_expected_content_type(request) + + return (action, action_args, accept) + + def deserialize_headers(self, request, action): + return self.headers_deserializer.deserialize(request, action) + + def deserialize_body(self, request, action): + try: + content_type = request.best_match_content_type() + except exception.InvalidContentType: + LOG.debug("Unrecognized Content-Type provided in request") + return {} + + if content_type is None: + LOG.debug("No Content-Type provided in request") + return {} + + if not len(request.body) > 0: + LOG.debug("Empty body provided in request") + return {} + + try: + deserializer = self.get_body_deserializer(content_type) + except exception.InvalidContentType: + with excutils.save_and_reraise_exception(): + LOG.debug("Unable to deserialize body as provided " + "Content-Type") + + return deserializer.deserialize(request.body, action) + + def get_body_deserializer(self, content_type): + try: + return self.body_deserializers[content_type] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + def get_expected_content_type(self, request): + return request.best_match_content_type() + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except Exception: + return {} + + try: + 
del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args + + +class Application(object): + """Base WSGI application wrapper. Subclasses need to implement __call__.""" + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [app:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [app:wadl] + latest_version = 1.3 + paste.app_factory = nova.api.fancy_api:Wadl.factory + + which would result in a call to the `Wadl` class as + + import apmec.api.fancy_api + fancy_api.Wadl(latest_version='1.3') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + return cls(**local_config) + + def __call__(self, environ, start_response): + r"""Subclasses will probably want to implement __call__ like this: + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + # Any of the following objects work as responses: + + # Option 1: simple string + res = 'message\n' + + # Option 2: a nicely formatted HTTP exception page + res = exc.HTTPForbidden(explanation='Nice try') + + # Option 3: a webob Response object (in case you need to play with + # headers, or you want to be treated like an iterable, or or) + res = Response(); + res.app_iter = open('somefile') + + # Option 4: any wsgi app to be run next + res = self.application + + # Option 5: you can get a Response object for a wsgi app, too, to + # play with headers etc + res = req.get_response(self.application) + + # You can then just return your response... + return res + # ... or set req.response and return None. + req.response = res + + See the end of http://pythonpaste.org/webob/modules/dec.html + for more info. 
+ + """ + raise NotImplementedError(_('You must implement __call__')) + + +class Debug(Middleware): + """Middleware for debugging. + + Helper class that can be inserted into any WSGI application chain + to get information about the request and response. + """ + + @webob.dec.wsgify + def __call__(self, req): + print(("*" * 40) + " REQUEST ENVIRON") + for key, value in req.environ.items(): + print(key, "=", value) + print() + resp = req.get_response(self.application) + + print(("*" * 40) + " RESPONSE HEADERS") + for (key, value) in (resp.headers).items(): + print(key, "=", value) + print() + + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """Print contents of a wrapper string iterator when iterated.""" + print(("*" * 40) + " BODY") + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print() + + +class Router(object): + """WSGI middleware that maps incoming requests to WSGI apps.""" + + @classmethod + def factory(cls, global_config, **local_config): + """Return an instance of the WSGI Router class.""" + return cls() + + def __init__(self, mapper): + """Create a router for the given routes.Mapper. + + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be a wsgi.Controller, who will route + the request to the action method. + + Examples: + mapper = routes.Mapper() + sc = ServerController() + + # Explicit mapping of one route to a controller+action + mapper.connect(None, "/svrlist", controller=sc, action="list") + + # Actions are all implicitly defined + mapper.resource("network", "networks", controller=nc) + + # Pointing to an arbitrary WSGI app. You can specify the + # {path_info:.*} parameter so the target app can be handed just that + # section of the URL. 
+ mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) + """ + self.map = mapper + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + self.map) + + @webob.dec.wsgify + def __call__(self, req): + """Route the incoming request to a controller based on self.map. + + If no match, return a 404. + """ + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=Request) + def _dispatch(req): + """Dispatch a Request. + + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. Either returns 404 + or the routed WSGI app's response. + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + language = req.best_match_language() + msg = _('The resource could not be found.') + msg = i18n.translate(msg, language) + return webob.exc.HTTPNotFound(explanation=msg) + app = match['controller'] + return app + + +class Resource(Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). + They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. + + """ + + def __init__(self, controller, fault_body_function, + deserializer=None, serializer=None): + """Object initialization. 
+ + :param controller: object that implement methods created by routes lib + :param deserializer: object that can serialize the output of a + controller into a webob response + :param serializer: object that can deserialize a webob request + into necessary pieces + :param fault_body_function: a function that will build the response + body for HTTP errors raised by operations + on this resource object + + """ + self.controller = controller + self.deserializer = deserializer or RequestDeserializer() + self.serializer = serializer or ResponseSerializer() + self._fault_body_function = fault_body_function + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + + LOG.info("%(method)s %(url)s", {"method": request.method, + "url": request.url}) + + try: + action, args, accept = self.deserializer.deserialize(request) + except exception.InvalidContentType: + LOG.exception("InvalidContentType: Unsupported Content-Type") + return Fault(webob.exc.HTTPBadRequest( + explanation=_("Unsupported Content-Type"))) + except exception.MalformedRequestBody: + LOG.exception("MalformedRequestBody: Malformed request body") + return Fault(webob.exc.HTTPBadRequest( + explanation=_("Malformed request body"))) + + try: + action_result = self.dispatch(request, action, args) + except webob.exc.HTTPException as ex: + LOG.info("HTTP exception thrown: %s", ex) + action_result = Fault(ex, + self._fault_body_function) + except Exception: + LOG.exception("Internal error") + # Do not include the traceback to avoid returning it to clients. 
+ action_result = Fault(webob.exc.HTTPServerError(), + self._fault_body_function) + + if isinstance(action_result, dict) or action_result is None: + response = self.serializer.serialize(action_result, + accept, + action=action) + else: + response = action_result + + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError as e: + msg_dict = dict(url=request.url, exception=e) + msg = _("%(url)s returned a fault: %(exception)s") % msg_dict + + LOG.info(msg) + + return response + + def dispatch(self, request, action, action_args): + """Find action-spefic method on controller and call it.""" + + controller_method = getattr(self.controller, action) + try: + # NOTE(salvatore-orlando): the controller method must have + # an argument whose name is 'request' + return controller_method(request=request, **action_args) + except TypeError as exc: + LOG.exception(exc) + return Fault(webob.exc.HTTPBadRequest()) + + +def _default_body_function(wrapped_exc): + code = wrapped_exc.status_int + fault_data = { + 'Error': { + 'code': code, + 'message': wrapped_exc.explanation}} + # 'code' is an attribute on the fault tag itself + metadata = {'attributes': {'Error': 'code'}} + return fault_data, metadata + + +class Fault(webob.exc.HTTPException): + """Generates an HTTP response from a webob HTTP exception.""" + + def __init__(self, exception, body_function=None): + """Creates a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + self.status_int = self.wrapped_exc.status_int + self._body_function = body_function or _default_body_function + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + # Replace the body with fault details. 
+ fault_data, metadata = self._body_function(self.wrapped_exc) + content_type = req.best_match_content_type() + serializer = { + 'application/json': JSONDictSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) + self.wrapped_exc.content_type = content_type + return self.wrapped_exc + + +# NOTE(salvatore-orlando): this class will go once the +# extension API framework is updated +class Controller(object): + """WSGI app that dispatched to methods. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon itself. All action methods + must, in addition to their normal parameters, accept a 'req' argument + which is the incoming wsgi.Request. They raise a webob.exc exception, + or return a dict which will be serialized by requested content type. + + """ + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Call the method specified in req.environ by RoutesMiddleware.""" + arg_dict = req.environ['wsgiorg.routing_args'][1] + action = arg_dict['action'] + method = getattr(self, action) + del arg_dict['controller'] + del arg_dict['action'] + if 'format' in arg_dict: + del arg_dict['format'] + arg_dict['request'] = req + result = method(**arg_dict) + + if isinstance(result, dict) or result is None: + if result is None: + status = 204 + content_type = '' + body = None + else: + status = 200 + content_type = req.best_match_content_type() + body = self._serialize(result, content_type) + + response = webob.Response(status=status, + content_type=content_type, + body=body) + msg_dict = dict(url=req.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + LOG.debug(msg) + return response + else: + return result + + def _serialize(self, data, content_type): + """Serialize the given dict to the provided content_type. 
+ + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. + + """ + _metadata = getattr(type(self), '_serialization_metadata', {}) + + serializer = Serializer(_metadata) + try: + return serializer.serialize(data, content_type) + except exception.InvalidContentType: + msg = _('The requested content type %s is invalid.') % content_type + raise webob.exc.HTTPNotAcceptable(msg) + + def _deserialize(self, data, content_type): + """Deserialize the request body to the specefied content type. + + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. + + """ + _metadata = getattr(type(self), '_serialization_metadata', {}) + serializer = Serializer(_metadata) + return serializer.deserialize(data, content_type)['body'] + + +# NOTE(salvatore-orlando): this class will go once the +# extension API framework is updated +class Serializer(object): + """Serializes and deserializes dictionaries to certain MIME types.""" + + def __init__(self, metadata=None): + """Create a serializer based on the given WSGI environment. + + 'metadata' is an optional dict mapping MIME types to information + needed to serialize a dictionary to that type. + + """ + self.metadata = metadata or {} + + def _get_serialize_handler(self, content_type): + handlers = { + 'application/json': JSONDictSerializer(), + } + + try: + return handlers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) + + def serialize(self, data, content_type): + """Serialize a dictionary into the specified content type.""" + return self._get_serialize_handler(content_type).serialize(data) + + def deserialize(self, datastring, content_type): + """Deserialize a string to a dictionary. + + The string must be in the format of a supported MIME type. 
+ + """ + try: + return self.get_deserialize_handler(content_type).deserialize( + datastring) + except Exception: + raise webob.exc.HTTPBadRequest(_("Could not deserialize data")) + + def get_deserialize_handler(self, content_type): + handlers = { + 'application/json': JSONDeserializer(), + } + + try: + return handlers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 0000000..15cd6cb --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/devstack/README.rst b/devstack/README.rst new file mode 100644 index 0000000..5db340f --- /dev/null +++ b/devstack/README.rst @@ -0,0 +1 @@ +.. include:: ../doc/source/install/devstack.rst diff --git a/devstack/lib/apmec b/devstack/lib/apmec new file mode 100644 index 0000000..7b35cef --- /dev/null +++ b/devstack/lib/apmec @@ -0,0 +1,482 @@ +#!/bin/bash +# +# lib/apmec +# functions - functions specific to apmec + +# Dependencies: +# ``functions`` file +# ``DEST`` must be defined +# ``STACK_USER`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - install_apmec +# - configure_apmec +# - create_apmec_accounts +# - init_apmec +# - start_apmec +# - apmec_horizon_install +# - apmec_client_install +# - apmec_create_initial_network + +# +# ``unstack.sh`` calls the entry points in this order: +# +# - stop_apmec +# - cleanup_apmec + +# Apmec +# --------------- +CUSTOM_BASE="https://pineunity:Doantung92*@github.com/pineunity" +EXTRA_CUSTOM_BASE="git+https://pineunity:Doantung92*@github.com/pineunity" +#LIB_DEST=/usr/local/lib/python2.7/dist-packages + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +if is_ssl_enabled_service "apmec" || is_service_enabled tls-proxy; then + APMEC_PROTOCOL="https" +fi + +# Set up default directories +GITREPO["apmec-horizon"]=${APMECHORIZON_REPO:-${CUSTOM_BASE}/apmec-horizon.git} 
+GITBRANCH["apmec-horizon"]=${APMECHORIZON_BRANCH:-master} +GITDIR["apmec-horizon"]=$DEST/apmec-horizon + +GITREPO["python-apmecclient"]=${APMECHORIZON_REPO:-${CUSTOM_BASE}/python-apmecclient.git} +GITBRANCH["python-apmecclient"]=${APMECHORIZON_BRANCH:-apmec-nfv} +GITDIR["python-apmecclient"]=$DEST/python-apmecclient + +#GITREPO["mec-tosca-parser"]=${APMECHORIZON_REPO:-${CUSTOM_BASE}/mec-tosca-parser.git} +#GITBRANCH["mec-tosca-parser"]=${APMECHORIZON_BRANCH:-master} +#GITDIR["mec-tosca-parser"]=/$LIB_DEST/toscaparser + +#GITREPO["mec-heat-translator"]=${APMECHORIZON_REPO:-${CUSTOM_BASE}/mec-heat-translator.git} +#GITBRANCH["mec-heat-translator"]=${APMECHORIZON_BRANCH:-master} +#GITDIR["mec-heat-translator"]=$LIB_DEST/heat_translator + + +APMEC_DIR=$DEST/apmec +APMEC_AUTH_CACHE_DIR=${APMEC_AUTH_CACHE_DIR:-/var/cache/apmec} + +# Support entry points installation of console scripts +if [[ -d $APMEC_DIR/bin/apmec-server ]]; then + APMEC_BIN_DIR=$APMEC_DIR/bin +else + APMEC_BIN_DIR=$(get_python_exec_prefix) +fi + +APMEC_CONF_DIR=/etc/apmec +APMEC_CONF=$APMEC_CONF_DIR/apmec.conf + +# Default name for Apmec database +APMEC_DB_NAME=${APMEC_DB_NAME:-apmec} +# Default Apmec Port +APMEC_PORT=${APMEC_PORT:-9896} +# Default Apmec Internal Port when using TLS proxy +APMEC_PORT_INT=${APMEC_PORT_INT:-19896} # TODO(FIX) +# Default Apmec Host +APMEC_HOST=${APMEC_HOST:-$SERVICE_HOST} +# Default protocol +APMEC_PROTOCOL=${APMEC_PROTOCOL:-$SERVICE_PROTOCOL} +# Default admin username +APMEC_ADMIN_USERNAME=${APMEC_ADMIN_USERNAME:-apmec} +# Default auth strategy +APMEC_AUTH_STRATEGY=${APMEC_AUTH_STRATEGY:-keystone} +APMEC_USE_ROOTWRAP=${APMEC_USE_ROOTWRAP:-True} + +APMEC_RR_CONF_FILE=$APMEC_CONF_DIR/rootwrap.conf +if [[ "$APMEC_USE_ROOTWRAP" == "False" ]]; then + APMEC_RR_COMMAND="sudo" +else + APMEC_ROOTWRAP=$(get_rootwrap_location apmec) + APMEC_RR_COMMAND="sudo $APMEC_ROOTWRAP $APMEC_RR_CONF_FILE" +fi + +APMEC_NOVA_URL=${APMEC_NOVA_URL:-http://127.0.0.1:8774/v2} 
+APMEC_NOVA_CA_CERTIFICATES_FILE=${APMEC_NOVA_CA_CERTIFICATES_FILE:-} +APMEC_NOVA_API_INSECURE=${APMEC_NOVA_API_INSECURE:-False} + +# Tell Tempest this project is present +# TEMPEST_SERVICES+=,apmec + +HEAT_CONF_DIR=/etc/heat + +# Functions +# --------- +# Test if any Apmec services are enabled +# is_apmec_enabled +function is_apmec_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"apmec" ]] && return 0 + return 1 +} + +# create_apmec_cache_dir() - Part of the _apmec_setup_keystone() process +function create_apmec_cache_dir { + # Create cache dir + sudo install -d -o $STACK_USER $APMEC_AUTH_CACHE_DIR + rm -f $APMEC_AUTH_CACHE_DIR/* +} + +# create_apmec_accounts() - Set up common required apmec accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service apmec admin # if enabled + +# Migrated from keystone_data.sh +function create_apmec_accounts { + if is_service_enabled apmec; then + create_service_user "apmec" + get_or_create_role "advsvc" + create_service_user "apmec" "advsvc" + + local apmec_service=$(get_or_create_service "apmec" \ + "mec-orchestration" "Apmec MEC Orchestration Service") + get_or_create_endpoint $apmec_service \ + "$REGION_NAME" \ + "$APMEC_PROTOCOL://$SERVICE_HOST:$APMEC_PORT/" \ + "$APMEC_PROTOCOL://$SERVICE_HOST:$APMEC_PORT/" \ + "$APMEC_PROTOCOL://$SERVICE_HOST:$APMEC_PORT/" + fi +} + +# stack.sh entry points +# --------------------- + +# init_apmec() - Initialize databases, etc. 
+function init_apmec { + recreate_database $APMEC_DB_NAME + + # Run Apmec db migrations + $APMEC_BIN_DIR/apmec-db-manage --config-file $APMEC_CONF upgrade head +} + +# install_apmec() - Collect source and prepare +function install_apmec { + setup_develop $APMEC_DIR +} + +function start_apmec { + local cfg_file_options="--config-file $APMEC_CONF" + local service_port=$APMEC_PORT + local service_protocol=$APMEC_PROTOCOL + if is_service_enabled tls-proxy; then + service_port=$APMEC_PORT_INT + service_protocol="http" + fi + # Start apmec conductor + run_process apmec-conductor "$APMEC_BIN_DIR/apmec-conductor $cfg_file_options" + # Start the Apmec service + run_process apmec "$APMEC_BIN_DIR/apmec-server $cfg_file_options" + echo "Waiting for Apmec to start..." + if is_ssl_enabled_service "apmec"; then + ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" + fi + if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$APMEC_HOST:$service_port; do sleep 1; done"; then + die $LINENO "Apmec did not start" + fi + # Start proxy if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $APMEC_PORT $APMEC_HOST $APMEC_PORT_INT & + fi +} + +# stop_apmec() - Stop running processes (non-screen) +function stop_apmec { + stop_process apmec + stop_process apmec-conductor +} + +# cleanup_apmec() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_apmec { + sudo rm -rf $APMEC_AUTH_CACHE_DIR +} + + +function _create_apmec_conf_dir { + # Put config files in ``APMEC_CONF_DIR`` for everyone to find + sudo install -d -o $STACK_USER $APMEC_CONF_DIR +} + +# configure_apmec() +# Set common config for all apmec server and agents. 
+function configure_apmec { + _create_apmec_conf_dir + + cd $APMEC_DIR + ./tools/generate_config_file_sample.sh + cd - + + cp $APMEC_DIR/etc/apmec/apmec.conf.sample $APMEC_CONF + + iniset_rpc_backend apmec $APMEC_CONF + + iniset $APMEC_CONF database connection `database_connection_url $APMEC_DB_NAME` + iniset $APMEC_CONF DEFAULT state_path $DATA_DIR/apmec + iniset $APMEC_CONF DEFAULT use_syslog $SYSLOG + + # Format logging + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + setup_colorized_logging $APMEC_CONF DEFAULT project_id + else + # Show user_name and project_name by default like in nova + iniset $APMEC_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + fi + + if is_service_enabled tls-proxy; then + # Set the service port for a proxy to take the original + iniset $APMEC_CONF DEFAULT bind_port "$APMEC_PORT_INT" + fi + + if is_ssl_enabled_service "apmec"; then + ensure_certificates APMEC + + iniset $APMEC_CONF DEFAULT use_ssl True + iniset $APMEC_CONF DEFAULT ssl_cert_file "$APMEC_SSL_CERT" + iniset $APMEC_CONF DEFAULT ssl_key_file "$APMEC_SSL_KEY" + fi + + # server + APMEC_API_PASTE_FILE=$APMEC_CONF_DIR/api-paste.ini + APMEC_POLICY_FILE=$APMEC_CONF_DIR/policy.json + + cp $APMEC_DIR/etc/apmec/api-paste.ini $APMEC_API_PASTE_FILE + cp $APMEC_DIR/etc/apmec/policy.json $APMEC_POLICY_FILE + + # allow apmec user to administer apmec to match apmec account + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:apmec"/g' $APMEC_POLICY_FILE + + iniset $APMEC_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $APMEC_CONF DEFAULT policy_file $APMEC_POLICY_FILE + + iniset $APMEC_CONF DEFAULT auth_strategy $APMEC_AUTH_STRATEGY + _apmec_setup_keystone $APMEC_CONF keystone_authtoken + + if [[ "${APMEC_MODE}" == "all" ]]; then + iniset "/$Q_PLUGIN_CONF_FILE" ml2 extension_drivers port_security + iniset 
"/$Q_PLUGIN_CONF_FILE" ml2_type_flat flat_networks $PUBLIC_PHYSICAL_NETWORK,$MGMT_PHYS_NET + iniset "/$Q_PLUGIN_CONF_FILE" ovs bridge_mappings $PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE,$MGMT_PHYS_NET:$BR_MGMT + + # Experimental settings for monitor alarm auth settings, + # Will be changed according to new implementation. + iniset $APMEC_CONF alarm_auth username admin + iniset $APMEC_CONF alarm_auth password "$ADMIN_PASSWORD" + iniset $APMEC_CONF alarm_auth project_name admin + iniset $APMEC_CONF alarm_auth url http://$SERVICE_HOST:35357/v3 + + echo "Creating bridge" + sudo ovs-vsctl --may-exist add-br ${BR_MGMT} + fi + if [[ "${USE_BARBICAN}" == "True" ]]; then + iniset $APMEC_CONF vim_keys use_barbican True + fi + _apmec_setup_rootwrap +} + +# Utility Functions +#------------------ + +# _apmec_deploy_rootwrap_filters() - deploy rootwrap filters to $APMEC_CONF_ROOTWRAP_D (owned by root). +function _apmec_deploy_rootwrap_filters { + local srcdir=$1 + sudo install -d -o root -m 755 $APMEC_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/apmec/rootwrap.d/* $APMEC_CONF_ROOTWRAP_D/ +} + +# _apmec_setup_rootwrap() - configure Apmec's rootwrap +function _apmec_setup_rootwrap { + if [[ "$APMEC_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Wipe any existing ``rootwrap.d`` files first + APMEC_CONF_ROOTWRAP_D=$APMEC_CONF_DIR/rootwrap.d + if [[ -d $APMEC_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $APMEC_CONF_ROOTWRAP_D + fi + + _apmec_deploy_rootwrap_filters $APMEC_DIR + + sudo install -o root -g root -m 644 $APMEC_DIR/etc/apmec/rootwrap.conf $APMEC_RR_CONF_FILE + sudo sed -e "s:^filters_path=.*$:filters_path=$APMEC_CONF_ROOTWRAP_D:" -i $APMEC_RR_CONF_FILE + # Specify ``rootwrap.conf`` as first parameter to apmec-rootwrap + ROOTWRAP_SUDOER_CMD="$APMEC_ROOTWRAP $APMEC_RR_CONF_FILE *" + + # Set up the rootwrap sudoers for apmec + TEMPFILE=`mktemp` + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root 
$TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/apmec-rootwrap + + # Update the root_helper + iniset $APMEC_CONF agent root_helper "$APMEC_RR_COMMAND" +} + +# Configures keystone integration for apmec service and agents +function _apmec_setup_keystone { + local conf_file=$1 + local section=$2 + local use_auth_url=$3 + + # Configures keystone for metadata_agent + # metadata_agent needs auth_url to communicate with keystone + if [[ "$use_auth_url" == "True" ]]; then + iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI/v2.0 + fi + + create_apmec_cache_dir + configure_auth_token_middleware $conf_file $APMEC_ADMIN_USERNAME $APMEC_AUTH_CACHE_DIR $section +} + +function mec_tosca_parser_install { + sudo pip install ${EXTRA_CUSTOM_BASE}/mec-tosca-parser.git +} + +function mec_heat_translator_install { + sudo pip install ${EXTRA_CUSTOM_BASE}/mec-heat-translator.git + +} + +function apmec_horizon_install { + git_clone_by_name "apmec-horizon" + setup_dev_lib "apmec-horizon" + sudo cp $DEST/apmec-horizon/apmec_horizon/enabled/* $DEST/horizon/openstack_dashboard/enabled/ + restart_apache_server +} + +function apmec_client_install { + git_clone_by_name "python-apmecclient" + setup_dev_lib "python-apmecclient" + cd $DEST/python-apmecclient + sudo python setup.py install + cd $TOP_DIR +} + +function openstack_image_create { + image=$1 + disk_format=raw + container_format=bare + image_name=$2 + openstack --os-cloud=devstack-admin image create $image_name --public --container-format=$container_format --disk-format $disk_format --file ${image} + openstack image show $image_name -f value -c id +} + +function apmec_check_and_download_images { + local image_url + image_url[0]='http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img' + image_url[1]='https://downloads.openwrt.org/chaos_calmer/15.05/x86/kvm_guest/openwrt-15.05-x86-kvm_guest-combined-ext4.img.gz' + + local image_fname image_name glance_name gz_pattern + local length=${#image_url[@]} + local index=0 + while [ 
$index -lt $length ] + do + image_fname=`basename "${image_url[$index]}"` + glance_name=${image_fname%.*} + if [[ $glance_name =~ 'openwrt' ]]; then + glance_name='OpenWRT' + fi + image_name=`openstack image list | grep "$glance_name" | awk '{print $4}'` + if [[ $image_name == "" ]]; then + if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then + { + wget --progress=dot:giga -c ${image_url[$index]} -O $FILES/$image_fname + gz_pattern="\.gz$" + if [[ $image_fname =~ $gz_pattern ]]; then + new_image_fname=${image_fname%.*} + gunzip -c $FILES/$image_fname > $FILES/$new_image_fname + image_fname=$new_image_fname + fi + openstack_image_create $FILES/$image_fname $glance_name + }||{ + echo "ERROR: apmec image create for $image_fname failed" + image_fname=$image_fname"*" + sudo rm -rf $FILES/$image_fname + exit 1 + } + fi + fi + index=$(($index+1)) + done +} + +function apmec_create_initial_network { + # create necessary networks + # prepare network + echo "Deleting networks" + for net in ${NET_MGMT} ${NET0} ${NET1} + do + for i in $(openstack network list | awk "/${net}/{print \$2}") + do + openstack network delete $i + done + done + + echo "Creating networks" + NET_MGMT_ID=$(openstack network create --provider-network-type flat --provider-physical-network ${MGMT_PHYS_NET} --share ${NET_MGMT} | awk '/ id /{print $4}') + SUBNET_MGMT_ID=$(openstack subnet create ${SUBNET_MGMT} --ip-version 4 --gateway ${NETWORK_GATEWAY_MGMT} --network ${NET_MGMT_ID} --subnet-range ${FIXED_RANGE_MGMT} | awk '/ id /{print $4}') + NET0_ID=$(openstack network create --share ${NET0} | awk '/ id /{print $4}') + SUBNET0_ID=$(openstack subnet create ${SUBNET0} --ip-version 4 --gateway ${NETWORK_GATEWAY0} --network ${NET0_ID} --subnet-range ${FIXED_RANGE0} | awk '/ id /{print $4}') + NET1_ID=$(openstack network create --share ${NET1} | awk '/ id /{print $4}') + SUBNET1_ID=$(openstack subnet create ${SUBNET1} --ip-version 4 --gateway ${NETWORK_GATEWAY1} --network 
${NET1_ID} --subnet-range ${FIXED_RANGE1} | awk '/ id /{print $4}') + + echo "Assign ip address to BR_MGMT" + sudo ip link set ${BR_MGMT} up + sudo ip -4 address flush dev ${BR_MGMT} + sudo ip address add ${NETWORK_GATEWAY_MGMT_IP} dev ${BR_MGMT} +} + +function apmec_register_default_vim { + # Note: These must be the same as in apmec/tests/etc/samples/local-vim.yaml + # and devstack/lib/apmec/vim_config.yaml + DEFAULT_VIM_PROJECT_NAME="mec" + DEFAULT_VIM_USER="mec_user" + DEFAULT_VIM_PASSWORD="devstack" + + echo "Create MEC VIM project $DEFAULT_VIM_PROJECT_NAME ..." + get_or_create_project $DEFAULT_VIM_PROJECT_NAME + echo "Create MEC VIM user $DEFAULT_VIM_USER ..." + get_or_create_user $DEFAULT_VIM_USER $DEFAULT_VIM_PASSWORD + get_or_add_user_project_role "admin" $DEFAULT_VIM_USER $DEFAULT_VIM_PROJECT_NAME + get_or_add_user_project_role "advsvc" $DEFAULT_VIM_USER $DEFAULT_VIM_PROJECT_NAME + + echo "Register default VIM ..." + mkdir -p $DATA_DIR/apmec + cp $APMEC_DIR/devstack/vim_config.yaml $DATA_DIR/apmec + VIM_CONFIG_FILE="$DATA_DIR/apmec/vim_config.yaml" + sed -e "s|^auth_url:.*$|auth_url: \'${KEYSTONE_SERVICE_URI}\'|" -i $VIM_CONFIG_FILE + echo "The content of VIM config file $VIM_CONFIG_FILE :" + cat $VIM_CONFIG_FILE + local default_vim_id + DEFAULT_VIM_NAME="VIM0" + + old_project=$OS_PROJECT_NAME + old_user=$OS_USERNAME + $TOP_DIR/tools/create_userrc.sh -P -u $DEFAULT_VIM_USER -C $DEFAULT_VIM_PROJECT_NAME -p $DEFAULT_VIM_PASSWORD + echo "Switch environment openrc:" + echo $(cat $TOP_DIR/accrc/$DEFAULT_VIM_PROJECT_NAME/$DEFAULT_VIM_USER) + . $TOP_DIR/accrc/$DEFAULT_VIM_PROJECT_NAME/$DEFAULT_VIM_USER + + default_vim_id=$(apmec vim-register --is-default --description "Default VIM" --config-file $VIM_CONFIG_FILE $DEFAULT_VIM_NAME -c id | grep id | awk '{print $4}') + echo "Default VIM registration done as $default_vim_id at $KEYSTONE_SERVICE_URI." + echo "Switch back to old environment openrc:" + echo $(cat $TOP_DIR/accrc/$old_project/$old_user) + . 
$TOP_DIR/accrc/$old_project/$old_user + + echo "Update apmec/tests/etc/samples/local-vim.yaml for functional testing" + functional_vim_file="$APMEC_DIR/apmec/tests/etc/samples/local-vim.yaml" + sed -e "s|^auth_url:.*$|auth_url: \'${KEYSTONE_SERVICE_URI}\'|" -i $functional_vim_file +} + +function modify_heat_flavor_policy_rule { + local policy_file=$HEAT_CONF_DIR/policy.yaml + touch $policy_file + # Allow non-admin projects with 'admin' roles to create flavors in Heat + echo '"resource_types:OS::Nova::Flavor": "role:admin"' >> $policy_file +} diff --git a/devstack/local.conf.example b/devstack/local.conf.example new file mode 100644 index 0000000..09e7351 --- /dev/null +++ b/devstack/local.conf.example @@ -0,0 +1,62 @@ +[[local|localrc]] +############################################################ +# Customize the following HOST_IP based on your installation +############################################################ +HOST_IP=10.18.161.164 + +ADMIN_PASSWORD=devstack +MYSQL_PASSWORD=devstack +RABBIT_PASSWORD=devstack +SERVICE_PASSWORD=$ADMIN_PASSWORD +SERVICE_TOKEN=devstack + +############################################################ +# Customize the following section based on your installation +############################################################ + +# Pip +PIP_USE_MIRRORS=False +USE_GET_PIP=1 + +#OFFLINE=False +#RECLONE=True + +# Logging +LOGFILE=$DEST/logs/stack.sh.log +VERBOSE=True +ENABLE_DEBUG_LOG_LEVEL=True +ENABLE_VERBOSE_LOG_LEVEL=True + +# Neutron ML2 with OpenVSwitch + +Q_PLUGIN=ml2 +Q_AGENT=openvswitch + +#PUBLIC NETWORK CONFIGURATION +Q_USE_PROVIDERNET_FOR_PUBLIC=False +FLOATING_RANGE=10.12.161.0/24 +Q_FLOATING_ALLOCATION_POOL="start=10.12.161.150,end=10.12.161.201" +PUBLIC_NETWORK_NAME=external +PUBLIC_NETWORK_GATEWAY=10.12.161.1 +PUBLIC_PHYSICAL_NETWORK=public + +# Required for l3-agent to connect to external-network-bridge +PUBLIC_BRIDGE=br-ext + +#PRIVATE NETWORK CONFIGURATION + +NETWORK_GATEWAY=${NETWORK_GATEWAY:-15.0.0.1} 
+FIXED_RANGE=${FIXED_RANGE:-15.0.0.0/24} + +enable_plugin heat https://git.openstack.org/openstack/ master +enable_plugin networking-sfc git://git.openstack.org/openstack/networking-sfc master +enable_plugin barbican https://git.openstack.org/openstack/barbican +enable_plugin apmec https://github.com/pineunity/apmec.git master + +enable_service n-novnc +enable_service n-cauth + +disable_service tempest + +#APMEC CONFIGURATION +USE_BARBICAN=True diff --git a/devstack/local.conf.standalone b/devstack/local.conf.standalone new file mode 100644 index 0000000..bed4aa4 --- /dev/null +++ b/devstack/local.conf.standalone @@ -0,0 +1,27 @@ +[[local|localrc]] +############################################################ +# Customize the following HOST_IP based on your installation +############################################################ +HOST_IP=127.0.0.1 +SERVICE_HOST=127.0.0.1 +SERVICE_PASSWORD=devstack +ADMIN_PASSWORD=devstack +SERVICE_TOKEN=devstack +DATABASE_PASSWORD=root +RABBIT_PASSWORD=password +ENABLE_HTTPD_MOD_WSGI_SERVICES=True +KEYSTONE_USE_MOD_WSGI=True + +# Logging +LOGFILE=$DEST/logs/stack.sh.log +VERBOSE=True +ENABLE_DEBUG_LOG_LEVEL=True +ENABLE_VERBOSE_LOG_LEVEL=True +GIT_BASE=${GIT_BASE:-git://git.openstack.org} + +APMEC_MODE=standalone +USE_BARBICAN=True +enable_plugin networking-sfc ${GIT_BASE}/openstack/networking-sfc +enable_plugin barbican ${GIT_BASE}/openstack/barbican +enable_plugin mistral ${GIT_BASE}/openstack/mistral +enable_plugin apmec ${GIT_BASE}/openstack/apmec diff --git a/devstack/local.sh.mysql_fix b/devstack/local.sh.mysql_fix new file mode 100755 index 0000000..a4522ad --- /dev/null +++ b/devstack/local.sh.mysql_fix @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +#IMPORTANT: THIS FIX IS APPLICABLE ONLY FOR DEVSTACK KILO RELEASE +#For devstack master branch, the fix is already provided upstream +# `local.sh.mysql_fixup`` for user-configurable tasks to run automatically +# at the successful conclusion of ``stack.sh``. 
+ +# NOTE: Copy this file to the root DevStack directory for it to work properly. + + + +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Import common functions +. $TOP_DIR/functions + +# Use openrc + stackrc + localrc for settings +. $TOP_DIR/stackrc + +# Destination path for installation ``DEST`` +DEST=${DEST:-/opt/stack} + +echo_summary "Configuring additional parameters for mysql database" + + +if is_service_enabled mysql; then + if is_ubuntu; then + my_conf=/etc/mysql/my.cnf + mysql=mysql + elif is_suse || is_oraclelinux; then + my_conf=/etc/my.cnf + mysql=mysql + elif is_fedora; then + mysql=mariadb + my_conf=/etc/my.cnf + else + exit_distro_not_supported "mysql configuration" + fi + + sudo bash -c ". $TOP_DIR/functions && \ + iniset $my_conf mysqld max_connections 1024 && \ + iniset $my_conf mysqld query_cache_type OFF && \ + iniset $my_conf mysqld query_cache_size 0" + restart_service $mysql +fi diff --git a/devstack/plugin.sh b/devstack/plugin.sh new file mode 100644 index 0000000..48dc474 --- /dev/null +++ b/devstack/plugin.sh @@ -0,0 +1,69 @@ +# plugin.sh - Devstack extras script to install apmec + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set -o xtrace + +echo_summary "apmec's plugin.sh was called..." +. 
$DEST/apmec/devstack/lib/apmec +(set -o posix; set) + +# check for service enabled +if is_service_enabled apmec; then + if [[ "$1" == "stack" && "$2" == "install" ]]; then + # Perform installation of service source + echo_summary "Installing Apmec" + install_apmec + echo_summary "Installing tosca parser" + mec_tosca_parser_install + echo_summary "Installing heat translator" + mec_heat_translator_install + + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # Configure after the other layer 1 and 2 services have been configured + echo_summary "Configuring Apmec" + configure_apmec + create_apmec_accounts + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Installing apmec horizon" + apmec_horizon_install + # Initialize and start the apmec service + echo_summary "Initializing Apmec" + init_apmec + echo_summary "Starting Apmec API and conductor" + start_apmec + echo_summary "Installing apmec client" + apmec_client_install + if [[ "${APMEC_MODE}" == "all" ]]; then + echo_summary "Modifying Heat policy.json file" + modify_heat_flavor_policy_rule + echo_summary "Setup initial apmec network" + apmec_create_initial_network + echo_summary "Check and download images for apmec initial" + apmec_check_and_download_images + echo_summary "Registering default VIM" + apmec_register_default_vim + fi + fi + + if [[ "$1" == "unstack" ]]; then + # Shut down apmec services + stop_apmec + fi + + if [[ "$1" == "clean" ]]; then + # Remove state and transient data + # Remember clean.sh first calls unstack.sh + cleanup_apmec + fi +fi + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: + diff --git a/devstack/settings b/devstack/settings new file mode 100644 index 0000000..f698a5e --- /dev/null +++ b/devstack/settings @@ -0,0 +1,51 @@ +APMEC_MODE=${APMEC_MODE:-all} +USE_BARBICAN=True + +if [ "${APMEC_MODE}" == "all" ]; then + # Nova + disable_service n-net + VIRT_DRIVER=libvirt + + 
#enable_service q-lbaas + #enable_service q-fwaas + + # APMEC_NEUTRON_AGENTS can be disabled by adding + # APMEC_NEUTRON_AGENTS='' in local.conf if neutron agents are running on a separate node + APMEC_NEUTRON_AGENTS=${APMEC_NEUTRON_AGENTS:-q-agt,q-dhcp,q-meta,q-l3} + for i in $(echo $APMEC_NEUTRON_AGENTS | sed 's/,/ /g') + do + enable_service $i + done + + enable_service q-svc + enable_service neutron + + enable_service apmec + # enable apmec-conductor will make systemctl enable conductor service + enable_service apmec-conductor + # apmec-horizon isn't installable from pip + LIBS_FROM_GIT=apmec-horizon + + MGMT_PHYS_NET=${MGMT_PHYS_NET:-mgmtphysnet0} + BR_MGMT=${BR_MGMT:-br-mgmt0} + NET_MGMT=${NET_MGMT:-net_mgmt} + SUBNET_MGMT=${SUBNET_MGMT:-subnet_mgmt} + FIXED_RANGE_MGMT=${FIXED_RANGE_MGMT:-192.168.120.0/24} + NETWORK_GATEWAY_MGMT=${NETWORK_GATEWAY_MGMT:-192.168.120.1} + NETWORK_GATEWAY_MGMT_IP=${NETWORK_GATEWAY_MGMT_IP:-192.168.120.1/24} + + NET0=${NET0:-net0} + SUBNET0=${SUBNET0:-subnet0} + FIXED_RANGE0=${FIXED_RANGE0:-10.10.0.0/24} + NETWORK_GATEWAY0=${NETWORK_GATEWAY0:-10.10.0.1} + + NET1=${NET1:-net1} + SUBNET1=${SUBNET1:-subnet1} + FIXED_RANGE1=${FIXED_RANGE1:-10.10.1.0/24} + NETWORK_GATEWAY1=${NETWORK_GATEWAY1:-10.10.1.1} +elif [ "${APMEC_MODE}" == "standalone" ]; then + # set the enabled services here. 
This will need apmec devstack plugin put as the last one in local.conf + ENABLED_SERVICES=key,horizon,apmec,apmec-conductor,mysql,dstat,barbican,mistral,mistral-api,mistral-engine,mistral-executor,mistral-event-engine +else + die $LINENO "invalid value: $APMEC_MODE for APMEC_MODE" +fi diff --git a/devstack/vim_config.yaml b/devstack/vim_config.yaml new file mode 100644 index 0000000..acdee07 --- /dev/null +++ b/devstack/vim_config.yaml @@ -0,0 +1,6 @@ +auth_url: 'http://localhost:5000' +username: 'mec_user' +password: 'devstack' +project_name: 'mec' +project_domain_name: 'Default' +user_domain_name: 'Default' diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 0000000..b63e300 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,96 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXSOURCE = source +PAPER = +BUILDDIR = build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +.DEFAULT_GOAL = html + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + if [ -f .autogenerated ] ; then \ + cat 
.autogenerated | xargs rm ; \ + rm .autogenerated ; \ + fi + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." 
+ +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess new file mode 100644 index 0000000..d2bf97a --- /dev/null +++ b/doc/source/_extra/.htaccess @@ -0,0 +1,23 @@ +redirectmatch 301 ^/apmec/([^/]+)/devref/mead_template_description.html$ ^/apmec/$1/contributor/mead_template_description.html +redirectmatch 301 ^/apmec/([^/]+)/devref/api_extensions.html$ ^/apmec/$1/contributor/api/api_extensions.html +redirectmatch 301 ^/apmec/([^/]+)/devref/api_layer.html$ ^/apmec/$1/contributor/api/api_layer.html +redirectmatch 301 ^/apmec/([^/]+)/devref/mano_api.html$ ^/apmec/$1/contributor/api/mano_api.html +redirectmatch 301 ^/apmec/([^/]+)/devref/dashboards.html$ ^/apmec/$1/contributor/dashboards.html +redirectmatch 301 ^/apmec/([^/]+)/devref/development.environment.html$ ^/apmec/$1/contributor/development.environment.html +redirectmatch 301 ^/apmec/([^/]+)/devref/encrypt_vim_auth_with_barbican.html$ ^/apmec/$1/contributor/encrypt_vim_auth_with_barbican.html +redirectmatch 301 ^/apmec/([^/]+)/devref/event_logging.html$ ^/apmec/$1/contributor/event_logging.html +redirectmatch 301 ^/apmec/([^/]+)/devref/monitor-api.html$ ^/apmec/$1/contributor/monitor-api.html +redirectmatch 301 ^/apmec/([^/]+)/devref/policy_actions_framework.html$ ^/apmec/$1/contributor/policy_actions_framework.html +redirectmatch 301 ^/apmec/([^/]+)/devref/apmec_conductor.html$ ^/apmec/$1/contributor/apmec_conductor.html +redirectmatch 301 ^/apmec/([^/]+)/devref/apmec_functional_test.html$ ^/apmec/$1/contributor/apmec_functional_test.html +redirectmatch 301 ^/apmec/([^/]+)/devref/apmec_vim_monitoring.html$ ^/apmec/$1/contributor/apmec_vim_monitoring.html +redirectmatch 301 ^/apmec/([^/]+)/devref/mead_template_parameterization.html$ ^/apmec/$1/contributor/mead_template_parameterization.html +redirectmatch 
301 ^/apmec/([^/]+)/devref/mem_usage_guide.html$ ^/apmec/$1/user/mem_usage_guide.html +redirectmatch 301 ^/apmec/([^/]+)/policies/dev-process.html$ ^/apmec/$1/contributor/dev-process.html +redirectmatch 301 ^/apmec/([^/]+)/devref/mistral_workflows_usage_guide.html$ ^/apmec/$1/reference/mistral_workflows_usage_guide.html +redirectmatch 301 ^/apmec/([^/]+)/devref/alarm_monitoring_usage_guide.html$ ^/apmec/$1/user/alarm_monitoring_usage_guide.html +redirectmatch 301 ^/apmec/([^/]+)/devref/enhanced_placement_awareness_usage_guide.html$ ^/apmec/$1/user/enhanced_placement_awareness_usage_guide.html +redirectmatch 301 ^/apmec/([^/]+)/devref/multisite_vim_usage_guide.html$ ^/apmec/$1/user/multisite_vim_usage_guide.html +redirectmatch 301 ^/apmec/([^/]+)/devref/mesd_usage_guide.html$ ^/apmec/$1/user/mesd_usage_guide.html +redirectmatch 301 ^/apmec/([^/]+)/devref/scale_usage_guide.html$ ^/apmec/$1/user/scale_usage_guide.html +redirectmatch 301 ^/apmec/([^/]+)/devref/mea_component_usage_guide.html$ ^/apmec/$1/user/mea_component_usage_guide.html diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 0000000..d72ea9d --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,91 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +sys.path.insert(0, os.path.abspath('../..')) +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. 
They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + #'sphinx.ext.intersphinx', + 'stevedore.sphinxext', + 'openstackdocstheme', +] + +# autodoc generation is a bit aggressive and a nuisance when doing heavy +# text edit cycles. +# execute "export SPHINX_DEBUG=1" in your terminal to disable + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'apmec' +copyright = u'2013, OpenStack Foundation' + +# If true, '()' will be appended to :func: etc. cross-reference text. +add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['apmec.'] + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_static_path = ['static'] +html_theme = 'openstackdocs' + +# Output file base name for HTML help builder. +htmlhelp_basename = '%sdoc' % project + +# Add any paths that contain "extra" files, such as .htaccess or +# robots.txt. +html_extra_path = ['_extra'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' +html_last_updated_fmt = '%Y-%m-%d %H:%M' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). 
+latex_documents = [ + ('index', + '%s.tex' % project, + u'%s Documentation' % project, + u'OpenStack Foundation', 'manual'), +] + +# Example configuration for intersphinx: refer to the Python standard library. +#intersphinx_mapping = {'http://docs.python.org/': None} + +# -- Options for openstackdocstheme ------------------------------------------- +repository_name = 'openstack/apmec' +bug_project = 'apmec' +bug_tag = '' diff --git a/doc/source/contributor/api/api_extensions.rst b/doc/source/contributor/api/api_extensions.rst new file mode 100644 index 0000000..664b581 --- /dev/null +++ b/doc/source/contributor/api/api_extensions.rst @@ -0,0 +1,18 @@ +============== +API Extensions +============== + +API extensions is the standard way of introducing new functionality +to the Apmec project, it allows plugins to determine if they wish +to support the functionality or not. + +Examples +======== + +The easiest way to demonstrate how an API extension is written, is +by studying an existing API extension and explaining the different layers. + +.. toctree:: + :maxdepth: 1 + + mano_api.rst diff --git a/doc/source/contributor/api/api_layer.rst b/doc/source/contributor/api/api_layer.rst new file mode 100644 index 0000000..af45173 --- /dev/null +++ b/doc/source/contributor/api/api_layer.rst @@ -0,0 +1,60 @@ +Apmec WSGI/HTTP API layer +=========================== + +This section will cover the internals of Apmec's HTTP API, and the classes +in Apmec that can be used to create Extensions to the Apmec API. + +Python web applications interface with webservers through the Python Web +Server Gateway Interface (WSGI) - defined in `PEP 333 `_ + +Startup +------- + +Apmecs's WSGI server is started from the `server module `_ +and the entry point `serve_wsgi` is called to build an instance of the +`ApmecApiService`_, which is then returned to the server module, +which spawns a `Eventlet`_ `GreenPool`_ that will run the WSGI +application and respond to requests from clients. + + +.. 
_ApmecApiService: http://git.openstack.org/cgit/openstack/apmec/tree/apmec/service.py + +.. _Eventlet: http://eventlet.net/ + +.. _GreenPool: http://eventlet.net/doc/modules/greenpool.html + +WSGI Application +---------------- + +During the building of the ApmecApiService, the `_run_wsgi` function +creates a WSGI application using the `load_paste_app` function inside +`config.py`_ - which parses `api-paste.ini`_ - in order to create a WSGI app +using `Paste`_'s `deploy`_. + +The api-paste.ini file defines the WSGI applications and routes - using the +`Paste INI file format`_. + +The INI file directs paste to instantiate the `APIRouter`_ class of +Apmec, which contains several methods that map MEC resources (such as +mead, mea) to URLs, and the controller for each resource. + + +.. _config.py: http://git.openstack.org/cgit/openstack/apmec/tree/apmec/common/config.py + +.. _api-paste.ini: http://git.openstack.org/cgit/openstack/apmec/tree/etc/apmec/api-paste.ini + +.. _APIRouter: http://git.openstack.org/cgit/openstack/apmec/tree/apmec/api/v1/router.py + +.. _Paste: http://pythonpaste.org/ + +.. _Deploy: http://pythonpaste.org/deploy/ + +.. _Paste INI file format: http://pythonpaste.org/deploy/#applications + +Further reading +--------------- + +Apmec wsgi is based on neutron's extension. The following doc is still +relevant. + +`Yong Sheng Gong: Deep Dive into Neutron `_ diff --git a/doc/source/contributor/api/mano_api.rst b/doc/source/contributor/api/mano_api.rst new file mode 100644 index 0000000..bce547e --- /dev/null +++ b/doc/source/contributor/api/mano_api.rst @@ -0,0 +1,315 @@ +******************* +Apmec API Overview +******************* + +Apmec API provides REST API end-points based on `ETSI MEC MANO standards`_. +The two new resources introduced are 'mead' and 'mea' for +describing the 'mem' extension. The resources request and response formats are +described in below sections. + +.. 
_ETSI MEC MANO standards: http://www.etsi.org/deliver/etsi_gs/MEC-MAN/001_099/001/01.01.01_60/gs_mec-man001v010101p.pdf + +API versions +============ + +Lists information for Apmec API version. + +**GET /** + +List API versions - Lists information about Apmec API version. + +:: + + Response: + { + "versions": [ + { + "status": "CURRENT", + "id": "v1.0", + "links": [ + { + "href": "http://10.18.160.13:9896/v1.0", + "rel": "self" + } + ] + } + ] + } + +Meads +===== + +**GET /v1.0/meads** + +List meads - List meads stored in the MEA catalog. + +:: + + Response: + { + "meads": [ + { + "service_types": [ + { + "service_type": "mead", + "id": "378b774d-89f5-4634-9c65-9c49ed6f00ce" + } + ], + "description": "OpenWRT with services", + "tenant_id": "4dd6c1d7b6c94af980ca886495bcfed0", + "mgmt_driver": "openwrt", + "infra_driver": "", + "attributes": { + "mead": "template_name: OpenWRT\r\ndescription: + template_description " + }, + "id": "247b045e-d64f-4ae0-a3b4-8441b9e5892c", + "name": "openwrt_services" + } + ] + } + +**GET /v1.0/meads/{mead_id}** + +Show mead - Show information for a specified mead id. + +:: + + Response: + { + "mead": { + "service_types": [ + { + "service_type": "mead", + "id": "378b774d-89f5-4634-9c65-9c49ed6f00ce" + } + ], + "description": "OpenWRT with services", + "tenant_id": "4dd6c1d7b6c94af980ca886495bcfed0", + "mgmt_driver": "openwrt", + "infra_driver": "", + "attributes": { + "mead": "template_name: OpenWRT\r\ndescription: + template_description " + }, + "id": "247b045e-d64f-4ae0-a3b4-8441b9e5892c", + "name": "openwrt_services" + } + } + +**POST /v1.0/meads** + +Create mead - Create a mead entry based on the mead template. 
+ +:: + + Request: + { + "auth": { + "tenantName": "admin", + "passwordCredentials": { + "username": "admin", + "password": "devstack" + } + }, + "mead": { + "service_types": [{"service_type": "mead"}], + "tenant_id": "bb6a3be1021a4746ab727a6c9296e797", + "description": "OpenWRT router", + "attributes": { + "mead": "description: OpenWRT with services\nmetadata: {template_name: OpenWRT}\ntopology_template:\n node_templates:\n CP1:\n properties: {anti_spoofing_protection: false, management: true, order: 0}\n requirements:\n - virtualLink: {node: VL1}\n - virtualBinding: {node: VDU1}\n type: tosca.nodes.mec.CP.Apmec\n CP2:\n properties: {anti_spoofing_protection: false, order: 1}\n requirements:\n - virtualLink: {node: VL2}\n - virtualBinding: {node: VDU1}\n type: tosca.nodes.mec.CP.Apmec\n CP3:\n properties: {anti_spoofing_protection: false, order: 2}\n requirements:\n - virtualLink: {node: VL3}\n - virtualBinding: {node: VDU1}\n type: tosca.nodes.mec.CP.Apmec\n VDU1:\n capabilities:\n mec_compute:\n properties: {disk_size: 1 GB, mem_size: 512 MB, num_cpus: 1}\n properties:\n config: 'param0: key1\n\n param1: key2\n\n '\n image: OpenWRT\n mgmt_driver: openwrt\n monitoring_policy:\n actions: {failure: respawn}\n name: ping\n parameters: {count: 3, interval: 10}\n type: tosca.nodes.mec.VDU.Apmec\n VL1:\n properties: {network_name: net_mgmt, vendor: Apmec}\n type: tosca.nodes.mec.VL\n VL2:\n properties: {network_name: net0, vendor: Apmec}\n type: tosca.nodes.mec.VL\n VL3:\n properties: {network_name: net1, vendor: Apmec}\n type: tosca.nodes.mec.VL\ntosca_definitions_version: tosca_simple_profile_for_mec_1_0_0\n" + }, + "name": "OpenWRT" + } + } + +:: + + Response: + { + "mead": { + "service_types": [ + { + "service_type": "mead", + "id": "336fe422-9fba-47c7-87fb-d48475c3e0ce" + } + ], + "description": "OpenWRT router", + "tenant_id": "4dd6c1d7b6c94af980ca886495bcfed0", + "mgmt_driver": "noop", + "infra_driver": "", + "attributes": { + "mead": "template_name: OpenWRT 
\r\ndescription: + template_description " + }, + "id": "ab10a543-22ee-43af-a441-05a9d32a57da", + "name": "OpenWRT" + } + } + +**DELETE /v1.0/meads/{mead_id}** + +Delete mead - Deletes a specified mead_id from the MEA catalog. + +This operation does not accept a request body and does not return a response +body. + +Meas +==== + +**GET /v1.0/meas** + +List meas - Lists instantiated meas in MEA Manager. + +:: + + Response: + { + "meas": [ + { + "status": "ACTIVE", + "name": "open_wrt", + "tenant_id": "4dd6c1d7b6c94af980ca886495bcfed0", + "instance_id": "f7c93726-fb8d-4036-8349-2e82f196e8f6", + "mgmt_url": "{\"vdu1\": \"192.168.120.3\"}", + "attributes": { + "service_type": "firewall", + "param_values": "", + "heat_template": "description: sample_template_description + type: OS::Nova::Server\n", + "monitoring_policy": "noop", + "failure_policy": "noop" + }, + "id": "c9b4f5a5-d304-473a-a57e-b665b1f9eb8f", + "description": "OpenWRT with services" + } + ] + } + +**GET /v1.0/meas/{mea_id}** + +Show mea - Show information for a specified mea_id. + +:: + + Response: + { + "mea": [ + { + "status": "ACTIVE", + "name": "open_wrt", + "tenant_id": "4dd6c1d7b6c94af980ca886495bcfed0", + "instance_id": "f7c93726-fb8d-4036-8349-2e82f196e8f6", + "mgmt_url": "{\"vdu1\": \"192.168.120.3\"}", + "attributes": { + "service_type": "firewall", + "param_values": "", + "heat_template": "description: OpenWRT with services\n + sample_template_description type: OS::Nova::Server\n", + "monitoring_policy": "noop", "failure_policy": "noop" + }, + "id": "c9b4f5a5-d304-473a-a57e-b665b1f9eb8f", + "description": "OpenWRT with services" + } + ] + } + +**POST /v1.0/meas** + +Create mea - Create a mea based on the mead template id. 
+ +:: + + Request: + { + "auth": { + "tenantName": "admin", + "passwordCredentials": { + "username": "admin", + "password": "devstack" + } + }, + "mea": { + "attributes": {}, + "vim_id": "", + "description": "demo-example", + "mead_id": "ad0c2c7c-825e-43c5-a402-b5710902b408", + "name": "demo-mea" + } + } + +:: + + Response: + { + "mea": { + "status": "PENDING_CREATE", + "description": "demo-example", + "tenant_id": "bb6a3be1021a4746ab727a6c9296e797", + "vim_id": "c91413b9-eaf9-47f7-86b6-3f3a3e29261e", + "name": "demo-mea", + "instance_id": "050f4d0e-ff7c-4a5d-9dba-dbe238b3348b", + "mgmt_url": null, + "placement_attr": { + "vim_name": "VIM0" + }, + "error_reason": null, + "attributes": { + "service_type": "firewall", + "heat_template": "description: OpenWRT with services\n + type: OS::Nova::Server\n", + "monitoring_policy": "noop", + "failure_policy": "noop" + }, + "id": "e3158513-92f4-4587-b949-70ad0bcbb2dd", + "mead_id": "247b045e-d64f-4ae0-a3b4-8441b9e5892c" + } + } + +**PUT /v1.0/meas/{mea_id}** + +Update mea - Update a mea based on user config file or data. + +:: + + Request: + { + "auth": { + "tenantName": "admin", + "passwordCredentials": { + "username": "admin", + "password": "devstack" + } + }, + "mea": { + "attributes": { + "config": "vdus:\n vdu1: \n\n" + } + } + } + +:: + + Response: + { + "mea": { + "status": "PENDING_UPDATE", + "name": "", + "tenant_id": "4dd6c1d7b6c94af980ca886495bcfed0", + "instance_id": "4f0d6222-afa0-4f02-8e19-69e7e4fd7edc", + "mgmt_url": "{\"vdu1\": \"192.168.120.4\"}", + "attributes": { + "service_type": "firewall", + "monitoring_policy": "noop", + "config": "vdus:\n vdu1:\n config: { + type: OS::Nova::Server\n", + "failure_policy": "noop" + }, + "id": "e3158513-92f4-4587-b949-70ad0bcbb2dd", + "description": "OpenWRT with services" + } + } + +**DELETE /v1.0/meas/{mea_id}** + +Delete mea - Deletes a specified mea_id from the MEA list. 
diff --git a/doc/source/contributor/dashboards.rst b/doc/source/contributor/dashboards.rst new file mode 100644 index 0000000..e203881 --- /dev/null +++ b/doc/source/contributor/dashboards.rst @@ -0,0 +1,9 @@ +Gerrit Dashboards +================= + +- `Apmec master branch reviews `_ + +These dashboard links can be generated by `Gerrit Dashboard Creator`_. +Useful dashboard definitions are found in the ``dashboards`` directory. + +.. _Gerrit Dashboard Creator: https://github.com/openstack/gerrit-dash-creator diff --git a/doc/source/contributor/dev-process.rst b/doc/source/contributor/dev-process.rst new file mode 100644 index 0000000..6c82718 --- /dev/null +++ b/doc/source/contributor/dev-process.rst @@ -0,0 +1,63 @@ +Apmec Development Process +========================== + +Enhancement to Apmec functionality can be done using one of the following +two development process options. The choice depends on the complexity of the +enhancement. + +Request for Enhancement (RFE) Process +===================================== + +The developer, or an operator, can write up the requested enhancement in an +Apmec launchpad [#]_ bug. + +* The requester needs to mark the bug with the "RFE" tag. +* The bug will be in the initial "New" state. +* The requester and team will have a discussion on the enhancement in the + launchpad bug. +* Once the discussion is over, an apmec-core team member will acknowledge the + validity of this feature enhancement by moving it to the "Confirmed" state. +* Developers submit patchsets to implement the enhancement using the bug-id. + Note, if there are multiple patchsets the Partial-Bug header should be used + instead of Closes-Bug in the commit message. +* Once all the patchsets are merged the bug will be moved to the "Completed" + state. +* Developer(s) are expected to add a devref describing the usage of the feature + and other related topics in the apmec/doc/source/contributor directory.
+ +This process is recommended for smaller enhancements that can be described +easily and are relatively easy to implement in a short period of time. + +Blueprint and Apmec-Specs process +================================== + +The developer, or an operator, can write up the requested enhancement by +submitting a patchset to the apmec-spec repository [#]_. + +* The patchset should follow the template specified in [#]_ +* The requester should also create a corresponding blueprint for the + enhancement proposal in launchpad [#]_ +* The requester and the team will have a discussion on the apmec-spec + writeup using gerrit. +* The patchset will be merged into the apmecs-specs repository if the + apmec-core team decides this is a valid feature enhancement. A patchset + may also be rejected with clear reasoning. +* The Apmec core team will also set the blueprint Definition field to Approved. +* The developer submits one or more patchsets to implement the enhancement. The + commit message should use "Implements: blueprint " using + the same name as the blueprint name. +* Once all the patchsets are merged the blueprint will be marked as + "Implemented" by the apmec core team. +* The developer is expected to add a devref describing the usage of the feature + and other related topics in the apmec/doc/source/contributor directory. + +This process is recommended for medium to large enhancements that need +significant code-changes (LOC), community discussions and debates. + +References +========== + +.. [#] https://bugs.launchpad.net/apmec +.. [#] https://github.com/openstack/apmec-specs +.. [#] https://github.com/openstack/apmec-specs/blob/master/specs/template.rst +.. [#] https://blueprints.launchpad.net/apmec/ diff --git a/doc/source/contributor/development.environment.rst b/doc/source/contributor/development.environment.rst new file mode 100644 index 0000000..463f848 --- /dev/null +++ b/doc/source/contributor/development.environment.rst @@ -0,0 +1,48 @@ +..
+ Copyright 2010-2015 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Setting Up a Development Environment +==================================== + +This page describes how to set up a working Python development +environment that can be used in developing Apmec on Ubuntu, Fedora or +Mac OS X. These instructions assume you're already familiar with +Git and Gerrit, which are a code repository mirror and a code review toolset; +however, if you aren't, please see `this Git tutorial`_ for an introduction +to using Git and `this guide`_ for a tutorial on using Gerrit and Git for +code contribution to Openstack projects. + +.. _this Git tutorial: http://git-scm.com/book/en/Getting-Started +.. _this guide: https://docs.openstack.org/infra/manual/developers.html#development-workflow + +If you want to be able to run Apmec in a full OpenStack environment, +you can use the excellent `DevStack`_ project to do so. There is a wiki page +that describes `setting up Apmec using DevStack`_. + +.. _DevStack: https://git.openstack.org/cgit/openstack-dev/devstack +.. _setting up Apmec using Devstack: https://wiki.openstack.org/wiki/Apmec/Installation + +Getting the code +---------------- + +Grab the code:: + + git clone git://git.openstack.org/openstack/apmec.git + cd apmec + + +..
include:: ../../../TESTING.rst diff --git a/doc/source/contributor/encrypt_vim_auth_with_barbican.rst b/doc/source/contributor/encrypt_vim_auth_with_barbican.rst new file mode 100644 index 0000000..e5840d1 --- /dev/null +++ b/doc/source/contributor/encrypt_vim_auth_with_barbican.rst @@ -0,0 +1,147 @@ +Save VIM credentials into Barbican +================================== + +Overview +-------- + +This document shows how to operate vims which use barbican to save the +vim key in a devstack environment. + +The brief code workflow is described as follows: + +When creating a vim: +We use fernet to encrypt the vim password, save the fernet key into barbican +as a secret, save the encrypted password into the vim db's field **password**, +and then save the secret uuid into the vim db field **secret_uuid**. + +When retrieving the vim password: +We use **secret_uuid** to get the fernet key from barbican, and decrypt +**password** using fernet. + +When deleting a vim: +We delete the secret identified by the **secret_uuid** in the vim db from barbican. + + +How to test +----------- + +We need to enable barbican in the devstack localrc file: + +.. code-block:: bash + + enable_plugin barbican https://git.openstack.org/openstack/barbican + enable_plugin apmec https://git.openstack.org/openstack/apmec + USE_BARBICAN=True + +.. note:: + + Please make sure the barbican plugin is enabled before the apmec plugin. + We set USE_BARBICAN=True to use barbican. + +Create a vim and verify it works: + +.. code-block:: bash + + $ .
openrc-admin.sh + $ openstack project create test + $ openstack user create --password a test + $ openstack role add --project test --user test admin + + $ cat vim-test.yaml + auth_url: 'http://127.0.0.1:5000' + username: 'test' + password: 'Passw0rd' + project_name: 'test' + project_domain_name: 'Default' + user_domain_name: 'Default' + + $ cat openrc-test.sh + export LC_ALL='en_US.UTF-8' + export OS_NO_CACHE='true' + export OS_USERNAME=test + export OS_PASSWORD=Passw0rd + export OS_PROJECT_NAME=test + export OS_USER_DOMAIN_NAME=Default + export OS_PROJECT_DOMAIN_NAME=Default + export OS_AUTH_URL=http://127.0.0.1:35357/v3 + export OS_IDENTITY_API_VERSION=3 + export OS_IMAGE_API_VERSION=2 + export OS_NETWORK_API_VERSION=2 + + $ source openrc-test.sh + $ openstack secret list + + $ apmec vim-register --config-file vim-test.yaml vim-test + Created a new vim: + +----------------+---------------------------------------------------------+ + | Field | Value | + +----------------+---------------------------------------------------------+ + | auth_cred | {"username": "test", "password": "***", "project_name": | + | | "test", "user_domain_name": "Default", "key_type": | + | | "barbican_key", "secret_uuid": "***", "auth_url": | + | | "http://127.0.0.1:5000/v3", "project_id": null, | + | | "project_domain_name": "Default"} | + | auth_url | http://127.0.0.1:5000/v3 | + | created_at | 2017-06-20 14:56:05.622612 | + | description | | + | id | 7c0b73c7-554b-46d3-a35c-c368019716a0 | + | is_default | False | + | name | vim-test | + | placement_attr | {"regions": ["RegionOne"]} | + | status | REACHABLE | + | tenant_id | 28a525feaf5e4d05b4ab9f7090837964 | + | type | openstack | + | updated_at | | + | vim_project | {"name": "test", "project_domain_name": "Default"} | + +----------------+---------------------------------------------------------+ + + $ openstack secret list + 
+-------------------------------------------+------+---------------------------+--------+-------------------------------------------+-----------+------------+-------------+------+------------+ + | Secret href | Name | Created | Status | Content types | Algorithm | Bit length | Secret type | Mode | Expiration | + +-------------------------------------------+------+---------------------------+--------+-------------------------------------------+-----------+------------+-------------+------+------------+ + | http://127.0.0.1:9311/v1/secrets/d379f561 | None | 2017-06-20T14:56:06+00:00 | ACTIVE | {u'default': u'application/octet-stream'} | None | None | opaque | None | None | + | -7073-40ea-822d-9d7bcb594e1a | | | | | | | | | | + +-------------------------------------------+------+---------------------------+--------+-------------------------------------------+-----------+------------+-------------+------+------------+ + +We can found that the **key_type** in auth_cred is **barbican_key**, +the **secret_uuid** exists with masked value, and the fernet key is +saved in barbican as a secret. + +Now we create a mea to verify it works: + +.. 
code-block:: bash + + $ apmec mea-create --mead-template mead-sample.yaml \ + --vim-name vim-test --vim-region-name RegionOne mea-test + Created a new mea: + +----------------+-------------------------------------------------------+ + | Field | Value | + +----------------+-------------------------------------------------------+ + | created_at | 2017-06-20 15:08:43.267694 | + | description | Demo example | + | error_reason | | + | id | 71d3eef7-6b53-4495-b210-78786cb28ba4 | + | instance_id | 08d0ce6f-69bc-4ff0-87b0-52686a01ce3e | + | mgmt_url | | + | name | mea-test | + | placement_attr | {"region_name": "RegionOne", "vim_name": "vim-test"} | + | status | PENDING_CREATE | + | tenant_id | 28a525feaf5e4d05b4ab9f7090837964 | + | updated_at | | + | vim_id | 0d1e1cc4-445d-41bd-b3e9-739acb987231 | + | mead_id | dc68ccfd-fd7c-4ef6-8fed-f097d036c722 | + +----------------+-------------------------------------------------------+ + + $ apmec mea-delete mea-test + +We can found that mea create successfully. + +Now we delete the vim to verify the secret can be deleted. + +.. code-block:: bash + + $ apmec vim-delete vim-test + All vim(s) deleted successfully + $ openstack secret list + +We can found that the secret is deleted from barbican. diff --git a/doc/source/contributor/event_logging.rst b/doc/source/contributor/event_logging.rst new file mode 100644 index 0000000..6361dcc --- /dev/null +++ b/doc/source/contributor/event_logging.rst @@ -0,0 +1,217 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +Apmec Resource Events Usage Guide +================================== + +Overview +-------- + +OpenStack Apmec supports capturing resource event information when the +apmec resources undergo create, update, delete, scale and monitor +operations. This information becomes useful to an admin for audit purposes. 
+ +Apmec Resources supporting Events +---------------------------------- +As of Newton release, events information is captured for below: + +- MEA + +- MEAD + +- VIM + +Apmec supported event types +---------------------------- +Below are the event types that are currently supported: + +- CREATE + +- DELETE + +- MONITOR + +- SCALE + +- UPDATE + +The above can be used as filters when listing events using apmec client. + +Accessing Events +---------------- + +Apmec supports display of events to an end user via + +- Horizon UI - a separate events tab per resource displays associated events. + +- Apmec Client - supports below commands: + - event-show: Show detailed info for a given event ID. + - events-list: Lists all events for all resources. + - vim-events-list: List events that belong to a given VIM. + - mea-events-list: List events that belong to a given MEA. + - mead-events-list: List events that belong to a given MEAD. + +NOTE: For more details on the syntax of these CLIs, refer to +`Apmec CLI reference guide `_ + +Apmec Client command usage examples to access resource lifecycle events +------------------------------------------------------------------------ + +1. The following command displays all the state transitions that occurred on +a long running MEA. The sample output illustrates a MEA that has +successfully gone through a scale out operation. Note, the here +is MEA's uuid. + +.. code-block:: console + + apmec mea-events-list --resource_id + + +----+---------------+-------------------+-------------------+------------+-------------------+---------------------+ + | id | resource_type | resource_id | resource_state | event_type | timestamp | event_details | + +----+---------------+-------------------+-------------------+------------+-------------------+---------------------+ + | 13 | mea | 9dd7b2f1-e91e-418 | PENDING_CREATE | CREATE | 2016-09-21 | MEA UUID assigned. 
| + | | | 3-bcbe- | | | 20:12:37 | | + | | | 34b80bdb18fb | | | | | + | 14 | mea | 9dd7b2f1-e91e-418 | PENDING_CREATE | CREATE | 2016-09-21 | Infra Instance ID | + | | | 3-bcbe- | | | 20:13:09 | created: 3bd369e4-9 | + | | | 34b80bdb18fb | | | | ee3-4e58-86e3-8acbb | + | | | | | | | dccedb5 and Mgmt | + | | | | | | | URL set: {"VDU1": | + | | | | | | | ["10.0.0.9", | + | | | | | | | "10.0.0.2"], | + | | | | | | | "VDU2": | + | | | | | | | ["10.0.0.4", | + | | | | | | | "10.0.0.5"]} | + | 15 | mea | 9dd7b2f1-e91e-418 | ACTIVE | CREATE | 2016-09-21 | MEA status updated | + | | | 3-bcbe- | | | 20:13:09 | | + | | | 34b80bdb18fb | | | | | + | 16 | mea | 9dd7b2f1-e91e-418 | PENDING_SCALE_OUT | SCALE | 2016-09-21 | | + | | | 3-bcbe- | | | 20:23:58 | | + | | | 34b80bdb18fb | | | | | + | 17 | mea | 9dd7b2f1-e91e-418 | ACTIVE | SCALE | 2016-09-21 | | + | | | 3-bcbe- | | | 20:24:45 | | + | | | 34b80bdb18fb | | | | | + +----+---------------+-------------------+-------------------+------------+-------------------+---------------------+ + +2. The following command displays any reachability issues related to a VIM +site. The sample output illustrates a VIM that is reachable. Note, the + here is a VIM uuid. + +.. 
code-block:: console + + apmec vim-events-list --resource_id + + +----+---------------+---------------------+----------------+------------+---------------------+---------------+ + | id | resource_type | resource_id | resource_state | event_type | timestamp | event_details | + +----+---------------+---------------------+----------------+------------+---------------------+---------------+ + | 1 | vim | d8c11a53-876c-454a- | PENDING | CREATE | 2016-09-20 23:07:42 | | + | | | bad1-cb13ad057595 | | | | | + | 2 | vim | d8c11a53-876c-454a- | REACHABLE | MONITOR | 2016-09-20 23:07:42 | | + | | | bad1-cb13ad057595 | | | | | + +----+---------------+---------------------+----------------+------------+---------------------+---------------+ + + +Miscellaneous events command examples: +-------------------------------------- + +1. List all events for all resources from the beginning + +.. code-block:: console + + apmec events-list + + +----+---------------+-----------------+----------------+------------+-----------------+-----------------+ + | id | resource_type | resource_id | resource_state | event_type | timestamp | event_details | + +----+---------------+-----------------+----------------+------------+-----------------+-----------------+ + | 1 | vim | c89e5d9d-6d55-4 | PENDING | CREATE | 2016-09-10 | | + | | | db1-bd67-30982f | | | 20:32:46 | | + | | | 01133e | | | | | + | 2 | vim | c89e5d9d-6d55-4 | REACHABLE | MONITOR | 2016-09-10 | | + | | | db1-bd67-30982f | | | 20:32:46 | | + | | | 01133e | | | | | + | 3 | mead | afc0c662-5117-4 | Not Applicable | CREATE | 2016-09-14 | | + | | | 7a7-8088-02e9f8 | | | 05:17:30 | | + | | | a3532b | | | | | + | 4 | mea | 52adaae4-36b5 | PENDING_CREATE | CREATE | 2016-09-14 | MEA UUID | + | | | -41cf-acb5-32ab | | | 17:49:24 | assigned. 
| + | | | 8c109265 | | | | | + | 5 | mea | 52adaae4-36b5 | PENDING_CREATE | CREATE | 2016-09-14 | Infra Instance | + | | | -41cf-acb5-32ab | | | 17:49:51 | ID created: | + | | | 8c109265 | | | | 046dcb04-318d-4 | + | | | | | | | ec9-8a23-19d9c1 | + | | | | | | | f8c21d and Mgmt | + | | | | | | | URL set: | + | | | | | | | {"VDU1": "192.1 | + | | | | | | | 68.120.8"} | + | 6 | mea | 52adaae4-36b5 | ACTIVE | CREATE | 2016-09-14 | MEA status | + | | | -41cf-acb5-32ab | | | 17:49:51 | updated | + | | | 8c109265 | | | | | + +----+---------------+-----------------+----------------+------------+-----------------+-----------------+ + +2. List all events for all resources given a certain event type + +.. code-block:: console + + apmec events-list --event_type CREATE + + +----+---------------+-----------------+----------------+------------+-----------------+-----------------+ + | id | resource_type | resource_id | resource_state | event_type | timestamp | event_details | + +----+---------------+-----------------+----------------+------------+-----------------+-----------------+ + | 1 | vim | c89e5d9d-6d55-4 | PENDING | CREATE | 2016-09-10 | | + | | | db1-bd67-30982f | | | 20:32:46 | | + | | | 01133e | | | | | + | 3 | mead | afc0c662-5117-4 | ACTIVE | CREATE | 2016-09-14 | | + | | | 7a7-8088-02e9f8 | | | 05:17:30 | | + | | | a3532b | | | | | + | 4 | mea | 52adaae4-36b5 | PENDING_CREATE | CREATE | 2016-09-14 | MEA UUID | + | | | -41cf-acb5-32ab | | | 17:49:24 | assigned. 
| + | | | 8c109265 | | | | | + | 5 | mea | 52adaae4-36b5 | PENDING_CREATE | CREATE | 2016-09-14 | Infra Instance | + | | | -41cf-acb5-32ab | | | 17:49:51 | ID created: | + | | | 8c109265 | | | | 046dcb04-318d-4 | + | | | | | | | ec9-8a23-19d9c1 | + | | | | | | | f8c21d and Mgmt | + | | | | | | | URL set: | + | | | | | | | {"VDU1": "192.1 | + | | | | | | | 68.120.8"} | + | 6 | mea | 52adaae4-36b5 | ACTIVE | CREATE | 2016-09-14 | MEA status | + | | | -41cf-acb5-32ab | | | 17:49:51 | updated | + | | | 8c109265 | | | | | + +----+---------------+-----------------+----------------+------------+-----------------+-----------------+ + + +3. List details for a specific event + +.. code-block:: console + + apmec event-show 5 + + +----------------+------------------------------------------------------------------------------------------+ + | Field | Value | + +----------------+------------------------------------------------------------------------------------------+ + | event_details | Infra Instance ID created: 046dcb04-318d-4ec9-8a23-19d9c1f8c21d and Mgmt URL set: | + | | {"VDU1": "192.168.120.8"} | + | event_type | CREATE | + | id | 5 | + | resource_id | 52adaae4-36b5-41cf-acb5-32ab8c109265 | + | resource_state | PENDING_CREATE | + | resource_type | mea | + | timestamp | 2016-09-14 17:49:51 | + +----------------+------------------------------------------------------------------------------------------+ + + +Note for Apmec developers +-------------------------- + +If as a developer, you are creating new resources and would like to capture +event information for resource operations such as create, update, delete, +scale and monitor, you would need to : + +- Import the module apmec.db.common_services.common_services_db to use the + create_event() method for logging events. + +- Make edits in the file apmec/plugins/common/constants.py if you would need + to create new event types. 
diff --git a/doc/source/contributor/monitor-api.rst b/doc/source/contributor/monitor-api.rst new file mode 100644 index 0000000..1a3da12 --- /dev/null +++ b/doc/source/contributor/monitor-api.rst @@ -0,0 +1,136 @@ +Apmec Monitoring Framework +============================ + +This section will introduce the apmec monitoring framework and describe the +various actions that a user can take when a specific event occurs. + +* Introduction +* How to write a new monitor driver +* Events +* Actions +* How to write TOSCA template to monitor MEA entities + +Introduction +------------- + +The Apmec monitoring framework enables the MEM operators and MEA vendors to +write a pluggable driver that monitors the various status conditions of the +MEA entities it deploys and manages. + +How to write a new monitor driver +---------------------------------- + +A monitor driver for apmec is a python module which contains a class that +inherits from +"apmec.mem.monitor_drivers.abstract_driver.MEAMonitorAbstractDriver". If the +driver depends on/imports more than one module, then create a new python package +under the apmec/mem/monitor_drivers folder. After this we have to mention our +driver path in the setup.cfg file in the root directory. + +For example: +:: + + apmec.apmec.monitor_drivers = + ping = apmec.mem.monitor_drivers.ping.ping:MEAMonitorPing + +The following methods need to be overridden in the new driver: + +``def get_type(self)`` + This method must return the type of driver. ex: ping + +``def get_name(self)`` + This method must return the symbolic name of the mea monitor plugin. + +``def get_description(self)`` + This method must return the description for the monitor driver. + +``def monitor_get_config(self, plugin, context, mea)`` + This method must return a dictionary of configuration data for the monitor + driver. + +``def monitor_url(self, plugin, context, mea)`` + This method must return the URL of the mea to monitor.
+
+``def monitor_call(self, mea, kwargs)``
+    This method must either return boolean value 'True', if MEA is healthy.
+    Otherwise it should return an event string like 'failure' or
+    'calls-capacity-reached' based on specific MEA health condition. More
+    details on these events are given in the section below.
+
+Custom events
+--------------
+As mentioned in the above section, if the return value of the monitor_call
+method is other than boolean value 'True', then we have to map those events
+to the corresponding actions as described below.
+
+For example:
+
+::
+
+    vdu1:
+      monitoring_policy:
+        ping:
+          actions:
+            failure: respawn
+
+In this example, we have an event called 'failure'. So whenever monitor_call
+returns 'failure', apmec will respawn the MEA.
+
+
+Actions
+--------
+These are the available actions that a monitor driver can call when a
+particular event occurs.
+
+#. respawn
+#. log
+
+How to write TOSCA template to monitor MEA entities
+----------------------------------------------------
+
+In the vdus section, under vdu you can specify the monitoring details with
+corresponding actions and parameters. The syntax for writing a monitor
+policy is as follows:
+
+::
+
+    vduN:
+      monitoring_policy:
+        <monitoring-driver-name>:
+          monitoring_params:
+            <param-name>: <param-value>
+            ...
+          actions:
+            <event>: <action-name>
+            ...
+        ...
+ + +Example Template +---------------- + +:: + + vdu1: + monitoring_policy: + ping: + actions: + failure: respawn + + vdu2: + monitoring_policy: + http-ping: + monitoring_params: + port: 8080 + url: ping.cgi + actions: + failure: respawn + + acme_scaling_driver: + monitoring_params: + resource: cpu + threshold: 10000 + actions: + max_foo_reached: scale_up + min_foo_reached: scale_down + diff --git a/doc/source/contributor/policy_actions_framework.rst b/doc/source/contributor/policy_actions_framework.rst new file mode 100644 index 0000000..c1befef --- /dev/null +++ b/doc/source/contributor/policy_actions_framework.rst @@ -0,0 +1,91 @@ +Apmec Policy Framework +======================= + +This section will introduce framework for apmec policy actions. + +* Introduction +* How to write a new policy action +* Event and Auditing support +* How to combine policy actions with existing monitoring frameworks in Apmec + +Introduction +------------ + +Apmec policy actions framework provides the MEC operators and MEA vendors to +write a pluggable action that manages their own MEAs. Currently Apmec +already provided some common actions like autoscaling, respawning, and +logging. With this framework the custom actions can be easily +applied for the management purpose. + +How to write a new policy action +-------------------------------- + +A policy action for apmec is a python module which contains a class that +inherits from +"apmec.mem.policy_actions.abstract_action.AbstractPolicyAction". If the +driver depends/imports more than one module, then create a new python package +under apmec/mem/policy_actions folder. After this we have to mention our +driver path in setup.cfg file in root directory. + +For example: +:: + + apmec.apmec.policy.actions = + respawn = apmec.mem.policy_actions.respawn.respawn:MEAActionRespawn + +Following methods need to be overridden in the new action: + +``def get_type(self)`` + This method must return the type of action. 
ex: respawn
+
+``def get_name(self)``
+    This method must return the symbolic name of the mea policy action.
+
+``def get_description(self)``
+    This method must return the description for the policy action.
+
+``def execute_action(self, plugin, context, mea, arguments)``
+    This method must expose what will be executed with the policy action.
+    'arguments' is used to add more options for policy actions. For example,
+    if the action is scaling, 'arguments' should let you know whether
+    'scaling-out' or 'scaling-in' will be applied.
+
+Event and Auditing support
+--------------------------
+
+This function can be used to describe the execution process of a policy.
+For example:
+::
+
+    _log_monitor_events(context, mea_dict, "ActionRespawnHeat invoked")
+
+
+How to combine policy with existing monitoring framework in Apmec
+------------------------------------------------------------------
+
+In the monitoring policy section, you can specify the monitoring details
+with the corresponding action.
+
+The below example shows how a policy is used for an alarm monitor.
+
+Example Template
+----------------
+
+::
+
+    policies:
+      - vdu1_cpu_usage_monitoring_policy:
+          type: tosca.policies.apmec.Alarming
+          triggers:
+            resize_compute:
+              event_type:
+                type: tosca.events.resource.utilization
+                implementation: ceilometer
+              metrics: cpu_util
+              condition:
+                threshold: 50
+                constraint: utilization greater_than 50%
+                period: 65
+                evaluations: 1
+                method: avg
+                comparison_operator: gt
+              actions: [respawn]
diff --git a/doc/source/contributor/tacker_conductor.rst b/doc/source/contributor/tacker_conductor.rst
new file mode 100644
index 0000000..a22ca6b
--- /dev/null
+++ b/doc/source/contributor/tacker_conductor.rst
@@ -0,0 +1,70 @@
+..
+  Copyright 2014-2015 OpenStack Foundation
+  All Rights Reserved.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may
+  not use this file except in compliance with the License.
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +================ +Apmec conductor +================ + +Apmec conductor is a component which is used to communicate with other +components via message RPC. In the conductor, the RPC server sides can +access the apmec base on behalf of them. + + +To start +============== + +Apmec conductor can be started via python console entry script +'apmec-conductor': + +.. code-block:: console + + apmec-conductor --config-file /etc/apmec/apmec.conf + +.. + +we can easily start many apmec-conductor instances with different 'host' value +in the configuration file: + +.. code-block:: console + + test@ubuntu64:~/devstack$ grep 'host = secondinstance' /etc/apmec/apmec2.conf + host = secondinstance + +.. + +and then start the second instance: + +.. code-block:: console + + apmec-conductor --config-file /etc/apmec/apmec2.conf + +.. + +Rabbitmq queues +=============== + +Apmec conductor is listening on three queues: + +.. code-block:: console + + test@ubuntu64:~/apmec$ sudo rabbitmqctl list_queues | grep CONDUCTOR + APMEC_CONDUCTOR 0 + APMEC_CONDUCTOR.ubuntu64 0 + APMEC_CONDUCTOR_fanout_0ea005c0b666488485a7b3689eb70168 0 + +.. + +But only APMEC_CONDUCTOR queue without host suffix is used. diff --git a/doc/source/contributor/tacker_functional_test.rst b/doc/source/contributor/tacker_functional_test.rst new file mode 100644 index 0000000..3e3e25e --- /dev/null +++ b/doc/source/contributor/tacker_functional_test.rst @@ -0,0 +1,126 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. 
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+
+===============================
+Functional testcases for apmec
+===============================
+
+Purpose of functional testcases is to verify various functionality of apmec
+features. From apmec home directory, testcases are located at
+apmec/tests/functional.
+
+Writing a testcase: A testcase is written by declaring a class name derived
+from class base.BaseApmecTest. BaseApmecTest is a class declared in
+apmec/tests/functional/mead/base.py.
+
+A testcase body typically looks as below:
+
+
+.. code-block:: python
+
+    class meaClassName(base.BaseApmecTest):
+
+        def test_create_delete(self):
+
+            //Testcase operations
+
+            //validations or asserts
+
+            //cleanup
+
+
+In the above example, test class 'meaClassName' is derived from
+base.BaseApmecTest. Testcases typically have sections to setup, test,
+validate results and finally cleanup.
+
+Input yaml files: These are input files used in testcases for operations like
+create mead or create mea. The location of files is apmec/tests/etc/samples/.
+
+requirements.txt and test-requirements.txt: The files requirements.txt and
+test-requirements.txt list all the packages needed for functional test.
+These packages are installed during devstack installation. If there are any
+new packages needed for functional test make sure they are added in
+test-requirements.txt.
+
+Asserting values in testcase: The base class BaseApmecTest
+inherits base.TestCase which has built-in assert functions which can be used
+in a testcase.
+Eg: assertIsNotNone, assertEqual
+
+Apmec-client: In base.py we instantiate an apmecclient object which has APIs
+to create/delete/list mead/mea once given the necessary parameters.
+Verify apmecclient/v1_0/client.py for all the apmec related APIs supported.
+
+
+
+Important guidelines to follow:
+===============================
+
+* Install test-requirements.txt with below command:
+
+..
code-block:: console + + pip install -r test-requirements.txt + +* It is important that the test case executed leaves the + system in the same state it was prior to test case execution + and not leave any stale data on system as this might affect + other test cases. +* There should not be any dependencies between testcases + which assume one testcase should be executed and be passed + for second testcase. +* Testcases in tox environment may be executed in parallel. + The order in which the testcases are executed may vary + between two environments. +* The code added should meet pep8 standards. This can be verified with + following command and ensuring the code does not return any errors. + +.. code-block:: console + + tox -e pep8 + + + +Execution of testcase: +====================== + +* Install apmec server via devstack installation, which registers + apmec service and endpoint, creates "mec_user" and "mec" project, + and registers default VIM with the created user and project. + +* Under apmec project dir, to prepare function test env via: + +.. code-block:: console + + ./tools/prepare_functional_test.sh + +* From apmec directory, all function testcases can be executed using + following commands: + +.. code-block:: console + + tox -e functional + +* Or from apmec directory, specific testcases can be executed using + following commands: + +.. code-block:: console + + tox -e functional apmec.tests.functional.xxx.yyy. 
+ + +Committing testcase and opening a review: +========================================= + +* Once testcase is added in local setup, commit the testcase and open for + review using below guidelines: + https://docs.openstack.org/infra/manual/developers.html + +Sample testcase: +================ +* Check sample tests under following directory: + https://github.com/openstack/apmec/blob/master/apmec/tests/functional/ diff --git a/doc/source/contributor/tacker_vim_monitoring.rst b/doc/source/contributor/tacker_vim_monitoring.rst new file mode 100644 index 0000000..b5503ff --- /dev/null +++ b/doc/source/contributor/tacker_vim_monitoring.rst @@ -0,0 +1,124 @@ +.. + Copyright 2014-2015 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +=============================== +Mistral workflow VIM monitoring +=============================== + +For the purpose to make apmec server scale, the mistral workflow is used to +re-implement the VIM monitoring feature. + +The main monitoring process is like this: + +- user registers a VIM +- apmec server saves it into database +- apmec server generates a mistral workflow and executes it +- the VIM monitor mistral action is executed and do the monitoring, if there + is status change, it will RPC call conductor +- the conductor changes the VIM status + + +Feature exploration +=================== + +Firstly register a VIM: + +.. 
code-block:: console + + $ apmec vim-register --config-file ~/testvim_config.yaml testvim2 -c id -c name -c status + Created a new vim: + +--------+--------------------------------------+ + | Field | Value | + +--------+--------------------------------------+ + | id | 4406cf8f-f2af-46cc-bfb9-e00add5805b7 | + | name | testvim2 | + | status | PENDING | + +--------+--------------------------------------+ + +.. + +The registered VIM's id is '4406cf8f-f2af-46cc-bfb9-e00add5805b7', after this, +there is a mistral workflow named as +'vim_id_4406cf8f-f2af-46cc-bfb9-e00add5805b7', is generated in mistral: + +.. code-block:: console + + $ mistral workflow-list --filter name=vim_id_4406cf8f-f2af-46cc-bfb9-e00add5805b7 -c ID -c Name + +--------------------------------------+---------------------------------------------+ + | ID | Name | + +--------------------------------------+---------------------------------------------+ + | 0cd0deff-6132-4ee2-a181-1c877cd594cc | vim_id_4406cf8f-f2af-46cc-bfb9-e00add5805b7 | + +--------------------------------------+---------------------------------------------+ + +.. + +and it is executed: + +.. code-block:: console + + $ mistral execution-list --filter workflow_name=vim_id_4406cf8f-f2af-46cc-bfb9-e00add5805b7 -c ID -c 'Workflow name' -c State + +--------------------------------------+---------------------------------------------+---------+ + | ID | Workflow name | State | + +--------------------------------------+---------------------------------------------+---------+ + | 99ced0e2-be09-4219-ab94-299df8ee8789 | vim_id_4406cf8f-f2af-46cc-bfb9-e00add5805b7 | RUNNING | + +--------------------------------------+---------------------------------------------+---------+ + +.. + +The monitoring task is running too: + +.. 
code-block:: console + + $ mistral task-list --filter workflow_name=vim_id_4406cf8f-f2af-46cc-bfb9-e00add5805b7 -c ID -c 'Workflow name' -c Name -c State + +--------------------------------------+-----------------------------+---------------------------------------------+---------+ + | ID | Name | Workflow name | State | + +--------------------------------------+-----------------------------+---------------------------------------------+---------+ + | f2fe2904-6ff2-4531-9bd0-4c998ef1515f | monitor_ping_vimPingVIMTASK | vim_id_4406cf8f-f2af-46cc-bfb9-e00add5805b7 | RUNNING | + +--------------------------------------+-----------------------------+---------------------------------------------+---------+ + +.. + +Of course, the VIM's state is in 'REACHABLE' status: + +.. code-block:: console + + $ apmec vim-list --name testvim2 -c id -c name -c status + +--------------------------------------+----------+-----------+ + | id | name | status | + +--------------------------------------+----------+-----------+ + | 4406cf8f-f2af-46cc-bfb9-e00add5805b7 | testvim2 | REACHABLE | + +--------------------------------------+----------+-----------+ + +.. + +The deletion of VIM will lead to removal of all of these mistral resources. + + +Rabbitmq queues +=============== + +Each mistral VIM monitoring action is listening on three queues: + +.. code-block:: console + + ~/apmec$ sudo rabbitmqctl list_queues | grep -i KILL_ACTION + KILL_ACTION 0 + KILL_ACTION.4406cf8f-f2af-46cc-bfb9-e00add5805b7 0 + KILL_ACTION_fanout_a8118e2e18b9443986a1b37f7b082ab9 0 + +.. + +But only KILL_ACTION with VIM id as suffix is used. 
diff --git a/doc/source/contributor/vnfd_template_description.rst b/doc/source/contributor/vnfd_template_description.rst new file mode 100644 index 0000000..0f60e03 --- /dev/null +++ b/doc/source/contributor/vnfd_template_description.rst @@ -0,0 +1,632 @@ +MEA Descriptor Template Guide +============================= +Overview +-------- + +This document explains MEAD template structure and its various fields based +on TOSCA standards `V1.0 CSD 03 `_. + +The behavioural and deployment information of a MEA in Apmec is defined in a +template known as MEA Descriptor (MEAD). The template is based on TOSCA +standards and is written in YAML. It is on-boarded in a MEA catalog. + +Each MEAD template will have below fields: + +:: + + tosca_definitions_version: + This defines the TOSCA definition version on which the template is based. + The current version being tosca_simple_profile_for_mec_1_0_0. + + tosca_default_namespace: + This is optional. It mentions default namespace which includes schema, + types version etc. + + description: + A short description about the template. + + metadata: + template_name: A name to be given to the template. + + topology_template: + Describes the topology of the MEA under node_template field. + node_template: + Describes node types of a MEA. + VDU: + Describes properties and capabilities of Virtual Deployment + Unit. + CP: + Describes properties and capabilities of Connection Point. + VL: + Describes properties and capabilities of Virtual Link. + +For examples, please refer sample MEAD templates available at `GitHub `_. + +Node types +---------- +A MEA includes **VDU/s**, **connection point/s** and **virtual link/s**. Hence +a valid MEAD must have these 3 components. Each component is referred as a +node and can have certain type, capabilities, properties, attributes and +requirements. These components are described under **node_templates** in the +MEAD template. **node_templates** is a child of **topology_template**. 
+ +VDU +--- +Virtual Deployment Unit is a basic part of MEA. It is the VM that hosts the +network function. + +:type: + tosca.nodes.mec.VDU.Apmec +:properties: + Describes the properties like image to be used in VDU, availability zone in + which VDU is to be spawned, management driver to be used to manage the VDU, + flavor describing physical properties for the VDU to be spawned, monitoring + policies for the VDU, providing user data in form of custom commands to the + VDU. A complete list of VDU properties currently supported by Apmec are + listed `here `_ under **properties** section of + **tosca.nodes.mec.VDU.Apmec** field + +Specifying VDU properties +^^^^^^^^^^^^^^^^^^^^^^^^^ +A very simple VDU with 10 GB disk, 2 GB RAM, 2 CPUs, cirros image and in nova +availability zone can be described as: + +:: + + topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 2048 MB + num_cpus: 2 + +Using Nova flavors for VDU +^^^^^^^^^^^^^^^^^^^^^^^^^^ +OpenStack specific **flavors** can also be used to describe VDU configuration. + +:: + + topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + +However, when both **mec_compute properties** and **flavor** are mentioned in +a MEAD, **flavor** setting will take precedence. + +Monitoring the VDU +"""""""""""""""""" +A VDU can be monitored by pinging it on port 22 for 3 times at an interval of +2 seconds every 20 seconds. Number of retries be 6 and timeout of 2 seconds. +It can be re-spawned in case ping fails. This is described under +**monitoring_policy**. + +:: + + .. 
+ VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + monitoring_policy: + name: ping + parameters: + monitoring_delay: 20 + count: 3 + interval: 2 + timeout: 2 + actions: + failure: respawn + retry: 6 + port: 22 + +Providing user data +""""""""""""""""""" +Custom commands to be run on VDU once it is spawned can be specified in a MEAD +template as user data. + +:: + + .. + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + user_data_format: RAW + user_data: | + #!/bin/sh + echo "Adding this line to demofile" > /tmp/demofile + +Configuring a VDU +""""""""""""""""" +A VDU can be configured as a specific Network Function under **config** +section in MEAD template. A sample template configuring a VDU as a firewall +can be viewed in a `sample file `_. + +Specifying external image +""""""""""""""""""""""""" +:artifacts: + To specify an image via a file or an external link + +An image URL can be specified as **artifacts**. Apmec will specify the image +location in HOT (Heat Template) and pass it to heat-api. Heat will then spawn +the VDU with that image. + +:: + + .. + VDU1: + type: tosca.nodes.mec.VDU.Apmec + artifacts: + MEAImage: + type: tosca.artifacts.Deployment.Image.VM + file: http://download.cirros-cloud.net/0.3.5/ \ + cirros-0.3.5-x86_64-disk.img + +VDU Capabilities +^^^^^^^^^^^^^^^^ +Computational properties of a VDU are described as its capabilities. Allocated +RAM size, allocated disk size, memory page size, number of CPUs, number of +cores per CPU, number of threads per core can be specified. + +A VDU with 10 GB disk, 2 GB RAM, 2 CPUs, 4 KB of memory page and dedicated CPU +can be specified as below. Thread and core counts can be specified as shown. + +:: + + .. 
+ VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 2048 MB + num_cpus: 2 + mem_page_size: small + cpu_allocation: + cpu_affinity: dedicated + thread_count: 4 + core_count: 2 + +:capabilities: + ++---------------------+---------------+-----------+--------------------------+ +|Name |Type |Constraints|Description | ++---------------------+---------------+-----------+--------------------------+ +|mec_compute |Compute. |None |Describes the configurat | +| |Container. | |ion of the VM on which | +| |Architecture | |the VDU resides | ++---------------------+---------------+-----------+--------------------------+ + +Compute Container Architecture +"""""""""""""""""""""""""""""" +:type: + tosca.capabilities.Compute.Container.Architecture + +:properties: + ++---------------+--------+--------+---------------+--------------------------+ +|Name |Required|Type |Constraints |Description | ++---------------+--------+--------+---------------+--------------------------+ +|mem_page_size |No |String |One of below |Indicates page size of the| +| | | | |VM | +| | | | | | +| (in MB) | | |- small |- small maps to 4 KB | +| | | |- large |- large maps to 2 MB | +| | | |- any (default)|- any maps to system's | +| | | | | default | +| | | |- custom |- custom sets the size to | +| | | | | specified value | ++---------------+--------+--------+---------------+--------------------------+ +|cpu_allocation |No |CPUAllo-| |CPU allocation requirement| +| | |cation | |like dedicated CPUs, | +| | | | |socket/thread count | ++---------------+--------+--------+---------------+--------------------------+ +|numa_node_count|No |Integer | |Symmetric count of NUMA | +| | | | |nodes to expose to VM. 
| +| | | | |vCPU and Memory is split | +| | | | |equally across this | +| | | | |number of NUMA | ++---------------+--------+--------+---------------+--------------------------+ +|numa_nodes |No |Map of |Symmetric |Asymmetric allocation of | +| | |NUMA |numa_node_count|vCPU and memory across | +| | | |should not be |the specified NUMA nodes | +| | | |specified | | ++---------------+--------+--------+---------------+--------------------------+ + +CPUAllocation +""""""""""""" +This describes the granular CPU allocation requirements for VDUs. + +:type: + tosca.datatypes.compute.Container.Architecture.CPUAllocation + +:properties: + ++-----------------+-------+------------+-------------------------------------+ +|Name |Type |Constraints |Description | ++-----------------+-------+------------+-------------------------------------+ +|cpu_affinity |String |One of |Describes whether vCPU need to be | +| | | |pinned to dedicated CPU core or | +| | |- shared |shared dynamically | +| | |- dedicated | | ++-----------------+-------+------------+-------------------------------------+ +|thread_allocation|String |One of |Describes thread allocation | +| | | |requirement | +| | |- avoid | | +| | |- separate | | +| | |- isolate | | +| | |- prefer | | ++-----------------+-------+------------+-------------------------------------+ +|socket_count |Integer| None |Number of CPU sockets | ++-----------------+-------+------------+-------------------------------------+ +|core_count |Integer| None |Number of cores per socket | ++-----------------+-------+------------+-------------------------------------+ +|thread_count |Integer| None |Number of threads per core | ++-----------------+-------+------------+-------------------------------------+ + +NUMA architecture +""""""""""""""""" +Following code snippet describes symmetric NUMA topology requirements for VDUs. + +:: + + .. 
+ VDU1: + capabilities: + mec_compute: + properties: + numa_node_count: 2 + numa_nodes: 3 + +For asymmetric NUMA architecture: + +:: + + .. + VDU1: + capabilities: + mec_compute: + properties: + mem_size: 4096 MB + num_cpus: 4 + numa_nodes: + node0: + id: 0 + vcpus: [0,1] + mem_size: 1024 MB + node1: + id: 1 + vcpus: [2,3] + mem_size: 3072 MB + +:type: + tosca.datatypes.compute.Container.Architecture.NUMA + +:properties: + ++--------+---------+-----------+-------------------------------------------+ +|Name |Type |Constraints|Description | ++--------+---------+-----------+-------------------------------------------+ +|id |Integer | >= 0 |CPU socket identifier | ++--------+---------+-----------+-------------------------------------------+ +|vcpus |Map of |None |List of specific host cpu numbers within a | +| |integers | |NUMA socket complex | ++--------+---------+-----------+-------------------------------------------+ +|mem_size|scalar- | >= 0MB |Size of memory allocated from this NUMA | +| |unit.size| |memory bank | ++--------+---------+-----------+-------------------------------------------+ + +Connection Points +----------------- +Connection point is used to connect the internal virtual link or outside +virtual link. It may be a virtual NIC or a SR-IOV NIC. Each connection +point has to bind to a VDU. A CP always requires a virtual link and a +virtual binding associated with it. + +A code snippet for virtual NIC (Connection Point) without anti-spoof +protection and are accessible by the user. CP1 and CP2 are connected to +VDU1 in this order. Also CP1/CP2 are connected to VL1/VL2 respectively. + +:: + + .. + topology_template: + node_templates: + VDU1: + .. 
+ CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + mac_address: fa:40:08:a0:de:0a + ip_address: 10.10.1.12 + type: vnic + anti_spoofing_protection: false + management: true + order: 0 + security_groups: + - secgroup1 + - secgroup2 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + type: vnic + anti_spoofing_protection: false + management: true + order: 1 + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + VL1: + .. + VL2: + .. + +:type: + tosca.nodes.mec.CP.Apmec + +:properties: + ++-------------------------+--------+-------+-----------+----------------------+ +| Name |Required|Type |Constraints| Description | ++-------------------------+--------+-------+-----------+----------------------+ +| type | No |String |One of | Specifies the type | +| | | | | of CP | +| | | |- vnic | | +| | | | (default)| | +| | | |- sriov | | ++-------------------------+--------+-------+-----------+----------------------+ +| anti_spoofing_protection| No |Boolean| None | Indicates whether | +| | | | | anti_spoof rule is | +| | | | | enabled for the MEA | +| | | | | or not. Applicable | +| | | | | only when CP type is | +| | | | | virtual NIC | ++-------------------------+--------+-------+-----------+----------------------+ +| management | No |Boolean| None | Specifies whether the| +| | | | | CP is accessible by | +| | | | | the user or not | ++-------------------------+--------+-------+-----------+----------------------+ +| order | No |Integer| >= 0 | Uniquely numbered | +| | | | | order of CP within a | +| | | | | VDU. Must be provided| +| | | | | when binding more | +| | | | | than one CP to a VDU | +| | | | | and ordering is | +| | | | | required. 
| ++-------------------------+--------+-------+-----------+----------------------+ +| security_groups | No |List | None | List of security | +| | | | | groups to be | +| | | | | associated with | +| | | | | the CP | ++-------------------------+--------+-------+-----------+----------------------+ +| mac_address | No |String | None | The MAC address | ++-------------------------+--------+-------+-----------+----------------------+ +| ip _address | No |String | None | The IP address | ++-------------------------+--------+-------+-----------+----------------------+ + +:requirements: + ++---------------+--------------------+-------------------+-------------------+ +|Name |Capability |Relationship |Description | ++---------------+--------------------+-------------------+-------------------+ +|virtualLink |mec.VirtualLinkable |mec.VirtualLinksTo |States the VL node | +| | | |to connect to | ++---------------+--------------------+-------------------+-------------------+ +|virtualbinding |mec.VirtualBindable |mec.VirtualBindsTo |States the VDU | +| | | |node to connect to | ++---------------+--------------------+-------------------+-------------------+ + +Virtual Links +------------- +Virtual link provides connectivity between VDUs. It represents the logical +virtual link entity. + +An example of a virtual link whose vendor is Acme and is attached to network +net-01 is as shown below. + +:: + + .. + topology_template: + node_templates: + VDU1: + .. + CP1: + .. 
+ VL1: + type: tosca.nodes.mec.VL + properties: + vendor: Acme + network_name: net-01 + +:type: + tosca.nodes.mec.VL + +:properties: + ++------------+----------+--------+-------------+-----------------------------+ +|Name | Required | Type | Constraints | Description | ++------------+----------+--------+-------------+-----------------------------+ +|vendor | Yes | String | None | Vendor generating this VL | ++------------+----------+--------+-------------+-----------------------------+ +|network_name| Yes | String | None | Name of the network to which| +| | | | | VL is to be attached | ++------------+----------+--------+-------------+-----------------------------+ + +Floating IP +----------- +Floating IP is used to access VDU from public network. + +An example of assign floating ip to VDU + +:: + + .. + topology_template: + node_templates: + VDU1: + .. + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + VL1: + .. 
+ FIP1: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: public + requirements: + - link: + node: CP1 + +:type: + tosca.nodes.network.FloatingIP + +:properties: + ++-------------------+----------+--------+-------------+-----------------------+ +|Name | Required | Type | Constraints | Description | ++-------------------+----------+--------+-------------+-----------------------+ +|floating_network | Yes | String | None | Name of public network| ++-------------------+----------+--------+-------------+-----------------------+ +|floating_ip_address| No | String | None | Floating IP Address | +| | | | | from public network | ++------------+------+----------+--------+-------------+-----------------------+ + +:requirements: + ++------+-------------------+--------------------+-------------------+ +|Name |Capability |Relationship |Description | ++------+-------------------+--------------------+-------------------+ +|link |tosca.capabilities |tosca.relationships |States the CP node | +| |.network.Linkable |.network.LinksTo |to connect to | ++------+-------------------+--------------------+-------------------+ + +Multiple nodes +-------------- +Multiple node types can be defined in a MEAD. + +:: + + .. + topology_template: + node_templates: + VDU1: + .. + VDU2: + .. + CP1: + .. + CP2: + .. + VL1: + .. + VL2: + .. + +Summary +------- +To summarize MEAD is written in YAML and describes a MEA topology. It has +three node types, each with different capabilities and requirements. Below is +a template which mentions all node types with all available options. + +:: + + tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + description: Sample MEAD template mentioning possible values for each node. 
+ metadata: + template_name: sample-tosca-mead-template-guide + topology_template: + node_templates: + VDU: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + mem_page_size: [small, large, any, custom] + cpu_allocation: + cpu_affinity: [shared, dedicated] + thread_allocation: [avoid, separate, isolate, prefer] + socket_count: any integer + core_count: any integer + thread_count: any integer + numa_node_count: any integer + numa_nodes: + node0: [ id: >=0, vcpus: [host CPU numbers], mem_size: >= 0MB] + properties: + image: Image to be used in VM + flavor: Nova supported flavors + availability_zone: available availability zone + mem_size: in MB + disk_size: in GB + num_cpus: any integer + metadata: + entry_schema: + config_drive: [true, false] + monitoring_policy: + name: [ping, noop, http-ping] + parameters: + monitoring_delay: delay time + count: any integer + interval: time to wait between monitoring + timeout: monitoring timeout time + actions: + [failure: respawn, failure: terminate, failure: log] + retry: Number of retries + port: specific port number if any + config: Configuring the VDU as per the network function requirements + mgmt_driver: [default=noop] + service_type: type of network service to be done by VDU + user_data: custom commands to be executed on VDU + user_data_format: format of the commands + key_name: user key + artifacts: + MEAImage: + type: tosca.artifacts.Deployment.Image.VM + file: file to be used for image + CP: + type: tosca.nodes.mec.CP.Apmec + properties: + management: [true, false] + anti_spoofing_protection: [true, false] + type: [ sriov, vnic ] + order: order of CP within a VDU + security_groups: list of security groups + requirements: + - virtualLink: + node: VL to link to + - virtualBinding: + node: VDU to bind to + VL: + type: tosca.nodes.mec.VL + properties: + network_name: name of network to attach to + vendor: Acme diff --git a/doc/source/contributor/vnfd_template_parameterization.rst 
b/doc/source/contributor/vnfd_template_parameterization.rst new file mode 100644 index 0000000..e6a3ec3 --- /dev/null +++ b/doc/source/contributor/vnfd_template_parameterization.rst @@ -0,0 +1,277 @@ +MEAD Template Parameterization +============================== + +Overview +-------- + +Parameterization allows for the ability to use a single MEAD to be deployed +multiple times with different values for the VDU parameters provided at +deploy time. In contrast, a non-parameterized MEAD has static values +for the parameters that might limit the number of concurrent MEAs that can be +deployed using a single MEAD. For example, deploying an instance of a +non-parameterized template that has fixed IP addresses specified for network +interface a second time without deleting the first instance of MEA would lead +to an error. + +Non-parameterized MEAD template +------------------------------- + +Find below an example of a non-parameterized MEAD where the text italicized +are the VDU parameters and text in bold are the values for those VDU +parameters that get applied to the VDU when this template is deployed. +The next section will illustrate how the below non-parameterized template +can be parameterized and re-used for deploying multiple MEAs. + +Here is the sample template: + +.. 
code-block:: yaml + + tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + + description: MEA TOSCA template with input parameters + + metadata: + template_name: sample-tosca-mead + + topology_template: + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: True + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec + + +Parameterized MEAD template +--------------------------- +This section will walk through parameterizing the template in above section +for re-use and allow for deploying multiple MEAs with the same template. +(Note: All the parameters italicized in the above template could be +parameterized to accept values at deploy time). +For the current illustration purpose, we will assume that an end user would +want to be able to supply different values for the parameters +**image_name**, **flavor**, **network**, **management**, **pkt_in_network**, +**pkt_out_network**, **vendor**, during each deploy of the MEA. + +The next step is to substitute the identified parameter values that will be +provided at deploy time with { get_input: }. 
For example, the +instance_type: **cirros-0.3.5-x86_64-disk** would now be replaced as: +**image: {get_input: image_name}**. The **get_input** is a reserved +keyword in the template that indicates value will be supplied at deploy time +for the parameter instance_type. The **image_name** is the variable that will +hold the value for the parameter **image** in a parameters value file +that will be supplied at MEA deploy time. + +The template in above section will look like below when parameterized for +**image_name**, **flavor**, **network**, **management** and remaining +parameters. + +Here is the sample template: + +.. code-block:: yaml + + tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + + description: MEA TOSCA template with input parameters + + metadata: + template_name: sample-tosca-mead + + topology_template: + inputs: + image_name: + type: string + description: Image Name + + flavor: + type: string + description: Flavor Information + + zone: + type: string + description: Zone Information + + network: + type: string + description: management network + + management: + type: string + description: management network + + pkt_in_network: + type: string + description: In network + + pkt_out_network: + type: string + description: Out network + + vendor: + type: string + description: Vendor information + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: { get_input: image_name} + flavor: {get_input: flavor} + availability_zone: { get_input: zone } + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: { get_input: management } + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + 
+ properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: { get_input: network } + vendor: {get_input: vendor} + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: { get_input: pkt_in_network } + vendor: {get_input: vendor} + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: { get_input: pkt_out_network } + vendor: {get_input: vendor} + + +Parameter values file at MEA deploy +----------------------------------- +The below illustrates the parameters value file to be supplied containing the +values to be substituted for the above parameterized template during +MEA deploy. + +.. code-block:: yaml + + image_name: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + zone: nova + network: net_mgmt + management: True + pkt_in_network: net0 + pkt_out_network: net1 + vendor: Apmec + + +.. note:: + + IP address values for network interfaces should be in the below format + in the parameters values file: + + param_name_value: + \- xxx.xxx.xxx.xxx + + +Key Summary +----------- +#. Parameterize your MEAD if you want to re-use for multiple MEA deployments. +#. Identify parameters that would need to be provided values at deploy time + and substitute value in MEAD template with {get_input: param_value_name}, + where 'param_value_name' is the name of the variable that holds the value + in the parameters value file. +#. Supply a parameters value file in yaml format each time during MEA + deployment with different values for the parameters. +#. An example of a mea-create python-apmecclient command specifying a + parameterized template and parameter values file would look like below: + + .. code-block:: console + + apmec mea-create --mead-name <mead-name> --param-file <param-file> <mea-name> + +#. Specifying a parameter values file during MEA creation is also supported in + Horizon UI. +#.
Sample MEAD parameterized templates and parameter values files can be found + at https://github.com/openstack/apmec/tree/master/samples/tosca-templates/mead. diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 0000000..4e95ee7 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,139 @@ +.. + Copyright 2014-2015 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +=============================== +Welcome to Apmec Documentation +=============================== + +Apmec is an OpenStack service for MEC Orchestration with a general purpose +MEA Manager to deploy and operate Virtual Network Functions (MEAs) and +Network Services on an MEC Platform. It is based on ETSI MANO Architectural +Framework. + +Installation +============ + +For Apmec to work, the system consists of two parts, one is apmec system +and another is VIM systems. Apmec system can be installed +(here just some ways are listed): + +* via devstack, which is usually used by developers +* via Apmec source code manually +* via Kolla installation + + +.. toctree:: + :maxdepth: 1 + + install/kolla.rst + install/devstack.rst + install/manual_installation.rst + +Target VIM installation +======================= + +Most of time, the target VIM existed for Apmec to manage. This section shows +us how to prepare a target VIM for Apmec. + +.. toctree:: + :maxdepth: 1 + + install/openstack_vim_installation.rst + + +Getting Started +=============== + +.. 
toctree:: + :maxdepth: 1 + + install/getting_started.rst + install/deploy_openwrt.rst + +Feature Documentation +===================== + +.. toctree:: + :maxdepth: 1 + + contributor/mead_template_description.rst + contributor/monitor-api.rst + contributor/mead_template_parameterization.rst + contributor/event_logging.rst + contributor/apmec_conductor.rst + contributor/apmec_vim_monitoring.rst + contributor/policy_actions_framework.rst + contributor/encrypt_vim_auth_with_barbican.rst + +User Guide +========== + +.. toctree:: + :maxdepth: 1 + + user/mem_usage_guide.rst + user/multisite_vim_usage_guide.rst + user/scale_usage_guide.rst + user/alarm_monitoring_usage_guide.rst + user/mesd_usage_guide.rst + user/mea_component_usage_guide.rst + user/enhanced_placement_awareness_usage_guide.rst + reference/mistral_workflows_usage_guide.rst + reference/block_storage_usage_guide.rst + +API Documentation +================= + +.. toctree:: + :maxdepth: 2 + + contributor/api/mano_api.rst + +Contributing to Apmec +====================== + +.. toctree:: + :maxdepth: 1 + + contributor/dev-process.rst + +Developer Info +============== + +.. 
toctree:: + :maxdepth: 1 + + contributor/development.environment.rst + contributor/api/api_layer.rst + contributor/api/api_extensions.rst + contributor/apmec_functional_test.rst + contributor/dashboards.rst + +Project Info +============ + +* **Free software:** under the `Apache license `_ +* **Apmec Service:** http://git.openstack.org/cgit/openstack/apmec +* **Apmec Client Library:** http://git.openstack.org/cgit/openstack/python-apmecclient +* **Apmec Service Bugs:** http://bugs.launchpad.net/apmec +* **Client Bugs:** https://bugs.launchpad.net/python-apmecclient +* **Blueprints:** https://blueprints.launchpad.net/apmec + +Indices and tables +------------------ + +* :ref:`search` +* :ref:`modindex` diff --git a/doc/source/install/deploy_openwrt.rst b/doc/source/install/deploy_openwrt.rst new file mode 100644 index 0000000..a0e6912 --- /dev/null +++ b/doc/source/install/deploy_openwrt.rst @@ -0,0 +1,179 @@ +.. + Copyright 2014-2015 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +======================== +Deploying OpenWRT as MEA +======================== + +Once apmec is installed successfully, follow the steps given below to get +started with deploying OpenWRT as MEA. + +1. Ensure Glance already contains OpenWRT image. Normally, Apmec tries +to add OpenWRT image to Glance while installing via devstack. By running +**openstack image list** to check OpenWRT image if exists. If not, download +the image from +`OpenWRT official site +`_. 
+And upload this image into Glance by using the command specified below: + +.. code-block:: console + + openstack image create OpenWRT --disk-format qcow2 \ + --container-format bare \ + --file /path_to_image/openwrt-x86-kvm_guest-combined-ext4.img \ + --public +.. + +2. Create a yaml template named tosca-mead-openwrt-with-firewall-rules.yaml +which contains basic configuration of OpenWRT and some firewall rules of +OpenWRT. All contents of the template file shows below: + +.. code-block:: ini + + tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + + description: OpenWRT with services + + metadata: + template_name: OpenWRT + + topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: OpenWRT + config: + firewall: | + package firewall + + config defaults + option syn_flood '1' + option input 'ACCEPT' + option output 'ACCEPT' + option forward 'REJECT' + + config zone + option name 'lan' + list network 'lan' + option input 'ACCEPT' + option output 'ACCEPT' + option forward 'ACCEPT' + + config zone + option name 'wan' + list network 'wan' + list network 'wan6' + option input 'REJECT' + option output 'ACCEPT' + option forward 'REJECT' + option masq '1' + option mtu_fix '1' + + config forwarding + option src 'lan' + option dest 'wan' + + config rule + option name 'Allow-DHCP-Renew' + option src 'wan' + option proto 'udp' + option dest_port '68' + option target 'ACCEPT' + option family 'ipv4' + + config rule + option name 'Allow-Ping' + option src 'wan' + option proto 'icmp' + option icmp_type 'echo-request' + option family 'ipv4' + option target 'ACCEPT' + mgmt_driver: openwrt + monitoring_policy: + name: ping + parameters: + count: 3 + interval: 10 + actions: + failure: respawn + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + 
+ node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + +.. + +The above template file comes from two files. One is `tosca-mead-openwrt.yaml +`_ and the other one is +`tosca-config-openwrt-with-firewall.yaml +`_. +In this template file, we specify the **mgmt_driver: openwrt** which means +this MEAD is managed by `openwrt driver +`_. This driver can inject firewall rules +which are defined in the MEAD into the OpenWRT instance by using the SSH protocol. We can +run **cat /etc/config/firewall** to confirm whether the firewall rules were +injected successfully. + +3. Create a sample mead: + +.. code-block:: console + + apmec mead-create \ + --mead-file tosca-mead-openwrt-with-firewall-rules.yaml \ + <mead-name> +.. + +4. Create a MEA: + +.. code-block:: console + + apmec mea-create --mead-name <mead-name> <mea-name> +.. + +This MEA will contain all the firewall rules that the MEAD defines, +which can be verified by using 'cat /etc/config/firewall' in the MEA. + + +5. Check the status: + +.. code-block:: console + + apmec mea-list + apmec mea-show <mea-name> +.. diff --git a/doc/source/install/devstack.rst b/doc/source/install/devstack.rst new file mode 100644 index 0000000..d336f5a --- /dev/null +++ b/doc/source/install/devstack.rst @@ -0,0 +1,65 @@ +.. + Copyright 2015-2016 Brocade Communications Systems Inc + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License.
+ + +==================== +Install via Devstack +==================== + +The devstack supports installation from different code branch by specifying + below. If there is no preference, it is recommended to install +Apmec from master branch, i.e. the is master. If pike branch +is the target branch, the is stable/pike. + +1. Download DevStack:: + + $ git clone https://git.openstack.org/openstack-dev/devstack -b + $ cd devstack + +2. Enable apmec related devstack plugins in ``local.conf`` file:: + + [[local|localrc]] + ############################################################ + # Customize the following HOST_IP based on your installation + ############################################################ + HOST_IP=127.0.0.1 + SERVICE_HOST=127.0.0.1 + SERVICE_PASSWORD=devstack + ADMIN_PASSWORD=devstack + SERVICE_TOKEN=devstack + DATABASE_PASSWORD=root + RABBIT_PASSWORD=password + ENABLE_HTTPD_MOD_WSGI_SERVICES=True + KEYSTONE_USE_MOD_WSGI=True + + # Logging + LOGFILE=$DEST/logs/stack.sh.log + VERBOSE=True + ENABLE_DEBUG_LOG_LEVEL=True + ENABLE_VERBOSE_LOG_LEVEL=True + GIT_BASE=${GIT_BASE:-git://git.openstack.org} + + APMEC_MODE=standalone + USE_BARBICAN=True + APMEC_BRANCH= + enable_plugin networking-sfc ${GIT_BASE}/openstack/networking-sfc $APMEC_BRANCH + enable_plugin barbican ${GIT_BASE}/openstack/barbican $APMEC_BRANCH + enable_plugin mistral ${GIT_BASE}/openstack/mistral $APMEC_BRANCH + enable_plugin apmec ${GIT_BASE}/openstack/apmec $APMEC_BRANCH + +3. Run ``stack.sh``:: + + $ ./stack.sh diff --git a/doc/source/install/getting_started.rst b/doc/source/install/getting_started.rst new file mode 100644 index 0000000..a18ece7 --- /dev/null +++ b/doc/source/install/getting_started.rst @@ -0,0 +1,133 @@ +.. + Copyright 2014-2015 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +=============== +Getting Started +=============== + +Once apmec is installed successfully, follow the steps given below to get +started with apmec and validate the installation. + + +Registering default OpenStack VIM +================================= +1.) Get one account on the OpenStack VIM. + +In Apmec MANO system, the MEA can be onboarded to one target OpenStack, which +is also called VIM. Get one account on this OpenStack. For example, the below +is the account information collected in file vim-config.yaml:: + + auth_url: 'http://10.1.0.5:5000' + username: 'mec_user' + password: 'mySecretPW' + project_name: 'mec' + project_domain_name: 'Default' + user_domain_name: 'Default' + + +2.) Register the VIM that will be used as a default VIM for MEA deployments. +This will be required when the optional argument --vim-id is not provided by +the user during mea-create. + +.. code-block:: console + + apmec vim-register --is-default --config-file vim-config.yaml \ + --description 'my first vim' hellovim +.. + + + +Onboarding sample MEA +===================== + +1). Create a sample-mead.yaml file with the following content: + +.. 
code-block:: ini + + tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + + description: Demo example + + metadata: + template_name: sample-tosca-mead + + topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + +.. + +.. note:: + + You can find more sample tosca templates at + https://github.com/openstack/apmec/tree/master/samples/tosca-templates/mead. + + +2). Create a sample mead. + +.. code-block:: console + + apmec mead-create --mead-file sample-mead.yaml samplemead +.. + +3). Create a MEA. + +.. code-block:: console + + apmec mea-create --mead-name samplemead samplemea +.. + +4). Check the status. + +.. code-block:: console + + apmec vim-list + apmec mead-list + apmec mea-list + apmec mea-show samplemea +.. diff --git a/doc/source/install/kolla.rst b/doc/source/install/kolla.rst new file mode 100644 index 0000000..eb3f27c --- /dev/null +++ b/doc/source/install/kolla.rst @@ -0,0 +1,197 @@ +.. + Copyright 2014-2017 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the + License for the specific language governing permissions and limitations + under the License. + + +========================= +Install via Kolla Ansible +========================= + +Please refer to "Install dependencies" part of kolla ansible quick start at +https://docs.openstack.org/kolla-ansible/latest/user/quickstart.html to set +up the docker environment that is used by kolla ansible. + +To install via Kolla Ansible, the version of Kolla Ansible should be consistent +with the target Apmec system. For example, stable/pike branch of Kolla Ansible +should be used to install stable/pike branch of Apmec. Here the stable/pike +branch version will be used to show how to install Apmec with Kolla Ansible. + +Kolla can be used to install multiple nodes system, but Apmec server is not +ready for multiple nodes deployment yet, so only an all-in-one Apmec is +installed in this document. + + +Install Kolla Ansible +~~~~~~~~~~~~~~~~~~~~~ + +1. Get the stable/pike version of kolla ansible: + +.. code-block:: console + + $ git clone https://github.com/openstack/kolla-ansible.git -b stable/pike + $ cd kolla-ansible + $ sudo yum install python-devel libffi-devel gcc openssl-devel libselinux-python + $ sudo pip install -r requirements.txt + $ sudo python setup.py install + +.. + + +If the needed version has already been published at pypi site +'https://pypi.python.org/pypi/kolla-ansible', the command below can be used: + +.. code-block:: console + + $ sudo pip install "kolla-ansible==5.0.0" + +.. + + +Install Apmec +~~~~~~~~~~~~~~ + +1. Edit kolla ansible's configuration file /etc/kolla/globals.yml: + +.. code-block:: ini + + --- + kolla_install_type: "source" + # openstack_release can be determined by version of kolla-ansible tool. + # But if needed, it can be specified. + #openstack_release: 5.0.0 + kolla_internal_vip_address: + # The Public address used to communicate with OpenStack as set in the + # public_url for the endpoints that will be created. 
This DNS name + # should map to kolla_external_vip_address. + #kolla_external_fqdn: "{{ kolla_external_vip_address }}" + # define your own registry if needed + #docker_registry: "127.0.0.1:4000" + # If needed OpenStack kolla images are published, docker_namespace should be + # kolla + #docker_namespace: "kolla" + docker_namespace: "gongysh" + enable_glance: "no" + enable_haproxy: "no" + enable_keystone: "yes" + enable_mariadb: "yes" + enable_memcached: "yes" + enable_neutron: "no" + enable_nova: "no" + enable_barbican: "yes" + enable_mistral: "yes" + enable_apmec: "yes" + enable_heat: "no" + enable_openvswitch: "no" + enable_horizon: "yes" + enable_horizon_apmec: "{{ enable_apmec | bool }}" + +.. + +.. note:: + + To determine version of kolla-ansible, the following commandline can be + used: + + $ python -c "import pbr.version; print(pbr.version.VersionInfo('kolla-ansible'))" + + +2. Run kolla-genpwd to generate system passwords: + +.. code-block:: console + + $ sudo cp etc/kolla/passwords.yml /etc/kolla/passwords.yml + $ sudo kolla-genpwd + +.. + +.. note:: + + If the pypi version is used to install kolla-ansible the skeleton passwords + file maybe under '/usr/share/kolla-ansible/etc_examples/kolla'. + + +With this command, /etc/kolla/passwords.yml will be populated with +generated passwords. + + +3. Run kolla ansible deploy to install apmec system: + +.. code-block:: console + + $ sudo kolla-ansible deploy + +.. + + +4. Run kolla ansible post-deploy to generate apmec access environment file: + +.. code-block:: console + + $ sudo kolla-ansible post-deploy + +.. + +With this command, the "admin-openrc.sh" will be generated at +/etc/kolla/admin-openrc.sh. + + +5. Check the related containers are started and running: + +Apmec system consists of some containers. Following is a sample output. +The containers fluentd, cron and kolla_toolbox are from kolla, please see +kolla ansible documentation for their usage. Others are from Apmec system +components. + +.. 
code-block:: console + + $ sudo docker ps --format "table {{.ID}}\t{{.Image}}\t{{.Names}}" + CONTAINER ID IMAGE NAMES + 78eafed848a8 gongysh/centos-source-apmec-server:5.0.0 apmec_server + 00bbecca5950 gongysh/centos-source-apmec-conductor:5.0.0 apmec_conductor + 19eddccf8e8f gongysh/centos-source-barbican-worker:5.0.0 barbican_worker + 6434b1d8236e gongysh/centos-source-barbican-keystone-listener:5.0.0 barbican_keystone_listener + 48be088643f8 gongysh/centos-source-barbican-api:5.0.0 barbican_api + 50b9a9a0e542 gongysh/centos-source-mistral-executor:5.0.0 mistral_executor + 07c28d845311 gongysh/centos-source-mistral-engine:5.0.0 mistral_engine + 196bbcc592a4 gongysh/centos-source-mistral-api:5.0.0 mistral_api + d5511b195a58 gongysh/centos-source-horizon:5.0.0 horizon + 62913ec7c056 gongysh/centos-source-keystone:5.0.0 keystone + 552b95e82f98 gongysh/centos-source-rabbitmq:5.0.0 rabbitmq + 4d57d7735514 gongysh/centos-source-mariadb:5.0.0 mariadb + 4e1142ff158d gongysh/centos-source-cron:5.0.0 cron + 000ba4ca1974 gongysh/centos-source-kolla-toolbox:5.0.0 kolla_toolbox + 0fe21b1ad18c gongysh/centos-source-fluentd:5.0.0 fluentd + a13e45fc034f gongysh/centos-source-memcached:5.0.0 memcached + +.. + + +6. Install apmec client: + +.. code-block:: console + + $ sudo pip install python-apmecclient + +.. + + +7. Check the Apmec server is running well: + +.. code-block:: console + + $ . /etc/kolla/admin-openrc.sh + $ apmec vim-list + +.. diff --git a/doc/source/install/manual_installation.rst b/doc/source/install/manual_installation.rst new file mode 100644 index 0000000..395916a --- /dev/null +++ b/doc/source/install/manual_installation.rst @@ -0,0 +1,311 @@ +.. + Copyright 2015-2016 Brocade Communications Systems Inc + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + +=================== +Manual Installation +=================== + +This document describes how to install and run Apmec manually. + +Pre-requisites +============== + +1). Ensure that OpenStack components Keystone, Mistral, Barbican and +Horizon are installed. Refer https://docs.openstack.org/latest/projects.html +for installation of these OpenStack projects on different Operating Systems. + +2). one admin-openrc.sh file is generated. one sample admin-openrc.sh file +is like the below: + +.. code-block:: ini + + export OS_PROJECT_DOMAIN_NAME=Default + export OS_USER_DOMAIN_NAME=Default + export OS_PROJECT_NAME=admin + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=KTskN5eUMTpeHLKorRcZBBbH0AM96wdvgQhwENxY + export OS_AUTH_URL=http://localhost:35357/v3 + export OS_INTERFACE=internal + export OS_IDENTITY_API_VERSION=3 + export OS_REGION_NAME=RegionOne + + +Installing Apmec server +======================== + +.. note:: + + The paths we are using for configuration files in these steps are with reference to + Ubuntu Operating System. The paths may vary for other Operating Systems. + + The branch_name which is used in commands, specify the branch_name as + "stable/" for any stable branch installation. + For eg: stable/ocata, stable/newton. If unspecified the default will be + "master" branch. + + +1). Create MySQL database and user. + +.. 
code-block:: console + + mysql -uroot -p + CREATE DATABASE apmec; + GRANT ALL PRIVILEGES ON apmec.* TO 'apmec'@'localhost' \ + IDENTIFIED BY ''; + GRANT ALL PRIVILEGES ON apmec.* TO 'apmec'@'%' \ + IDENTIFIED BY ''; + exit; +.. + +.. note:: + + Replace ``APMECDB_PASSWORD`` with your password. + +2). Create users, roles and endpoints: + +a). Source the admin credentials to gain access to admin-only CLI commands: + +.. code-block:: console + + . admin-openrc.sh +.. + +b). Create apmec user with admin privileges. + +.. note:: + + Project_name can be "service" or "services" depending on your + OpenStack distribution. +.. + +.. code-block:: console + + openstack user create --domain default --password apmec + openstack role add --project service --user apmec admin +.. + +c). Create apmec service. + +.. code-block:: console + + openstack service create --name apmec \ + --description "Apmec Project" mec-orchestration +.. + +d). Provide an endpoint to apmec service. + +If you are using keystone v3 then, + +.. code-block:: console + + openstack endpoint create --region RegionOne mec-orchestration \ + public http://:9896/ + openstack endpoint create --region RegionOne mec-orchestration \ + internal http://:9896/ + openstack endpoint create --region RegionOne mec-orchestration \ + admin http://:9896/ +.. + +If you are using keystone v2 then, + +.. code-block:: console + + openstack endpoint create --region RegionOne \ + --publicurl 'http://:9896/' \ + --adminurl 'http://:9896/' \ + --internalurl 'http://:9896/' +.. + +3). Clone apmec repository. + +.. code-block:: console + + cd ~/ + git clone https://github.com/openstack/apmec -b +.. + +4). Install all requirements. + +.. code-block:: console + + cd apmec + sudo pip install -r requirements.txt +.. + + +5). Install apmec. + +.. code-block:: console + + sudo python setup.py install +.. + +.. + +6). Create 'apmec' directory in '/var/log'. + +.. code-block:: console + + sudo mkdir /var/log/apmec + +.. + +7). 
Generate the apmec.conf.sample using tools/generate_config_file_sample.sh + or 'tox -e config-gen' command. Rename the "apmec.conf.sample" file at + "etc/apmec/" to apmec.conf. Then edit it to ensure the below entries: + +.. note:: + + Ignore any warnings generated while using the + "generate_config_file_sample.sh". + +.. + +.. note:: + + project_name can be "service" or "services" depending on your + OpenStack distribution in the keystone_authtoken section. +.. + +.. code-block:: ini + + [DEFAULT] + auth_strategy = keystone + policy_file = /usr/local/etc/apmec/policy.json + debug = True + use_syslog = False + bind_host = + bind_port = 9896 + service_plugins = meo,mem + + state_path = /var/lib/apmec + ... + + [meo] + vim_drivers = openstack + + [keystone_authtoken] + memcached_servers = 11211 + region_name = RegionOne + auth_type = password + project_domain_name = + user_domain_name = + username = + project_name = service + password = + auth_url = http://:35357 + auth_uri = http://:5000 + ... + + [agent] + root_helper = sudo /usr/local/bin/apmec-rootwrap /usr/local/etc/apmec/rootwrap.conf + ... + + [database] + connection = mysql://apmec:@:3306/apmec?charset=utf8 + ... + + [apmec] + monitor_driver = ping,http_ping + +.. + +8). Copy the apmec.conf file to "/usr/local/etc/apmec/" directory + +.. code-block:: console + + sudo su + cp etc/apmec/apmec.conf /usr/local/etc/apmec/ + +.. + +9). Populate Apmec database: + +.. code-block:: console + + /usr/local/bin/apmec-db-manage --config-file /usr/local/etc/apmec/apmec.conf upgrade head + +.. + + +Install Apmec client +===================== + +1). Clone apmec-client repository. + +.. code-block:: console + + cd ~/ + git clone https://github.com/openstack/python-apmecclient -b +.. + +2). Install apmec-client. + +.. code-block:: console + + cd python-apmecclient + sudo python setup.py install +.. + +Install Apmec horizon +====================== + + +1). Clone apmec-horizon repository. + +.. 
code-block:: console + + cd ~/ + git clone https://github.com/openstack/apmec-horizon -b +.. + +2). Install horizon module. + +.. code-block:: console + + cd apmec-horizon + sudo python setup.py install +.. + +3). Enable apmec horizon in dashboard. + +.. code-block:: console + + sudo cp apmec_horizon/enabled/* \ + /usr/share/openstack-dashboard/openstack_dashboard/enabled/ +.. + +4). Restart Apache server. + +.. code-block:: console + + sudo service apache2 restart +.. + +Starting Apmec server +====================== + +1).Open a new console and launch apmec-server. A separate terminal is +required because the console will be locked by a running process. + +.. code-block:: console + + sudo python /usr/local/bin/apmec-server \ + --config-file /usr/local/etc/apmec/apmec.conf \ + --log-file /var/log/apmec/apmec.log +.. diff --git a/doc/source/install/openstack_nodes.png b/doc/source/install/openstack_nodes.png new file mode 100644 index 0000000000000000000000000000000000000000..3659f25f8b01dcb1b3f0968dfd846ec907f1f422 GIT binary patch literal 81038 zcmeFZXH=8j)-DVvA{|8m1t~UEO0dx(2%;iFR1}aF5Rpy@p|^+%5osz_X%>o#fJg~7 zL0ae?0tAR5K%@o;JrKS-Jp0-2e!sKdeg2#uXN;p`D0t^yYt1$Ln(LYgzioJ(m+L4O z3kwUc-i@nwSy(vuSXkKH_U{9pd=WV<#lpfP=%}M}TTe$v^0o);v7?I}3(Jl0coU9$ zu}wm0*bIS#W|C}owW&8=%U{0trTgc#8*I-Ho|pPOaNy3*;)wkNo%aRU@obF`Yx88e zRyVbDbsq?GE8ZK;c#@Z&>Z3yIz@-LI1J@ouZs%cX_MROccDcv?MC?kMf@)*T+oZVG zlPfG2M7S^gl1ObkFtK?}H}5=hQw2uN?k&Kq5~{V_Q8 z$uuG2ULzy$j+ zQ%$)!cWT`&Oj+5`jo5LZz`BiP56dC1|`Y_WZ; z0dY-fBB#=4g50d`eTx*0eyUK-#(2V6_dWCJ5lO)Xwaw&B67t7ut?GT+ulRhcujL(C zyzwe}|1YI|hn|3?##p;Pr>A?yaV&jc%zQRpEoOD>?h5>JUC5UU&X*SzKoa*A+1jk0 zd{Qyyb|U!J#Onv)tXGi_70$nMIN$ti&jb6~(|fm&pSVLc_#aD?j*B#r0@Q1aw53T0 zNc%n{yK~e%lkPs_ku$ZWO@g$(l@#-=bqHCSy^Cz+Qwsf->JATpj)-1Q)0g}&P$Y8HQjB}Yd`L_oM;newk;OAq7zm6oqv1qpn3GM zns(dl?-8al{d-M|p{{QsG-Gcfar6ozRMdx1b`Pn|o&eAj}uckVBA;Vs=rgYiTWSx%% zGQY6kKI3C4GK@eavT6BkTH1Syx#7>VSe%)75xQ|9bzFJhUM<#UcM}Iz4ePzFHky3Z zM|@c%1XxZ#y&=H;ruvfUBG0QmDjjUJ>W8;^ZvH&_{lL8^uGZ3@S?NEobnZ3#dDflJ 
zVxP%RUU&Yy2&;vIJWtlF_g@h>sIBw+h0w>CBuV2Jd&XY+O0nJ$IT&?P>g)~G<63o% z>W?_@zWkuByIPyTRX$s=T84N#v$(-yO@|L&s3%A2AMRjz$>j z_H?~{W`t@kDIPQEPoena+%e(m!Cwr_i7 zYQGlve`9B~@#SYX`}4Y}4S zWqM$kQ|kFuZB-kg4@tML4HzhED|4?IfJhpzZ#)ucdm--*J$>L}I3ipo-24S^U2L7< znC+Nk_|@*?4_>S5y*{aUHs;=~p<6@ORj*7RUwNbX=5W*IBMuKqmAA5z!f#p{1DbG6 zmgDy0QtXG>1=)Q|{P`y=VD;g{@`%;D_H z`}gi$y60(>lF?{lkztzWtF`#_Va%BH*Q0%Jnm-zTc=;vgbHHcmFFv1Pj+fq@Z9^s8 zYWbA=v9H^(N4#6}TvxnHymnmaP44(}@kis!t31DR(%m)JNj-oWjV1rSri{HPIb!e6Yn`+ zejrts(wW%l)78-(&=)@acv@=u?R4fey-%%|<5KUX2Io5G*8b1E%2{1))goDu0@oC6 zFOwz7r^z&tk@FZ^Ut8nIFA_bMC!bB48b}zVJq~~TY?8OF!|YZrORj8gv6+Dxe-YeO ze@<_xun6sx-Zw9iquFmOYwyxD@XVGv{a!UewM!M@W)+tDL%q0G9!u-?YNkC5##*Pdd%sK&5x$>$|eq@i#>aL?6M59I@&I^ zX}5Bj&4^xg2-{`3)}jm{0JqlA3gdONFKP+HBvf^pNln zdH@Um?)gjko~ngUhRQxwc~2MbK#fr+b=X$_B0BZAA<3{MWhe!sTA|FHI+IGbh_t+G z@zuhu>}$ofvOLeKU)S7e2=m|l)2mG*gBl&%@99u+MezsX0UBu#h2-kw@6I*05FJPf zg|EX6lx6Y~@15@k+XE}{DPB7vaY7=m!!6w&BZPU2p$9@Y1~)jjqt}D1%L*q84L5BW zR~V-l$5@?M;jD*Q`SDe>G#_=@e>Eymtp3{3#`|^1)hd+weaDL%ElJxrmoKbi2 zLvLqN=gZFPHVHO0-xJ&~1wLHgT%aE~ecD%1)<*|`Hq)OQ!z-+b1wkJNjFk9lLRL04SS%Zz5 z=tm}n97Z~KbMA5|%eJ3x*U#hk*@KRuG@0W95+>i4$j|we+iz|zV|b{MKO%!QBcMN# zsYg=Eo#7(N>6v!l%`h+e{Clzc zeWq9a&H9=#^A`04SMz8P1w=I^l|H1sgq<3Gqo3)WX_pB#_As{EtY;)HLBmWkbEi$` zD*b1Ujvf8^F4?&X-~Y|X(&YR6;^x9ilxULZO~p!rauB)x$D$oU@s47+l2;-sUJ0f+ zTKSlk_%iaPXnb%y?|s$#GG8@aNN!U@(}l&s$@3-MuIXqRnmBr(=u^Q^QMrpW+@8^% zT`}g9y3@CU9~v2|(Dy-(oEYe_6tqNS^=FoN=eLjcc{!w(=D~<@K_+W-Dl8BEh1gZ? za7KfL?*)J5y4(ahnL~%aXr)ta(u zM0N)~4GjND)1J{?i?|LdvvMk*4J<1d`Lf+8VTcr)a-Vmo9P`NB_)6$YXy;7DRa90O zTGFuI^Zt!1ElS7TJVzrd{OJ90<|F7bbULvKL)&y?jC=@)m=~N+RKM+ad-XQOW(6$E z=<1vs4*G_}W^dnV{S*+R5(Cvdps~?yz8tq&H2aoOI0RUvIh~E3he_J{xMnxs@_{i1 zzQ6O$-;Oc80M$em^Pl8f1}sxNxNs@ApP)f)z&eXY9t~XAq-=Mh-o%(mIBS*#tL~_f zXoVjhKyN^sv1Wm;8$z3N4EOd0cX{*11S@uhfzF~!ETglmPZIXBY@e$HpJ^Jkx_jjj zRrf604rj}#{ayIJ_u}4fWRzx3vT#^mwA^b;XOS%P$8ECocEO99bmJy}OGfC1f<$U? 
zBoexu<&_!B`Z+Oi@u3UqTVBE^12~?f(k__0>4O`-Up;JM%HcBkgBrnTfl2$4B1hqCCvQ9&}zsMFpgA0d(Pl9Pox5!q3gq z##hb_apJE@{+{Qm9m3Yb(cRM#<|fIU*XAM2%Twd{appq*{PP!`cD|1PwUQg+AKL;p z2x5K$IxnvP`sduhP<7^0)!UA~b}pt@9bN6*5WpIm=am%{)prN{FCYEalK(RF!G8@^ zyl~-PNB+wv{}`zbV(#EycJvp!cAonHPfAG_8T}_vU+nlP^9Dym}=W`-;C?N#*SM3-t*E zRX6t>0hhjCTI2n9W$*uxU)`Yk#rs_jsG_SEskZ3R1bR64ItG_Um8 zxl3t1VPR$4yI(-^-~3|T#bzQaCa=m+h5U!Ne%-3B`Ed2$jsNE$>+kMoIRE_`|9A&@ zf#W~EAR;SvlkI;ZbaxV2vF87AC%Z2gvjvlHAN{vG{hLy2(rv!~_`)wil*ivM{~L}y zVF`(Ul7B9K-~YhTzlNGB$L;x#FRa&n5c|&zJ@qT+W+v-@+^-Pd#Q!W>CLx;e{|(Pg zIoH45b^N#6{Cnjmp*kx6`31=TUM!fe_x_0Gp9=9uEdO0?|6j9~5MH*O<@B9~u1iAC9Z&K#LBQWs zzh;{RUw?XdV7SJHwQls~3qc6SBiQRc38vrO&(=_;Nz<~RN9I@^V7@HF)}ZqCyPlSn z%iTp0raxFdZk>`0dLzU~DaklC3L!q}I=9So9FJsPcN;Q?Hy%)EpZMH|jVbMyI3ahE zNqZnG*#cwP5zriw_9+L(+#z#i4q35wd#Gz0U!#-GCmZHdVBg9nECn^k>xygMI~-&Z zc}P&9{qU#RuabUU_m#cEn8aKIc6OlgxsRNf;fs-*_xT!)k7uSbhrc>uozikA2Ppy1 z1z$6}!K88ncsH;jZ7oh%0^%jG!K)Q$i0z>6R$X_X38l774cuiUgciBsm2Kmm%LuB zocPIjM{~>!8Cmp-N#zoIBx1u`gL)R^ADxF`O5h3H4I}p5T{%6d__CU04sTQ#^Qjgq zz#P^(t_T4<*Rzj$yKJ}xup#nC&eL&S6H@OHCNUy_0Io?2juyKsOL@!OSCKRikMlmr z93K4yE`L3j5mZ;q5OI`ZQaR2oLRp&gA2qUC7PAaG!F2cJ;XoB@lDj6|$=O@WSoA0_ z^Jxehz#O(0hlQl_4)4>A-DN`(zy=XJ6ZWQvyqYf;nZzg@RA`UVI$h~Za@U?U{_3Gs za`5cju2jVU!lTW$=`WnO&v$DxscaMmL{C_$%&z>cQoebODS(Eu4Ug&?Qs8P||Ll`* zikFV=K4k|O$u1xxt8F2}^YZjA8_ol>oo~+LdLwKZYGltO#`>gnifG6!UwjSe`9SWy zi6<_n?hffOCshHa6)WCcJAmJ`%?M>u`3eMx9zEj+@#{i(u0$~f5Q(5oJPEDcn9a!@ zWPRZ3a4C}clxx2Tg-g=d^m(bV$RUH{Og20OW_uWCdH5u#oGZH>Solfx4M0oS-Y(>Y z)^Zk@e`#hNyqQrH%N*_jOsnFwLOqeUg1f=JD|#p(dKjLTw)Bzmds>t!01?V$3oAE@ zHfUmAxaY@Er^Bwf9TEUES?k;P{qS6cGw*g?MLjTE{h5k$H>^Al8>a4>`D;K+_Ff=a za-#$vqVLA<`4NA^QJOjFI54e8?=j&G9bJ7&OaUmgw`#Pdrbz#XQ|{tOZ_N5wL@f%uMIybmPdj2oAHwrt z88cW-iwbYoBdK_Ow!MbH5krl0W;XQY%&b%=3mb)P5=J@XCUEAz<<_m*?5)W{M}0>$ zfxOE{phr*fhr92={t|94t&(HdQL~*oK?o0;7)NFNXK_<_ak@(dsCMK0-}npUz5lJDO)zRUY|%%5DWn^`><$>F=4S zmz_O#>h`_r$}eZKYgBbOpuqiI{CLd{fOAr;(T71e@}8d6(2~R zNQ8<9ZzLl9eAh{+j^vFhY7nf{W1@MNrX(vMcdXhtbou4Xq2_5x_arTS8d^b#9t+-x 
zB|&#)kQ#xjh3QqDKNYB3{5vEtkI$$dC6uDBWqlJ!)8mHbdQX9d!s8PT4xH}nt~{GU z*&HEmkC1F7=mP>m>bOL6w=%nR^v)`gj~YOvH$ciDlKMafT5tSFnj57nLO)l>I%uM9 zPBqXM{M@T5E5zi2p@eG1vC*INsWZZ+z(RL{g))X?@1M?#y_55z+A73&5|++b&PG+t zmpj9dWBWQZ)`s0_ZA!R`JkDF^0L6)`c-uU#d+|f}Z8mduv$skb3)OpS>BJpI?zRYc zaSn#0&q|{jVk#iNM1Z-^o7tg@-A(%oBf7<2Sw1gFHY~aj^72;)cxP*&5KJ$uP2V0& zuSfOg$%*B`OLeS;fHAVO()sr+Aco)A*f_Y06p-{*d(DfLv{b5kDFm)V6hCmv0wM=c zVpbWoIj$=T@4m@avDYj~DQF&%E;`CsMN_968YngR>I#9BnmIYr}V@3RdoiS6nbUSIkzmNKN%kgsbZ6BB{ops z#X$9r8osOobX+G1ADT}(3Z2L91VgcxufdTA0g6wyw%tDtddK~CpJ1tEXHDf4hCVN< zx%`1Y6}_DnFn@&6hut)(<=jUu6a z8Fb_hRmW6W4BD{$q9b4}FwR{g6F5-Zob#T3BRp4kQ=ijCP^3c%1E{lWN}>w^`4UI2 zoU)|);}=lKbyRJX(x~r;AqDW!olB4;Xegsppz$zpq-(Ye26Lklm1gtyay;^paxK@9 zyY-%J0kTyG`4*%xWgDZRSUKkk6P+(5M;F)YaH9;eKQ;IOwm)<7)hD+XGyCf-*)DPc zQssuKY!Vp+q*`+R+{c&F&E2l8l)Jp>Y~YM(aB9R@W9ehAucKUu0U(0pnvcHw*sT_<7zR+b zl)0TX#wbxVcorAzLOuD=;z3g2bH(kA^qq}CBjQ>RQE96evfb-GlZ#Fp=}6TI%}?^* zTEunGMVq&;@BEhF1+PB?D_fL#*bPVO6S3Q?1eA1|@5A+V99T1O;hBUw2v+(2_thSi zDAB!-nNH^#vioKIN=79Y_=A=+0L@)_$~81dcf1R zuQ8&5Evqb1NCD;_?>qsi0qHh6xzEyg#BA4+SA@%+A z!bPkHab=++QzHZxNERiTpgMMPh1T;>$Q-IT7%3a1In38_%zvXFjqwUvSxGi;d1;4S zicMGl+}L-V(Er0YHe!Yd7+5|^)wYb}zjByEt%Yx&4bd}waV{os{DfLdqu&zXZ_C9Q zQJ@?jliXrEuaAzE;q<-xZ^;4{D-!r|y3XskL*Jpp4zBY;s>B{(i`HRf%f9lCnm#X5 zTyFyPiklIuchV|^mgv_=L8>>@ti&4!RsW_2sL;O^RYdhpL%X{+5}s^(i236jHObQ z%MW0{8okFrIia;uaaB`GfBOO@jpD_Ev!imF^OQqgZnNJwI0ck}N=B-Yru@gN5z$svgM)q zy<$-Dz=fZrb~UOPIM5qO_-J!x#vZw9kIE06ka5ORg}FI9P@$N`2_WcD7s4Td54RQs zgcNG=37Xr>J;G@b{4VCoTg%yCjbSgmCA_e3W;b9gpup)bzTusB!)w z3iQCj1PHlNX$z`cs0q!dS)}e&L26bVC^2Z^MAzbBnrp*|smFK|?uU44)!HaGL363` zgfk%u>whV3Ep4rm;<8gpa)K{ts}%5($`(%r>N*5!TICCAA}d`!qg$FpCt(m=T-m1U*+ zcr8DnLcmneT`LcM$;+N_(y0eV4GqO8C*fumlAn@W)_q)Rnqn;z7pZ5Xg-_NtC741> z`LJ(Grw#@~PR}O0iMqkel*Q1C3_6u~Z+y^}Of~1{4#=)FRaVc;`@uO55VbVyMAWx% zj(N4}deFgUUTCPQXu~}6zG>yL4&B=0U%5k6fKn3TGai~<`5CBvni^JDPW6S>mbC|H zZ2taaFL8B0rMw1fK$)uH~oD~<>dMDgXWy^;`-EYRxiVHO!Bk%d2^aZ78;Z= zi?ao`&D?e)fGcu_TKeA*tqrb__Zf!c76vf`dNipVP4Sex`L9a8XX$cZ#IO 
z%;Jn}1UZWoe7FzkCu8duV(3+>oQhkc)NQN4q01q9KovFCL&@lErNilaN6oZ=ZQZW} zb~WNd45~!|(bh!t2dUrbJbXX)K-HYZc$lU7RqGr;NoCrm(oL&6O98n6exjkPH+8Wh z-@9thSx?zO1IqOM$t~h~Pat`TZ`;$#*S;&u0|z~;%D6OkdbTsmz^>l4)+8&d0|k+o zuXvu1xUt5W?xx;XFU4c>CZEYj%B5r=~&FQy+E-Dvk| zsHmMjIk13PtAhECg-eGKVA$=gMV)3*w9Rk>{OyMYl(@gcIx_)D!N-oSGpIWZQeMUU zO^Kt%P^fApVasdFofQ8$|F&N z=P5M?!fb^=3G=Z{KIX39ouC(&zQ3>b4K9EgUI6BiII=rW{Gz&48jY(XhN6s$Q*pzU zrjjq{Xdv2>jbW7%7=TjL=@d(=z{Pr!K6JBt&=0$uTs20^A*!f5J6BTQRTluD0>i#b`Q{2{SSA{PD=SNCiK9n$qOAkhwn{NUbKLb& zYe&%Vt*z+^_*~C()G%<)z;#p{2(Ddx6~wn4_YoqN)>D4v{ZYVdE`K;PBmkFd{_;J1 zZv!nU2=QrgmF#zCs?6M41YedPmArZcjj))MLgX(hX1~L_IuQUrrgrDr=dfyTK*-+7 zr@$vb31Zt5D?#elsL8RBXo-&Nt;yc$V728+lh(REh-m6IgXG)3+IS+S&cH%Y*$fC7 zmmU-x3wwC#>}OF{AY^RN6YFeCthDb+50caoegT2sk64AV9G|a zf*R(;63b>R`bsEs&jx|!<=bQ{>DOAOt7a27?U6L;p3ua4rlN8&3nK6L>oF# z6wE>>h$n~mUWNFW5Va7`1zy+#R(PH-9+h}|)C98>Pnt}IMe|#hVy3c^!_ZZ8uEOHP zW#F!VBxCG68;uL7(XW8-&yD$?doYo9*IGShP)X@144ft$pH>lyiJ2M3HyF^Fs$3Yh zL7;Vj!O!!HAfOyU%UU4aiq{-h3>gGGZef>+WkKaG`eyLlfuT{%V#O=t z*w1sK`DsjLy_)L^4i1L;!6|)g=A}TUo2~4=HsVfx$*SEB)HVNizCmi*r_#I9my}E6j^CaUjE- zKFkakHyP#E_17AxS!M1y{}rq9`BW|7&RI)%4-!sp=paV~H1s-D`P;kI$zr z3g1vyC;uo6yA9qWTj=9m^!Ngd|y?^ zh^Ih~QA3&z5*EhHTd(9Tk{U!|w<6xnm4wy~{=R?fuUKQy19{=xLo#kaI)D?meM@2fX__CowqLYByJCd-6jW&($b3aMF!SBHRN#q z99kvkpE#B%jA=&nOf*TSTgce24EcH^i`{)~XaVzO_O1k5R@SRD`A? 
z<7;={Lzk|l!`GfvF9HflZZjLuu`qw}MlqV*4A_R^nktf;Hl&P;#2dBpoOahhRfb@v5+!05qsu@1h8e6kb6$B+?rYuIM1>fi!ng?MsfIRTclf;3b z(mIUy+`VenK{sp2`S77jzfrNWWcEsTWKUD#Lh|{kQ!c`g*18#m@E3$oy~cGP?=f$Z zzXj|}K+jq>ioK#Q`$W&z4mT8M$EO%ge+~KH8q1wJ!X-_9lCt~5HoK7@ENx>u84JF{ z^26RzTGnQORu_#L^_|W$(#y%t(Bej*JBbQNJ&G;KdxnNyeCY7o$hT*N90Ly4*wS3f zL)wT6vl#)WkSrnCCr!VuYbixPB7sv#4mZkX^g^Fs{-U}9c!hem3+zH)y z=s-bqXqcR4jr(x}ST4{8{a+WmZKGkS4-Xzb2e|zJB|e4?n}U@HcT7FfCd94hROQ8U zZird0q!gF(=Ykr9X0F&TrMHzwPgmN0t+VTC%7$AK=24|(z{P7Spx~-AXPpn?&ldOH zzxh7C%=|YDLPIZv<-5{;=t%Ix@(@xA|NVRq0$Gn<>hQ~7Xjs#sTqn=faO|L07qw6>}1E zNhJg~(X`;rsi+Pdhe^bnS`+MMU!g16mU)B<1aD@w?BNG^v$IVlF8B81jH#z3Mwb%M zEWIG0}f)ZsZe?LV15u4HoV^5u^U2@FF>1YGkbmz2Jxq zqAK#I|B8UBl@~7c#a3vZO2WuEuDtPO%x49;@&pa7O<@<#Hg&fDxx6_vewz^VP%Wbcsi*pOLWk<$Jmz_Yr}+cAVNPF{rJ*;hFJRO-Hq!fdC2EuK0= zm(j6W7B=U!c#!+q(&D`|o5N<{rajWgYB2cR#531$CFdLIWeXfCw zFWG6TpcyJ-1Y5B)g`fr^8X#YxM((YH{Zn&_sjotC- zPr}lC80~`_CGPWWp;~%CTBO%TZU*YMDfr_h-dgg3#~ijVFPVZ|&P_mWS||l6d#OAopTt zzmLP`Z+y*CgnC#&gEuBf8HURum2ssslKyk_;Wc<$aw3B5o zuC2Ud39)0g0Ynu3)qvplg=1V0qJDljm_BC@-vAa-tei4{sj6PX=R+mZf>s9$x82I8 zvY6Fn^>QAy-6jM__5{`TRAVnjccK{H99HG!yE&DOVqWL_PJhY%@pN%40>sC>MIWrO zO{%r1^1qxTrncMa3>QOw=MG~ssg@@t&`6`&a7%BC+tDJi?PSQ=V;Hcm9h z7N82c-3R3bFYACkg>r3$`D)d+k$)rGKv(InuZtfECJ6_VYRKY1Uj$2t-(G;Gwoe+C zx%$yFO#BZIfK5OuR<7j2jsc(?(6La)+m67;z7#d0pG~??)r@5=fVq*_*h^+V!Vm1q zTAax&+6(lZnQmH*Uy9<=JsE(BuVlV2!{zb0b-_Y>`e(e2ujO-$2d$0xlzbO9d>#Y< zXDS7XhdJ#{F9o(aTOmGFE{AYP1iF{f+SPhc!>5^qz1{17jPiKwTh@>j`6wn>rq#0M zROJ=N9{d+vXzhY4C#emBvJ&_G6++skB6r%`uxnLRUXwhF!LlgnT`4ZJRYfDR6K5*I zMwx_RXeLwuVE7&Kehuyh-Jt}I){s&*emqv z2DE}PdIq(}sEm0kA5hU>^NXv+BR8hI5qwO*qC&D(K6%*Xef+&)XAAxfuDq1cS^&+M zTq`=d5JvkA6kly?A&hYd-wqS9M6wT_v~QohMQUQ!@Ym;vj5*re@IM3r?02$jHoJe9 z++dIU5McUOCpziMiMAK|G==!G-?)8Fv*K^`z3WQA`%cJ;g%S5~VfP%8PZl~^TS4AM zs%cP-Q1(0wAiu1hW$wSQwte`C75Sa~g2u*pZ0!tyj^gfyM;AKEd)Sb%LBIY6os7+n zCStcHNOCa0mW?bj%xFKBG^@>Q&7pBE0}7He6g*c&(M=&M$Mgi}O*0!`J@)!bP&{sB z4dH4?_quuedYV~}7&?UtT9JrAHItaaxibc0j4iy534G*B7A5-1Go~O&|Mg}Q%!R;N 
zTM0F!tcxH>V*B^?4!0}->{d6d530KKJk|-xpdf=+xAM1|^Ld!)Q_QM4c#6sv1t||r z?GCMHUVGH0Gp5qh;wQk&dSepgeLUPv^Mj(Z+u1ziqA>FO^9>ww1GF>2L%$w8e!U%& z`3uTuLHaR@-`nslGRvu=3>uQrK_R*bOdcdT>+fvl@A&U0G}L2!@5;4bt`SYF!M_UA zKSNhS(v(olt48}ZGIDX|I;?CWfdo`#U-G9zv$-P(Lb4r7-p`nAM27O6-O4^5Jcm=b zixL0}O+mHuGKo{xdS48meJ6TTD1WKTF-l)y>11$%=fir(CpG?%~`hd!Lf6wZ#|!uc(yP138p6kx>!+ zH`Fp+<4~~D`M`Hwq0C7q1s&{Ck|7RnFQemx>Fs68F@>#S{m#I4W)C_zrV#pKgwhM$ z2Ojns4o?p8Hv+JwfLdOqVfQ=%!{^&W`P)Ma8y(1C-?a+h`fD9Pkwg_j#KRzB!6a^| zasWCg@Ivs|!k~Tnv0$=NFnN|_5IeAt9J(``zFo+KF}$qE+wO<>f0UQkN`tMVmAYNE z31qb`9HDHt2fk;2uV%Z!r%tGxT;1huF4}?~c0sg>c~|Vd%FDqI-dcEYdrUN{Mw$UpgSrnzF@??WiZ38w#K5 zVXybx@w6JB={Gup2K>?3~|<1_pu|;?^76mx1wAN`t+}Y>y$^iP z$f*U{2XDq(6|`Dn&wApH`J|*T5K2mw-68;_9RP(^4H?3f^{iA<-#EuO``5^(>eqeF zNWZ$p#PzL4v6ISdgUJdm5|byP+tVf;#Lsc<$u-9xtv7djw$s+hG5ou3D9<_3GubsF z!db0!x64!d@aLcuHAl)}%>DQAl{Y9SrpgjJ&r{$fVR%7cRntl>8L~x8$Pf1c+k2qF zY(TyM{3K{UHIC@nE$10TcsN>uJ7MDgR903wEEKoA6v7|)Ys)AC&Z>}5=6e3ZtOVGm zG`Gs}nA>hY>lC|_1jP{4Y0q1XySyDo%&IT<3{lv2>huhLqnUQ?eEanpTOeRU(#_~q z6QwM^H{MQBmB9c$tQNFY1Ns&}!^iBDyCrj*uB?H9*!^F#_%a=!sX?Q z)qD7PfT%(55I+hMn>RZYxe|7a9Bd!D9>X+%#|J+9r6jXw#NGrLuNt$T7N$n6L>MrT z+xa_Q3|{srHXBv2o&>}v8WFWPEbS88o~kG()&P;png}I0RuO_Z;FU4|O@=9qeW-V6U|<~nyngc{0lr6~A0?Us0O8xhtfEFCD*gTjfnyxf>8OKS; zro|i3GCUcFI|l16)G`(e+|{b;TpQ-}(D47}bo`uv-q)CbzT*Bstt>46A;aih>^3v` ziAu{yZqIZeCC<1ANCzhyehsA{PNz&!4^%C8>oc)v+IO52|8{#Q*Offi*yr-=&puLAN)UaT82G+GQZM!Ljp8A_dvEf!WX-K;p%uH692HbM_Dq4y`)Y zDB%3*1Y&T1rGfOAVllz?Urp#Q_H$F<-nyL#<_F(_ZNuG6=;p!gH8w3>YIg|EU`aA; zpLm6UFyyQ2=FxnuGHRWLe+>vDGiQ{(+3ODEF6p{sEl|eNLIS$dPVQWXI<0k~9A7m* zTd?3IhTSG$t5z%3n3F41Y0Z&`myZYangW6$@~A(SawBe?yb5ZEE4D=envxac8X&!0 zLEN4vkx&wv`q7lM0JqYO$z=MCLsiTc;7@GDF^H%iio{vtB_MT~G%{d*H-|z5V4K6T z%F=ngHKgzmb6L#lo~3BDdF376_tNQs9{bZ})yIF#WS9*giQ#1i+$iGKq!LQ0lEiD@ z2Qz!MRn4KPur&!|Ob*hLg@iiN79@6!>m1X#&R)A7-zSb=HVOuy;}u~5!tA%?b`UN0 zc4MB3VfhUaFkEC+k77g)ZKb3mgLg4!-v>kUdDH?rk-K{?WA1re@|?P$!zoa=ABv{@ zCXuP*d`w5FRs@!FwI_YgbTH`c2VBa|BA9_&AcGlX%2Asu^XXf$NRQQcE7DcG#Ll7w 
z|IQe6E26Nzep&FzBCwmquIm!Vf|sO`9_uXtu$uyP+V58U3*B ze)pB>J`!;&8F`VJUU2iLTh@fD2{Ro<67>Bl+%5QnqIsvb1sR{nK~k_^j{%5C3wi?z z48=3=u}Q=oIuabnpruPN)814Lsnqo6N@s(Bpe0fRI~4X~EIE5jjM51a3#;kLKhC>W z8#pboLq-PnM*|L0L*3JxlRL%cL(g9k^ss|XO0tQ=+hX(^AQ_Oo2nG%itZa!)UR1b% zorj4vHc@xpmKn&2y_$k?yOGOx7<3en3j|Rh+0X;tj9=5&lphES?Q{zw$UdW}IkcQu zSgg+!XGzhlFI|+l-oS)so*(rxU}k7cAbxvWV~Q*~71u3c>D4+VN4)-^HTD?M4m`^Fl)qnGT=F~%5S;{9MdmK_aS%*_{ z6-`I%ZmWm8=t8>zsBreEc?+pg>`V2^y^ZnWe)gP-%4<3pVh6cj-!>oYQU$T6w&uZJ9RSgNDOFHIO{!^KS z`H+!&SR7!@TGpD(c{VZ=5Go91Q!UW<{acFJAC!dccYLbw> zIi^fi{;UmrlYp+sOC@cKh}d(x-%D@~K&szA6#4SqanL!guq@_DQwt7U-^06~aTmG! zD)f0MdTPl__xs-jWz)KQoknoZ&`Ye=27~6XI{2!RPanhyZ$%pX<)RDsULl zAgnUm3f(Ff8=kEa&|Pc5$&Az-FJr&6>RK_gVN^`57lw*4TkrUQlTPizJACD2_uOzF zM@RgD(v^O5RCao>f9XVmd02Lu|GA1z+nCq0FJ}i*tN&Rf+TQ$Uk*GXIYKQ3zNTI@3 zA~!LU;v=uHBOmJFKr4*?P<~&jxSG#?TR-K5nqE_9pUiVkZo{FqW=s7O8Bf z8lndZfdxv_o!L$;DWrIEBa+-VFjP9`!A`J~S4wPn_F)GVP;@Ht`COu>xO61ard`+y z<+Lt3UG#R?WD2Ms9qup=Kn$vXn@~4wAzS2>Ax7u#A7n!7*WW_RsQ&Bt<_Vy80gJi$ zyAUa{vySso3|P5NTw#`fCsJHs`+=LIIuPKk)aP`0wI5+0z;h8K6|%&YD^wguZFduCde z)$lFBC{ByVvo9`-1w__uTL`{jOKhrz1TV$nm8!(6C>qqVG&T=(|53O6YuvH&+dcgp~Se;yFGx` zt4<&oj>kf*)@|MJH8x4DnSyx+XbGUd=W>M3{Bu>D-Ne>;zn7~(wGpB^gY`_Xj-3}WBj`2 z((9<$FRsW-``#^Fx7&IKsx-PJ`!!V0%2&xTP~9RQ!!;(^v@l=fiCy*vGVH0WII^n*;xS>UjEH`{ak5uEyBee3sxg%?Y3-ZJoBx)? 
zPg-m$RoNV^QC33f&v=yx;8V(Ulz$50Ub-J^pel6ikIAN915cj3>mQh9M}2qQwI!x| zj=q$L4b%uc?$?7|%sz^kG$(A57Rh@JFME%Roxl|j_iJ$0;j3L?-&@|+X27iXBhy36 z^c!0ikL$FH8iJgOKt*Lxb7;pmBw^{eJ>$-%y51C1MY{bS_@;)#c(`ewmdo7bTkOO` zf)MXJ&r#r$;cai6H13OM<@Xe);2a%OX5={J#8!p6*tWlHKe@G&#Em?eMQt4Nu91#h}+`d)EH3wp(v-(_3T)Qb`s#gF6*Xi5YHYtr_Is zDW+(F<_Jn0T-%Z4o~+7|$yW_KorqLI^OEllTXu8p(GGIbPHSSvUteQ0Da6;fkeA0i zVGJvz-6oW{C&_YM}I%w1Ci$ z+NM^8F76#|%}~zZa^HhXaw)zyh^ZoOs4ZS;scx|-c%&*D-~&cXgFo_w)t zO#O1C54)NF>Qv`}4e=nR*8K(YYPZ)x=f)>(hLM4(X&KzOroXw02Q1@7UjZ$~C`J0L zN_(6ewA{UDs*O{K58_joxJ6efy1=Ls_0Y6P!Kv6T5XD2xmfX!R+FGIf@NpD)E3!m) zd^yJaj&csb6dazgq?zz~vKHe?*32UuK9h=?0?z#NnQOMe z62}uKR|;T7${AZhgd)ZExtSQC@z3FW9(|IJRQeM4Kvgm0Jim1Hn4$`@+VXK^(#N2J ze9}z6Ef_&@Dv-s`u+gn3I%y3p9}r1Fb`7TUqH3$hP83+?5rhX4FFJ}hU-%#7_flV!&2BY2y?(>6PM~*^xf&okJe}qMjuS z^peL7MfLU#vHCSy`!`0Imm8d+4Yb&uPH`L|SOxaDt`>u~M;QxK#qMCtMWtJAwR7%l z?U^ZBNsaa4)=8ZlYwjLWffr;n#UF~ulSVd4$DU6~OV=Qz6bYJek(T{Hd{+Ax7gi{%>R_ZIXJ!@UMNr780AMLp?u8&7!F*vxg2ixTdIuUWu_Q z5y1x`P@b;=DLz{k;l)i9B(_8z06fI1^s%*)pdn4SnN9Z)1?nwmi%wc7WR3gY_pzo_tknoFU;&5lt%TDAZ!_~y~0;X|VUTU(za@FvX}5z2(qnH7!t zx0LmZVd@2ug7J$hC9$t6HmAxW*ruAIzdI;nGN@u)5v7Z7f=9Ya@-6hGQ>4J%x(S{h z1v4k7rEoV~VrRVB4dYQ{eC#rRpXXBP*)=7{Ea?$GOPAq$?RMhMu0X(qCKkaW;3G;t zH)600eV3EvrxaDln|2K^isp@qLCY#Zxtxe2i5_fI7RQ5v_%CO`u5!bk-vrSo6&&53 zCuQ1YFAght&#GPPRxBII({dAU@bf4g^*kGo`Y?M5relap&l$K!{UYuRPf?2x^?)A! 
zODF4WD|`LknDkjydA)^C-kZ}WT+&1p0-$Y^fQf!7w(L#1O7AhmiMkHI6rGO+!>&#p zwIXj-I><$;?Yb2S`P&mC9?OQktNnb9wx?4FjL$rAaP1Q6-Uh{0PY+?M^_a3cJ>4&> zQ9C#Ig1EUqky6VF@&<^@Ys=A&g|5)UH^A_MsX9l04>=c6kYh?Wj!Yw|Mwx8-mL3~T z6(X0;JX(~(HI}-p5d@Wi_~iGF;r95@P2wV!S`5+<(w?99N7>2l9Kz>DA+i;I7qkkI z%VG1c#F2!C@Z~kHnmJog;*k?s4$$9_=}Wlm!05}aj&%16GL_=6sa&5evsky} zNt^XOPCen{o^+#8H^4SCbs=_Wz&3FZ|1H-5lwY}luk`PN#N~nEmD6zs6IovAUxPl3 zE@uqp;olm#E)RHzTw499oLW&w=n7Wn*R#bs^2KUOxW$0z9$+`*Z*O=2evZ@PG(TD0 zuk!n`!#5{vd~3DUg=$tT5`}POW#1C+!E*dwi_-~{5{n`rue3)-Ru%xWbmHRK|_AH`lcDJrYNb^>McDo?kKH_5(M~y> zSQ$ICH{AxosD>(~RN=^Kh~mTC5wJ})RkhCatfMrqF|(2S|FJ|b(Wd_AgoA9*4Z7(UA+5xhuoXq?ot7N!{hOAtk?bX zG|#e`YA~B)N3S8vHQUjhX%Z_?P zK)beHM3?+)7+02PY+_WpC5F2Jt}-rlb3D!%5t$7G6qmKu7UO5v_0tgrK~}^S&NIo4 zMu(ie98dp7`?nY!aV~oa%QQJwimf5JU9m88c;+8&f9whHqX_WBG*f>(^fy6tB)Xo# z(tT!XST@3v=(*vZaPJ4Yh-BT=RpHR1U!3UiN5YCAR&JE69rJ<@*Z)gpP(4|YXMC67 zHN=tl>9hC4RWE!7&*C|!Ll3PdK)_rkZA@JfT$wjju`vgUCD&i=#YLAR%|g`T>!y;3 z(|M!ZmNpe=sWV9k_-MC#$g|LM!y01QwItA@MZUJmX;$0|IB4(ugz zl`a)KTD(JfH-)KiCL(>+6i2qkGP_>Nh8gQFd`WfnzxJpTbf~GU+ji-dpWge-`3NaY15^?0hYxMdV~SEa2u{|2cg zS`SQzt~Vp6ag8Xdb;d_Cb$m6CIB&@9Rj8DhW;glyG6!-u!S^xD6Cb-R4}3zSvpF}W z99}dqdJR#>EaVj$kS+zb+6*!am+$H9Ze-X_J>n>}RB_EV(c4z4q~du0@wR2Hd3ksR zL?lM`@b8V;jE^LBEvmo9*?hD!tgWD=OV~Ws(E1le)msre#}RW1g?08AKok8n8hKsE z)^-wE)m%Q;`JfmBEAZgNv(|>VUY{oPEXYHWA*R?obX;wgX^ObLvA5Cd zA`Z7^b=_8L%o1}rE~qT2-PASFoSQ|@5)?Jn*`_bGe36Mj6WJ{s`);t%92WBQpz5GA zf`9(w);?41er~M|K5J59;M$oMFi7A$)Ycd%SZCrPHL3v?9e;D=0(IA>DHALZh@ z18Ol1o`Ff{E%Hm6OW zb9h{G))suU%&rBseT9v*0(DTP4@ta`brU}IqGI$ckzaQPUvqkMXWDaNy)e>6RXhOJ ze@l__lx_cGm157(3Y9$v&3pY}wO@QAma42Wrqe5pTvoEas(^-V69)B*$*_4yz zZ&#{#xj7iYg=O!?(!qP}U!0!ovx-Y!xEFdvWDF9tD_Cqk3-<_Y5J88;EDWuzYmzl< z!4`~Jgzly}O@V-Y5Ip60h(94bT_MFOVo7#fR428MlCB)Npj_0=oqxv14t|(Sw>gsf z9{;r-@bF0LRWQh3vTVAk)Y%=60#%-;RaWW8 zM^BUf?*G$d;1~pVkfLfuV6#f+Q*A3y`Jmx=uiwO5AMgTv8{eFooct!_Dzm|pKN<1S zIX;V^!m?sW?6LDV;!W0xdS%^n=u}p8*q^MAa~m+r?G60dmD*0BWtooRW6J*Gxebi4 zIH?T-`$U_MTL<(>8f^rQwh0QZRNWC{3E@$d4M6g~T+@$FUlMcke|_2R45UFVg&l-* 
z*`Q-2_n%oN94qVn1I?E4jJZc}z4qpl^Aji`idHn5c zKFCfo{GL#dee3=c?3i|Wyc%PHu&qc7mRk#EH5n${%CNa6KQd1IekiQyax?if8&h;i zDMtW@okRH&WDB%ww(QignctM2|W&%l^^w%(A4!XCV5ji)E%>S?@8owxpP3 zijE>9Tvx3DGZ6z!^Nrg22N9?W*5pp`Lx#GXn%V zSfCohC8>Q`KJXrE35pxQo5yB!7aeeMe zKQ%UK-u)@Bv!=aQLW;5Cul{MP_1uvH#LNND*YJq5_G$hsrvUPM_GXvaI*z_m5xoql zK+LyY{OX^~xmsD7IP7%EHAN$$Psdynho}ejg>9(Tj(g3<*$}uKm#n6VHRD&V)WtUZ zh2G!FKr?CV5Sf3vDARzQ9GZ@<&v;{9kRJlb9XinCj$i$2NkY@IG+){<9&%S`9P|R5 z@_8~K1$uY;S>=+Uzn2>rRQu%-$frWg|7o!X9_|32r><0;)W_DYi!@!zf=wYK z@TK%>NW*_xD+BBNr&s!adZqt=_DcVG;}g1L-pXBmbmYD7>oUX(4mc<>&pX=jSsx!% z*G}b@8G{o;7JB9d;nAbCS#{7!!hOLiA*X@%twXEqOp&RVx-CxSMAYqC=6FN8kR42y zq-lzp^~DpHl0GJCun+QbMy9*qpFzULu(bd8A4 z>Nc|D;kR3iw_9(8>Kb|R3jE1QkM`#&nJ1K~)bKwM@X@Vik9Gd-A80=)n#dlJMNTYb z>GufsR{9JVuGUuA*w!d*%-C#PiY_DHl{9sG^M{m7(MjYC1?ywua~+R`R`n_K1!@U( z&u}ZuT)UAEj-`|6P@r$It4x>J@CVu%Iu3h@x1GAuSh5xinp^cWk5(I{TCy3q?u1=z zINFuy9oq_9oSL2tOfHd4tyNm^HeZ@JgQwhoyrOY#MEMuu;-v7s z%nxRiH{q@tvgSC}fnzt#`I@s&kPn_bmLYEcQ{DaWjFmz$ZCZm8NT6a*-RI-Ova-N|mvK>o-Vd*Tc7N<@ zW^bNU$X;ICRhFS9=$lc@7twOpHoWT5)9n6LKgoi7_1W0E4!i{&nP-ollMvg^1duW1 zL&7Geq?+p9EBZPI{qc^d7gf7>d(y;5DUYAb06o3&D)Fh!J1i=6IUOYjZ}DTG{o1HvXfWB5p2I-suYXKTn&XK&)hHou{#)Y6 zlr5?QT?ekaVA>%aE&jyb>hJMpVhF=4dH#eKx$NZ3xHZ`3d`VwYYNSa2vo!x%n*Vss zf4t_uBH{l%gtmzK*4(eH`B|IT;r+fK6;&L(Q&e4(nX-u|mxCbC#nN61sM0&|)~-M>rAm@wtL7uYI67O8Lv4?E#LQGC6+p z#P{Xrvf2q8#+rUHs9!F*vS4w{EGjPUMdkCJ078Jp&(4UZJ|lzbS{c>q{|(IVB_xd$ zs)(3B7e)O^8$lEqBpW@UFNt~~KvVt`2fxV11s)w{hQBYVslpR8MdVsJJzmjoe=#BX zwba%W65gFDUE6!x)hxL5L$wgrq4ZrmRS&h@xuMuH*Z2^eCS@zkKeTt{Dy(ZK!a=su z)Pe6yy7{);+WT&5TPFnS!n|5!v`2%&;>@wNy%+YPa!VcpHp<3O7OC48>osB$GE!2VGvk z(|DSsE$*eLSkNQ_A;LLBajh0b<;XZ`eILyBB}gvRO~^AQ1=$EXmTtaMTk)6qZmux+ zAYqb0Yz4*F>psQYOt5 z@&|Hw^p2SvKTf{UR)DnZvlB2PC4X`}kO93^kOv5xC#dabohF1H0Y1XQknXH#td73U zPSr|Ta2pU6LTQX~KCrYtjh5wmaDIugJF`URl|x zf1%G(&D?nTFt_XY>(ZDpwTr6SxUpG7te`U4=U4177n(ZpMPE`ddF=I{!WsZv@lGqMVjvy0SDlEH{oChz$H+Hc?Q$qpT4)eaS5%B z93_sXFMm$O&b7I7{MM_SfQ3yb+-J#7ptzta`><$r-6Y`}+y{6`Xyb84J{9A}fUEU+ 
z^l*|{ESC6a(%oXA=oDr;cOMuX*`&@NxCW}uH*_$(f3C&zcfcqs?}wQu0fL<8UCf-o z=OLOia398+pvaC8V`NlcpaRzjx%&v~r1%gIUK;#vA?j?Vm#Iq< z?e23j$jt1oVhT1KbX`G_|_!uPxkr(X9h>&p#j>T0d59UADmN3ca#u65KkA_h4t!m`@nFW5 z_#2voDite23^(gHjV~}tEa*^g667O1?Yopa0Y?Al76C>A8;_JwwLh`|a&$!&f>l1( ziHX{Q?od{WUDQJdPvAdWA)WYIk8op2PMj`T#il?%eRkPK+p0v3v1t`o4SR=D1ut;> z0JyK8K;Yx-E#FGn@ z8nH}0x!xLtEHB`v&nl*@7z-5-4z9rX!OqZDygM_ld3bnvr;@;bNb5hO^?x^M`QA~Z zpxoUhT}^xZ_vN|UN$9B$rANzgC4L|zLd_ckNKgpvwTCw2ofP8kmlzN!bNRf*)272j zhbP4>@A%e$JHAyvoC#I@u75xu=P zj{btSa2$Bir^awuoYPtz`{tcr`)-$|N+M!nZdnn8w@sakuz3O2Z~`;cAe7|3O&i%l zL}Ua?eeA`0Dl0ogUnHpJ`T#ZQ=?aaTcYe}_Z*TUqRnfMAs5IU|8;w3=?LP|_p&>k% zw!SF!bL{U2Zb^Q4;WSfGFWmKx-}QXMml*CdW~zUvLfmJ}^>w>o2&E})21EYG*&+4o zDT5#z6RozxE0z=~*!YD&TB#s(`RHxZZ1*1FonlNM4%=Tw_}Q6V*h$SR|BqoNZG8ke zRx6)>qb|6=eJV&t-nCXVc+g$oU8|I>NGh?HUxy`9=~~!f&>HaUy-64kYl;$GQuors z+oYa*=+cNt2IY;jnr1hhT>bPmo}<{{QoGi}@yf+3O7cw&!7qLKnM6j$N4OdP)Hrza}GMG zhAr3-(oiVkH!b}B|2Khl3<}o?4b5r~dJ+4h^lX5kBH$pAtCN-^x9gahE?CY_+vN)_h}GhtB)H9W1o) zQU1rV1h2v(I3uD%_;=Und|VSe9F*0tQ2`K?C6k`P`B%G~O%J}T91ow=sDyk7Bf#VK zHmvB!8_6Ea=Lr5th^>h+Vk zmS2@iLtluKvjWF-760N0eO9ZxWN?N^Jwl*Bz#kcW{-rG3jfjadp6kOTll;@aVaIri zz9?<7E@<+!KCa}^zxA^@;5a)yBRy!C`x86@i9;qoamKET)Evex?W3F?f-t%MO9#*hl@j zX~Ao(@xhO^qr0bjxX4|)AK#s`ktX{(50#I6gDw%`%a;|FStDAf_ltj8_-__aQ5IjT}eaOCQx*@iY#vA8WeY5K8iI(*!h=e>M-$L@3ikT-uesHKuF9 zVNs2){$g1vw;Io!PbPeyYUCMxiqXe}HqrSUymDdH&$WZZRpch>53-5#%U=@soO?oC zh$AB)#Hx?xtu1|>(Nwvx)~n&ZW7%~I&fb?OI~5DM4!cg-gg?XW;3&S#aPT5|C?XO% z?5?{^nl8=XhPU?(O-5J>l$4iYBQlWjpWSW0{uskM7;F|SQ~Gfm*fOR#W|N<48joyC zdO%R6VDCKMKc}7lD2sCR)CJB=kFrqo+RtPU8?_dz9L}L5oz? 
ztCst5pglDGYrRSv2`R+=^G6lwe@Cq?!wvVL(1&0|CQh5Tj!2WcVg7Ok0p*108Hzp< zbWOAvLf_zx$>-ntrKT!jwWmv34t>VBgSV7-%xV^(#- z8Lw$UL6Tp1(?#>-xB0)wW(00-J1C=vfIsk++q(_Tl21_b@12QxI%iX6BIv6M&xr&s zNCS-ymf=leFb~=SXOyXIzXTF?2xr#IXufNm00UMKf$ zbW^VlD6CmXX;}?tdEK8zMZJeyadG^4KP9l@qL|uWU!XMUf1mH~5kVr)U7(DMc01VG zaP-R>H(F|Ew*YN<1n_cey*pH~x)6Nqgn z`A{BoqpqWp?y~5C(=!DGhBWx87NAHyL2Nmpd`QIQTOb=Tn9KF&cy( zU`vCR^9?%cJq&GL3y8wC6T6P{2foc}Q>}X<0dffOfCIc_NHpx-Y+HuK_y$&hl+Y1c zAsxcU8C4pEkU@^YN8~_G<|CRa!UxraFg0Bc{x}NBW#D&m1dH$F$eIqMn&-U5rJ*?W zx6!~CeiNRAGJ8NEWaj{bq!Ey~+&#Or*rXtHaZ{fxSE=dWX-2>TBv6*pIUhI4Xh1ow z35YtECw*<=rl;xb!cb=KXQ2LR{|WD~u(Yimzub!ddG4bqkqY+(_jZX@pB>>jG|F6k zA`Gg-RFxF~A&w9>AA6zdlbp(KE-OL{7$|pwi*ewmXQAT1PHne{4OW@UeV-Ki8d~6K zgCCaiB%wgG49jtEno2{w@i_oG>Gzlx!9dr9mMqcIMjDEbAMrn0FoNk%BunsJ_rZc( zj3dcO9l1ZNd%O7WUU@vCtQ@F!7=0=-gN^^n*JlN~-|f*&U{ZNirX(UCVpSI&PwD<7 ze{zkvxh~NC1tZD==Y|r>qO)(~39dp`L_@r^dh&1HoubxF_J9Bqibp;NQT{K=_#gF5 zX(S3uX#=qaI7a3KV_rvmqz_dXxS0#uHn4pTrK?|TCj-*6h3;O->Rl~A*W@{OGxM@g zGMiZI%Bk`9gy($nd5CjhwfdR!2N&$kw?+lJhE&}8xEHZ!)1RGYe@dx(N~|FwLx5XV zbWU;;(W*qBAR3?h9Mz-W356U%0ME5Vvz>Lmj}=&EW!^1TEtk7@zw`75O#fa zHD)><5VWyJzQ-8Q9F6t=T5@V?i4dzA)FypDJ1hGfm-dLi6aIUMNLrgML%qQof)C1b z1mzhsHTkHlhvc{A9i*Y$!GYuau-mVH__KAzbBO5B)YrY@w`}ovBt=tz`+^$MC91d)? z`gNxYOsx_7)20Aa0!g}Ce$BOEj!fLW`mmt3Oel_O2!Qt+w!?LGdf-#4WR*|9HL@7A zF4z<=?jyio>|gvmybAj)3jk5q0b(e4L3&r`Qs9BG;p+gE+!g6q*=h0~e{mxq>(ttC z^CcRgkP0#0z*&krbR8FYM+wRkEOnCm9ZpD<>mi3t~k26+RdWbuPJnUSzo z?S&J8|A-v07{jmz2pUm%+}hSrq0Gpjq5cxSbA*@Uxh?)U=(&w+s)V8XsY0Cs($agr&oSP4JY>=(;fO+6wR{Qv zxOoB-=l>N1<@@py#HyEOtA9UF%}p*=>JA@%Vd=5Mne`EmZ|t6HkU3Lw3BZ|s?=4uj z^6-ly%lBW0DLTM`cf12mEsgh+wfHZ>8m5(Ao42scbY* z-^i>{jRc?0*(PNKM!2xi0JowKe)%aOuHwb>A9UCRP5J>g`jhNPKGnIf8`09^ZxQm> zRwyR0t-GG_-nb2p7^F$K4pZE6`$51(1B2Tjd38@y=I`h1)2J)c0m=%X>Kte&bj@Py z_tBs{K!x!l&S4ND{K~Tl>-7;_ugsE&^=Ih2*bDtl4;h+_LL=~_KA8Kp)Q0iqJ{f%l zAdrO?-l_o1@Hv715{nirfPc<>vMH1kg_n(PA00#?AW*ir|0L~$hB3m4L^A@P;!`}( z%7h#ATt4b!Kxq*^`PH;On9&G0x37Mmw^U*68B5Yd-{19Aow2YgX9sjhD_-My4!>d! 
zAubSVEzJ`vx(JHuM4c8jeBfL;kF;|r@Q@J`Pb2QZw?;XDta}=^B+M!#_k4>m0FUYu z1d4nU-)U|Jl7WSH&y}G}xUf(&z#PsQjuIfX^coT*p86hg*09f3cf5Nyun}bWGHClY z_RCZ{Dy#*`>HLq=BMd;W&eC|D6B~jO(5(I5M$8r&ty?muJA8^6>!VD4Rmgma8?JCg z_z$=Q9ft0$53@6xWCBYf-u>fp0j!l<&5KdZCI>ojoSDBxT2Tv>Vpb~z1COE`r8qog z4cl%dvnhUK+!*$qe8E6{MXrRhM7N~!^)Wh^b##t{EIw;M^YfynoG>sfg1lrkxo@0c zt}d~f1P?8h{R1Ial?+e>&TQ!)T9=eUPXt3Wlc(BQ*XAFBR+y5e7Hxw{+-YJNAzty9%a=Tn(7%kj*mDTynGH!DoMig8<^{%3lJuJy6YrJI-D)gWD$$T9JVD6~? zDzL-3YMH2L&H6`zKlDDgV6iV4`n^=l^w|J0&OtBYEZWjGz_9*?FD&jDdSh79pi-D0 zQNoX3i38ci2gA?&&Bush^V+APp|}mvOdL(nO!SPxVE=;-??B2qMV`kfsOGRxH%~H= z+d1mob}#b51Me-9usIu^w@^;emumG25kyB%vy3}D?hq95DReF*1i+gfGxV=BZ~$*g zA;!ggKm+14O|TC+A~Fam$3Pu{nW5<1x5o6Ku0kbbs4tVqSVyB+gakB7z<8He@OY!? zDXc}xXU@8#hH3xm!G%Z=zj)&_5H`NYeOtZ^a7|cV75&u#w1_mm(=Q}xzfF;3gn*@0 z%ow;pw0Rt1hQ(b!y0I_u&siI77o_HZKvBt&F8uVvU#b-1Id5XN`aK*o+cwwX#U89} zTZ&|cFTfq2Wl>59$;?+(cx6QudME|mIeKoo_P2#g2ia&N8I$F^107D!RAp4VnGEVe zQT*{h#Ve#6r4L4F7#A^m8K_vR&i7$0*gOi5%Ckc=j+O%s6zXzdLqmb%Z$rXp zf2=ABfR*jxWP$<7f|GEc5-;lfxGi3DbLgT;h+8DOM)73@Gc{Uh3}Pk#@#!1Ey!aU$ zhw?eCBrFK5?L!<{joeU_)0V`q0%`<6{etL->gHFi zExfI$DSWMHo$z|L;2}CJP9Ee&ofE#+2xSUi_1UX$A4TpSP?m1#t@nU}1Zy~%86f?o z^vdA$1`jZLbOcV%GKl~#hfpSk-7!C1G6dC=lDJHgQ ztW~-iep=g(-XC6cfCW8WdxBBFL@D;7!w-PI1K3kg+J?29E| zfxL@h=zUeeWxZr#V2F*?q#s%|@NELQkR?aa5BHvEX(g`Rd=_J}rBked3Gv$NxKv`ygN0`lMc5739G^a6x(uK-eX*fW(qL4a)0XcB+zm|I0JL$-lLLDtIaaZ=^VKyU~(s=vrH} z?UoBJrLaCV;8PRv0(>`ZwP6XaVcg9BX4im%YkWc#l?)P_lh1=+l#Td`-kgLSkH@BO zJf@*YMp@|J)NXQhMsmmQ>S#a*C)Wo_qIs!e=ze$#C^u2T;^gLMFu2!8+v|}^`h};G zkBx~{)|Va9HrdYJ2Gaq<*|yTq@ip84sVh6Q2s&D1-Uuc7AcqtL{h)C_@LNN)> z-yOAg>^e27d2U;;u<)S*2OOw9#Hv_p!6hr0Z{zp z30tr|W|RKnYb&G1^ad)a2=5DQ%9#slhif8PP11XbXsBbzWi2qStUh3?wug_13TwlH zZh*LxaZHlfLn!;8ly5Slz*-USP<;c(VtTTnMY_j&ALe37W0gAfm-S0uuHnhon*uZxRO|uG%{rJ5ihX>zku+lb+Vnjl?4EPaM~9x#g6&o-KD3 zNM#hPcJZo2kZ<5_XOy0O+cMsIlY4QHJx&7gP=upejC@OdPxrNELPVdYOJk0WKr6-q zr(k)J1u3%7`#V5j)2i&O|_InjOWwk>CTpS+kF@(Kp@Z+J{L#}9NE7w z2@3f*EncP+ni>-eZl{XaGPQraFM^6BSLh>B8bPJ`8a=c1MWMDKhOM$6Q|#-z9<{K3 
zMWok%=Tol(RG#-ejDsCSyJ8uok4P%VR#KLnKB_3MT(>jxYRWB^eXzjkw5;ZIzO3H& z8Q^fpR*OPHMDT8?i)5Y$R;h?gc0`&-!mJ18w{lNAqlSM+0QcN^t7F_R1bc`~DKSwb zxnLUN^Feb&gMk&ElH~a%%rjmGr$jE;#L>@zbb0h)W;%|`+R)0%;sCoTjqST%N)*BK zrW&8N2x=u{E4eb$p|&Lf&r1ih!HgHn77FW)G2@;KMKAZT{cKE{hRo`5l{`+c56bo0 zEp0R4ICuK>&HHdDKGH{&-VGXJyDrRcnRG_us&v<@zdABh3@l@&)|>_MtQ zi=$k<1~1XNT$Sv>4+)m5-cA=yG2?l4jG_Tju?_bZT0f$ifNcc~=no`C%SsbDeQ#{i zD$Jx;5|L!P(oy*9o(KisR`3AO8acSlQ@|cd&&O_rj-0!O+Rnx`{8s+ehRS9 zLa?RYND1fs1hR7fFR1F`sq&D9UTt~fxkJ4CskHku?i=AoniAabPcguw1`Y#H29fz9 zk8X}m3djO3Q=5K6%Y~26^@j zF^_^2q`r^HD4LqI0&Munn$rF-`I%z^p)c#}D+e$7+DKhuz7d)rno%~%m%R|X_ullO z&6Fbma#gZ^%_1m->0CS+&{_*2C=z$*ip*L~ktm^i1%iWXcNU=)W z*2Y35OFk{d>$1~c-~YkW)ZQc^m8G^Ytg5f7y1CvzxyoCA5wU8_&sfb=m$k>*7lDhU z%b0o1hCh+w^^P3lb=(R~S8UB>{>x2lq77b-3M9?|M(s z17L~B`g62t+0)0M{+u9aPf=*Uett5hw*`HBrzCGSb}(1mPPH|s(tV)`V|)ueFVU6t z`XF?HLsLII11_Hwn35AR_)%|lu-Gb~u=05+l)JaXG@%=lQvZVS3U5k#6i@?0w$c;b z6V~$9uYXH=DY#>E9Z$9ZcfT(i5L;eUv$$=zE6cJPGu-XjkV?DGyM05ZRG`u2iY60E z62;As#O1wOFDKA+%$qa!QX_`oDwLG_w+ms{6@9Gs7ge!2=Y%Dd?|a}KPb3RS9TaAq zb%aKDMypiRbjp%ySI_Mz-E=FHsS--L&4wS(h0a>_r%HHzcf&ijroVF!p(BM--59)? 
zN$aD~<8$l6>V%QJ^79FVdZmhEO0tD%&Nh> z6Hir?Pjp?1$*y;!;sIN$tn{4ryZF}HLtB$)tYIitjL7}t7ahMvxD007lC{5YGJW6F z5b)X{m}Rx-3_kEMW};dpFnWKb-K1WC;X1*-E^OSJ*vQ!(=6}CaTQ=_2mYS6Z+WKZ) zH&Xfa&&p-wEI zesS^Ib>FdGinvb?^lVEUY5h6PI)&_3Uss*(P8F*vdnSF?)Av5|UG|z0U~GY%pQ2kk zegVx0tw89goA2Vs5rC>PK~Eb|8faEu!3o2-V%@?`z_U=!rANy4LnBNJ`y1^`+eARF zzlBoCVFjgUiSx5!A;^4vP@$_mcSGc3i4G3}r0xx(65D}b`WT&Lx&RYoMQ`rcs3fhX z;8n1`@odscdAEk)<@rT#6r(!D{C;A!pe*fTr=zJ{^4pe$k>}UM?^$ADs*iEXsozKJ zp$-al@yn&j1MMZ@T;<;D za86Ivs|Wr@RmDg4%qe`LYX(huSR-y8`C>~K2KZBqd99AD^Bbq<++s(pJqHa;Rb3_6 z!MLr<(vypO`NGFe)iyb?C1Hp^nv=~F3Xq5lbqdiNUNws9@A}VH{%3PQ>Tj5OW3U`;jh3KfQvc*^H)#EB)P2nC$cmqHv(Fo1bBV{p z#W7X9cL?8mm&b@eyJ$OUK2{=D<=4IGudsfP_gpM4DpW4nfAju9|9#?e$~Q`rZ1i_C zXCnE_*YwR;= zn*7DGK(+X?fqt`r;My~0XXX3WmR8Dd%bTm#lL_LJj#y${qi)Zm1~Cfq!ho+qvx;Fl zdR(11l009Rzc_k<3d$^Wnp>|S?pGro8%*OFh-4Ly-x5`8+BbhQUD(IKstf9V0rrYG zO-W^I+3_`sOmZt0_bc;e9l3;CmSI4PQoP8Z(+9hfQP0%4LZ88cHkWJPQwZ4| zn^yUPyaKl`Ufm`2U0lz9Jr2|=xZd*^o_E`WU>Z<-UE^G0qK`q``6Z)%$Ss6DaJzkU zWYG8OoQy$^RFRjgYjTk+e>rLl<8CHJrPT8SVWuc=flPjjQ0c5PRT z%VIw>J?nO_gFkn-Idi7TE zCGuw8PaeOxw7XC2`C7MLPwBMTxDH3Df9WaQ3%+SRw}gbO{L zfR3+Fy$u#h$Af~0RuUHtcYoUL>am_j&_4#~Pxd3scDS!Z5PSQ zu%uS25nRs|>M7Ok5p!L<9O}!L#wT@t66GI6FH@~(w@PkxmTv`!4`E#)^~|qERr}n8 zR@;KesP&|H-sIBnY2t{dw0>@0rl-c!l0xEuK~+sq(_ZK9tIheeirOV^cC<7|uYMnf znan&=u6bX@r)U4>uGk9gQ%)jC#G(jH9T;FeQ~c3gKkqM=+B{~gwI5Io^i3tSHLH0o zjb4`TmS30{o##3%SjDhABDh&rf8Q)JCv&vVS*Jrx=T`^gv-K$^J1y7sy^mOnh^?-Y z`j^UO?T#;3vev}rYJ4zP#I{{thGmW$rz%r?ab#L6@_dkx8JamfesXzPQsAxbps#Ss z(pO!XhZ!Z)nn;Ma9Y<2vmB6iXq=Im6MUtf8S`EJ|`;YEMg{X%*$LeJ* zwfHiF3TLJBUj`mhI|*mD{TS;z!N#T({OWeLyVD+%m#mle39D{Ta{A=58dtyME6rCj zA#dxcGIPd+Xn3E=_IUB_S`?k^YjAULZQu2t?A`56N%E^=9o90lk*>m&y{lAi(jqJO z>h18FoC&=4%N>7q&5l=vZT;_OHe(JDtinAdxnydE zJY7dR-mCV3tQxF4d_{3s&?B$yXN;C3{^9Em&ykfRcu{oYXF|v0KDoK9wlK1O`-*KN zWBC50D^SA%DF%5EkYKBBJQ;eM{=Nq@X+XUThDui`t;T7Y_C9nSP#;+N$+$&qKYCR#tUi<6l{0SsEi*JN zas1?VShZmDsF`bPPx?)TXfn64;^>*L=PO)K@*#A=g172gV7K&GHR7jEl3`lybMIrP 
z9`T!AbjxPusl`MJ2pxmSLO%b)MSVLznGD1`QY6X!4tTq2^|x$;Fvw6wYgJ%cfflt& zb&Px5KqPoWwkI4uf7ftM6I=V%*(9ZcA{eG$2~tfu;u14!`M8Q=KIbf#-D9=PK!&c7 z;5*I$DNl1n9S7JYFomcs^&44FpX1A_meiu_A!(fO5KH?8?KavpDi$op&9tjufV#M` zcsJ9gSX&hOyz8A&H_9E`++4YCw;YFE9NYs92DY|tUns78GX8B@P%`VBS-*MepxEl6 z_V#5%cW_e0#+$2X?TefEO;4sbKhMrDz67ynY|Wv6$SO~&J^AQZCSA+@eC~%DA&X;p z;32K&$W*RZr}@oH_HVY{POuSVVrLije22cs!_~}hC*@}LHy_N!iq$vXoL$aRRdChR z&@;dgf%x}GP|PzMG*lssBAX6xumSW20OWi8_xG3JFv|N?k)9l|0aUuEoN~T~;iQ|{ z>=!@oR}wxK(H6aPY1We3>EXF3niGBrUq8vLOdG`*5(}5cgtaF6UypfyScp+qI=GSa zrbrn_d;OrVX4DexB=6~9V6(>_WYBUe^yKj6(!dVkXX(PiE5;>uS>b3)`=pb4mJ`(q zbtbtDXMtjF*%Q*WJ#OktPa^z_{#I94J5mtbjawzNg^zzrx^|9) zODpM!uaNlF`wyb^?h-`a>t8Eg?7w+WCuxqXK#pehVzv`w*YeO))PsWa=K%WY7qv@_ z3%Dpv&L11aF6&n?g1XgzIWRCl7l}aRpYF=u832+06kBcX?vi_a zx3)RfuR5YPS&7gnFGu|(qAxPr(#FQ5(>ZUjerIB`T=HVkN(Ak4hY)F;fOxl3MI+It`NTM~826*%>T z;X2m5p2)WFw?Bjl^K95!=MAnkxYE1+p)&yI!$Aew`qgh%d&fUMUO+~-8nYfz>@q}w zto+djO0Jsc^ZRmhr>hn*>~c3YW3423(KDdv{9DMoz%{~^eC!FCw=f*1by-x&Po0)x zli;}aC+Aq&*n{{Uj;wGqy~4r}88=t@+l{HOzau+tiYpuQ^3Nj*S4P8rrfino_VV3O zelnXeC_Lf!AOBhIu)qnTpTm^kQqj`D70bgAUA;4emx*Mqs})AL%dXw^-#5EtWqD);TE5ydhcGu=no+B-xX zx``viSEOxcYNsJ3I>>y2MQ{IU6{w3{gT2ko4r1)}1biGlx2Ky0@2H{73MB=#cDxanB#F^pnY~ie3wrjD`J~EQA zLS_Xmb5+{uOJP#4-_4u3yvLUF+oHbcjH0Xg{1iuj8)k5c@cevQk@dfjXyeK)tb*Mo zj37B(uU;Fa1uEF^hy;I&3JSyuz77OlvQ0AEW-78dPb-O{_ zjq7=~nvnd6m~|$p|JeC@HFtVk^;KcqjvzR2e9I?&6TPGHtjBR3Vwe!aX0Lj(o0msb z#o>uH{Q8Jih-FxjSh335%Af8nbQ$INJ}jv7rSOlka3s!kx_;TZ{80tfx>5!$n>Uz2 zE_~d{_u)kejm7F&0nPEPf9xflWYPC@kRlK3tpg((*m{ocHj@Si#YFz-1>58(l^#8- zu2;>j*GYRw==Y3hMpv2g#23)_k0@Q;U~g8~%efGdBd3p-KUq)3I*3ACS%o*zLjpnR zCokGSuV3iV2|t!?bU8oH_dD%>d9OMAf2S>5N7c@5-+b|orZ{pwZ%w9x^kbpE82Puh zf&$t%DOaiag8piduBV$qFvsvj^tVdMZ*7L{J5w^@Oj`$U`?PXhqi!D#DH5|)c{`S+ zmj;&9AZsyv6`y+8{tsnu9aU8qzKa5)l#&ukOLs|kDGid+iXz?8tte8W(z!vpJEbJ0 zySt>j*}y(?;rF|D+;hkIfJ==B}{>)i=;< z!EF7IK=q2;f4RtK9A0vE^+~bdh#Z^YBMA69|CF)40F{DhrMNAI0BNqp-iSGMfvzZK zZ*D_a4^I3av)93h4_zw(zTf`*JR{V6exd;^cf0dPFUE0kqWyFPNb_BP%OeCU4fxW1 
zC08#unJcrL7+b>ty(c%K28>lsW1IAd1rUbM8AIVf%@x+vJx%U!W|dl9sb}QTMXyUD zOEmF42biV)64JhM@|B$DFEo}jz|!kwIQr2|A9LiYF0RlUVr{G>OvIF7eAqM3K-NjU z-_-iNzCgkF{B(2$HB1f~odH|_BwyCLdRHe?hpd6S9Y1{W-4D=PQ*u1T`=yAgLMG5h zfW5|=z0J$d&LzqE4w~}F_oAI81*(6IkuBnb9%wR@#L$poABmMC7FUSHQ>UP!lCmbe z#S#NlpuQ+YC>Q48J0A|yoWpH58etGf{hQ?Rnr~lVoQ>ywv}vt zT?o1Vk4xVKF=#028>LFtPgo8(jP)O-d5poo4v(9_9&Fg9&a4EBjx$RcJ$m%MxVQk` zJ|AxKwfl4?!$^NN`Z8`_nB;wAJh4UEgv;ev(Y6MO{=9;?JzoOSy_9!rXmkF4&{Vx5 zzCfVztKWpgS(VJ!XIQTjK8xa_C;45qiO%(s3F(ob`oF|Dt~(V~6-eZ@lh42Y$%D%X zMfz<&r0NAgb(S8x%l2$B(mp%n&DCzbS0J+=p&nbB9nGJJt;^qe_Aezg7*0m5YJ}tq z2%ZiaS(3G#w>NxVgt6D{W9As3g)rv|bg()GDUf}<`N|(fZd^vv`QiQ%hLFN4Q6!}M z%+V#sc|%Z|qlq|n)ymCxs+U%8&xEC&R;WkUHm3fM!H-!qDkfjXBZ~VKCwpxJg*}R*OR#-4v0!+|MO{fs*!c?^Mvmmidq8WO6mq(=&x^j#~4%=}#rw^KVFJDHTca(HXqchNIn|Fr$uVpC4sDtF?nwz>|V4~#Bb-yGr zFE4Ma?@Xn91pP>zyOW+x#c1~Hmd}!-TeB>m5{%AbS>`j^6r+bV#`jd%;(7Z@4Kd$T zyrw%?zsYeq*>b#kGk^XPRIn?oCx?~`rrA!NMjA>fql!nC85vI1AH_7wTp=$a^{)+^1l@>)@IVyjn@p{#=HH17(;nb(8qUcBAbhvk=u;V0p35f?`2=^ep z*>eud%PMK9N^B=8Tz2FVEvBHI(Tx%Ij^We<3X(#Hmv=z#JFIDg-$!nEZDi>B+BK7R znm44t_&q908}K~YU|NqD4VoB~oZJyhe26`(qI>dlSXQD$>RYngQcxXs$@xI`3!28h zM`nqVx0zvri^Gi8-ca+@>T#>{ui@O@vQN^(UZ2Em@w00b&;t=f&5O>`**Un_4t{2_B!~iqj=cs+P7c`ePVyT~SX*=+ZKd%E$cXu3P{^^HZ@4iVW>+EM{7S^C_blA7jI_eGCfH*4 zHw+awyXFx8@@SMed-SC0#qb>*Wrjd z=KJ4VR9&ZwyT@x*MQV8oyD6D)N=-t_0Vec^|Fw~EP1@hkM+H9PFc=*w$TFC*+2Vf@5q2T2Ky4dGuDSu0kHu}dApvU<1 zC+9;ti0ZL+t|7WG@6q z2I!fgrhTxr$V}&L))Pjv8s%Js9ZJf^MqG*m98j?~Jv< zA%3F_?T@>enQ$^()vWcqs9m&vu2+#5*u4@>oG39f2Tb1ZZj{H4lCD$lMbQI4XQ1t( zgY0vWi!%r`F_pSLCpCbujmNIty?0N1MO^G9ZMiJth~$!?gb<$B*95=DiZ*3ewc%@= z;9s7{XehFHBz&*aUpk!MOR7*-6|$!}(up7@e8}wY_YqPivz_ccEu8lwfn)(2K93GeCC{FG)?NXI{^YuLWSXyY*}~gANrF z@aKV4YG1(1KcT0Js6bjAQJLT^De#`B=u55=GY#wV3y|(noPU`so8CTrB_Cmd03q%Y~ zP9Oig!#9usPX4J2DQ6Awj<8Nm2oHr{(jUT>Ex!xvu^>AOi~f_Clw zt&B&g>lH2c4*HY{inFtGd8rC2)s1$r)t-7{k*|3v1KH_b4WUiFgMLwHi$To~gpL)> zV~1{`7xdFu1WcX=f3Hogv&77%>>It(ujD4*HI`17ByIa>*u1URteI;Mkw4||er>#3 zVaH=Lee@CQ2)=i*Jt}|3GG*%3X8M7+C(mPUX?n#Q 
z7RK17aYu2vSAL?7lHAmrS@zKgt9Q~?n&Fr}y;*S~#48gA=w{#*D&m{TnH zKHL2mNid6MDj^-g3Wk`<>=&emAk>b$FG?<@I2d2`!CsJj4GRB6?*bnGzr*bt5T0b( zd*p4uHv}LOl!gW=RWiiz`Xi2Y&sy{A__;Wg4z49q=?$afoBGRjWHBB1-Ik0AT#u#Ni#jSH@GxEB$vZ zm9{aePNvVtXX3jjMm0%`#QR;y+4M@KX8uZ%QK^Zd{BqI#$S!KpEk_PCRlSbW=n15$ z==Vpr@PMX(AtTy2ped0n+X;BWr>*=nIQvh;{ZnD0GKgzD7VIMpDJFDHSd-r(!N!r@ z-vM5;_Zc>PRyy8+3;sN)L(>D4V~b=(KyOG0gW zj`9_{wbpcO`t0`?k&wI5pPCQ&J?5jrXE+%5YwZ-~&@UN+7xLN+eR<5kBxE3EWLVf^ zvodKnt~RrT5Yac#sl+?0D}7ICUd6;G>M$G*OG!;-&OfgW z<6nenWlj>A<}NH?A$h?*AilAgiRo*zDpfMS(#3yzd7m>yi_ut zrKpJ?JHNd1*tfajsdpLquxXs*hLwV1=im0I`iY4`6OCYgOHy~$siVH1gfJ~zpL!%u zBf}d?hqopg-pU=BC4J|NU3aSQV(=#WF8RJJ*8Pszx5V-21b_Pl(dO#Kbj)9Qn&_?n!OJS1*zGFTdHe>dq*H zV8-2eNq%Iwxr2&U^k7;+kkvXE=SsJNq3B{eipy)!N{@dZE*IlO&QbY3lu3M;udHk6 zWav8MQhA!_%uD<&so0V4+*)LPzROHJ%p72@8p}kTX1Hi6fkNT15&e8K*|82WMl#vwIJnsE`PYb{{5Pm`0WZ9m(OoJ zTs4fDqsfoQQ{i?1=&YJ?EqtTJ*366k7Vlfc`s5N)$laVEH#>ae@`Vd$-z8(C5V#iy zuV9hNxgxTJ4b8Uuj(X)NUjvRoawXGV#%2b5|LU6yQ0?cnp%1ccL|~Jd^aAH+lL(~W zzmKG6`deX+eTYO;)dN|9Z+eoEmMqJ1Njwb-(aTvqnU9=e1}aS|Uc>p6hYzy09AbV_ zSDl!cOeepwXsm;8G|1{|Q|g*N5*|v{;h%*Z1vO?h&H+N*+pO(Tt902;2~mUF4)47U z5mrO`N@;>|fuD5tzgK=`Z{^Fse;#vWZUSc?borZoGD7R^(F|i)ttZ`l>#b*wdyq%4 z-%w4p>y&V5D$)kC%u`tk3C_9nKOMUE;F~n}vnfiNGTw}b?;Gf#qHVlU815t@Q?HET zD;0&+yQ$xl$t|`;rx_Qi4oa*>Tf!;}Cg86YN#fPvcsp~K_M0;%9q$vK;$%=%DNTMW z$?$&FD76rM$h+ed@2v2!dspX_ka47#5_2n`42$E04-37ulttKVwYIkJ(wlldri4;)+uz=n1PJhbWur00i>P3_*+BD{1L0prUGFxN zr`p4n37F0sDD@@c%Cj(ebxRybUrUcEw#A;#m+4IC@Tv~EhsZnt!`A0SKIzxY-#PA9KlDYzo4*dO#ivzUMsh0;|O=CwW*}E@P8KR?tXN& z@R>QLd>*;s+@*%~i+*ibSJ<_gaYFvkx5u3-vZ7?8bRcHej1l)h^DjojFR3elSfGzzmfmbnT0EYzsG7u(1OANQ&@W_DE4uK8wIs9(_ZaDu|y{Rnz% zT?qD2D)vbW!w(_ODYBd#Z?B4V*Q*0o-ERPoI5sIYtnAEn9jzHzxUMo^t#BReN;*Ak zL)oP{VPM#=2=WCH$2(scQ@f@4Gs(*3C%rnAy@(udxA8bWYhf++e#?ZBdd(|~>E^rr zl&=|v`9`a%S1+1Q52JFf-76>F>7~m>nQj+nIg)Z!7!T5pK6|PLKbgv!b`xV1%en62 z(=ExkKD910JP^!#F#liZ^|i)0Ls`3?Rs3!JtIQdUDnshIli!AI?oTJ&$Oa#41T_%< zTntrla0zQ*rD<{!e$cv7<4VLMnj`P*z`@4+2Y!QEU!7WY@OMtOWki>d@S$hJj;66? 
zg4YM>E|~QT$vMR&_YVqFN`f(Thv;@?UTe$-;^cu0n=5!^Zz&%okkx4VcYB;sRQJ`*};j2Je>yv+YoaLWgVCfF@^)yMSe#O{3y2{b z?kAzzs&A>5-*_y|C#HGtQ@q<3V8-;BO|KxvxF0sySZw=k-_0u4;l? zf7Ri>;A!+o2m2jhrv^A(EDo~BJ{{tmhvaREyH|4i)*t`AIyv2If3{4?zSVl zviUqOYgusw70R4fO)Rgfa%L9&Sbe8^a`w2)r~Dw_LBHvD7)V#T<((sn$`k#@N519R zqdFtDV`Al&z0pPqkpth@1UOGU2oSm`61n2OOY}v}KQ( z-nkMS4HRH1VH)3nPf97fBv7S(7VRX~&kZ})YNy#g^wpz?bq_Y4dWUVyX*#Qbn;E-mOh#XhM-Jam zQl|>w=$@e*%;KN4`B5!IX5GGj~1mbe{68H6!_fQ<{5eI1fXyEmW1eFBxL{w1Ay+Ra3RR8 zlpi_)U?iwhHfJQTLT`QzK!~mjfhG2g^j+?SBD~)zIS*j$Ya8#489~&1xlzXYM%cjY zra7bh?FOdhd@U2N)f6M5)mP76u0mOP#dADfy-{@uL_187PC|(*#jku&aqu z5Oj&a%BIszG`tRWOsC#nu9`_44onr>+;GgIRmmG>^;=}>p2-;YS2AkA_e~DBi)CNy z;8~u3o_a=T={0R+{4*K7&#LJ1=fv`*WuuYxPY$v320PV}qoQF`Ch_wIb=Am=lc&?$ z7%`m&j+C#-LFs_?M==2%C>^AwV@*RF2Ead1;VKNw?qv6j-QyHdf~L=-){kl2AVv8C z70F9dpFNkw1;>H+O^pr$!nq%$a9aHTygn_PSF05gK*rbs0uw1o6jyH{gM0z6z`i}7 zdpl*w^r2WN(s!YU+J`n>If3yk98l%@^;|6?$<>urC}8<9cygiX!vo0f_{Bl>03Htc z6U-w%`CczK2Wf%(U@Tow0(ccis-WmVfc0WieI+&1f%KF*=vgeZm2V%xb3H+Ez|hjW zU(T|0*jCUI6#io+k@n8HKt_lHky#l)(`{RiDp|z9bXyb$LH#Fta}(_a0$}S*FwXV$ zXD+CCCkgkyDsF##<+kAZl~M%(-ypA*0Hj8wEZ7hFiDp1;8_9nkqi1h6BHCNvt2XH+ zXF=(5;i4;22vm!)9wmID6pI3Wgbd+*m(vo>)zt1nbwHH&9kqb>=%o}MHrU}Tsu!SW zxnC;${i~;M$NB4Hz?w(LvrNE;(H$8PMT0=_IVE%yZ6tXI_QEUhl65b$H!N9fT(T$;CrD$i(W_X z5r@<2ogTo;r|QE8n1#3T4WAd^N1?_AcvgVd#(>t01RzSl=6^vaG@%6qP$aK~rwD;j zAP6Z23)-dy=f2aUKUCM?edT>TrnGzcA+~Z19ykdOHoz zEuwZ@@8RNd#slC$+VGth9YkM$kH0Dl5B5dopEg+!nWd<(099!|v%F0f;n zr2G|V+!Kg9jeCG2*L#MC1I}v{V)_`}Pz*gH@uR07MSlpb76s#+;7tofnpW-J!ttId8qJ zpA;BGx_gg3PX#1GTB`mh*q{?`$oMBx{ighn#_b<_RNbtZb|Am1_OjTFF z3m}+<9}c5BOQcvr7@yqK=N-3qff1ZB9rJ8ET8KO8vc}swz)=?l0nm_f}+9BvO^#2MV97HVGXRX1~A>3#>V3=5fiVWpQRs3tT_bFF_3nHStt8zN< z%|mSoswYqp^+_=&)~(U}1`SKFlAWuWLTc`aLbguPoO(xI1|6(q_g`H9Cn;Tsq|=5H zC!?gXPBmuR9($)`jvf2imv7K9A)y}P#rjT_ly4uYnpdC&*jGOYXz|Kd)Aj}#VIJ&` z+pTeyhcI&ZrPivUU|x$sC({FSabTpUi36yt3_d`r2Pyq>mVAR`Y1p-Jd#(WrujVW| z1`hi>hAnZ#Wb@u{QRLd(GEaaI2Vk=){)IUICx!#Ccx;H<0GpNznfm_4Ifchs3}mwA 
zOxma`N0cSH@SZ>qc#9Y7YKfyKwy2@Ellr1uVSJ~wYaba^2}~Lfl!rD^-fB?Yqxo(HF?YUwr>$czaioqriii9~kH)fEs#k5;7bPW<7ne@sNSGOGDZ&-T7YN z;e82m6?UmxyY&S9Eq(hl&{~)VUtZRdNb!J(xlSh1%%H~s%>aPBNSmWKZFON6g50yx zAW1VrzX3W&8w~7;s%CA$1DE~huEW0&XVfRN2L@9+9KyP!Xk=)&<_?fL)ENQvk34MH zk|sG*tJYd08Lu40KNbLqLouG#0z@p5e*hRrXI#Ybd%Xy7yu|@8*Rcbnu_Eq)$|mXz z#8Ly;6BNI{M%q;8!^48kH&W%5kt$$Cyh)r2_4@qSZnoh49dl$9F8n58ACrKv!rN>1 z8<;Ew`@;wK$&dK%(YJBOP?70Hb5;ZOT;TKX86!$-9W$P;A!q$r7Tr`pIV-z-vquBK zPa-8t_Rp=B{gxp?AOYsM*zqGd@U9d-9C5|4s!hHBmJjqLRw9k`_KXC;8R6d`kr0Xf zwYjhO<tDF}0kHB3uM0o_6bVvT~)8~A0=z)6}i^VWpY**l7e5D|nfZfQwn(KT% zaL7Tgp70##Ie<&zAEbKp0Q^?_eX`lx+bspPV8#p8l}rNjm5-=y~fYzW66xY#g2I}>yGAe9ciRRWZG+mId^Lcc*m#|7Mi z=>I#n;631sBY|LDpO$?6#Jip*47_sq#!ymUmf_g!?~-*~;HaQCP@^2FTrsdAEV(P( zkKHZn+QvTRrjeoMDuARw>mbeP_F(Qq2NPk*f&!F>(1LqP9zF0`7Fr*qJYkUX=(X>J zo<-yArJVU*2e40XpiH^X?(E!0G5SYO8L9kg4g-<0s?h)nx_pvPLjmE(0l_V!9}rC^ z9k~0BPIe|Mu~l4LEG`$QivxvTzxBbJJ3uketAk)$&W*%R=!aYYG9*e<{C$RY3R3AX zVLPo%!l!rq-Ozy$Z)qJMMHhk;{RTCbQ2eh=Pgh{r91tRx6`<^rXwrp_cydIDaH1DL zzgK4q_Wy-4%v8x@8&Bi0y z@@1jvNis~Y|IEP0rkOW5sf6=Nz?OAO3Cz^p$1+DjIRKS<5=yIMz)FLJA`fA#f0Wr7 zZ3B67FjTq&c~*x0HU@jmg5;SsyrpChhoJn>>Zdw!QMO&8_#YJf*RY<{ha zJIKfft(;%A&vzv`4P1rtU!4_{0 zmUhJtmV-=3iVTWVxFEQ!8ach$I@t&GNecg8!Urg@gvMm)c#qz-hq;sT9GOJu00Z4c z0mMsI8jY>EDK@BWh^2dEwyynzJj1Qh**_{xjK}3|Dep(yS$XL&B zJ4hSQ0|A!--{t&>+j4I+7bXi8E*=zg0*WQD9`N@+Pr%U1d2Z-HVJJqU#$d-Zy+faqq1=ye8!3snGouJ1%Mgp8t6adS~AQD}C zs}NM*ph&A`GKJO$YJI|LtVE>*-m|5B0mP0?mMAyCh8v}}C%i8eA`;kR_um4RN)^li z#Q1tBOC(zo#+0b&H{kdb_7C!%C+U!O#Ovsk(|1zfg*W3L7LphRe+`az&^yUoX z{b-?gHe7GE$CZTQA9*T>EOx*95;QNKjhs%7XAgcR{bWhnEj_i(X~Ql!2kLo89q@_ty*Q@^xlT2g>(nK)xYI4~5P> zb<;#A&U?U0`6{L4fm7NkUK*%8zP(wv*|AMCEoW(j;@5IEFyExd1@k2)o}5T1FF?DM zgwAjR_ycf=-@v+m(Ax9)Lw)%_A7;gXaTNC5rvQTBf{bHHfY9TFlFl!LSO4hJX&wJT@>EImVBay-iVE4TSZ?hs~?CHJX?r&0DkCL z{;3+csP|Z~38K9NZo_UFC+z72?VI`jk<+%S?7`YCWDIl?W#IH5B~b{VK?>Lh(F(+A z3gMvxJ}bF5S|5RD1*LW}XlJDuZv#NRoiX9QSe(qOg=+ovqm77-_*wQ0TPs_eA6EN! 
zpH}%~yly!-| z8Ta(FU-$S-9&C4UxcYubp5cYE@&mS)#eVIfkECs;E1emo0+vwx6<`|ML3_;_3qJP& zS`GF_25_<=p5u3iP01n6^XdX?r@#S4iZ3!$3P7h3d!d2D+1OslQ8X z0f~(w23eQvtEqB}k&-v>w%Y&WX>ii&00fno|K4I8Rh~MGk8z*Gl}9=D=B{BG+;qey ztl-wz-`>_rf>@{hL+SFVX5aqS=oz0Jm(aDm(&{Xnl?1jV5T z6afn04DtcLcE_`Ys(GI@=kZ=8rThQ58A$aJz$pb&t8F+1iynleh6;_++n@a6A8vhQ zJWv7nh%pz7r#2)I(51i?_7_Ck1Nq!M2m)0o$fLoe$vIQtV~9-_{ELJfvU*UePb74< zbhB?KKHJBJiU{MGW_$=Gz{L$XKxOt;0ZexXOrJk~K)RRWcK=p#lc-lgB|3AdpREca ztVzDixj}g?MEL~War+Oz+5&2?I4VxNWhLf3BEZlF<&GKn*_z*&D#SB))y2pE?&d?(zhvdY z1y{?imS=meIg9aD|A>;zL97cgod4#SRHH-f-Hy9XdJ~c){a>qSN3+qnvy}Xvh)J zQb0}C-bYCe-9s>s)+6{zM^Dg6r~=*(A1Q8mJnDjW{ZYaQ_@$#I$^UjQ2l*uo+MWQY z@0$8bsT>cCV`cfSc10Z@^@|Vx$AWNBXJ}PWkqw}yBe`06b#vn*BW|n834Cmq-tW6( zII;k%LeXq4=oJ9O9{Tp1jiHRzX|0>%VO1mdvUk1aA#9Z?72`(2W~8IIXTQBS8oa?5 zV_SW#i>$^AM^dHH8g&If+&??&=4c$~iQ}=@ovBHl1uOvb`fM><Vw1a(Od4K&q}IjA&bu>lkNIsAyEXgZt1F>!yOC_XlYit0OVbqmwq!e{-R$Ki zZJ3DUO@gx%@WomWA3nRenWBiTCpuS>Cl~)ihLCr?dBSpWQ(R!+^XVip_kSo3I3g)j|O+=YP_&93fxin4S9IpHk&ON)<)2 z`CJ|x>x&`fFFQhH&+uabux%C~RbDVqwAA_1JPSoX_63vxx;tVQD-lf|o66M3Z!0iL zueXXC$AZ|H%QiQ}ut_*2c=h+O3%>WxFpyl{ zZAPs$#%5F}&@DqUgV_cfO+8QPpleMRx)BDv2#)Pdrfn3fe?yOo4w>KgUhk)_9ZtLn z@7>yjm<&e4lL_ieHqm^afB34kftH`rg+}Ver^=*m9%FLrn9xZdDlFC z-Nhn$J1Tm<@bQG%`L&ntM0RovCv2EXU;R~*Hy}1fWD}DD`XVOO%kX-3I3? 
ztORfmJq6q)jX}5;S37FY!zq*j0(QUqTnb%mq|w(G5Q$KFAHOh}AN}a9?ft{_GCJ+h zXF&Ih&ig*WH8)ugVQMlDee>KM+xipj&G90bNB8vM`+4uPlU-5xc9|~Mq4H=ym2KVX zyFwe~8gDUM1EckUWz5`uvD}*P&H?7~&ndmjuVS4GpPJD;|C(-8?_oOGb0&#yQ{E@E z<)(EywXsVyiwr4L z;&Zlx zb~2aRsY|c1W>g|{-fv6`I~p3XWl_{U4%zD#uQw>@o|f+N@FJ3mGG;~X$meF)sd^Vl zctWUWz+oOTlHyTnH`nkTz8v+j?&mk1*iLCX;_{_$d-D2qQ(tX1(BQONe|XLqaEvbK z_KpM@wRQO^`E|1fbi);Q5_B>rx+EEQ_BnZWO;bRNr;VEs9+sIgUyzLsU9#5?}FT<;gibKzW8& zeYGWPJ)ZnL_6uT0&4f#rgxr^z z@C>`xxl@Bxm=892VyM!BU2~P)3#zd2(_V_0a52a1)7orE#m>Bo0DLWkS%<5V^O#TP z`4UR)uJb$S;M%fWll11KYFY+SC(V6v4K_Jlil;fr&UmW6e5QtmGtnigNkUrM2mKz~ zmt`3pi5zy;Ks&#m(kMD;=^*$%&Dz4FMf}g34qB+Mw{UA9m@|Jio z?Wz`H{{W>O7uR!jIHoLfNZHd!RzH8PlcaIszkEpq_1)gDJEOfx(mNor<+UE>OI8`g zLIfvR9Bb(z|bIpjkTX1;9lEra68shj%QCBrQN+*fXX?-X7fG)-mbb)O{#6$KP5ya(P zKXfj0oHcE0E+5nubr6okOsULUYW2H66l8I3qlV{IEk>nmrmjhtc8*`Big%M`iM;;| zZ1eksRk5tP+9=oq-UhFm3y$GrQH{@mJm@6P7fP#&3WSg2mbV%bPbwT5epCN*O6wRs zQ@=bd+ZO9;23NTf)im@M^>Rw*T4Xnl$vxJ`q=h8>}TG(Tx1_;kt<-vN(n!Sj59 zhh-P^WS5)t)CPnV@w;LCb1B!S)BQM-Uw9#-dJc2`C7GA_>?@gky~oMnbaviergFlw zI!7f@w~X<8-Let=8|M%rwrpUlZ_FBSQShACzv&8Qz|-N0qc1N~s#u~2hcXl`06yob z>#jk+UIe}J`bHKk)93o~V$4||poKOIf$h1$FrdD-h5 z8ZJ446E{jWz3;Vk)&bgrHDZi|(Dtw7TfW*bVIOw**&ggfBr0umQpof3L zWPjT?HKFD7+}qXU_ob-twG!bqpQUe49wY?S`4q&&R+|cj@o<=@k-UCkgZsukz-;9^mzu} z;wcTA3RPM?X4l?e_=@^4dv~RdJ%ik7P$zr!i1!a#64taPj*lJ8Qo~p`XRO!NP{F|Z zc~Mc3z(}_iWk~&)Q+t2Zl=dEvj+Z5AgZ86?TrjVCb#nG3civyn>F>F7P1sH1^MmU_ z!F!@Kk`7xt6UHaHc+Kuq`u?q}LH9dKY}4SU~VLq-soaEBiw@ z``Z#Z-Q5?=IQ0$eOB<_}_uGuN1X*+hR&0bu#WIRmRr1a<)NTmjmc7f%w%fTW2-<{|C?inNd<9|$7W9XxRK`z zkX>ZEAN@W4LlZu*zu4m4_`wS|Z#f%8$Ufu^xCnMP*ojX7)w7<=#I)FI&0ikg^>+0l zp@~stT1@bKy9DM|oyQ_>_VpmlO8@+TR2?SfZT{yQwNLe-PnGSg`BH1pjdvWHJ5BIj zcpumLqwm*wX7%L0F>x)t;|6KkG~O)Qx&ofCogCZbiN7bk2P5{p*4K133)V#b-E$3} zGkb2LZ6~deZ03A4$`U{FLn$5-%6Ed;`teFloie|v#_LDC=27+azAH(fxDP&T0tiv@ zNOJ${fk2Ja(&E$?GLQ!0P0+?Ef03~CioImcFwx=PU@39yZcmY(Wqu(t?;JwRahkhv zc2ZeuJY$>@Xj?Y%602gks&+-74Nh`rcC^Z^clAzD(_7&~)xuDgR<`0{GJdSK4F2J* 
zZY{6UWFv}aeO-nQf5hZnyx!S*aoV{X8|wb?TJ;t?I_jBD$USo-O!HaiTI9JRm?8tAljk&ptm`O1K!w@v+$BIP)R~~%YWRJh&3fxBeTm-QdW>q6@ z8Sf-}MAL+E83EkIv2BqxUw}19aQBf7 zW$t+I>c?c_${f4B_&l2uzu*(uso||H!SE+$QRsoS=UrFl7t#G`%+~jU0%~KkJGu>y zMLt1RfH+~zVCQ9i3|F_)z!`?;Qa1y+Sp*=nw{&z_AcaB-8P!)~6owkji_P)t!qPZY zYSa+jxRjqc_P-SVA-MO!%>fErPztYy%_Q7^TFaX{+*r<6K?)8nv{;o9@l;fhX=!;4@JYpEg* znKFdko>3E9O)g6Kp69RO4YZ{6VT?m=>YuNhs8p?M~OEK3*E z6`Gm$^{aczmy~D4G|W6+RH$G2?_D=>;@~&-Hwlo0;x+R-zE zIc+9-Zq9jJh16&!<<^hS!K=2qlIm+W1z?OUKl!=LPDk@2tV6!8pn(F}uV3pwLxP77 zYlqIA(@z^b+j)`~#cmG50}r_ivqV;z2<;xhX5-JVOZq~pRSQ0awlEybh8P|HG?8n| zW4pPun;o*qB3=WZ3ROnA!BKNvOa3)e(Lc3F;ro}rGRNxpE;1^v#4AG7-CyTYu7^z@ zZQ=7Xi_v^`x}S(FQ2a0Mx@nDHxs1plfuCh_G|fLhDPWweL)zl&B|U5SM8O~{%>NYr zHo9TVVMfABJ>w8nV>9S?bzvR;(ELCrOx~tb!^J_=y{n|}ZzCF)uJsIoZfEmgr^U&( zhS_ECA4VC0NT12^XDS}?N`-C9;o%H5lP(?J$$aek{n0TF(>8-n^hNi>I@nb6$7a2_ ze`Q%I4RA_;kNZ+18!Ncn!JnR0q729f*8 zdiu_oz8$&zOY>_7dA=Ys<6b6bS`4P5*75|})pT-Y^~{Dh_If65?IZoeo0D5Zs@Ez~ zVXuunbCzL|4;8|Uw|aj_42wewh6dy}fr-gGyp z>s+fBZ7S6dX>^Cfu~jV`2OGu5+^6c+i?>F~jyhAGuEu?xUsXvRLwzSp zY?UY`?zC%gWH{bA`H<|9%#zHkgM}7cK}B9ud5Vb3?6L1!UlPq(c?vG8d5hN4>NIXw)h;?-RoD;O}?Y(wV>pD{j!OpddM#iJ_(WklQT!Wn z0XCpNAuD=Xl>A;%QE}01P6CAKu7^%5fAAQte~2adTyEI9yVq#{afho_B}e|+Yk<%A z-$9e|t+bopt+buh+t8nVsV-l))XP9%70z48zv~v=@W-1PY+zAgO$pBK@K)GmJ9fI% zR58}Kp3g$us?m%~zT(%HdD!g}d29AIh^r$4v1@m#e5Tk6_RmC2n*~vcsQ23&F9lu4 zuVJqIUeg@4b~7$>Po(hgK-M%x8r-(a-ac}%?Mg&ZqpKpR%8pw9#wPCF!mF*kBwxi3 zzjk%UtB?AJa5=Z9e{1c8Awr>f_zi|tX@cl>2ff&|cCBHtiYJn?!4?A&L% zVmOoP`x|4N%hpB2QBcc+md7G#JBfbu*hObSEp|^FCTcFyS|BCVK|P0Ov+(eS`jp!8 zgu2^0M$%#@jL^B*p#ts-Dc25i*N4kBM}5Mx7i%zMufItCvKZ$a!!En2fpTQ*n2`cu z=#)GadGU{Wzg}t3b zdzRDIm~-CIv5P&*CCBcC?cd+ZqIaGl`3s>is0c711m+Nlk#E+*TLcA~uL%-x?Z-Rr%294K)bj&@#)Bp4l7u0=;PztM<@a1gD)28HVL8+q6g zcbmgl)^=?qVR*iZJF64z$q{VfQMl~Va~SbZLpXy|V#m-azsRad)d_LY+1lio0=TFZ zKlW6A(PG1SVnWlY{pl_!;pWq2X5rJ7Hu~>BOQRAoGzlLNo5`pDMfEvMLri+x?h(%v zT`*ub`ZHiBe5B`wgrE+DaOk%7mD1I0_AfzTH}En-%I{f-sT}?k3<&wV{hsI85Bzjh 
z+_dMZ$GT}20)LGsW)LAGVe(si2mTfK6Z_k1wZlFq1y-XXJ09RJRzJ{YmRPdS_%O0p;dk}D zp{~v#^k6B3!-Wz)(GlEFMpbN%JQvhx{$h!jI>p!T=CTT*<2v_fjc*W@3lGw0J=S9Q z6NKTs4_9>S>kx*ZLPC^y0k5O7LWgkSUc~GY0^t@V^MM}Y@a<VRd#6__x@zZqCr?K_|N?+B9PK?_YdBNASuc${x{yD;@wrm;8iK&0O977bG}{F*_dJ6 z)%{02zu#9X&YpGrxGwUnqoF|)%b_E(b2ICW*-4o|+K!pGW#%=jONI9am|gkkWzAi# z2}kpVvn?Y2TKilR-qhw0Y_8G!sF`|`XGQmS1q6}=^PT7!@r}edUAV___>!D)<-uYw z#=A@87vI9$>da1}+bi~_*_IegLgo*n0}n(ed=$nU-^{*aJAwbN_P#T!$>!Tv5nn-6 z6i`G#q<0XIUV`+15CH{|rXm7TG$5UT*dX+dNQu&s-g{9g0@8bzgeFy50ttjWNI37g z_nr^;^Id<}G7E+$Q}*7o%Wv;_rdu5O-Rmb?veo@xZ};~-f9!L=-+yFVF7S=o=#>ka z+Jjgn;!Zra+^e!?rp%`UCmXd{vyibuO)AxGel_c3#)=Z)l*yYt{=oi!?J_+Ru@s;8*u&Xt%6`44deR!zz$}Vf}x2-)vrg>I3Zf}SXH-@lE!_^2Qp(~y0NgA#1p9D`53OH%SD(q(G=J*56 zTIV3$bJ<>(_>4oEqWf`!{_7(9>rx?l#d)ZWLN@}-BaB&sj6LBUvwJw<8(V2PYQF|n z<+wjnU;a2=?co;rH${)yU(7TYc35k(cYSSY`5Ip%60<|M{50guROM}$yq9d|wz3Mu zDnk2Xc-?*`c@5+~fAk)kn!VpGO=UOPa49)#ZO-DG804BYjF^z56Z5aLw zNn1+!{+7`c;p;lF8U#0fDf&@uJ2O10KZo-fy7Y;s+pe-Llji+0_o0DmLLWYhV0aT^&@+k)^~m0KA^?X|5` z_jX#B3Ma9>Qb72y^8Cu$KDo``O(RslSTG5U!)8`ptB6VxC;zWDx1E;9mRlQz@Nk7o zL?_|8`4Il~w%6XK^00GsYsu0q@be2c`>Bb2F_jM>G6qoP8K+i!xH@t)5Zp7CyiC~8 z5lJpLO-I=AF&oVnL1H*O)`@BbGwv7aG9t?I3KS7$62hg~weCMm{h@oi#(o8UScL2_ zd+zSr+U@dF%s)pWRc>1cob|axMTh(zN1RCT$n)mq=U+&_^jnM}bQMvNe}`G9hrZb` zwpG!KH8!*#XQ$E zLeLv}Voo&^fLFY}C8jS=Owzt)WNz*8*)S&}h9`Fh5#a$t(-kZf3kesw4>JZgb)cbT zskZY!er%@(8ut_kw9ty%Qe3}rA6VR#xNnmxmgB^b;r;Q;+2kjlzX9y9`|!>l=hN`0 zY%XL1kh+t6yW>-$mKDtRJM*XRhS`9PM^mzZoJlpi>Pgoh{GpziMxON+gb21OquRJc zQoJhE^PG4Gm9DxOeb%xbDQ0er26{^+@mSYTVO|4uK}sD>Z-`W1=cs0&R_o6RuWK?A zahV9w(k|#^-g&mboGTtrc2)LlZ4LK4gZ=T+UUe};jbp)0iFH6r4A+JT7&StKFr(Ct z+^~V$u9cir_1txKO(=W*+8_m6Dsip7&EWJ;BVe%MAO8Xb%5(DDq=jwU1M-EF-`wr=2# zfRB%D4Q-b#_3K8SZ=D%z3KYl^`rb~szo{k7V&`gB!D;m6wyFHE*SyWO?^q*3M6-5j zo>L0-XPHz?Pv+GGBMD-a_?Fq&#`w?SzO~7}er-F&60vOZG8t;C%tfsCn9hX{w}9Pb z$mjk@cOCxW$rL*aAH>@l1bQTClpiCN*}}`d&KuibbFUc5sf`Mm2iQd~i4<5|cPv2-Qf-NrRg82tUuqIE?{)3r zlG6^ZTPmB7h!|--ZoO7zQVh_wl 
zA{VdN=qzi>{`Q~j9I2JQ23a|gUvlf$f`Q4e+Lzd;F6}b%GuIqfzly)KUdDf-+UQnv z7tt}N|E=`;n@}lz%LS z7UYv8<&id;>m!5w-AOHh8IVkDzv_FMTs7ns(bAofm|Dt+O~|5UW1T(0Z9Ggls3~~% z%Vlp?XMM&O!ipPV45JH+=0iCZDpW>BBdgwH|8F6-{e72+d&%cwCz6NFr>?!F%Oe;8-B)`AziA$5M;6`u;`-RkYk1l`s!<` z1lD_iAYc#B5p3rLxxcsSPb49k3sLlwbsPvCX1yFB3<9m34vIikbcaV{SFL4OmeeGruY)nGiwX?%GucCZ}&u7EmuzwGzM zQf|kPI%96IXwKntT~~dWN4G>*5>Kf!VxKB9P#k_&?$R$AFL&9BGY0^`f!`ZWmOaXD zC@J*@1hPO0`jf`!SUbh-a{TZ+6fad?88|svq|I}Y8u$-F4Hf7)iF3&BQyp?_YJVB> zP|#;^6>p7eJXO2owINlvP)jUU>%90zbxG zI-|4W#Jt#H|2CZU%%5yjVFv;4!F+BcADdhgWxL!+lI>3$VA3zWP7it>V?Q_Od3v5S z>2Vs@z1*x_T3<8xb3(AqxOu7k7o&vqvZHRekDK<3W&D1cPipuDC^sd3KNBoH>a;HN z;hcOZt5<~b9Lz>w=ob%rJ6;@)IiCU^($>D3aIn`7rhbtk-@b@U78K3W_>NH=jSz9n z`&CDS%A-Ttp1-dIMl5;1eKUwOLmSrm%Jz@>xGy?1zBM1U|2agB0J4;~*Ty#lGvfFrd90J+ChZ~)Ad*d6V5y;@boKaXM_nH>+YUMh&>QuL3B2lhp%K{&&qI{}w z*v+Ooz!*X}BR-Qhp@P5x{v)ho^%{r;rKIs+uo_5(4#0Gn#HP+1O)$dJ=Sjjq7J8Qq2YvyQHHnVhTD=_!J6vw^e zwV3pUuUsT?n6Cqz*h68`)YQ@W){oF^e>v7A3f8bZ{8ohV@GIh?I zj?|MsX?@3NN%ZeB1sh~J;i~(Q7tjtiPH*61Xi~T&V~?$<{^2N>u7fR4rNI7QQsNIp zwTSoU&4ZZaU;zR&569C~BD%YCLnW8y8@lZdzc--q>z+53c4gvC)woO|VR2=t-7@E% zgcbb}@jmc=qO$aq*tpAk`iOThfX)7HQO=#Z0Td>SycTKQDn87_B8l=mrs__>1vIVi zXq6;6K9Ds2i3``^lA*|_A4u!E1D2t8=f#b5z#FNljKkH{-#NK`;$?f z2Oy=Db|$$a#jyiMkL?V4kP5!o^O}vsiwu?*72fC*~2_K5fd~*G;Jv+F5#v6~Z$90&yNfezzty~F3;3j!HxeQ{^NCzOBFCFjblu5hFTF6Eu{M7-)qAlF%ED{B-qb?avO@e#PneRRD{Q zW?DQvY`@9&A{J0E1pJ@FRre=UgmImV(*bYHJp41iBG`%oieZ;Yy>xMa9b9?SXnBbb z`Q5pLs^)MDe<~r1*=t!GPxQrB#?{VAjF$#k7MG%4o;jKh0Kw!AHQO;VC0dYgvJ$P` zUJI8ruI9NwVtNTM0$6u<2FAmoYiNGmGx=B9A0rb35|HVm9EJq?lWYCO(YDU6=i8rl zS|R#k0UkFwi;|QbH=X?pJ0FN(*A*ZEIkBguk9e8I4yea_3s=rS<13eF{Qgh^0pOYU z2l>559==V8scuhz^CG`<1|t9FkHEA8E&%g?38PMT#Q$Kx|MA$YQfy?yBO}ZAUM#Qb zzfdsBIRC8Koe^?50P(41|GcW&nT;C<*HY@I+Fh~EJBDkVu9%|kYLPcS($-jt15fPd zNx4H4a?Gdy0w9f3JYnoK^1FPept;g9pG-1Pd|f89TD{@#U|@zQ87(kZTZ?`3==2M- z+le@XvGoUKxIZUWOk%1*4tBvhg39sY5z&Di1>)}q>pcxD}ay{98a%xW&E*Xo=I+7tWVBY&* zLq8oE>ijPU^Wt?8x(YONo3+Iq#Km$@TQ#miZ!+B>i3z*`-eS0wWe?u@i(p#$5@=&Z 
z$-45{y{x}$CBU86A~R;EVd@0n+^HC1w2a;Nfps!9(nzlq+78U9t>XMSl04rO1V#YU zQ1cuZrq%Qh$Fa~~m5aHK1}0ATgi`>KdlS5gJB^m-p3+}_@Y>HKV?s`(n2?mHVdxK0 z(4-1JbQlyqjgwV>$r|5KS|v3+TmviMpLl6?0m^vJu8HA!`pF3*wybKfX2q%Tvi$x| zXH~O2w%~v2c)^01cmY=*c50m26hCUe_}({)UXo-O0lvx!;u8y6MVF(I;eg^-xK|2B+V?DcQLn9@lltV+y;;p(r?}Xldx+-D+66T4f9cN?S zQ7RKVotKIRwYW!8{jGz;HT!W~0(+aV*Nf^tXNa z)IS0X2W7^%zI6G1CWu$HB-a+F-9{iwaQdFiR-Yc0_@ya|o2)b(?#h=%KzkzHQp)m4d}Ip?E#e4U{gw>U~~7S2m!yoXDEFM2|5Om+_fv z?(?7T%v}g^ee1D{VO}k@&%y1vWANfDwFxt|e$BdbF0wP-C5|B1T*8oCcPLe741h{J z>$1WibtqFx7r~y*5Us0+qn`ZTuV%#c7#QB*b>e27ySaBRDC7>4l>aQDOZ@fq4I6k> zHMkd&Z)T=QP9l@qK@0zJWR^*ZQS))ba$AYVYE7Agc3$0_+$ztP5qJ4+2VF1Iy#)iV zMU68R;?}gIWi`uo=8HBEPnwNscE`~&o4gul-6He6Vab5Y=3^fkP;=KJFK>(~Ih&fT zhsQLGGe~)1u1?;M4S>$-8qTR<>CHUX)Mcd#KsDA20l_%w=ryg8r7?9w8Tp_rTuzSY zO6{K(LhF6@;Gc-evi;4^2B_T$T+QY*@jEpZkr7Bu(k#+~zvw%@O$rdsLAut$x)3CF zpztXX=*6vgZZXqWT<7+9g|3oCklR=O=5!ghh|%KJt>A?1H^MHOx=y`Y36@#mZvD~g z>Wy>pHt-xc;l}!GW40YHyfg!Cbg9NLMXicu)2Vx^BUYz0nm@GJ zVXyVHha~x!%*Vzw=D;1#H-i(Tqifq}s(1EzkP!XgMrTln+gRaB42anIO57ui zTc;=J$43*CTboA(%Er)wHJd%#eGTTQyG|&=*jzY_I8u@6I5{GnNkI3ah?_t5R%x>j za+tV=TI>{B6S8{+_bcoK@m^{j#_V2YDz(z%Ge=5L@4%}}r)|qWQX$!ZcIMXQcMKkS zYKQSrT)z%u-kD5o?F6qohwCQFhug?c=~Qr9%w*=fdi?S^rP}&KPs2GPJr`ZN*^=&- zcqzbK=<%5=o$~K4;s&p^^}ed?&y-lPmUim8Ur6sgnkZ|?4XbH^@kS@m2U!w03+9Gx z8Al;I@e48Wupk{mzTZw9?ph%(B~ZAerT3k4H71cY67PJwY~0z1BOAsV+@J?>Ti2N# zDNP^b^I4~MnpMU8M%XM45x$wm;Cu2u+Y0Uc%KBC}hHLr$`AjVQD2`D(X^Q_mIQ?8> zXtMi47s>qu(ULgOV^24dHZxvbm1|@mKY!jQSuG@(olBQ8@h;g{PDS0kzCbfe?in#S ziw9J@DK0u{@N?5lF%v={ExNZdfbmVk?Vx)Eww62AFw^5a&`Zz{3*woH-gwP`giYA} zVD`c6cXYwDzTtEc&dX1=Qc*n;Rl~Kzq$0=jF${%=AWu+k?QBwcNX-*z#PHieZjh`^eV9+SXvoq!3Sh&Q;dtqoOFL@8@D z7EQ)PYp`0;DC5F6L$zq6-L-L<FL#OV~Vk_UTZ>+6vHo_Z1=GngqX}G zVvE!7X$*_V-zpAWm5fx?mu^z&L~U9Rc&OI~=&qQI??!k7xI_!+7CXpR5-hk1oU1a0 zysz0aRUnx7iAV1S+kzW;Y1oL@>tfbi{9DCPn~ovweaZfhor7n-6%z&q)+h~dz~VSh z%g`ZuZtNtas$k^gTw8oiXU<)dnPgt#$3f)@9u}+gSIatp;Qr{%!pbl&VqwjL>3adsYh^tRLSfgi*7pPdR>}cx 
zyK{fqM<=d6U74B|VJC^O`ssZ8h$mCiXUJsX84P!^6v||o=_h7dGRCW1)KXi&x5r6T z(bsJ^&QN+!)AFt?6?NvuZ?q_b(%H(^itRW1g0i?2E;%0gds2xr)&-nRH&%GEHMnS* zZELJyTf^&4pia$U+HbV%IjU-(NKm2{w;lR5L72c*X606Xnb-MZGOGXO_JxPvzGlO1 z-DH|LXJM6)HaYg0#*Lq%osr-3ccx=Ox>`Psd0b0kd>UMYT=>T@vDTwEZsP8aNj~gM z6O694w@}1>JR3S*vHe3$9-X)mKWBA$E+TpAw}RXw%*f&c!HH4nH8H8ub6EFD$bK_5 zF<<885(^HJh&xwiz1LJr9kX^6kolh6&C(e6Nj|j~M`OPUG~3uzSeMiPWdm> z!(2nUj-Tgr_e$(iTdh1IutiM;Gs~BkotVD!6Z+&N+vHL(C3a1)-c~&qcaxB7D3&Uak}-H&Q-O*ugj|-D9>xRA$QZQp;R!yCc6?XDrh| z>#G#&&~zB*C|I7{M1DV^cJ8I~nU{=5LBt0YAhw3Z+;KE7G9%)Z9?p|xrucB4Sy
    |o{ck0- z*NzNuGUiw$NR>LFy?z($vc39j574&0Sj(|Pd-@VqzaK#OJN{Iu{`AlwT`S*32yi#0QJY07YVvJ-qGg9ARniVx+Q#6%;+1G4 zd#LFK@)}p(5Mx>U`o|W|ribQu^1!O~R=#GZGD(PyoY4f=m-wGP_JbxaCNmdP93~)V zX9eqa9dN*L?X?>#?*`>vEj5cKs}2$quF3k2nZ%J&T?_wXwCA1%nYWD!k1PgMp0cd) zeOZnz$&|NV%&jB3TXqtZlYn3CY+ww3R|QTP7gzRf(3`q`uEGS%_-_!sqy5dL^79zC zkgB*=gW5t}&aJNxc9hc(!a|)s(>&4U(%?;%CXWLx3!ka4^)GQKXl6Fu{n|FJ)O@ZA zC%c+pkHPDC$m)_p-(zGZ^dQnULEXZKkV1kYu-`?qF3+fu_v|Qlu->pePACqO-02P=ol5_oS5Z%#roweDfD7o!gs z&3mia^MhEh0CR}W$NWKhC+ZFd>PDrY)q8Fh>9&tk=P+DUGc8JTLLL>XGh1nEQf~g; z_Rc?@<`iwwLp`R|?X-gf7bJ$!gHg}o;6_SPM4vbzs(pt^E^1$qCnWJbM#gjz6yte^ z8f3e?{O7kg$y)sjyH#}5_bD%mORM7o0wM#3E7S83`xI4N=Ei9vQbdN?0DOXGw$r>ONu z!ky{YyqRqYcKbh>KiQ#J@V+Ga^L|1q)y5gB43V%Mg~@|l1>uYYM4raC{c5k`sgt&7 zrSKD(s{wHqR#=&x=^|TjWZe!*$kd{_27j#_eic&+)U05vNG`Rz7ubuQ@f3wE1JHlO zwRs=iO%Dt<@9`RtEcKLCwN3}7qr7NLN!L_@Aom*U6`d}F5zwB6l67%j!xXyP)`{w@ zI1%Lh9_ZsTl@l9~5;h5y)aZ2w+05XQP_L2Id&O&b?9M6qP&v5k2d;Aj@shw(e|MhG z1aO3w#v?_plQiRp0}!9?x*+T#fCQub$doRIat))|$BPI3zC`R_Q#f@4`?+X0d?N`$EM2DN_d1d+ysqVgH90`*MT&U7gW6CPYRu+o#bZfT7+N1pYr~ zRetOEboWnmi^Si07)rLzs~Wc*x(e5nJI(?9V)VRbE5$fQ8Y_dl`6y{E#kP8QP*OvR z035xtmca7(O#>R_T0~d?qtchX!!^R2pkf|cXBvbibQ5(2`^~Nj-Qx_FTX8*PnVos} z1{d6}s&|EDIbaEsPLY~OOfl_`b*H)0HoAH}a5R5XlgDWq*Np#RN$}vXz!1zZBf^8R z7-=Wl9^H{Ln9MU~eCZr@S8Y2t<`8)LRCj=9)H$~OAO$1W53ur)cSTzOV;m?;nm%@t z{p+Web5$|ypE*x2muzQp5qNvxlcSt1F_PK~(5${iK8&;jKV^ukQN!h%rU_5{ql0io z14T1-J={?@dUn)J^pfv>`%xOjOB-S_w)WSKtJ+zHZEy+CRrnSQWu{X!4sf=X?3P!2 zh&I-gYfpES6ihQNqjX#LJz$X2f_k_vA=RUT+Bd)E&xt&WeTV%Y@I*<*Z=Ry6hw@d) z50H|!BDKk*jKwt&Va)0zYlHS11HUf&ynUs$YyJ9Rk0~lcLryr_{jttREe;mlJl2E( z)LpV?4-)$BzdR!hO?aYVY#L&jV+j;A^3j)0Y&=3yFzQAju*36`oXZE>e^j{1PT(2w z=4`Pm1~0P0qgc_Y7suYE=NQgw)0w(3a(~ToXW}dTuV^_f4jI&GpmgiXF!+J~4S*!{ z$0i?^R8X9O0T+>BB3|_I;SvB|T-n~W`YI{})iBk17J1{?aW_QL-b#t2HB0k4dnCII zy5ZsY^c$KHu1ej!SCNG>E-Q@CA6DMFlLg#PvrKRL`rA2|oR>#ZV>nm93d{_6f}JCj zvk)43XP@N3zBXFuQYIJ@o)SKzEQ9nc1Fsli13uJAPRQay`^s8|;IJ+YLmAcB7$~KD zChha^`r_qANB-t@nHYCDYevg~im=L}z5?X^f@K2s8_p8Ocumf$cvZ>5^C+Z+AK-dx 
zlqm#K!;K;k(uzjte5K%Tv8>x}1z4-tN$l$6>h#=?Ft!6c@4zo#0q=)!X=`h1p;+hR zgdEgCa~X0=OKH0O_O(4gm5fSlDff_yL2~gVm1>o!@FI@5Mggx243?obZ2g)eH ze_7)58kv=vzJV`3<@@ueQr4KVtn?Y*G&W1d#5h#9%2S^gDm@h*>!_XA(XioG3X<+}@F>*lJs(zZ`q#NZBdK06SzoSN$^6 z01S82X!8(h_PIm>l8{uKK7vQ7A^{)>_X3cLM}olyR^)y&*5)SNe^1A}y!UHo)f;3V z{-jjxob>(`9UWaMeFlG&^gRKJqa|NKpZbw{xdX8Av~OR1r4CL*{8ujKJwM2RDJ3GB zNEyj8^6g-heLH>45yl<`@XaEJ@1NB`Z<{i&)<~pCuE&9V0j!nQS7;N)h~LjAG58pn z-9_LI*(EUG-;vC*)Ga_I#cq~1D=MRgCEeTnr?HH7%}{M;L%-S6)j*hrNEAmtDV zBK@={$N_{iM8y6d7Ur)N;AJRC2FFm| z5hQ^&_bys(Z|y|Hj{*zpk1<6~h#=dJ6o-Ml$DXCx#yH58-jxVM8>USxQ--H>AS9?5 zwm-Wl_yXeA9rBM5F$;+6{Rc0OVC5s3IYuT&a~(dE-13AQ6o`QnvMloa9clqv$%A4H zB0R@GQO0$@^MZ!H{n#kak29blgD0t;ZbrV6yd^5wVRh9l?ve2@JU-*X=6ccQFt%Wq zUoD(K!9!DpgQwU>Ga}8u7-ld9i7};1pECUEkR5A1AIAQeJYy&z3c^Mi-PPHR7$}pa zjNqkB2xxnss4+%aP|{uwI~8(`Usiu_HtiOY*^}A0xTj-es{-zAZOXL33PF zo5&NnmE00}qwtw*M0sYzIb@jZ6#995QjXPFvRwms^_ch+1`^Z4JIUPZwOjFwb}#_b zWX?87bv}_LFhny`S;jo284xXC{>cpz%x5 zp6M~u_le1bc~>{%*o@l9Fs;XUaF%p`8uPwrr`9$rJAJPUVW_$&M39s(-hV!>>0PqB zjVKQv%))**3lpX@k0LHgw$=#f74yhJgAHVpiFZft#AW9c+A(O3(Mxrp8|-fLuP$1e z=d7rqRhNH0FdfO;OlV7Y4ufrKI=6Q^Ic}&f z^w*yKr0Hn(5ex4JxLT@hrB;IwZI zGLQ0=$k(u#9-=ZqQU_CXHELSf5TX&O9$+l9ON9* zNoFFVBSXp6OI^((q3q=7LN==XQm;*^l~Hw1tuNCv5q58<-xW(f3%%VJ7#T6G7Tpkv zD1l5x)Y;=w)yCfEnMR*ozPs3HF+)6^%T~}PJ;lArqun=+=r4V(HEcho+MmK`UVdU- z*gz{4Wp>eur@zG+Lf zjd1O)TjxT0(7GundJ*BgG!0M8{XMwP^Lxh1KjpnyMhN{TW~2X|yFknY>FGb>Lj*y_WmZt+bXHXcmd9OcJN(^*Y$r0wjYHheL9U~3)cck2km z@&;BEYbnS;N!qo=w-qj-XZC-SRW4!PZQFh}rJpE5#JYxMJ@6Vc4L_f0U<(ygUU+-$ zN%Mp%<{rVkl@4aRt=nUmc@fLW!){=UPLqs@(&FMPHZ7%2#;##%ZpLhhQOsceG!tL% za;?B5BT$2jm3iQfFAI=Q&EXT+cI;se616tV3X;@+$?wF~1j-40{DbynGi%vxr=_ow z+0k9r68zD7eI|mwXD)x<6dD`}GgVi-&fiE_EEK{Fa|so)D~M%&f|)jK6*`D(8w*AH z5onn;uYC3HFp3HO=rz2pV`)m{WlMn#;3w0cyE-S&@b_Fts|G`Qi+QNavJv|Fb99ZvoW0e6j3y^TvJ` z01+?z2tN-;t6UmVqZR9!T<4)Q^c!s4Taf2t8?;MG!%Jz>JrNcwU9q@OcRTp*B6KYD z%K9mbit>Hp7rJ!=rOsWr6Y2|d*<72!V}m^FxTp_3(WN|2HUrECg25M?E;J5wj)e5p 
z>hFuCtybOEmraE4`F9RJQ+j;$>K=cAwxPUATKrMTB+0+|bOMl3<_7fTUtjf8ZT$vu z`_v+R8ar9m_4Aj;0~$2%^c2S9L-S$$etO z5`)m;G&F0QwWEVu@WaiDjh&b^J~+T@xJnW03#t!vqv%4MoK!q5h{o{oeKwua9>^$k_Q4Iia*mdC(9sEW37uHwVj{|jClCPx4O literal 0 HcmV?d00001 diff --git a/doc/source/install/openstack_role.png b/doc/source/install/openstack_role.png new file mode 100644 index 0000000000000000000000000000000000000000..59b9e999a6d1a1f630570836aede326061c5dbfe GIT binary patch literal 149316 zcmeFZby$>Z*FH>xv{I6TjVK+1z>or>(%m4<(A@|Ks7M$f9nvu{N_Q$bguu`xGQ`j@ z^uX_SKYKs#-oK6C-{0|lug5`n#}(^Z>s;qLuWNXvs;odk%0!BTgF~VC=%G3e4q-9y zbx1-4Tv3;fp2EQ)r?HilRaKOgWmk1|va+?e#KC#=Do%%3^L-b6T2nUl6@7L*b(zga zZ@Gmu3%<9=Kf-%)h3{Ja*k!erib#^N0WE66QM}G)Z6)_f_m8FILbVL_FS8qvezVh&LW-0E7}>8nDC+ZCI*N9Dw*IMC>3#e@mO9i z442~uc6Je~^!(8=RQR3lx#*dRTwD*03y#z$cICy_A`hrRL^z{Af0FLvu$Rj=3>McL zUSfN$YeaGhPpbWvh*7xEEfKA7SdQ@L!TT=*aM(4*?af4S9$%U1^;Z^slXY|6=FZ#> zA+?V5+Se{qw`+}5pFNcO$X~{FQ77k}QQyHj@o6ZSe;HrN(v6SQ>^$>Va}@ULJJ*Xo z=Xx;yssnikSZqF)>6}cnm=`5CQWqjS-U((biA{0 z_@Pk}FR#8;s4eQH{7$R=1N#+2XD(9-AI8ru{3iF`-g!*UJm0){MBFK2YM92Z+(!{w z)Wf5{VUUu{daJ6Va_39vCrgiSlrMM8<^Ah+=J4K+;QD>&O1pY19Ub6ktXceqIXZ;5 z8Sm`*rFM8u2qQbqme_If@qEyiw^GeSGO?82&GIFT+mB+SN#?*rw9h54{lFc}PfvIM zK)jQAhR)aCufVnaKImVdK<`cJLRk{!H_zsM_4K$I^}Ozf*FL z!`B^(uV>wMo8UAki!2O6Wp!u%`qi#^Kk-%#nLG2B=ZP|tor&8+?|kQQEm}F1nOEse zWM^@2vkBf=QxEE;1cw!;I!jK`>_kkt5S38aZ*B9^m`pLFPy`i{{IoHyC&oSLOYh-l ze4rZRF7+jh@4(#t`n`3sN<1DAmaOZ3O;hgfW=h4FzO1pE|DCqF#0?-@n+gh zxP})E6B=sin<2#M8s`=ho#D^67H+NHGtxPv#8KJU+HyvF8C{)ni};8q<#TLg?aAUe z%7|MA-U8t$X3SE6oSyZcD=x>S3oxWy&)*Tu4&v8<1{c9>mhAA}h`6n!i9 zi0XSpy|hHu)yHxpYDUAftP!=J8f`e3B9-Na1`};?^2ORfq ziN3^rdfD8RD1id?`ZN5|ea0tkFUIhS3GTHOe)cUUJnI{UI~IdZUeLIR_O(0AO^|N^_NMUN7|o{>PbU;a zrB|pHd|c4dE%c%Pl&T~DW9S4Wr8PwSu82T0CEiloAwt=f7S>IJTvi}BqVoJJ$YLh3>a9yzPV zBawyuBaiHI?Qrc3E3Q|h+U1R~4e`}pPL*34_&DG-*#F&cb2Fx)tuFn5wT%n z!C}E~&~|7qGJjYkcM#EhHTMm*ys){@0{g=4h25*ue2wPb=Gt!xKtn>PP?W9`NGZ+g zl~pK;0@1Jk6ov!453A5u(x)nSad@(>I8jz!WtTp(0V+sRYr=-Gd+wtJ;7 zs%NFEt#ABU29^Rl)HWyqRc#7T$Hh 
zSiJ(Lz+!jm?CXp?57tK3?o2RDRE;&cz}@FWG(`=)vL6tMa=Y7m`a@>y#GOu&+f}K{ z%Ja%SDHAD;qIDuQT99XrNXDcAHqlD5r?FNpomh*Zxcii zl~Od)ePV9CR6xO)u*~mr)-I4aJdiN(dO*P}-mDcK?=0y5?C^N&il6<_alsJtqT zI**-8hjV9&xfuR9ZSPT>P&~~>S*W}E?`-^R>B4;vR^CYcA(-RcjVQ3vuvTdhQS#H@ zFE<~G%PY$RJ-(KGGkZ4}%`@cn5=Mo+X}5-TCTgyRV&YO}%@PDK7tQp8sU;~H+AxYN)-ck6Z)_iLVWOB5m>&z*Pl^OA zeEqU*IVr3r{0i*xF(MA^Bs^1Zwfph)o7c>7fpHXCqFVO~CG6)NS0+}=Dgu)?D!)6V zSM65K&Rj0f`aDryYk$YZ`m8Un?uS?E`3PooVtS(PiC56{jj*2hX3*GA=aI|DZ{Nvh zyC^S6X~=7Mxv9G?4Zc#C)EC!}K6N}fNz)ERcAGPGXoK=tctj=qKh3qzbxpm;=6zRz zI3~|{Zo2VLTA92HKE#P?lF?Wn7-Q#RUd^4#$4#Poo#JG#%jsq;BOelVD>s;Nq!?P}aT zc08NT^o!h}+4v}~>Z7`^x@3ltyni-0us#(~+|iVWRqM_2i+K=ZB5@gV^xa_Z!+!Z% z;#t`Q5RnEacqb)|38$Y9EH`>G&wfa1ofrFBo~>?~NCZ_--K5+DB9kq!YzKy%glv9m zf-=8h@;{VV!VW~di_r%`C29gi&mYY1mSyUhJTmES()V{bqCZ|gbMD)6<~HbzHzwpA z8z>jVnOVbq9#4RS<*b+F=$bKBm$ulHyNh>zsb|Jo-GwNF&GX$oF#0AAv8jL&f%ypz zdyQ|$G0yOyOL>>v2h=iqq+A%!)z*&r`Knr+SbdyBP8K${34ZZY4~Cn5#LwRgSUO3( zqMpYF)}<17tbp(&dn9o^aP0R+91zcdD&dmzBRw}99Qs?ozHk-QZ*Jk>;HKMZ>bmPH zJry-~a^NR{<^#_sK4@8~A#4Y~fu6QaQHzb^A!XaD07 zcPQk#u97OdtdpxHyAZb^H}7=`Qg(KBaaRi~QT2!Nf4&a<4|3hc-Q8J~hsVpyi`z?p z+sW0Mhws6I2Ryv|JpBAzz!O|%Ths@4x?y)6(1ae|mCs`*T{r1bKel;o;-v<@s;hz^mfFu8OMKdRyA-KD2eP zbaVsykl+&(;urtpg@4`ppDzFQs@~sT72+59`>TJu_2;YNJin&!w<-O{UVmH#3?@M; z&hua6OOT4_`T(^a4vXzW6;0qL-mk?0ew+f|IR5h!_#M|FyJcDH0}hT9j^aaUO>f+_ zY2td?k?C*S_^+Qor#DdXK4J2sav?EvtH4e zUy&7T56S0vO-LZ!96~}bwFT*K*6Zs_9j|lS+-Pv~3S8O^o~{!U5Zg^1=kHHBQpwtM z5D+Awmcqfk@Y||h#^qoSNGYx{+rwe0*8ugSFq~iyPHlXFU>rOW_U9LT zlQKmJUv&MCYq6xbcxcUHA}hS#_5Zv%$>BGn2L?xnM}REQrRXL2P3O;Xf`uk87}S3T zB*jTUQmpZrDdEzE*7;>NJZR05GlcS3A?& zzg-`a;&Gzi-c-LPWt#eWr1|NEX8$!qYTYu5-`>oYdEP8aR}`H1+qDH2p82;o|9`yj z8F&QR#kyvVhd;=MKFN^{mD>)rKPk5#=P$DvygpRx0WJH+1ii`DM0B&N`@qg2ljR>BLS%pE!sth_GJ`rUA$`lH&7+RwL-Q!YtoU?qE-Lz%E zBtqO+8T1)Qi>3***s_Dp&#VwaqwZ93nE2+};`*m7ziEOa<~et4zB>EYa%UT(r@HI8 zlUWK>LyZA`2iAH{mx_D3d)hBw?2^GdB*nU)TkFq`(OPyo9Suoju=g)Ul8{!(4pvFR z4ou+jcM{5`M(WD9!+(PZ!>P?7gk-&YD??G;p|nH#RgMny+=jZn87;iiwH^YjVxISX 
z4<{%enHm{Le^dAkCatQm+u9hd6(c=#VMo{D+@9;aEWg2~SoL7_2qfmRc4Vb7KH6)> zr{lhHef9mKYPBqRtGVW2d7aPqEj6y~7HH%p2_bcFfb|-*XBz{Qc2Nkom=)z=IIzIr zfa5(rhl%no=1PgP1Oe;lK}kfAlK%VKN>9tI5FwT;1DUc>c^dCWONv}RRjP4SmAui{ zsH#o!GQP-t*bVSTW@&nnij6n}Lk_j`!rr;AgDZ?Ip?caYLWQxLZC#gohEJ!0K_geu&cMOv4j?#BtiA zDdAdRd6HcgzQvuIJ__LBDUG>cEe5s-?U?mPt#!Guz7)j@Z?2ksivgKb!ZKKbF~lH$ zQ%Z@$InG@cjJ{&H2hFgEfK{(^5K-0oM1=r85jlrX%uLf6rWExllDpdvrhfXlU*aU8 ztY4z8uY^VawNG?XI37CR!M%s8O0a6i7y6YLDavfx`r?wjPO^tmj6>aerB$j2(uK+; z@`9no@&_k{BQdPnk#Czt=f8cp`9hh$I5{fcxE~6ui1#_&?ogVnbeJ~1Rl}uY3QL5J zf64LL8&rrY5uH#qFf?sg$xTig%u>+tH^l;xPYgk|S^9-jFk6L_Gk#aqxwjkj?>;SL z=Gv$v1=9nMZ3lZM3bl$dKDZ3t_+TrAhkgN9lv%#nRy+QZ^evsP4?Y(4mm@|3P1LDX>vqTAd=brDIp8lB)`_{STSV|G z$gGQ_?XE^{Le7(q@{l;cwG_8LzLU+SppDIN_d6vK9@=)gG`iqySe%dCzhg+7bXc(1 zGZ4e7pT`f^-bBzSi)2pl4qy8=&ADvkh7ejvD;ZpHx#q4(0d08R&H2J0^XRdxX=?V* zuTwQ{QSnBLlAn@%^@@5G3r1y!p>Yd*BTm}R%#P5~2G7IH*C)x%wLi4)lD+tsnd!Jy zC*c#@!`3#b-OZwiTg2~(kdC(){3ZwRpj;n)ug^NSQJ^;JHlq!DiP^lH_co@*b{?=j zEd|Jr?%3`gewu)f& zHSwDE z$aR>ktg*EnON00&0a>I*@lGR`r75e1Z?N))GH< znQu}jLMF{tuY}>F^DRAlvNGI<^VLvD11Fda^~kOPdK(B!@JAz0J#LZ`{%6a$tT@$M ztz?Ljx($8atUIzB{y%Jv1w~@B172+11)bFz;D!F|N95(yJ zHt(Zug_zOKP=O@GNj9)=-Pz6hAqRq_P49(+l9ffd+iv`a4*w%6hM!8MikJjpJ+jbn zI)K!mtwcT7t`Z=J@>KdgqG66xf!O0jD@2kOSXVKv-@+6Yy9sa>Fi+#*v+M2gTn0%1 z@`FiET5re`BERl^izf4F8=^XgrPYvn6p<;lEgS`PGq2RfH9Y$B&yZEl$X zhKSY9x3X074(FNDhcy2VE2)fjyj5>qdtp?o9hgkCu5oOjRw0J1hLqOv)OFg58?JqX z7v|3iQtPIt8?(F%k0$f7G<<3b+tLl*>wJ5|TqhEwi`FE@7?;}WqK&hkeB?%*oos@+ z^vj*879AJjyV7H(nrB!shZ5(fyA2BOrnRV?jq<;!+Cbb;Pu-R?Bj~4##2qPV^+7J} zms^GikS>qKqyMo^e|T%Qj%oLYyH;KAZV_7ZvS`-@M_z)Bm}xIsVRssVfllS}!?~B_ zrsZSU92{#`azo+^xB4RgdwX}wCw)WR^>8j){d+v< z={m0sUw_9G*BM~O$gj(!7;YHlL!)7>EO5$k(Ehl!G<)7Vb~PoTLOOzDp%+@j@-3Nm zHyP}ND2j!YMkuv4`X_#|mdAP&kdoT|+T~BPr$>7rp8Y^WXBLLJZ%R689iSk8`%(`K zJu-Y!{7upu0UK0n($4;(ifvRc^uASKOFX#tY4m8c>CCcE;$$sfE#x4nlBdC>oO}8! 
zZH2ff$z@7-MTv^Y{EK?H5xTpH*YYRXHv0ukR7#KFhYWfZaBl&%OpgG7GN)@2At?)U zt4RAbelaH32P+N-1WWwarVto<0=HX$hHLo%c+WAG@2-YMZZ_F{`bk>-pu_nH`Bcyd-)EgtW1rOP zk*UfK5k+IoqAXj5Pfi0*)*1BVTT2%VNHiNX`^9&AJEb}A!nL0l)C$hS$4s_r+?J9a zxSK z-F@?iuXzOVn|rUaj3+~hD7u+bnnhzmCI77<1P_yO^sB$p)w0U(8s8!<8sV$ZD$@R( z%F`1So7k*BU+9YpPqrhxN8m71?{iqd8PpN>jtTw_56wwt+(!IE=BG?B2EbXm51B80 z6&vRJL>9;e%vxV?swfbaXc6gSng#mB&1thElJTtLmYk&n-Zkqhlv0YV!MAQkJ^r`` z&>$@~DoN@phoj(w*L3%Ke-h{W{TA9Ew3}={zkp|?3Q8EA2r`2yXx)E`Q%ol<4 z6doP3Zp4$2c8sakBSxJ`FsWoo#JymTM(xZOibXyyHX?45zb1M6s4|g zh$Nv%CRn$*i=~_JO}VqIBXdQ9vuF(~O~ zdTAcT=Vv(*AbpBLF0dvp!KxH{TyTYqV%%F*Q| zNQ@lAbp6em5%I#C-vA775FRrP4at({%90-lqYpY=0Chw&FX_E+-Uk?q&<%y?UOyPz z&5)Y4arsj?vxI-O4-neBxjKcCQ~OtRJZ~EM)oGSKF*V{;%nki_0%KP#mg2-mZ-t>= z8zsoJ$%SRlvsHwB^Xf0Ksqqbp=uxj$9i+&c{Bv|u{^2~=SHepX4#QAeUryOfA59gH zu+Q_kGtGrH#ksC5Kt;?4Fjd4zASD&(Ol-qLA&SY8i2N!woA`lOiuGrYPi{0{ygs2a zg@Z9KFW=}r++M(bPzaF_C zUFV%eMu)xhmGF&PTkZDiOX}0`iiRj(E+scKtV)AjC{j4fSdiu z_*fx{b&ffih$k4_D``AloxA1!xh+W$<2q|s1bYhjlR5rLS)XVRi)-gKp+x@{7m{MK zovoV}We!FHei~hsB*Cr}?;o$UNZupQ1I~_?5I~O5wiHz}1LBY_cP==I2}3XbJ6$|9$LHmaPp#7;vO%Pzv0Bf}0p5Jp)`HD%uIQF3Vyg!FLI730 z(c;S1SBdukRR=`CP-3bQe#Fg&bgd$NhP1up?y`LQs1URa@zcUO)0|`!1$fGIqZZ8# zB%u9ZVm96ezk@z3Obh@zmeY$QeN|$-0`lVb$eY~A6@aq8CKvOXa!~*(^Uim zk~c4yR>n)NlJ(btNl4jNB31ehJG%~}{=GMn7M9`y$aL;Uu8qtXGYxVeel zpX~2V6$67CH*6Yd;RnluH#WDoK%8DXyXoiRr|@yWp?X1QTM<*0N4*053A5M@FRgI& zOoQK+Xqc~FhY_!4&XZx;I=wP0pP^guzJ)oe3*`J@QR+vQrZ60z7|1>^DK2zK^Sw7v zTy=#7xHOnzm^g)8P>I&4aE!^puOgVi$X!i!b}VSB$)vAYzcfoK00_{1lP}1Fg^-1E zV|O*OVtitK9YA}^|8ly@6|~uKfC}q7lX7an#6MWg&pM58J!orzr#-r&3q4d|^JzD9 z?YdQ)JX+2=1Jp%Ufcn$#H-+8)CI4p1MkE0eUu}eI)jX!e%pO+kVFRS>5#)@|jQ!Rt>gT6GsiGeIODS&AE2E_*&YAV4jmMlzZMrT)O-)5a2KyP@{DcPZN+xprzHvFnC^x6V<#tKe(^Z~$E4V9o6V{I|NKP_Z$C|-W^sPSl&PwiBGZ%QJ6nifvTfa;=0GVj_k6A* z%Q&>--@FE32^`uPpMa7&tu<^_l_8Q|$gltp4VYA-8g4!eJ&Xx!^FFV6wh10?)|dWb z$EPro$?Z9gomZB`?qwW&K2O-V0~7;&?XPGMU3b!p>niNR9;f7YG|3zRqSo-Lkl>f@ 
zNDljGJKt1UDv010u%Ea0$Sj+njq0Jxab79*`>LhEn63FX1TZA#w7hp%){(XeB|}Ux0L$%mp=RKEM-P|Ux{ca< z9P8AL)yA5p2LM+iz{F#1mjj34XS^|zXWrB|k7L3%8T2c+;4VSnpUo3oL*_9MEAHw! zyaVe`le6b%2hvfy)o<;XbfLbY9Q^iUyfj*?H#RVP)sKCrJSQD;on{-uh{)13sBDZL z?afDkN<7E5aI8~Yd6d_8nsZs5q*s3F}}{ zy08ToI{L*jqFES+jg>`}M)r`tED&G!3bdNgM~@5{G#nt;geHZ;!4!NfNP9%GgV6ZvXcz0Yy+ zuct3aUx0471h47xtD0-a7>gt4e{D|DayJ3VL|R3iFwPE;#>u!{tLMX+lK?S)_-nDI z4+SDwrWMFu$+=3&OqjwE7+zmKKfTuN*i2#zK(GcS0{c#q)zqeu^jvs6jGimsZ|Qza z74zbrfK>)pWR1GJo3$Ry`1SXuOV0YAtXH*YtbQ&%S^=bOXM;&h!OQ>vj3n_UIhMom z_w}w2?AGco-A=Z@!s2`>Eq^|UbRJXcvx{nJ!F7n3U>h}bLog}79lS7>b-AQW8zw^h zNPb@WzzOe5<_Nk-onwD4RSYPx_bF5?jHH4wck@!7-BwOXP*8h)JnuvE^P+SBI1UN0 z`8!Tc{qnNvN+jf9s_gN!6(0I3FWZGtoDmP~~&umAY{>Ustq{VN5vaB@~KE1 zS;P?zhmiqgT>U4gnNA|kg$g1t#%y4WH_521)TC}n{Z%l9L3f$1>-5#a~^( zi~((~i;$%FeKMbX7-_qa#h}U5FL~{c;uwnT4N5q<5c=6w@qn%MEA0HQx~VyC!`U9V zW;2R7n_AxIeR5G=e=gPiYcIS~CL#5hM%5JJzN{)>`#qaeU@%Vwx-xITq+kBoIpjbp6-Rd4;%z&0G4OP2;9qf0a5*B)Y!X#HJ-v8>3_F*`RM;a z%m2gYEFJ^<w9h5#ZZr*ocp8BP>e7O$9f1D7)_4;q(|1Lwu%}3oj)MXJD z!#s(F8~`!*ph=4lFCvn_HFa*FiygZ5wckOE9st9A-APsteg`oh$N;4EC*JK)$l>A^#s;D>lnK}QSGDo49Y-KMzl1{1$B+mG?D zzFC|8Gv0!K0C5=U_KX9#%&tCHYi*U#)UdDk(@p#(4qn_n_`!2Z)ZVmk{uH4_*FO*` z*cV_KC3n1y0iwgy*u3R3)cZt$Uig&I>=LkRCXW{?Y~xTO0#}LpW_k1f>e2=XTpI?q zU;KuYbfB!3ol50;EF-S3zO|KBS;pT1LsbC)0S^L155Q^{N%E~QcMp_cDTL1yfiAT1 zz?2(Ll+gN@`1$_<5pezI&At_c2fvukCxT@oBU&2#Yropp`*-`qzuJG^`vhp;@)8SV zm11`=#OWS1QU^KD(4*#v z0&vxqC6V+WR{!hx?M+(Xuqny6IT8zCqN4cs66O9r|6oxdag@+l857_Va4G`heTTf= z6Cz~`IN_6pPR)OK*k5O2*)xIuWO#`b*ztkkFzR;Qzy9~0hhG7=Q+ao4*EqRgEUU? z<6mB$UD5T0U6l#2>30!+x_@}{pT~yPZ@?|aNd>S&PQAxyVF74qg}fc=p7;#{q}k

    0jUa&$?vb0_Fv0#Cb$6tjb%d5;U^^ zS?l0cV46kXXC#1DmDOC_-z4N=@hVE zY&M=1@$|&c!|24XA`IK`_^6^TKn>+SA&UwN^aUzhgIO$QHzB0`nu++y_iH4&1AA@ zVTDoMD@MEivq^^`kE}El-=AK`@fqmnm2@I-*z+ch?#?u+TMdjxq-BHS#bi9X)SP>& zs!Gof<~~Ap6HZxr_wTM}MwYKRy~&njwKqBJ^eZ`V!x>Db%-Z%ogd=mTD!orbS|N5WBC|j)c zSncS`G~=-E@Yf^Zm8tLU<)zXFTn@W+X?HJ7|v`V=XO zw0i7vpj8{?y--G@{AC$G(@%{_t!{*HMcLhT-wHm4%~7#2Hla<@)FABKBq}O~UaOAd zufTI_3$Pw-d1XE$2nyU++W1^PHONR_*fri7n6}F5HB<&lQZP4^OAI%iBoH`&Ya%#@tjUL-y-j;s07=N9iV0rH( zKri{KT`yS~B7jCx*te-eh_zG9VU*%cx6vSV{;TJWp~mG+p^}xvp%^h+m=IH9+WtvA z{&k^2!v}rfeF4*S=G%)BJeay8gOh}$Ad?`CHht1RKP}SD6I;`q#9I9W%(>oNu%uOF z)#VGZ1+RCC8G}JQhF1t;y;vF&SH>HiRR*3dtv;Bn+x~J#f<#6MtQ@epTziy{o|Hi^ zeQIiOvd9tMiu6tPuf;j~62?-V}4$ zoeRk8-()^J%RE9`;Gwh8cN~H-3?@7R5mWp6GY6I)J=vjQqinUW1`q6Nl1q%7Yp@bJ z8b^6WR5R66Jd6(l9CjmSj+2hux4+M2AVS-Sl2DhZcI=p%YL{RSiNEf%!P6cDRI$C? zZ4V`RcZ?mKzJFPgp(XJ8VYV<1;6j7pIx+LM#Wzs{R3GISc6A}9@AnBzj)Z`dyv z8_P_%H1zHJb5@SN^_&vin9>Ty^oht4?k%AGj=l+@fl zC^C@zIG$GCuZ+F#eASAF+HC;2LSEcCxI;FO$$HOlS(#hMIc2=_glQ}(MRuXcf+GOddM}nK1FV(Amm?3WY@I^qU^%Ezi`@ z0q6A!rsQ$5_=Kj(&7TVEiDdWc`)vu4q$Rc_v{wd%JvZ7mlU{-R)okGPOZ!#Uha5Ia zvKn_`kvgp967I=;s28bqIcAlleW8J}=&Ex4!#4#XH+KHo0sruneJEH+%z1nAEoiN1 z?OXNxN;u@k>&W6Zv7j8%Vr;B71P_3+p>lXtvWJ5u^1kcLkB`-65RK_$MofB4$k4kA z%_gj#%I*+i0{oJ&etJJYuyx;Y+%~JSfD`x1wiaq+uz7*k)j)w|&UZp_o4gvd&-UA3$kj^kUcEoC9O@|DDP9=8Oz z$DMAJ1uVBdB|yrRfyZ!%Sage)_OP;GDqjr-_~CyY#CV+M`I+4k|&IlgFZKvx<8R+ zS2leCIkEq7Qq*RLCDP`Z1MS;dPfTuldQkiLcIU}Yelm%^%{31P=@va4-9)e0zZRLz z@9=o;cUbN9MwB35gS=uX>w`D>AEY^Whtxt8Zbi%;xUhq-He%rKyxjarc79^x^eZAx zQl)Unt7g3=aRg=14g-xGRx-+Q=l2$BJ%|@`2=?$W%MV#_vv<%BK&@a`h)b9~48=+x zzNB=tw?nt0_YUjM#qPv79R#jNg*(;ea-*tSpPuhA?;fJle^M-*XQtW_E{3fNgP!>r zczn(gD2H*wZ;`Y4yEwMY^dvRr7P)eyB0t{(uX+1Qo^E2pv}C*~4thWyRCXCq@%3Sl zWB=K}LYVI(sBVR`X~T}1`Aj*cS*5|L@I+u!d&qN9Xo`maqfRCp@~dd42_nveaiAft zfn>NgIK47d$I3r(l&th43|b4F^~}xTDx<0DJM}@<8$a`Fy=zL+KV)7SaK%UUX2P}2 zCYW!rd1Xaj7JAreB>Uq>v4u9~`_)BdPVT(lVN{5oIR`Q3kt=}{Z1t|h+JY&S=0-2H za9cm!noR_+>k+Q90BZMag2xQO6=tA)&(rwi4emXApVOvIJ~(jY>ntwfAw3ahSd7 
zG``5DtCUHcds;f!iKWeup~FhNG7nyK)3TWLCplY(O$&N!lw~&XKI0=^Swy8DX+3?# z9{@(*ryz{{Qa2E8CaTd}alG%jJ3nieHeAjMvy?pdfX^ewBYW?7`VI2ioAxx)(JVJO zR7D-_W4XON!Uw!l>%S|uL~fa+JqPWtUUD!WW%;7`laX4K-E}I}=_j`Z45QWk#otFD zevT@LakcDJB}C6k^-tRNC|D{NAL=>x!)vab#x-S~Fq@Tg6#r2kF~iV%z2Jx-UkC3y zdSVjLadug;2X+$$!+rSycMW7mHr{xa)(RA+G6$t;GV2JrF6e(xMXh>L)72L1T4s(8 z^Glp>gs@wvdFL@r&olLujqg)! zj&NvW$Lb>N**=gAoNP{PgNPWex}#^q>huGeP6v;L_Z&b34f7)f8QXU!%g`~#RI(Hr zMigkWputIdYrktygM;MC$&U^s4^A@+D{?DX7&a%x4&Mwcc~uuUo?-g^ZH&KE6kT&@ZEW||2G)`5QTsn) z&V8+e)qEHkhU;9W*R3lfHn=Li;0E*u-T_m>Kb!T9Ir}sDS?wIB4Fv+_omC%Co&mW! z;oR>h@L_c-ZMK^OUj12joEYox?9J2-80(I(KDu=K%c>+{3i0H@*Uw^}7!W1p>uP>n zwQIlHSX78y z12LvigU4^SpaK$`jjw)QPFzllJW)!&Av$o`!y!zDA!}IrKp<$MtXuEyS$UbSN&Ch@ zIXSYbiCAHR-!H5i!%;5r6WN6-y48bUnSY?Y^^ogqXj9s#Q?E&1G zWC%d+|F?_`2g)QPrGUla_2!z?xN4>2VWai186x_Us(eu9263gQ?`iXdkDbB+zu!dt z>V%!bsM9fST}%K)L%e>b&-ZA!uR!5ws=ktEQqiQefBQx(n%0=mo76p~O3b3E%q*$Z zwCQd(V}SoTduQW*B4bTaWSyB{Y13rcN45L~aGYCp^Tu)$?U05u>q+Rz8pM~V0Yau{ zljMjMv4+rBm=~rhU@Wv=(mSr+!UXv_+0<4Vv3`Im8X38=-WM?~if)OnKutU8e-rSW zZC^J>Z@L z-&}HdaMz1vnrC@^IXtSCVst|L>Q0_HxM=?kZkWo__)uQVk@V=49H?!`izsHy_e)q3 zh#K9TWcQ-zWA22AfrPZQZS_$POx4GY`Hd~HC?{Nbb*^9K(Bu6X>|jH2|GLX6c7quC z^{K)<_HJcFjiZrRyYZOEOW2db0q&B!W2&kpWDx~5u3Y+{&B=+?Kq@7O=Sh=QBL2F2 z5gATJAG)wN3MEz|1jN9MtV0y$z(F+|)d-&Q>Z%ovRd-qeOX$;zm!2D%!sb?cKX3jV zvFfAvIjwdV$4Gb3;o8}Oo`|O3SiCR^h+y|N-PDNmcfJlxKN$>@HtbiXI)pZ{q4HnJ z=$)H1iAi;0D5gt_-yP&V4%q5wTcFB58Pct{M)^s-N>bPDkAYj#M%Hy-VS?saOR$X{$L&;=x*NOjs>`eT6Y{Z!wt#3#>b64(SQkvKND7plsX~yEIX{XRZM39yS;mon9WP>M1tbbWJSdjFXM!@t_ii#`KMeHSM@yMh&OL4?fPC^p~fAPML>tBBh zAmAq-)|DZ1xL`eE`b^(~ia*HLQ9R7*YcZ(mX;S#)%5VTIdG_pxHH#glIuzZlMLeFW zz|5O}gf1WO_Elam_Oh9sD<4@6cgXM0?JembaF4$Bg97Vh%GY-Y8@Z+h}n6 zvAr!-aR*D24>t>`*h)Y^+B$^Gpu)|8!Ya;myXlFxMA?p?&Mg7=EqTZ)RimZr#$2An zc?Ij4eHmqo7ixLQdMWkQoC!%Ep?OjXNEZr0o4!tFV7&?Puj9^jh!jG+FAcIQb%WQE zzYY2ML0l5bJn$BX~zr3iOFKsCew*-E$ssP>7pLXpT621r!<_rK)9v* zgmJi=&NG0OyT?}p3J;8jk115{I9|m}A)Wlnk;3?GU$g*X=}OJDs2YFWMf 
zNrQZ3FUzYqH9f|jH#;qsv?r~yCDQX*#f@Xx$@j9-sh&wmV?N?@F{MbGya%hq6`s{o zlt)g1wZRX*8RsLYiZNM7?I2IGkfl1~1oN~YryMQbg?&kkuIBpG*?vgudD?yrBla|9 zipqtd9Xe**0R<#(c0-Kjl5aC}CgMhvJ?TtF6T>@VuW~A#X}M zL$2f~Yz~aLm#2}^c?@zH=_)JXj%DT9GoE%HjM{ExWBb?Pg`-mz%=QJ~6m&pN>a^3h{y*%!cRbba|37YK(@-i| zMMS8qvPVR+_c%t9z2ewgNHQ|Z9@!juW z{^|DYd7hrvbv^d=cwUdkoqb9~V1JyNvFw!UlQ}WZ$DE7fhh8pQ-r1k{8xup2f<>y| zA;-ObMwuh{LMpDYl=hH(P-bui--B>)lL@FQ6n}DUHEGOl@949B`i14Cqj#&DC&ykz zyo<#>a3JHPQxLa+R5w__oPsRsO3buwIyezvFJ)Z0f>AzG7_~A zI9Z-#lo_sZxShEdCbBKRexI)Yuw@Z`VoFoIbi`k{w5^~1eeL$g+O1s)UhKivP#FEq zYr037ne;m8q3xri9h{Fo%r`HXjBA|~DQE+Mgga{QEj1+IFK^N~h4_S2-F{)fa;{(?N>6mCF$_86KqT(MMg`3oQaf zm}93RS657Z{aSg0V#UDvXzJoAKm+6tO%*kPs_pzE zWC@es`*HWilH79_Il115eqW2Lo>Z+}91Yv$KP{6Qq(-yS>!+KmQ0NS7leZb(xgx47 zb(+In#*?6p{!aXMCi>pB`9sr^Hs=PXmeRQ)Ta+XD^;2uYj{MJuap^mEjz+`T3T1-} zJPo(!tZi&Obw+DzJyq5$yh00FF*k?Yg&W)%434m4=3U(nXd37Tb@ya?tA{9WUfWD} zm~Lw@bu=C*RqQgcq}#7m!`|8Vt&4>b;_;a5dg%@gntre9avsnyDHg8w?KbgC?Oc2A zb+!X@4Z3|9ZgjlKZ#amLF9su|H7ouo{Rl|drmz9uIA(AN=vp@V+-1s&Wr?8{6~9`$ zZeyX7%Govz4;F^oS{l8l?5nbuwd^|tkXk5w+3s#pFBXw_#{6Ee`42gsX%PiW`3Dx#|{T^%%FYb})A;-X}zV6;RO8#-*KwD|RNr}!` z>k%==Uu;->U1A}m<3~wlsqqLO)nE`rX(;BtBl75;dq&dfv7N$@a3AJ;7Rtf>o?6Sx zUZV|(-iiHX-7GY9z7>O!1&!?zb74}C<9!?C2+hpY`iX;vozNPd=A9pfr<(JM0oA7` zI|~{^(KQ1Jl-tRHYsuOW1DWkiLc2e^CRyqdyr(XXEiMGRoC6V#WwC;RrPowU*wp){ z?K$;Wrap;sy5auTsAeV1A?=Q(;ZUq}%l`IHS9?tLspp}{l5)9(ikisRlN|vSwK<;S z)P?+kKr9pLq<#uJ^`L3#klf$C0?yl#cRSL@3RbgPqo-e7|9Q0PmA=Huy&47$jH8B% zkN|=Nvf;m%KCh7HRP-Q9U`08P>4{}KyZ1+-)o4abuhN1?Z|1F1CZ2P`^E72Bi{VCX z!{zuD7xLyq<3`(B0xA#;MJpja_xb{290yz&FAIOJC_@_#+@jSz5_5t#aM(f{>?d3< zx-JrUjXG&*a1@>$#Z3DQpt{76C>5T{gUo!2)NlqN&NA(qAPrl|+?s(~8;>Hr#T?a4 z1fP}~HF?*mHYAr2GF=4XJj0|R(W}I3t+$0LfL!6+%rLU_)%aXzaT?E3MlrtKR!&&d z=3(gLAhCg}^?25QTyL~ZUQ$7=Q@rve$!iI2*FmR4XFSuuqkl`$EkF?A)<|zfGiIY)inOX>tav zeqxbz|?w3S$EVuj<4jV%4&@b$)gi^gMeHj_nTOEQyQSj#abPX)rAbW zW{0mWONVo=yJd=Pvm8${m=fBg>dhb|urC6H4Sqg3n#kq5XBa!xa9VC5G`gOJ8M4#P zk$FWQY4B&d3wOv>0DZGa#BqipPMrjGc-OPW?P?9;>Nt(rX=p*(-a^M|lfhW(^F2?` 
zhz;r*#(^lSDn5F;FSS;dH9b|PFk?5Zfj;G;K<;3dlX~=np&jA%;nWQpajfwD)h(#O z4egmIo0Fc`4r;b0t9#N@UY|YZT?`CB1?K%1Vp&;Wt7LM0CGJ^rSW*vMotTE>>@yKF!U8cg_J1PLm)0zn40ijLeX5BxO1`|A#Iw| z<|bweJ^hxxTd_W|+jQk3;*)t`7PY(e{r9JRMhKJY1`g=>ZuZQl;bXjEhQ(W`CWxwG z%ewj05u(zf_+iP^#!hp~wQ6XdVTr4cIESihj!X0G;kxmu&!Wme!%)j?EkY}IvBAUm zz18lKEStRM3uTgS!`w$v$Q7lk=UXNuQj&%;Cq?9Xe!TXzL>d&68ud3jMCKESp`3HaI}yt1WlOAMntsr`mR77R!Ns#ZtZR!|NT@MLq! z*&b)A807-sTwZy5?MgvH0?EVS7IFdz=MdwoJ?XiKhB_M_@;s2z*+r$Yykzi zu{rOzYkR5L>w*y5m_wvf)*l@$Bx{N9F@R70bO@(I5e|ixc4-m6wRX4tM|l$LPB@=> zI$g4=>^zRH@z_da@5!pQ`qF*G4Nh~kjf5ljmGT5(HZt#x7?MaN#-YpQNeJf|AS!9& z$NRJKoS?0UScCFi<*Tss`al+{9Y00GnbhSXj3Z;1YtQD19A&6VUP*I6*t=9GT?ZXHSx~QB7Wjs5n24CBJ5+6nwkCi89Nf%pHTFjNMy}!DXw1Zfn z=*~{t=KY)-WH73nowSUP4zyhk^Z2q0dBSC2|Mhh3wSG_jQgu6HIsO>D2*#J$8&plW zY47faCaF7lfr_u9xHEBFZGfnMf7E^Mq(SY8LiF-&&7L{7v28>1UPCLoZkciKnuD&< zlG8dR{4yAw>LPRL_9j{yeb+7HyI*e&2HoaVH4}6R^fSVoY}-KV4*K?DV98|#4bNZl zNW-q~tF5b~mSj+1mYXB_R-n3B%{KA08n<|^{&6^r|8~F=5|o;R_iH^LC|qX7_35%1 z2~T}c%!hiJ%v6N5Ymd17WxuqPh_-^?+&>f2Z`GBB~oi$5TO^@uLiW11=-+lFOR!9ktEQuXV?NHJ>=?8~jd zO&#^C5)9}JG^2l1Z5)f5SBcp9VUV9L>7sq+d+OhMXew}v!HoJ^K@BQN#zgA{s9k8N zFI+a>M#M5{IjjIJl22F3)AJz1i_7_INhxuuRz6?z$gb_HM2p*ltZzg(7I;H_>Eu$a zF1|UtnO>=ckaOP8`gSgx$l_*o_=RPU)4exGYmtXO%e{GRMG;j!_V;badXk>NtPW3t zGbR?|PQ7F%6PbqD_n-7cR4+%0-++Xj5ju56CYQH2m?aDodU0CKbdnv+E{|TVZ41#K zY|X%$mb*?NqRm;nAkLB&b%tJxH*>w}p0^<`g-z1XP|}}udAUJ|t_~u^$rKM+jGJ~a zMBZ+Qgl0KquhDHzY1|c=Em=T=Y(5zhp|E`=CR8n7UitoMTi0l1>(=k1_I+o;!N91I zZ?uOq7vSg7)}hU-Uk2N$QxHK}{`X*yl;WZg8W&IA>VEA}0=wg1vbAZf;>SJ_7Vagc zq}ZMAYp825!JjohG3E3MeJ=Mr2Vv+_4msC5e_uoOz9P?Uh!VY5F z;cybXOM7OiIe;8?{ON*PLoqg-jI$Uii{&pZ94k0XwdPx$YM*@2>Q@uEhyDEEnr_`* zyaaxCDbMA+0W9)(6N^`qOP%BkAwqfbMxFH9I`@7#p4KycwH#D<(6v0Yj&gkwB0xIy z8CChS(FPK+zD!`lP_5c+4$nUe8Ot|TG0cnO$}cEQfPdESISEiDHmkZ;V+BiVFre{_ zLN~y2^@oJNRCU{)bXK6& zuU=Ij;LyC(R$8-~I3MO!YU6oQG259|J-VnbuH&l0f=xX=;Mf=2d~{?)lPE&&r(B)1 z?5XsZVWR}+At zWpnJH>;@{e7Dq@roh?E;?w0Qoi&tN=cCD5rWzLiIfJ0ZN4P3!9x#y_A 
zU^lU!^;>Wmd?R;XekDYn%RXkOUSCswC93L0eti0@hMnd94-yI`XL?HXc9vpt?wVD0 zKRNqIFF9OEq>6utu`*20SxfB21BB?TBA@wgR>pkAvQHhJYWfA8Q9Q+l<*a$OchiNZ z#}uN=EOuN;>9UHAdQzY+5f>9_k}c%U(N8J*=>q;YQyLn*x8k%o zhPrp}YaJvx@%DB>GPu2?hfgDNf>;X^8xu;U~Mkj;_I8PpBozsU<#{ zW;S~;hz?xABd2TWJ{6m7tef?{fovNq(~UOiG?!*5`+|H$TmvTA(3@y?fP+2qCG-g|<{kBDm~qod_!gU3^;K*(QC#j4Ty zd%O3GZnyh=qcnOnwhj&5rD|T?Kp#p~rVYr(VNNu|g-tShF419E8>Xitsa>nX#ShmX zAPZ2U2TD_9zN>bAzQ;4we>U%2x(I4NWlSK+46iFzK3iR(u0KFdy9i??$NSGHKvt+xF1hbV89>=& z#?92Kki=hn?d&2>{izl-g7!KPMz9+fC7=$^uGDF2vg zR3wimJ-aJZsna0RcSQ*6=&)2hC34kokmv|v?iF^)Iw{YQbS~7`X_nJg>k2*H53Bo# z#8UcFRTXuiH-6|u^$n(|+EOd7zSAG*^`~CC?Iop2o;Knx0lEw>w{vTzsMo3zeJQo7 zA_X82OEo$}m+p1dX~yu{h~|R3aK?~bEjsG?2zAmnXYUnDr`IFxbArG`$t#m`J|)wawh#mcI?gk+yto_h2+NW zsjs;eq{fYmr72XSbJ1r=KBiaDEvM`^EfZc%s-z}JwQaFtSVSG61X8it;rpClhk|{O zga)a126wQf!|fItSVG*UX&0-F`cE#y=Cen%QlQ@ z4VTs`)rp}OU(?Rhknr3}kaGAWjc!L|fX_IC{P5cg;>Y&!4E`JFrS=AC@m0;Di)I<7 zELItJ*nXo0H>GFJ8*ypG+0g+8r7^hO<1#QjFk~EAgt0#g)Pm> z!&a~4&+R5xN-z*zHLY@p&)G=!JhCFuOLw-uTKSG*=OCbbYE0zaE368er}BY9l!?(! z68S5v$8Nh0p2T3*GwC03`tqfMI7;+2kK;Z*iF9s}Kv*LE4bk`UBqzBHsUoj(f5 zlt`mCTzyVR2#M}-c#&-FRb5E!IIP=Jkka*0Vle@YS&1r|xhOtv%gc{WlHiRRWlYm& zpUH*#)~v=)^%}Wz)%OS#ThCfQfEBinBJI-#F6kEK9c5NETr#W|I%Zg(uc-we==0Z! 
z5P8qtMdKH$KclYucG)w5SGa!_ObwijJ#jwST>uH_1!TE8Ny5&-C3|wA-jQ-0M`p3( z6i?5>$XfB_W~GL$UO0(Q^H^nwSXaXGJJa6JeeYSVXVNletYoobOD(+J5tWGrJ(aJl zKdCyX#y2=to}jDSJ7-D`zQ;tkyC)r_i)pZo?|9_g+1S!z6IK2+GnV1s?-qc%} z&;mm8!#A7B2})v>d+(63mJ)BZHnv4NLVmSclX#E@45d^HjiB1JW&SEO998#FDA zwpdYgcBcDK>o^sU5u2!A)E1Lsa(A--*^{ljE^$gnKd(G9w9$HbA3Y(VWH76vuL%4B zQ!eLfhVTb+giB^qm7P_z<2t*LF_IW%e?PbpJ;HbrvEpU%kt)JPAAx3A4GF8A=nWBH zwR=D+l;)`+8=S8;o4ffjNjT0~?*)3yck&j$=Ct9Pk39ZW!K*%W*O<$U8JUL9s^(=4 zRjz_skyRFps*55lKDo=zo2^S)K~6;eEb386Wi zL^;4ekZ_NqEys(}QW(9{isIf(?Tkt+)xjR1^Tdjd0J8T#Wo!iiDwJ;&sB~lr2ec1 z3SkFF{cFXw)nXW4Kb{OIQaoavhdwZND$Pk9+p{ZGUllD}NR*bUGAn@u>a5(VuPjel zGDBMz@=F&ajm^X*8tK4Al9Au3eF_pX#dBkk;PVpPn8yqh61`Ej%4JdQ0HfFB=Pg2Q|C%%;zWDDWRlQcAe^b9>44JLN^Z2GCht|aIIr%+B%P;LIIq*s|El@}_; zcu1dt+vQAzUjvzvScg=2e$971@nzPmsr=Oy{P$FL>=eY`;8ttQv4K|Do5=UH_DM;` z5IU)@tI!Pv3*PP_=&cpXCI1BKmzp}##e& zDY@ho2)4&Ztn7VsM{=cK_uUtu7d{}?4E;&@B%+_y6XvS6<5HxA%MTGsQtm6b00fp0d=wkZjybU@Wq zHFP0STw`J^+sgQshp5`p5tpgylM-4trUNkuWQ!h2GG9y z&jIM<4rkg{18ZXs4YOIIakbt*hAMdOAD2)`i!25fwMMl?V_3_+p=gavc^(VaZt4d?-4;nG?@{I5#K7hr&x*n3i- zcG3l0J~nQ-&GctKlrw*1lzz8_Im-q(nA2c>G+0s&RNC>?{?N1ilOebY zJPup|dWXV$W&%>pT5y?DYMMY3@DnGd-IxE}pyU-OGmKQ9!Bu%>_=89Bucb*dXM-oN zKsIj+{&V~|LqO!8AD)~A1(XnQ**|orD^wCU{h!a@{JZ1ILr_!rs9@tV_;XcU)hQme z3QM#ac=8R*{nEesTs{W%{*V{yH*qz;xXUG*cXKk#xaq&(Yx{SH$1ET@*_yu&(Q`0^ zNAW`Q&VJ1YPs-_thX1RtJR2Pd*B{)v0IHE6gG<WDdC0IlTJ(;H*5ZUBUR9 zyMG)K_@nxf1FV$eA|uZHkmI!L^UN)xd!V8*_4~!%r~hn4Es5)Uqo~|TbWSp(A6#*BE|8UjomyD z5Pu0=#y5uOi4)c&#k*p9>@h?%fdZfCJh_X_*ta(i8vU9- zUj8508vkC!kQiUf@hV(}oU%y+R4%gxG?2L5$y=|KO1Sji{NJ5zI&T6o64bx>1_&SL z1}dd>KJF-2d-=ylLtM%8bX${8cP>Q#=C$yqf6^Cl^&g^`FXwNBLgH=m6I?l$;Rcn{ z<`4f@k0U5<42!|~vp^M-{w!A49)0>z6fE<@=L{uj^5>iXl(>vZoSlwHqmu&ye@;~iz3Jd@%QStpkAzCYNMZ&J^4>vgd~~YgL&QK z4f&^X{AsU$DJ(}Uc;LOs``iCC9{HSMSek<3n zp#q)(QR%3^eDz-g|HE8lJi*CQp!uhNzV=U@|8=hekSx~YF8_a=Yu|sIq&YCoJQr@q{#$JSp2Asq zAYG~7!ZZKlBs*Dw_}$dK`ImS4A5&nC#(AsvbTa=mOJHh$KlZ-@@mp>r|5qS>OU3^m z1mal2YXM*Ak=n_oLICYc{>P*br9Z1m6R>j3sFe15gMwm2YnI4PLudz2b!U 
zq>>rp1f(d;=5-+=W<2m;Jc6_Sc>nW~^&wu9FiV*x#eaGL{2At)WEm`qu7>45zD1Im z1q?+NkfZ+}hQSS|OaHee|3Qf58~$L6&HF;;zYHY}hPptY2>-tj?vJ-HtAZYy=tK9n^#5h3^Vj`N zQqSrZ2LC_E|L0rq0uD);b%)x+$N$Sv(I7TPzx=Gu{Xd!b*IV#O2Or@z)ruSa4EZlZ zy(Im>ql>_x49K9t*8gN`S@h@60fcmGv?dfHb$CHf=>Gaq@(d}foy85%N+sHg;P2rj zXUSOY5}>(E-3kn@F_idl2M#4{ARL#yrll_%|K{T5Az}MUw6qLGjq!b+qj31t`|oTt_J)? zx)Sh%*M+89EsL|*z`yPivw`WP3e?N4x}n+vcK@EfHu#|-%w(dF@0G9ApHdYN2o@FH zb9FLnSHxyk((5l(lq~waT6^S`hls!LN|Ajqm$#sDP&-P6J7tLdMTQJ5@Pos3-Ku5E zEaSiLW)St?`%oTIm5=Qiz2C?9=R%qL3V##E0D9&-?_YOSRSB%F^y{@44EkEwU9+V5 zi<6dG1Xd%-q^J_%phddB?s9MhR8Rsu6K-onxgd1~$zMFI;0&l(Q6fTXr%LVo%~{&G zUu567^uJ{Po9rCt-idmAdM^wu#PvmgFX8745Q5M8OFEOeEcP|Ipi$ib3~TIm`L}yi zHcx=mwp<<0c>Q{G{me}E)n9zGB*9-D84rk_gMQAe z)1c{Y_5-TvLh#;a1OUWE2nPxSsdWi6%vEp8BCi6Xn&a`=>DT+@Bk^2aU)UC0A&9sc z{Rq6T_ItVi;ji~G^AQ>O9+-1!qe=7`Q_eTs#f6~2=s&vx7FNh6W0;?Rg z{z$K*Ia8UBjTAG@|1IdtH~mu~LiflenY9Q!s>;t5YoziRQvMQ{{_7b-7ucV?Ga)Vm zm`8e<`cDM{q53yILzz{R{VA>X9DaNW)2;vm&jIL-TZN85IET`#e*W@R0dnU*_t>*4 zEeA^8BT9)V=@lIvAw_k3qp<#EO9O6Ra5Q29P!UyG(Dq1%h!YlFArF&kQ6f=-dl?Je z$rh&_Er$(zlQypsp=!ZnS-WG&>c0(zBso!8xrYBYHs-Ujl9Q{pMCf-H{D1t?t7s(x z07UWzGj0e)YANL@rxxpSH|djo7fv>lD|#&}DOVA5p?IMu)dmL)cC`1vcDPLU&TGzb zAod?`QUw39nHly3!z0ie*?2_iY<=Xef`QONjgatPMnO`aC=Gzoc@-daj3>xZ=S#@Q zVlMf>2m>sMo;^XQf3e1TE#)NIHqEqzJ}SzPhW%z=u+?YN-|Tw@*!Rn|QL+9KhuVPH zfWqJG6Cp}rX#m)naiYcNip%-LD~JfLPg7cX=qZ*l3fZR+2boMX9t4<7h}*~gGH1Z) zm{|OBI+kYw>c;?OyWa^IUiihe^X$``8e*ih{ni zNxsrl%Y7m8m-z#Jz{El*?UsNvGZ}$LC18K-yK{qNzbvd0yFbgs4oe39*;fxgU+Lc} z!yyRRUJ)(6k!cf4)>KP%p8EGY2mnVe_&<(6EAKCvhT{m)O(LydR$X$N%tT(ukJ1wC zjn959XJN?kvznJGQ;roJ!mUHq5D>eS!OJs^_xIr20z;O+o#aDSe-?&wv?I)Y&j>sO zKP10_b3AV7+wK0BB$>$r zMCtq04ild<5~2C6oVjs)Xk`QPKK?Op;oy3-1x_iAog+}cC;O1#a{oE0w^Rg5H=`|N zYC9H34H^G;ng6eHb3=lF&!UGP0CHpMz8LV-(7=YUh_19R_TL+~V#%s|Fcwh%}6zR&pUJu_79sU(| z1qhR}TFI*<3wGBZZ*{%Z2cUMM<;~<=y5HA<&cmOY^rne*U$rSV?LrR&M6YLv?NAyf zqeJX#%?Iz9`|t>6+oOa)xBq(pyg~qAjL9ZnC854%xjX6QP~OE;_l}xDxZ(lR9*A%a z11Y+M)XS(x+bK%b8@0OIxO?A^r#Y0OIZK{CA~~8PJ>275dv|)Jp2lfm>oeO*W4^W@ 
z!n;Q#$-UVBLq3gPymbd9uDX5A}Q!}uGB*1^=$mv8+l0@4OqjjzD>|eGGvqWv&64* z%K(C5L$lb22(7!{Ne;-;2+%^m-G~xZ*Y?Xvm8F9WQql{4TdAIneEzZZQc)cD+u0-u z(2YL#U^Pa&GzKZKs1c4duH%8FA-seXMa?pG0}}(8bK1%d1D$!=XwZis+D@y-^&< zL^tSRzn-MIdjR^}zsDhWn723XHLSmfqV)1sE{ptfSZ7luV?rtzEj#)26()m`_8-iP zDKFpo?2Sbm3(<)0XJ8xN?u$PCLTb<$V}b=BoL8~p939)W0FTQx`INbjlz_?a_#+N= z;~PMJRMmlk0HVsZ|B`{Nnof=N`*X!b-wW0aZxEvq*j0ylUqqYBB?L*qpfsQtD!yv76Omnffo9`Z?5pQ^%{-B77lZyZ#fXHhl47n zobe|-BmvDAKKJk()8a{91VX4aJ?K=JLxcu687Q3N&76>!_9y(NZ&QMtH&iv;B}Qj7 zedxQsr7KxxAM}ET>^6rCRxE&X9p*Qu3U6 z(MF86RLY?IjG(eeKhM}#UlOrm3^q&X%!DV;r_57;`gt{&mps?O?`RAM_({d6Y-`Mt zL|drfP`_BB5X}C4ET;R3=?^EdKqrAFeXQ`$Xqm@0EY-f9+2};XvALeEw3~7|#&C)^ zao)9TiWFtlzo38il`@`4ogmlB%IF*YyCklzvvP2ABDDUQr1~uL&$sOJ)3;c4yEe4E z8&xE*6&3Q?Z|-|Mls(6OQzgxo?npiVe6WI^>xem5b3IbD84B5hlsd8j2)L~xlR8;4fX~PN9AJ#| zUPxy%T89rAX?flLs87Lrm<31Sz3pQVvO2l0+n%$pb-+q&Gt%$rIOA8kSa|+2W~2F( zQ9Z@En=9UFim#JPN>9N9somW^muTF$(-tOHGcCa_$Z^+DuyjSWA^v)u!0QI|a{f+% z*BNAq7arUKFt7ru)49$>Q>nOj5r8fmCYJ?y?q@Y}oq@yVVz!SjU1)DkrSLryJT~pY zJV@M*jXU)rqqB@nncS}`rw&P+iul`6c{lYWsz^-f4^W@!~ zCSQ`U)JgT_u__0>(`HxNbYmL-3B!IUCOL1DOwkluC}@1qNIfe~Tp({p@{!#@{dK8~ zco6KFSyXKu+dE8^m5(X%)Ejn@qC875z%fv+&z(lvBe1*A!DMfD#ga7X%k=~N61VJk zE~C_nTWcD_vC~Q~w;q3}1_=no>s7|SAlelK7#QgddXDWuAwN9PB^EUhm+!K2ZN)JL zc(QFqDU~%gTex5{<2!D*n;k;*eXr`$on$euN*{N9tM9GWfpDgyL)<7!A`jlAHl|~+ z+yU)~N=Dt~Gn1!cSIU^%DlY}I!o>p&>}y(im{&au&oX<;bkoI;*G%V$mP9?6f7fr? 
zjiq;}|5!0KN!2Y&gx>Av;!XTuQu5#j%HtNI@GjyMyS_@u?`C*C`t8;4G1Y-OEJ zKL9T8Ef4&o<|4*dhCUeN^W*(ZSm7s?k-GB`%tjAecY4O1Yd60=uF#w?r6IbgKPxhI z5Q$-S!yKktCpJ@ehKt-T0U%wd03NIh=)U7-PxSA)u65j-g^h5Am1zlacp3I+dB@(F%xI?FjJRb;u zL&qJmNH67Su2%Y>PWIzVB?|41&s*vF7LYRSDOmkhXFt~`WQfE!$9PlN@FX(kO(nqN z9xwjX$th^iCz+7RQHt*oUd>8l^YIv9fTH9Ew{9*4{fK&8k;TJMtnO!~zk7`vl%M0=0@?cs#8#pWqpM?)$SC!0~L z5p`+ab*w>3DhJTnb#)Xmx~x&bMD;%PmE)75n&nudyHmjV+U{k!r;1RYuTv4jXmS-k zyh}T0%3T@+LgQgAQ;F=#lH#+!qK@;-APWX5TH+ZG`4Ykr2ycvv{|V2DW_rBUmatgD zYPt?kjw)!*Vd`?7&&wNbm1dj0CN2(rvo0pJuD(ad?bJ`PM*DDI_TM&Y8RK0#`w z<_At!gX9t^rLMf+x=(TS$vqwwk=2SMA=f5hfoDBlthW1sLu!l`s%Ls@RIe%bRRfIX zZwzkZYK7Vhy~60*V|y8H?EqTN`eze!lt=?3d#~M;&FB@|CYuCK<1!ZB+YLrM_;Oh_ zr=3QFl@EB&AdD(;DGUB2No`6iGV(1+Oz^X?-gxQ2>h(QK2 zaIxxJb1uKPDS>O#yZGw0hHBA!H+kZ*NRHm&>ck(BQvUjOBTFb}S;6x%EWQ^KvzV)M zILk_ezQ5Dib@P@k`Z0ho4f3&hIOZ9vmR1aQINasz(4LXFcp}M>v7(a7BIT5}yu+yc<9b?c}Z^%u%lflDiM_H^C|l{n6X5Xw#EB^r5Jy(fId>pSVa z-8~PWuXrFD^jq(cSux~xStlKeBP!>kFFsY+wZEeo_M<&qDUPtpCVK&JEw3+==v4sqBD=XUXx+Bnp zAXL&V%pIpdnaTw?eD2Kf0D#Yr2UFga=vqIPa?(4akU(q$*E4eRyr@L*ifDm7-Mjy4 z@Vyi`Y1oPU32-I}tvd;u+$waKUbczL`;3d{f12NJ1h8D@(iK}>?_w7MMV}jz^KoQX zP&JL7`w0?1@-es&RNCold8gm~<-?qH<SCn{6aaL)V6ZuIZ8yep}%DTae^s@LRIe;&2=xwRBBnId^mU=3cUt4 ztT(xJdF$2ZxP91b9O+C6G8*|n#4_CPrrLK&r(25T0s}<|!`tkaU-W%?wYSQsJSxhn;KWGY}+06BE@2E_hyXeJpiDZ>}5! 
z$kT5Z850JzgN5EMq*Ai@2`t|UQlvochZKv_dYg4)AmQKl(*|lQnQrvLa~kZN=MH{( zQC6H6#Xr{&4~<3t;^Mj(L$GmQbb=~3jRYC#hacynGqOnr9v@EVxL+}@FOq$D{rkJS zW{*dfBLX0ti*}0+giWJ_=gs&i0^!YGwq;FcD(R%WG-<0zqx@}&Nt56CS-AX1MbCE`>s1-=U^Tg3tUm1=9abF+ zd1h-;56dP@x3#v}?!yXO*u)n5#t2(NsLIprVKv*{(ivJH7wRVvGdxmqFPnaVW@V+# z(FOifzbv8YY~|HTO2goiXf;||bEMN1CGrqh9JEQ_XRycymnpV$fS>-9Eg715#u4mP zpsl^|MX~LvDiB@SYR}OD!3C?ukIsh=%9IQ1xIX=tX7XTex zF!|->ZY~eHHig46$+AcpN7Iq# z%K8N3ZPK4Q)qy~@ZWW!Ju&M&&+5(p+ri!##GFkBT;BitOQ)%A~d>O~_iLp{x0A}3J z>Kg5jyBx=yE3cZrSF?WCL)bMzBgPM`)8l~=KPdFviI%2ONzw7msTFumo?Q+MlTK%| z6Y0-{Utx?ryF(xqE{qvBq(HrfUW zYZcC0YRTwNeeV@NDwfT=qqG*mlAa^F%<8)61);HGvt-sidilDp3%&Q_nX9*SZQd$zS6;DoQ?+!%4lLkul4}inM)ZCYerCwt*~*JN z4>#LE#)AzQ86d{|>GVjtl@+I@`zIquLW&ww}085G&SxO*E-)Z51!c$i{gT%A#3vgVhbyjT)XW#MBc# zBT|&y1KAE~pTSC5fgMQQuc*C`k)Y(6JR0iIAePOlq8aSe=T_O+KC%2${04Vt{{H*N zU`zeUhVd6&8@TCPjH{KN#$2+6vFfAuAX&RL3uy)fd7-C%2VKT1*DZ2_R>xJb?z&YM zj52ilfdTT6FN;hH6tq{Z))X_HVF_fc>l}I&&FoqJ@Q3xC4&}YrEn+%{p*=fY`Dk43 ziRWbzNf0?zmgCtiM^wGCbXkHBX5H`9%ZqnN@3wb7&p6z8KoDTd8D3$%@N)D_pQ- zlNoKD)~qf90SgM3j{cP@BgG_V4QC6()}gIWp-^bJeuS}i3UjFS7+2SqdkE_i-LE9;CmXil??3ux|`Xz+V~^h`~i%vut%+Nb#UO7ID7^F!?en z#W4p&10+Rkv9-8fxeO(`DPu9%#irDxzq&h{Wg}6onj2|li$l&8oLp%7)LA4D&FDUj z@8!paZ)Ezm;*MF|!E&1{4TpT> zz~r@ZH#_)Mpn_1qy;nOQj00tKhpEYEJUO!k{|ZyJvWI>Q+AmnK2Wf>Df4#5%qOJHL zxnHYuYOEA|%J=As$NervgO4A_#`XIY(?N9^x@(Sjj2uF z{-D-#H{^!by<67?xjK+BzA8UWOQuMP(AFPbyD2*q1bZZPMlMbv5Kz268E9(>EUxir zVtv&8OMK5GG1x(v2}8U##@ho(em24Qy<0ao!JhQ_RDV5y z#(pC`Tk`?I#QrK2uDFYs+=_cA3ch@Q5@LWbtnpb!iO?InR=iXvo9kKn60gQMG4vHx zdQ-Q3CZ;kJewgdK3ecprSB41n9*jQ$A5@@YX*#wp6KD=Ogzvhcx?UK{Q$qoZ5*y{N z8drB+cNuqT9|>N`CPg&~5x6ky&BH385o+0TSN5v%%}i;mT~dgCvGv6IqyEZ@Xz!1o zqmii-mn&;77Pk;tT4V%gzP-QFo~gm;A=__Qxgh7Yo>-`M|(WT^DwjjD;+jbtA%| zqTQOp+C2BGdxX)$_s)yZPAW`#;~>44lrGQd6kAWk*%&BXe`)U>@A8Dvu-3zEVKt2S zv}0TG>9-eM+!i!T^>wZr!Z6D8)*H-b`V^rDX()`ka3CI(+jQv($%Wtsv_@4G+y^O2h zhD)S(T--#Vp_n`M>vzN0R>PA99F1;%i>l-0ZFM@7jb6F+Iv;*I$EE7!znjo=e6(*> 
zuDfoo=SLvnphqBCR^Tr=vSrsZug9Ckn#3$Y69K7^;?mU{c7xVDPEH=^>hpP~PZQnd z-=b>Y)BZY3>fCjBW>xx5-nC01E7B-e#>rWH@>|jVx`%=X6`^{r=0ht_>`fgOPn^4v z+Kr`SY`|O)diZw5tAp^8`ii*A`mpA{_3JU6I=6O8=dzim9m+Gz-&if|#`Ii8k>f3c z42`^$;9I`RN`qi@&!AG_2TlBl;fb9qBQ@AluBp9Lbw6fVPy)lKwuf%!$d7KcIrB=t5F>SDG^zt4a_;`r<8c?-s_fR%TaVboF{QH$* z%Sx*ND#=+Y&W|TziUfYle#g`Up3G;jJ{iG5WO1ad`R~QJw4LQ2kCRb zHdtQJj?38&-MxjSGrR{DoFDO&kz{Q?A$@{m;i#fD8t$dH-okP{$2#THP*nP~l_;Mm zkAi;&`kJi=iLvWiCfd=*o%327Efh4( zFT&;9nSCyT7m?uLr*$0`hmTQL(CmtCY%-@AZNYhdy|Fa(RxPZ_TaMuz)2C%!>*9bg zzsqIO!+fpns1vj{wHepS-wng|Lah0tDBWdM!L8$a;S7l|HPhAIaP|WB^~ZQ93O-KX z?+4v>UW4HYwQnYfhxcRL`*JE$9<^PP#aze!DRus%l-;RqT4H^Xnwu)`9*w7;RLkeK z=PukoIqSDrz-PE__H6nkHF2ArRJrTd850UDSAYnTMxLjScf+}VTy!3m#MsF2bnF0@ z_A`0b6Ke8-q1YxOuA6n-IpykY&(CmOa=S)oIy$#r`elCF*fBdX)WP*c2iFi5jqxc1 ztxqiN-^$n(_NNel?GC~5Gjnd{&&ja*@zja)882IWopB@ab*yV)ow3U>(%)wNkn%k8 z3a5taH%l)TFYn2EPpL&yfuux8ESgF>^t~LlE1plhZlU@?Tc)DQdX7bYQ5Bb&Y)pSq zbv85g4y8uy<(r9qp$lGyEN44YHi~cP4VdTFJGUG__Njc6o17N?(^(=3H#0w+i=FacLbe?uu3=l~dz^pn@Tc2m5q z?1JaZn`w$}iVJYA4qhW1kM3voEmx+WaLUC+sjzgoNXTy8WY9d5{Y9%?h@+HPV(cO zK9Q5j#UXPoL+$;Cf6y!}=zn~p8)tk@5l3?8^YR(E%`w&{cPOyst2-|OKtrFal`7}t z^zf@Fj%i0E?KBjArMv8RqKOQx1zG9Qa(r6r`f3Fs=i^ndxiTnZUbxXjUPTC(4=>&}Y|!DM)(2l>Ln9z0_zZxFbU9T5>=%r`A(v0p z5{pME=RO92T!51@rM<(9JnQJhyS#vl3Z|bvf`@2|p1=`q@EF!M&F=@P?L#$w(=kc# zqZn*iC*147;>l;{m}qldw6QTr3y)}_vkcI?;&JLHeX#L<6FCp>-QoS+oCt)-8MQaX z9g?Ey4RjFMzeUgK>qs6|ed_3QqX#@VG?4`VAVA#h=D$InA^BrcHA+VJgT!9~;}dC1 zT=Fbdrl{a)Aj$7H5r@;#S%3MvgmRJpU`NcSeiNqRtGIw2MTlqyii=>cSj&wJ_FiA1 z)5?-~WpzX>!7JMTfBzMQSG#y*#rphk2>3#-mOKUiC4H{%wbUwkOEm2b}`y!mtr&ZQ)EbO0r+*gO^p8=%c})LR7pX3k~Wh3 zHw*XnHYo?PgpOfZkUX5AUKw2&t1Q&^SZ+m_zyaG+BjD$;62=KR7i_N$DtBkdzs%Db z$Ng!kpYyx@#%_-;{%$|a=_DGxoIvptxKramnc2Q)NX0+uxDF;CxENXYtHNsuIS3F{ z;K}7t-FTQfH;}2g5P0Ek6_B?k$`@IGvye5=!jZ60EJAZ~pCb|_Fpjgdr8FQJzW?VmJh5m z#hS&*|2*OO4_+)5J4^fbuPjFyzC4Nlg%|%Xs{8*5nEnli)+F$3PF;Lx^YoUsC2(DM zQ#?0by@Dk@H}v+UHo!!PK_RI%$b8nY_3bo|hnEKd_#4dCEbLf}6Z5WrbWUz(*Jqv6-Zeq?QRg}HE+{u 
z1=)XP`SSr-FOZIF@dwhE|K1$o1D`IC^Mvy=;G(|n!4uUF&;Rxgpl8HWK@*-w(fnNJ z$QMB%a`9<+zMX;?8}rZ$p3u92e`I`r@6sUlMumNqWB=2$;xzW~{P(kXzK%D<;0cb) z{}R{0c7%;N+K&D-B~k#MUk=1>6ilRsC&gm6)(t-G+E>gZm zlsyFS#Oa%`5$@n2@dt#M@mTA>iBMQ(2YcJ=nv!@(m*El5_PzEy+Qb05>PGe%xApnE zBg1WVhi|KQw?9nJEmJ214?cKRz4~ljWaO<0&S2pKO&$+%g3|Rq(=PH(cB)`#8N;pn z{}GyOgF=bYuIt}@eTUkI%)q7b9S|Ptq({OoK;X5E14z3fX{G4OzrCxS;HnhixuQCe zb|3AkWw`p%Mx)@9J~k%)X$R8lzhV6qc@TX~RKWnQ!hD%&f>Eat^kGhRf$;X%pbGH@ zul>B4G~tUsKFA@hiDblbTa~hVGJ88Rn1mVqhP`}9)eJ&w`RFc_n+@j4E>`i~rL$=v z@jXdG2}((Vci9JXm=QpJy6crmJ*=CtR!5wTYcmm-^~!5;QFoUg{@%Qi&Yy~f&VN^o zG$RPQJ0KnA!F8;j(i*Fg0<*qU353-Q(TLPT#y$Sl7sz1n7uIdWBkS$$w8f%uu8`A$ z`I^3_`fI@(dc*~-wiEAM-^65r!XT3B2&y}pUa2xiQ%mmOQ)`GRHEtM^BiY?I9A@vd z%%eLxzy-Y@`a~K7k@blR%l-8cJCZ>i?p%V}d9jUm6PG3OIH<*gQ7c-SHP(l`Y;9$- zMI1A2RDY+9`K6;tM*QzdR*rY$r)9wL=gxF^$P}l-V>zgA^7D7q7DE1v?DJH$u`US@ zK*rJRr@mSO2)f6`s^1e687=d3$W;7H^sn`HC*eKdj&@T?Pn{iYG!UIciQ;=#ZMKa5 zHwm^nsJ`d1qBau=f`ec5^#e1l+g-Ice~x#Dih8(w5~%buT9o_so6ARfR3{Ya{o^wK z?tL39APR%}vCQaYLyde|(N#y?67{6My)B&5s-eAN%(k{f!`1_pZnpGp+iba&j8mvb zN#OXWPf3DKTm;EQUfaQki!itDeWm%^H63EeYnVL!_liPe9~^8pjNQWIR~jsK9qBzH zjjS?kxDu~(}G(?cvNL4?=;XDW*q;2LqYQxxP~ z99VYi-x`1z?E{4{LKdh6fJqCzzV9!NAweuqn|3j1626DgXCC>aN7iN>><30xLxYAb z&_O%>T9Kbol8;0x9%087csTLg{i9im$-TdikJ> z6gE%?#BO~LHLB~EUB>;_6q<^9-=PSXOWjVgsB!*jE9f{MHE3`>>y!OV4=;~d7uUYK zGfz4JH5D2c+iGE1cx5Z0zAEB7G>%Da?grg=K zF;BL;{h@&%SQvwY51Cn(v1j+AvAoM@Y;3@!|1W%RXu_{xfra@Qo89G-aM947S{+7? 
zF}rMSlWj)H)AU;OT0u%@&cUWkibtcRDcua4D$RqV4gR7p*z$Lmq%HnzUXO2sv2$J+ z?RQv2NB7b2%BtO42mMFaJJ;15H_cpfq+#TP!GmCJpNK<{XvsNEx6<|vFlRmnd`_F4 zoDJ9Qr@M-yIZ8U{-avna)g3G64Sf&jo7v*k?$fIOSPWbgI1FZle+@>5h>wYE1dnn( z@A!t)R|$tw#aoya{I>PfMP=H1>G*{YjE%bkHP$8;||2rxXBDXib?YdYvx%?!9!(M7}i%N695*b`%9xA zNgY*mWyE{2!);|A%OMusxJQ=M&F`Wmp$Yf?yCz)1dS>A{a+*5sL7TTVlTwQ5bWg(M zHP4X2=g4ID5~+6fk#6JZ6&_q@I6Rn+lsYFbj4ipVtMDNYX*nA2rJKnWAeLMmtgJa}X6bkyetDlq zU{5*A-|X z8YG}Qf{Y8*f9(|5UnOBSKaeU)<~9jL=)|rvi@~l`2|sIVZ8BnPV_t`OS<&p1Ej>5B zpIHSF3i=u@>Usb3Vk{psToj_o0LhJVD2lbuS!#1>yc-lUDyz4B0KxlwD@kxTM(tH* zjLyfHM@{$pnsq|f=a0wh(o2s0Y&fjfr;xC^xi&wXdwVOAKy0tCDqv^mO|po4;0rbV zof+?}lZ9EnmW4aH0W=L8<%O?+u^n1?Fnf~1<<95m%Vg|yS2*S)s#rW;wzV?;IE02R zV08#`GO08bWd9_8Fr54Fj_o_Wx+R{D+uIb}O11{Gf(ZGeZ5n+yG(pPkSC%M8_0$W# zs8B1#Oy&G6+Ee!!ZCSGfF2)Q|hgItCsqMeIZt>z3z}wnDIcaC`oYCWsDk%P^*JkbO@2!n!Yz7CpL@#h3x)4@D(&5cSzxEWWp#5P7v_}&yrHygbeMfm-Ma+D66%g0;E!k;)EZ$Zc=HtFR<7r-90 z>>jY^%$!E;jP?7vBOc)=NS1ej3t|Uw>J2uU-Dr>1gvsmRAE$YmNNlf9`=&6ISW2`X z?C-{&+m*~d%}exd0!erlQfs8_R0C!fGp}ebl{SZve==*p{msZyePd2?P)%T3fPChL z`yOD5d=1;(b_%CyYC|cki+s-Ro}V@@sI1N`^%A$*K8O4)+02g^-5xi_gV~#6*AVVTNPh#8n1Oyt zPK0ubMfJdNPDbVOgog7v&PGMn+D}*CDXI3H$qoxE3if;9M-y3(HEGMuW zj)Cmsdr|X8rgtl+RX{%UnO2nI7UGQE`5$#7vzB7ZK$;h*rbhCe#~EYw?g6qhaczlM zTUp@8`BE^-qu+JRi(l|N!l5g)#VS}uL2UO^Zm%{yYP*7f80(}!?Up(t08_uYkgYlft|_)Lc$w0?_ee?yHgerb4v z+oUZHq+LZ}{NE7SVciX!GZ*jVs5SKiRclN32I21SOVRDEWr^|H)o;J)h6<2m3C7KV zuRvbYMD_AmTE=64>|&O!!sFNjgn2(`J%2Ug#OkmW4k6q@BtCw0>h|Hc5Ki=}+h{qA z=%~#;yQPW>^F( z&c=~;yU6@{FR!$xgqC6GqN_IMr^=NaqkJ5N6*H7`XG<_ieideiuHOX&3`Ogi+lFyQ z?9;@N>qWN>8VQ7diEF^KdDU0wTF}C++?c}G`Z|uby1V5s{vI}4s?VK;{F~>9q z5mXvqUrQ2oTQhh5cuYRKlY@G67gfxVrj_?`eg!1~glttH@_Z-}2l+Pur@mfo4CVH7k-2zR4;e zF$Lj*|G}2PZ%}mZB;^ej9|Tb0JrXziVP!2A_!|VjCr*iCbv@M?;tn>o!nd&HHQ)>7 zM1eLZc@OOGm6Z$==j{s<+poD*VOLnnhfsY~>~l>~ob9(-SCT7G*Z8&y>)Tej)84&u z+^Lk?;72R->)VeS^j}00U!aoURtM3CCYyuiw<99495;hjD}qu8)n=fnJH@_8^|-oX z&ITI~jh{eMtb>&^0ITj^^A&U5dnx|iUntJG6=g)8RR9t|U(Nf$lAVTc1{J=%hsJ)qMLfgNbnBH&gZQkvt8}x=hVMj 
zoHQ)!O2)Rm@AL{0a@qq#KdMhgEc0WHFHACZcGtkSfa=(m{Jk_Pf7UA*PpSH@Z2LN@ ze$T+LOoi+o4A;N#V!O^6b=^C0W>-yF#FScfc1!m3^*q>RE3B`sp9;seA={9qy@U8p ze7@z=`O}03!*s7az$x1g4NxUbmfI@0mvR;+b2#N z7ie{e{#8DoldGemjrH`~EbEoL4n%IbJJ-Hb!F;%xVo(|zS3d4Ermd`em{>7<0ncrP z3(oEX#tjBo7NEd7#_&@|Wc2y59GU&SjDYQ_M+>J>5(E~N=E;UiW5fz~H9{CQi1R#Y z2RJzk4e@dncIx{llT$qGv<<7m=(I2ruP%(PzTC;UfiA4y$=Pn>uQw*cT03bYW~Vw< zZlIY%ZxKk9Pl|F)Dynk2GO8|kXZa~V4+}E*)awS_M(J8o# z*p77Prni`a(!x;vg+eu|R-70z$G%$hFy@;H#!ikddu6xDit*7XULmpd`sM;>UUtoX zBtwC|;L=BnG1rT`Be_Ecphn?)+r#jZQ)}1r@yOa(wwGMq;y`AIkd4@W)whDf`kN)+ zBrrlM4JW+@w&sL}TKmU<*;nuT5)<<%5&#}|Nr$k?^PtBp& zhlo^z`Vhf3(Rp1*TTpP9g^

    5j%}zb~EwmY|Au@V&pi8WaL-n(rUhq*D$wNkYlDPB=6A{58heuNKPK2dmqsE}beVIqegVJwTkt zxHPA;Vp(wEaeHufz1Bj@dxeP(rM^4{Ew;}=*quhMno7wpZeBSIOEr%_Vz6}$ zXY(CI#FXm#dl&}vbEgGhUA-S;V~ zQ#D#+jTPIp7HdwO%3doZY^t>`-y8RgcWs3|8_~shos+EXYi$wlx-g`9!-piw!o}TE zU#s*WCPXA%bTFBAb(!m5=XL1KAJ~ZxL!e-9riii=IZs&B4Hwid3D!P|pLd6N?BcypbOFQOyj~PyL++Jb__6X*dJ0kKYG|p1 zss=N0ycKiHGzj5im4D)NI=EVRMK+fIl>xWrW2}bxN5173D$k#u^y`f#@Y?Yp%JXv= zEsTYEybj`FySmK%3VGq7$!C&FvyBlGl6UY)^47;615Bm4xBJl7jIj|N92|{p)sQ&Z zrtqzxHhD!*Wq{0$O`lVN>TpNiT66wotwt1IARcN*$=NO?o1?W*BRf_>o@!&=O{>y5 zQN5@2%{i-x!-vv9Dcr?4zsEzx=QdV`PsTY3@Z z?9YW3BwX}F;q;A52XTS+1c`rJYFx%AM&nx-QNxnM86>u-|%MWZTky({n z_t`D82uXDO%T(nETVKYeE}o88@z-P)-u>Z`7~P_;spOi(a_NVg%F5P=^1WKsl+Wd9 zW!$%p3b7!by*00K2u00~Uy4e>j7$5aEK6F6wg_rFsVCVs9D$DRCSgIvcpKHEn;4?} zm&CcPq!2?~ko+(48nBCm3J^2Fn_>J1~5 zB+Bcp2wC1SSc4i7r2@05ev(-Q!o}rP1yrJma0uI=^*fFCV(NxX^^ch8r}W{tHZMbb z3SKG|IoS-@r8e;*sRf&l3ZDnxsk<<*Ya3TTW7RsXbiQ9uzF5bmqr|B018ZIPBi+Jm zv%ojnhCRGD!@XK>#qqJNd645E=l#LB2YM&|9{10OgBK=>ZFRc~ab)-4fLKHEh?}cL3>?b#_JcD;q#W`tph$fC<4E!LaURYGy3dyH)<(h>FildQRGZ#|UFt}qI z0q)osF3HP^e&X@I>j;!7HM{lNj^UOfVN~SXj7X&&jyrp9-n3$^v8ZNtcI;VZPj7Px zS;ol|BV_Mho|mLx)KZ*vD6S)=lcD8(At&#u6;dhuZC$^>0kj11NTD6aMsKdy%MJ$1 zN|%)$Ck_$T!Co5b^`awZ`^`_sZpHlzvyC6^dqp0cbo_BQjxhi2mtI}U%>dihx(UGI zNWcoo2ILRey|~bHe)5YTLA+GFqJr_N-1jv|MkOFYgJ~5?uQ)b$t~T#sX-r0|C9hDY z?>e{cS+23OHoIS%zb5n8)eEf6lXVNJnbky-SP+!n!$sAn=4lo( zJ`SZ4LCayI?uAPjr%r5RMmUvc;m{C$_jQ7ktH305(MAE~;TtvbE3vY;Ir5S@*>;+d@UqjWu?9_JbIn=S za~Zd2EE>@Si?0jjewsiAsHCZMx7HiZDT;L{3VeLpfl50=9_OXcY$l0|e`*1!Y7v@7 z(y$xfIh{Ojr{Z0>^1|s#At+U;&!5mT=8Y=bTG*`L6dB16^kQPv%6)!t;?yFh_VLTx zeBAi#>rkR(Oy-ilp1DZIm~jy0g0qQ^Z~5dKnG4!2<`Z+9jUxU-pNBTKkZTsCyR-87|T}hLH)=<`}tM;`2Jhk4gYJ$B5l{nY{ zCHwkEM^%WB9L}{{UjjBo`MkZC8m8wevl2G5y06=lxoAd)&Xv7?Xjq`h&*RaWY_?k- z!z;D3`P3hXRx#gl;g&M-VOXPTd=-Dro9a$^B0^qO1g_Q!cQ!=&r;Mi?N;n#ILf90} z44y4)Ip%6($t^85XJ$wj`fk)=2?~;wpSHh}7%WZTs;TG_`ye4@uk~_yU(~yGbuvKo zf)~vQ!HjC8=*@;@@2|Gr*w6o9KN9?GcpeuY^(-?FMOME%=j@YZh*Hu1Usj<$8n?E7;k$vS@hzfuNt*iLyLXLyR@Jgf;i1dHEf?@-nD) 
z6$ht}anZQY$8{UwY9gyj1PZcYO_9N*nYbKxT*jD@F@(xk_R$OPPML*-4 z7rtA0R{g8#^x|CVMP?$Iuk=W43_BzNB-VXxQmBMW_%<|!Y5`~ac^z-jSAu!Wzl_a`R1zXogsQ4 z>b}kOy5CMS(Pq+q3&Jn;pS*S#$B<*@h<3j=!Sd81)ztLqEaTHxMfcYR`EI?q(6#o` zMgHnIL|+sgWxt{(9<7|TYVFjh<&E%_C!k$-f9*C@tL91&`dUp}5j=hNeBp3!NI z1mxZ}Ljp5=FMW>+#vr-i^C9nvMuZ=<)pHL!>F~?xc(B;w`fJcwP5YQFIk>bPw=8dUp9Z{K-QukmL; z#NFAqj$VTd!RXKT$oX=1m+R-;OJ&h3uFO{N+%}i=H{K}n>||Bb)81XpOBPsz&>tEn zm9;RVddA&^Kb=#rlpU;i#jSY(Uzpzdy&klje=>{gvhQ6r6pda4oso20KaT2t?Jt5y zSuTuV+00sEP^U>YE!dyF)#9A4Tpu2aI5Dcdbzm((E4T`OpD1SPEE9oL<)}{PRRH?6Eg_1BD{=mhyEzU&8k>)@ z|F(%n<4=q`Z;{q~jpeTt-dN|)YAsE3F;%#zAxDJQN$gLiYt4;O(ag>Bx6)Vs5v5)L zHJ4s}m5`$O0%_?Di|Y?`CoesV&XZX`Z*NF2lq2H5vN|$&Pefha`oPk0M2*@NL;nk< zYxn6UIkuy(R`3G2@&6#TkU;9iRqvf#^)dSh%+M(%BJA?TSwRv?-*f zVe689m0(-*fhEv;DHqn9S*=O1PG+jhSx|A9`%_t1nE9H>{c`W z%2zuu9V-|Sl3yoz7`XPb;q%opE?-A0Jk)EyE+l_?6c>N?@*{j76C{$taV9@~ibyPz zFqhGZ)RiyZczKKRg3Hw|Pnh>`WL{slY|9m@?3xwRpLJ}>kxSsw`pD1knjr5fQ zMc`YZ6h21B{{{iPZUFMS&mSWorf~SbPCeB}AW%o*(bGhYHHkRxkll2E7wOl24$9&V zKKKro@81`oXrjyID}U0t$FLtmHDJMsNC7Ewc!=KDQGj3r8b1r2kzPs%q`7<|X~u!vFwt9X?0w-*UJ zD3PW}O`oxVhr)4DKZHjClTAY?jGkwS(CP~Kj&=&_2Y~Umf0z54)uo9PLYflTb!h+| zx=fVU=~{$y99Nnd`Ynl60J4_WM8nsW9PPX;_BK&KVEYD9pA zl$>f56l8l0DdkTvODXL<5&?dvjQBSJvUCImU3(x$uN7-A4J=P1usnoG@a7I0Qy`u% z0Zv13V(SJ1a8TX+Pae_d^AXl|zb6s_fS$sL`oTcb2*R7s=5+v@az6p-z2!&$YasuB zHIT)M$bkb0Yk`Vnq@JL|(q{puB=z6+198Q3kV5GYNfS7lQl*fK{6Cyh?W36>>}P^R z|IDZxq@~2-;P}*9@Dl%z8GRzo3yWfSc1U#QXgz8C&(_n$+BK-%&;`OOGfXvzeY?R) zr>s@_&5s|MxnY>|n(1o0iAT#toA!UQTuc^VITjPC9{*#=9KrAvkZ94=9zEYk^MCjA z=5vsc?Lg4WWK4g0&=HawbdIv?bG7|qaX6(A+H+BwDDOd6obj7~=yx*H1qh(deJagD zaLoMPLpwkM5mxgJly1yyruN!;Z4-h1QkfM+d*1}Yj~nqige3@V2ui&(bp}P*Offr$ zh|}~KoHrBN4>SxL|3G86j||DN|C468Q$g%-ZZp z*h5?|!S9@4$23$Z^4?o+pGFP>X-?V!hhv{$__~z6chu;Q>&i>6FSk?MA)Yo!+;-z2 zq~4PV9um`Na=qtn{ArG=_h-nj&wBH+@3{a<(&ajn^k68~2yL00-$iuuvaz~o9FtO_ zd5`F>_3Efe#7X5>kjX`d!cjynoB2y&n{ znq*BF$-(cmptUQ)bh}5HB4s`Mm5^OMB8%8>w#LEj=vf*n_H+H^zkcFDJ=zlrXD+ls 
zB}pQ;dKDa@s~5Yr!*Z@Fi1b9-IHeuh3Lbje0;lLpFB0F%Iip!}Z4mev}d^0V@`=I)g zphLE{b#W`})Hg2V0c(=J&A98=JR@u%r)nBT={h$!UH6v9W(`Y>RZ{zn?@yL%0cp3^ z{=q9*&SGQkT>XkO>A9(23PR8rr6r`;DEyOG@oKTZE0jIXi`;J|GmPpPfjG8siu+Ra zG?EuFox)?MbqyfZ;3_rizJ^rn6o8qtD1g`NMm`Rh`JF~L$n{>oN$D8Hp6f(q-^I;0 z6ojAKQ@4TcM9~vil#gY0^G|-h{P6v8*@>@!6@`-~OuBqO>+S}DTz>DJ!4&J!=K$Hn zQ96w_RvmmOb&u6|D)xQ?uuVh0W_7OG#01n8Nf`U_yYe&Q-Vi~oZNK}tNEgJ~s=m-x za>t7NO0M3_ymC8DeY*tcZ0kQfclK@Z&F-3?L;G1v?(#@SugHl{nhcDriSXU;h%Dhq zbOxZT;sKL3jw;!e*H=#nnQbh5(lGq|51f3C5h&|tjC-jqrmmE1&_Ls908ThMvp>{8 zJ}Fij@#enC{w!{%nufR$kKrzQqzjVqHs2+7qYb?G*HNW9zU%tV$vC`zO{qKh)~F?lC;~KyV7r!XYv- zv9aIN1hm4+G!PkM%3b4Qwh`;ASJ?i8>aU2;zrzB2oa+y8Zb{Nde&>iGh63X@21hLJ z&G)XFxu%Gc-tp?XuD}vfDnfI=ooXQ7!9{!5Ayj`;d16>0cCq1&wn_D`zALciKC>># z?`zJUW53QKq4DierR_L+ssRoyU63R(k2snvowYj3uW$&ctWO#m3TrSH)LZ(l!=US3 zsFC_n#**IT97@68)1kHlg}+KpGt`LAM#xf)8FM=>A&?nZwz!Ep_puuzD>Sj16&L(3 z^I6AL1MVOx<&~ZI&6({3kI+`shvY9}f6_jx^q>xKYk8kMty!CAeG;RxS#Q!N9AuP) zZh7iT3&qlpT{-9D3&qz_`n68w7w@i zKn|Aq*fQvXN0f6_3PTy&^`{}Z`7#uw909+zT;PJPB7UDqM9{yK18F2KP}d@KNjdsF z)qO;={guh@oL;{b!;#Iqk1J>R@@COxt<1Bmf8lqx#=r18J$pWYgaM=ljOvOv9gG7< zC`UUJ#U4Mr3p$3o%U6ZxPWKPDXo(q~Z`2_<1bfbpzyF$4mMs&>;zi7GHSWMjUWdvt zWwF_)AR`K7UaMArsaDwanq>R&^~RsQ_XBe1XeawOUcOHB5Gnyg|5gG_6K79rQ(C`T zEjvfWx#Gp%YuSFP5&b-S`RMm2pj_TqTc(C%y}6KC*F_{D@+nsj5G=FB@{x)V=7VYe zhGJu}y-us;)?p3Z?lTZdLL$6K(GWf@cdav4Ga~sTXZFrBX+=<+D74b{%H zbGBd5U?VU6^6Mwr)tarTG3wYKMCBvQ-lUTS+StIne*i8N(U?3_65mW+XKF1=<}&v6 z9MmdAS7wE~70P?T`RO7y`FdO6yo#279bO5vwK7;+Kjh6loyq;Z`>ShP5#23wrCpz0 z*S$4SHL@|i&hw*PJPWw7;$_Tx}5KvlP_=&FO5G|eQ z;t4Wbec}4xV^_^b-fKKyZQz~_MBXB@u9QxL>F1qpZ08rFw6@AiuNLmOh3YC z@t&sDvh;A43~~9B`&<@{El7pwjbBh0*Rss5R5!2K8~9656|j;F>u*D$y$^^yoR4GU zZC0}7sdJZSm6rdK_$0XZWs2&jztFyUu(`6%XZba*^!@HsU#_8aQNhU^ycwdrgkwpH zk~Al<33GV3aIYVSW9#MP{+Rn9%BeY50D0H~q-w2oaDD~)W1`p2oT&jy=i#pw66LGI zTEDR{Yf4rwAW>Jn4>!FtOVD>N5(f-N_T}BV%s=6hu9BCyfsfjuUpeW>PW85r=72g@ zI2Z^(0*YG`WYfd-z_IpLc88%?p_`E?Dll2w 
zFQQ>(>irIBxKXz&iXJiTWhv)`7P#CE+CA(n$(nciZ&H`j74|4b9t;`kfOypxkAB&a_T^7M@1JgNV(-zR$ZvO@gqvjdX^w*wK)(W9KfMF= z!}-`JB+2Ve7%5|pe zQ1u~2jL*J(1RTaI#5FUJ2)Rh*Hv1a&QpZ?Euj-5|2Y{er{5?XScyF##x-S%?PPif7 z$NF4i0d0hN0Q2)yJ&WUVm+(1y){BgryO0u-)608%tG#2Rqu<-Y>|?KbDn!OQm$%Fq zsm2kzde!z(AGikK2|%6n1?nR2M4LZ@7cT{Y3o7UTsGXp&A#OW!KPL1ivTLmzk#eN`9{msDg)ph<(8)S|;oL6*%1FuHM;^*qpSNwL# z{Vz=!MJ)R7NB#2XpbZ)|(KK+))CR<7kc~WP9f0OP017;EPDQo8%u6&t~nsGRl>tV z>39fN9dotUX}${)AX%bp2Cm(9=G*c2>v9~s5sY*q1-!}9*PGd4^91WhdMWJVb58~u z){2y9A4O*z-&h>0RQKe4RD-ToJ!xG=%VmSUp6(6hKV%Z@!-V^c8zVmN$!cija?EV^ zXNSMtZoh0F>IyzZK9@UEM}~WV(a4IZx!9x~J*^LPirMg8&HQ2Yp1(-Bk2Eecn~6b= zt~o_36Uvu6Pzc>94pHHT*c!f1(b)QzVsN}ixo94%54b4_mnoQ}3JFeH|c_@W4W=

    u+cX~(gF&& z$Pimx!KS7vxq^ zayAV8Hakm@xMV5|c3DMIJqc;A8`R>z;y6Z@vA?Jqc#_ZCm>U4W9bAcPF9KYB=k~W@oZf8t1L!o_N*g8up)fC$*NRgjw z=}M_r6(TKc_Ujmn|KmBrN)bmwf-HzA=O)B1X%es#aThCl#HK6udq)ht$IT6`t~<^T zec=3fR0gMss82E!f$2yy8kWQCqWmqGrE8UamMbZn1-9H@G=RwnX?bds|elL>o;v!|rK`~}6 zf=kz0{(h+ooK(@cC?)@bwYlcUu6ridSqPNpIju+ZPcy;w2UnE;I0Rmo(;@g9$&RHI zxuN?bJiv24PUrxlPM;QEO9kz;BG(|$+|fF!8%&yqbC&%6CAlX~hYs)Y1{#&Aq+vEi$F9Jl@W6n!1N3F z`YZ-5p|9HT@$ttqzGKF_g0c#TuC(S|HTmE90u2_xAWe#t83=F{K^d z->CqXFA*;4O#Kv6(V6+|^h=F954nH}W#De3dGhy4yZr?=y?hoep=F&Xdp4Cn{iGlV zl+ge=W>NbB+R5nYSfk3xw|sZ4)sk5bM@*md^P|e%2W~L?2Ti)8|5%PEPmZ!N ze}?YpHcqpLIWQSiMY>GS<@y~Ok}qpi{@ErIbZr^po-$V~`O@8HQ2 z%ZKukpWb*vzOI&D%(kPMmPT}3;=Ql0oJD%jjLwp+s`+ur9_Q~wyd!(eSU)g*`mDcWcgHN4bn#&9!&})2|^l$E$}`mQfiBsfCqQ5B_;CvN1dud@iEzXsEw#AY94Lta9iY{Hw)a=45sh| zHWJX=xjGMl$(>B;@(-4}d`!9NF9d6nd~RBkNQrAz26L@O{`vmQCTSh$#&ADzShk@uc}znqe@MEevK;E^%IhJA^_=pH)IZ0xHlX6Kr= z4TdRK%D4^=x}P%tc09xYPBrP2p#96S9ja_41#7N9t(dIPt~htx-x2e0n&VH~#Z#rB z%OzDt@SAEltQ`=0D*e8t?Cgh%@-rjf@0W)slvSLu@-KgWAC5<5j#$zDj_T?OWfwRp zk#YR~dmLk698I?$JN_NVz9;aP<1gZ!X!5L@T0cOipTdias(AgWBIc`GViBKqekLA9 z!tWF`_>J5b2nXB4mV7`!u`(K*^mS8V;P7-q7Fl6_VvOVT=na~A-%M{&3 zd@43C1oh^Z(|ln(PGARIq-&}nG@hdUvnD!7;2lfMY4ct#r^9`*k)p?64)76=@flO< z$5)**#WWxm1xnaa2BjLBJ#VLLip>dvdMkylI_(@^k5PD%UV zpgZq~^g+=>eu$BbK(V6cxcL1@XAVj{A>)QXKJ2awE z2qTMP5>dUlBtw*f4!?G-b=$w!ubn|tSIj!Ogs#WXKPj>msL;Fg*S@t7kIpXyeNJmV zL~dM)5b5b@!;@2YFrVxZc{IIgqMUf+QQBk!$hcal9vqqEGQ7CdibtbP>grats!rt_ zm5j!eA1%MfFj|bGn{<{n_btl{^lSMdt=pb5Vx!4@ZW!C7p-XaYE=8m6k&+JeFIKei zxeca5T1$wGIX7(lIL_rSvY{7^6Yg@Z*G>WN?-0+TLc(DAP14lL zs;Q+do=Gr~&nA9QHO&P=+`lsA$w^Q<4CDsH&b5g8ze5msdw+-VzjOQH+&eLVz_UH38MC@qJRLZ z4PSL49EgBI>ii7U_9gcvD))u2N`iA4ry5cis{q1MaE5yQe6v|Q{csARWOMv7s!bRyg}di?rH;wi#{XQVL0j8*&2d3W(xr%w#8WM zNw}3_FC)bGEasQj;cmkrhNEF`WfYSHQEpqflZ;hAf)w|B09cWW&!x4<=);BoR$vmK z`!!Iz`uX+uN%!jQ|DAqI9>R+(DZr3k5Oqp01@DpL-ObxcVz#p1gu zqr@WH9A)12!s==Akp^#XZ%eIaK_x9sdyx+d5ZlR#R};X&szxls?QC*NmZlmN;md!P z>M<;KsrMy$>qK8U^lNDH6W(9d&euu$c-{JHtxl|j7XR^=&0rSivC%n2sNP=)Mz;hK 
z&4-Z7d0q<|XHWUiswZ+9R=%welV@2fN-;0Fpv^uFkyQr-F>08o!4zPWgqNi&0UO+! zP5)ZOcrn&Do9Bv-&eU&;V(DER`ivFs^KwJ@OJ*qq( z;|ac$L5tYVZ2Ezl+bPp9%pu)%7^?S&`;&*;BMvv_(hR7!* z#2~fNpyKc5Hh-vN7kYif@Ml1Q7jv$4(7a*;l$@zw3+(ClipExPjVL+-?;q2k4dt~k z^~7Iu6m4#A3IOA`|K|RNP38DBkZ3kRp7|IALO-P1gImuQfp7n5s>|D?vpJY>!tI|a-S>G5#lCII&^~PM{qFH{_KnBW5+woY)?eQZxJPY%&VZF$qU0) z1b-tJ8=Y2E52*&v#aZ)PX#+ai?K#@xdas6^#JDw`*1?8M@qK2h0?}_3m=dkrvuH$3th(XEXqNlY&8OB_3n@2VsHuh z{^N~Uk=(hBsZI8t6j?>-ZB8Wig4zG^@=^JQOUdJgbBklucikNw6Y_@#7X`oQl3Hop zY!4|4Cx&w3gdOBQ)8|ni^^kB_>BiHuZRx5sApMvmUl<988?<`m)07hU{x|mCJetb= z{U7d>ic+SMAwy-#7#TvM6roM#y^SH6m0=@=217DtEMtXlW_8PxwhcK%-Xi{xZmtSBtB1FZ-CQI&!xgvE6FrZsUiHUF778`wv}h>M5tjq^ zANeDG>G>Tvf_9xxfd2ET>f=)1LY|?H!Z}Vn039=X<=3u>+#7wrINjgfoFJWVex;CS zr%s5C3ehtjEe8YG)YIgX0F}EREBZjd%z=y(pt34I&zB@XWhZdl+^c|k)i092PnzW$ zkmR2b??Rk!VWStsGNs@T-ZN$}bKpWVK0r=D&2#@e0cmf&=!PGsVjs|(5F-#&V<4)z z1Hsw&RY|H&cET7!!(QEmBle9o2m?I;cJ5E$=-go{FRP9guvi8SyR!g3=S)v!0sH-N zPd{u=tFTdaPLb8UrX(dat8vUZu3I_6N;-7Jx_d`7{_szImH5k7LS#EESQ2t<=^5EZ z6Ppj=u%?!&9B}ytWK3TsZzaSNu2O}qw{UOvP~79W5p4!ooOSw2e@JxC#Z6n)JL1u1 zAvs0ob9@!n;U>uWOxiBeg36 zd)Ic~wx#04W%5zB(knkvQ`U9S<;br?0qzl!P;(sjG!B5>XmX1m0H zp6&XeT4P|Tgv5lofiuUWb`P6O=a=29y(+K)VF#E53*_HG#>siDE(i;9JC8k3E<-&1 z?o`LiMrYC^Yg|SqZTl-RXMkCtC0-9Us1-eeXB9L1J=Fb{!e5k}_}#Q;+5g(Q9qvPz zW(rJSUm&_{6l}5Er6A^6{;2sfb8~*$96K_wXAtBT`fjH!jSw7Qx}ZaS=GL~okog+`{I?Dv25w;CUL?426#LE%4}NJk_ee+sY3cX{wI zgqn&oaEPi#-8AAIj6rt-biV>5K8as=J_rU123A5uLm7yNVvN--6+q85frOO#sDsXX zoh!tWHy5_=Q)sFaSyrx8iq@22oSa4fSa=1nDLO*8ECxU#*z6RlKHC#>A$cqfB=@_ zw$#MMLa=5y7J68`^1WFgI3OxIr^Rlp@pT9yDr5Y6AHoxY%W+{>@Ta*g2|fGZjyrFU zKpB@otAI|wQ`vjn~q9fFVZgAFTumWd4xXQ#2FzYdZ&M{ftsB#t=x zxOu8850}+$v^^6jr<0%RA4Aa*QHMt;;Q3j?heU0AF%29*&G&~^@Xv?F89?Db{Pq6o zc)ZKn8(Hk+n2g)hXNY|WujREVJ%p@XFW7UA&>wH`Z0nx+sI+G=r;YV6p0%#_G<-Zz zl0;_8t%~p)>{5XrF*xTiK8Q{^Y3t92b~}MC=j|j=3FSytni+pD`4t(mDP|Z&Os3)* z3zUA>rL@G1fLnh79`${@DP8*K+dv_d^7$3=gHhA3M`nlw*1WRD9uT{PT(q(&bFqM*lSdGPqEsq>YOQ1kvxC z?uy4)hUBQrs|O=MQUa{BpH25g5mOC0=qGYPTdzPI`T#vp<9q;8-8T# 
zaQXYqUyW>Q+w=V6oe<;JgN0UheKd+-vC}Unw5|q`4J#s#de;s`055l)LvHNazaI5L zI;NW&7T)j!uiW7=BP{gT+_ez7^}qP%@DAL~_#0S&UBIyo0jTyxB2|IYzp4Uh*ud(H zg1%TW?|FFG?Y!_u`5nmf9!1?ffq(4=;&YE*@3|v*y1S;p2oC zka{xMz?(efKIGbga!kY$kILT`q6BSvjSTN?Tl0sMB%UolL#C1QR9p&HkJytxSEsQL z^_AA7=_H?x>&4J6ASYO-&mqqkx)SK;2BpUnMi-4?ZmlD@9*@el?4X!KE;9DK#3|V1 zqc{JEV2w*eE>i2yi_8+)5y`9JhA1l%y=mr2-K`-o>zfROY*kPjE*SP3^-fl49(Zv% zeZ7-%1iy_5{|k2dQNF1wG5@)+?7~j-;RsUgedoDM591?`kL~J(S4NA{!HCeUW{FXp z>%aS{5Lm_%TZ0lpb3(V6Ar1&OxO0a?L6Kc`72$N`yWV~0Y$ENH6O2IAbQ~z2;Hero_5@!7YRYw!E(&_4Q1yTXtt?r|+=1KSLXD)A1~ti2#{0 zjv&SjP5M+!KIy4`LvPj#c>V!MnEQliI9g8>9g#hB5vc^WK1`TaLXQhxqCW8JU9*N% z@j-|FJi+ui?&_O*b+LKPx0xi>lV#Sv)(%4BGP0L@DE)-Z;Tn}lo>D|hjLgcJfwVNI zh!V4YXkZ3lTKOPAs2|lKYG@JitO5cO&whdX9t48UxX6?h5!MX=&iZ8tTHHJf`r5K* zwpc^EhFRa56`%>oqn;2aHAPHI^VpoSk`LP7{cXI}9U=V7XX{zj#`_xqGeH2D6UBgm z^$9P+{O1xx`G6%6n+JV1*JnV#__YGyB8JT!FX1c3kz!XSc|WC&BAoqu(x#H{=CpVR zGyNS2z8!h*F<$(=$i8DyfjGJcsD?>^zDq#|%%_(msT{RGy+d8Lxh}BZAY0_9tXYk3 zp8XVJa8jG7&*Hu=n1Lfkj8gewEk@gNyYx*5)g~aDHda0Ql{`%890UXp-FLuP^V1SC zH`V+rsA9^XYiZq0vIv!~M<%ojVISmFg8&uhnHVJlfo$B!VeP?;oWOt71qbQ=b=X#DBcZEGGL+vf$cp1N6L z=v23V5Llc6P)Tbxs_ILw01#%hx-h-IOiyepf~UJYj{Jtke08bgxqEJ(fH%tt{^fps zCbLQah&Q*jPO>*A$`;RW%T%@Dch$kYMF&kIi3o&&A%o> zHsb(NG5Jv1C3wb`W;hkSLYMITDq;yxIVT27{9f`%ckXjF8MJ05^#&9~V|QbuS;!Nk zlZn}mAvm4&D{M4agS9Ixy_HQK$lAoq*?pi=n}p6rJJX%ug=ENTFXZ%TDzkmuD>r{} z5#;=yFLCXMGDFtKOJ7w6*U>TDQXyG1TGN={a z!_e%!Ok3~O_*ohXb10ZB9*5(IZ zTJiN~?^%y1g>|@2qqNSsCw?WPr5&=XFe|rl?vw!MT=MCIJ^Tf<{8R0@IbVi17Nj;E zaaptJ^6MEo*^|&ie&txb4r`{iLyg3naP7|K8x6IF#1 z6b$zfyG?EoHsfVg<@pKG(fGJ7n=5+#Op0b~y9{Kr((+QGPvdzo>w{msyR-Y{^%l1c)V{35zqf%WO>fB?r`eJn}dACB0GLI!^vOh& zm(ItD^km`bAj39ggL(Djykw-*^?BC;`Wckv)tIas-Hk3Q=@iQ{24f=Mfl?3xU8NRr)LVIm40z}%3z zlYl2#&m|1pjhMB+^BR!i`i$WF-=EJ8`_>fliP@}m+my>O9(see(c2z`>uKwioLXl0 zb~ftIG|U>;%+JMzMN>RjDiUe zj>yC%cIU-%=O8C}OwC-A=|;QPmm9+ZUDjHrcA0JB=C3HLGlC~@S!Ipcc1puA`*77Q z&UEqsq4I|HI@0xS)_TCM*+_r@iY1OIpj#6}_BcaI%909`>qQQ~(v?Y^+UPvn(yw;* 
z_8HnYc{v+1adHEJve|`Wo185nUQ5POPIFUlMU~N3!d%ppTLB$HRu=MdMaUAwu3=$A z{*CzA%HtMtH-@ef7G;K;C{Pq}$C-qb%z-}O;X&bIV`bXYx)VXz6GI-0Z`#drR$ zf)+fvrxf{Z)z@ka@>^6D1mb(+@}85khGD)JNAfftV}mu}i^gVAxNdH&6XH!X?O%gq z=0i6*bOMISjo67-LWN`^NxH?OR03Z5CU2*SofXa3aHh7bS1WQi>ndP!(5hWV)ElIT zsw)}J9phLRi=y=b9r+rKE$R*Jx~27a-Y;f@vzj7L5MOL~RocBT%Wicu)kxGPvwXeu z(W7l4IW0!#nP{PfIC8s)5AS#0=PoJoP4rVNcd!ktJ??!(9!#oFYQPg@s;iI-U71os zPK^FB)5k7RW?9r@s;I-&L~6Uhaog9atNyKrQnIKRMhzZ{W>N8R4=I`nwXs?5Gg1B) zax{Jg-U+VA)WxHFAcbx6~Ak#8%8k!YAT*pXOnWh#PK)<~F~ zxyf#3{QLk14U8_id`H1>Kk<$!u0UN)&kirdjfcq$y%9e5;s-7+hS~{9un#u*77|$n zsGdld`tN1S?6|pyl)N^^`@H~CUBGdX*BeAPeLtR~C5qna(LDgSYTP5%opG_I&sK?0 zHkr9cq5oBIL{no9k!uUwgS%wc7odk+;tE8w{2~S8a){R#hkdG(DNd8(rqTHyGQm;K zW22+(R&?3JDDs6Kn|)Qijl-!;8KV=V8hOOm^7NER zS?Huzc5XdOnp)<`z)(``y({urowUU0H#r3A&jxiGQ+R%iEjHmXwmH?%m}BApyY6bBStVpSwN3PckkIipOvS} zU{(7h&z+_52$yl;o^1%pqvuQ;LF_OX^pDrbP`vWYhayXgwNXbzhtJHRx5I4RHr0;4 zw?)$*t7S$oHL*&%{wyAtauJCq9K<*U%`c(#(d2hD*vd$2Oeagc_dQ%yA`^oH@`hX_vU_%NvgX(3}Yy#e}670wzJKsG@iAp!=Oc|*Q+Z+ z%yJ0xH#M(z^l1qDO}qguOf$5;>-(Ek+hum=862@cYDTbA%S05t24ob2mgL6C2;@!ryfEyu$;cLFd8OZ@pz|RTW4l8A1vKw1-ME=207`B1 zq=+@B#*KY0FcZ_0JTn&$CVQ`B9Oy?R?()yZ%i-UwRhVB!-{4y7cndGDW@_=Yra~LD z!~(8+<_k!ut#`v`<1@{jb`k~T+=oc6&>37haEjJf<$R| zq~_-&hKnb+VrYqjGT3X~9J#rqp)?EBtphhpd)Tnkm!+@^RuW{pmPk5cvr42LAbB>h z(tH%<_zC%kaS~_hCLpF5LC!PL$rzW*4cqyoXP_}~tMnwVeEJZBw`o5Y-LnMf@Zhg{ z%*Uu(?@FO9T8cQvq@!)n;3y=Wbi&No#%PXtMSK~t_{p4eI*C;3FwtJC??AB_7!HdO zjzkfP&@)qNs!%XSU6NcW#+VTV?VHN<_`J)WwiHaFUKf_A9GE`2KG!B2n0c-eiYdU1 z`&$yhPmmT&0uR)xHaX}V!Fn-Fjr^7a@%_X*V;109lOPGYx&dYxPo{ z$jvMB2mn6MFr3+20pi;d2tQtqYu(9l^BQLJHHXi@aBW`X^ka%bF`O=`Z>AUNa|$Od z)p&4%=L?RKR;=V#G4{ba^=Ho*R6IeYo%!CMeTuV-+50Gmn2&giz5@wt7TdRy0V3*F z6EdpZIpBF^wv;OiWl{)OH$q5eBLdocxF z*yaamQ19x&bW8}FY#^txyxF>Bc@ZavswdhEGtJO=(P0rA{7q{Zq+hZd~ zv*^Hz^lbNB;u(p%Qt}>>H5u(1^EQ$^Xpm{sDvD zKg}snBNO_G(T;p={7YABtRU<4VZ)h?SgX3kn8{PV*vzF;Y2YWQDK4n@r(ICD>nAz{ z=18iNMWO?-jtl48-d?-F!Eu5Xczwfx3#5knWPlEHmN^}Np3)RTJ&)89eT1-Qg!=3VOw 
zYjA*8zS0ZfUe5u#$k=75#J{sIW&;U1%Mry-I|xE0kft1j@jz|2WESnju=JJ&=zz*1>Pp5fI+wDmM{r^hterY z2e;!MqyRZ_yY|qpZ{mDMkNiG{BiG|s{$D=gA3`|+LFPLp`a7-7oOfpu|)s&r^b$0rF=!C7jL7~woKl;=11uA$;B zQReXKBmiy$B)W}8f&T4Tc^5M<3xKric!EnP38wkQuHo=`s{Etx zP)Ig{6buFM@oBJCv16fl0*8R+;pG89;*UE9{~AAW`yhhE?`HoEOTuIFraoh~4r;3Y z5DxiM9W{MpWoKt0j$j&q?JndS+k6Whg&V4&R81G#J2?MKI9x4XBU521LC z%)bufaW${LrzQM=p_VCcez~*h62!RKjr}rIxh&l0pT%W1nBpb77^Ynm>NTm%e*M*# zVaTC056eeV8WbSVib&wNeR4Mmw4>b!nL$nwDz)IlzYbv?-$Bh3-gv@Q+ZSv5*#&Fi zi_NOhb;_YcrtBgW?T>=R!2iWN(bf`o`L(w;HR%PU(?xF?iUxFbTeTU42~|e=?zNin zbwiY9Y(ajB@P+<>WE#pdMf;Kyq+JGpclJuWqVi$C9y+BVuFZ4Qm59g803~t_kz4Rb zw&^tNUEf%ND2D`}4y=lhHQ5m1Q=n=x+@Uz0QM7=tT0E$@F*T?2EFzG}&>Ta@9%?F) zM;>T=zt_$Z=T=zn1d{7=z+hU#WTae0wiavV{K!X)0Xie!qOZcVZ*#;+2wYp=-Mt1;6^dShwzo z9=yk~5HcP7o1wn&yf?hSVby*xhUa}juy-Y-1Xc|b4PX0=yvk~Azpk!##p3Jxmxvn3 z%__mkKVHmIl{pDQAA5}W)vA6R16?pIg;=?4W0H3D6oTp92gCCkza1mFb~tscbV+WaO`}pnLBC< z9r*f*q8{_zt&P6d2e7(~V~qmYf>f@Rd+RCIPKwVGz!yx`FS6HmrMr9VB45YLKn z87tb@bKR*DxT4g6HPV?`n&I~zfSV7P+?f8zz&TUB>AhI9TV%<}2cjEJ9U6g)S0hAS zxVzK3j*XI}=R0_(_zb$3(@6Mtp4rF`Ey|l&ktM~;nAddohpl?MWMn`+;lxT{G1b~` z#?~5E39HqO^1Pd0^H>4WMbHrsWvq19b@*bn^)&~rEg98D3?hT&#=;ftkGquu`d@}L z>Rea$%c|6$c*$m>uzBDi#0<0w4}nHD!Pmm5W1YK?WWysBKPiBzZkFsXVNgHuQVz7d zx#Fy1{5(%B;MgO*c>HgNwi{#{_>n`*d3)!y-X#cU2L zmr9#ULeqWYlA+=;pyGKhwJ8T(d25uwZ(@~)3LR6p(bJ)PrDLPVHgcQ#GDU-}Wzt00 zW_=z`0=VbKmm>@Ch$1Pt%4>c!t!L2zPbW;5XCx5uhZe|R)v5hycHHY5w9+U$wOmU|&1PoGIK zL~&GU(8298~c;!%}|Pgu}0Ht-b0|xBjhJ zdgIXi&PS8`4reu4WXk)Koov_J@RHZ2O&mMl?P>alXj>sqz`n1@tH@E!7kfT)13Mes zc-E33fcvD*oMO8!Dz7cn6cAgJC$`mDhk99xjv1mcIsA2;nkQf+k2}oRSk*2x0yeSM z(Zm2!QD1rw0}ncVE-mpIxDPxu1vCjH8_d;?e8a%q#){4cTD@_hYD&1Lz8f4*RM@AN zKzZP03T^t@l9(C$oR(YZnA^iPJc+*p!~DtYS)T8c1zg6yMo7zeKU}dW8?y$1?uP}tKo``PdFo&Qli2IBCRiLZv(9+ahC|0PZD(8*CG4ALDl+2ah?R5s0QYN6p)Kbd1oYED&H_Z<7 znwu54M2({zD{*#NPV1|S?ucLCD}x60FgLTizHEM1dnAzgt^zlxwuBbu5>sgGUn*DKtPEeH;ZS$!t?;YTzN?|aq5HHkVo2m30$hwX|!@+ z4jjOjLvHz^dw#svC`sWu>!cBz8Urm2OjAluS9Gn*1s!k57C4n^Fk(Qj@2r`;MitM6 
zSG7iC_AuQ4AYE3Z8uT_nx&tDdZfvYoLj{$leTMm9XsR^SOU{Tie`>USPb{7$sAj z!U6!=GK}iMKf&RCqgn&gUF*L`py{iFDq5{%G66x6D&dvAKilD?1V>$%_xzFMXyc=M zqlLjPqL&O>)d7dXF1_e7{Yv>6>_tLOR&+jB_3U%ETlHf5h#yGleWZQllYl844*I7! z4Pbz5DA2DTg$fVD%bBrX(xLy9VKH}&443X^nMYiX2H8pXk02Em z={GjWEbwhi!$Sgwx2Z*qjDaDB?bZ263<%}Rs)h<113#a%$RgXS;-hx#!vkYKlXAl* z;=DwNaT-QP`J~G3<^GaU69K)(BQpuM(suZi97rK)yW!XRq&qe*1Ms5ri*UwAi{Wd# zH9Wixjg+A*oBU^424(D@QrUlRcq_RYgEXdvp&M5hq1Y@r=2DjN;OucL-AS`;hLVg{lWypQh-{eX@)ME6$~MxBg7>mFqo9FDvEoX1SZ?gCoP-p=`KArg3?v6No+ zp5M%@nI}v}5|}bmi?|;(O89yY##HBcv&8e*b!t88p)ZBop}3owJP1vY)}Rx4 z|0ql|%(tg@YqpI&w5f`D5A`FTT!5S*ciRAyRo8_uA}ZT8@$-@*`v(%Qr-@;|65Q^{ z|Mqsv3|`7ypS$e-a{B=fO-~}mYWcQ-*9Q)8In6)`dG**5lHjurN}g}4wGBIKb5+rB z;tnF2#Culs%N4T^1%8D0WV6+n>ZDS9eRkklCo)n?3L}XNL$4}XQHn!JNS}>CA*rd;WVj)YBuV&&;y4^7@RY7-4V*tg>|f4WLd;R&}Gbj z6kdBNv(Z)wFW3Xg+*L?@4Ww@y7A%NUs%Ay*Cx-g3Ru9_P+m5~tTeLh?z|LlokfZkr z$wkWSXyQRvK*=^xI-W#Y6wqEl=+a(_haU>D5e+>T-znt}!jQEqFrjR{#Z)`(Gz^-2 z|7F6xb4g=m%NL$)`TzJB2D6<$uSvc0=wkF)Gd zNh+y3JGu3K72=(QJxB_hh~wYPh1kl{-h8VYHX@=9sI7WNIxYs>Vl`#WBOcJ}l|%4F z>R%cmZH@lEcO?1oWUs^OarZ6?+1P~V3t9c*(7=%5z;J>VOftGs|hkF!Fr@{{<1HTFj4 zHbfF0A}LoK5e;k%k?OrwStCn{T%OgxKMWo|Y5R7lQuQr!zu zB3WYR++y`+Wwr`i3};5Ca>S}Xt0SMKRKOice7?ha?P#AngwU2lpFrH(UC=q^A2FF} zJfySRRDwC-+$3Kg@{p$T;&)f$9VSn2- zxXJU6Ze70e&sTefpn0@ArJZMH)!6wl5k;c|PMVzI#J zqXhbcZAF_KtL2bgdp*Mw&erAHl%;|y#D5kq?ea{Xo#+s1k{#z@dEXRR6z{`zpO+(Q zBVO=pAD(qYUEunlYhkFRD|GoAhz81_u}ZmZtsC6tj{`pyS!)Xr zkT1s(y*E(O^86+YEBG2I)R%kl9lxS6GpUU+rX?J$@8(uep;Vxe=FqSbsV>uidL))M z6$J%;cdSZVA$kAYyFl_ay=nwR(BR7Gwk`n5NbGC3kyu8W#AwQRIaKn4Fmux)pl_ZY zHrCg49{lpQCrl!of4c#`*Fv^lnX{YC_;dcVu-}vv=#Tv_)%jUTnN$So0idX^GnYXi zU98H`XFl#^T^_|W4jowo`eKXMp&R0Ln8Ro%z0xocdYMcoMDZDx zXXMcx;#P64yp1dCa3CPqg?CLOpm~{-%KH8llPUp^$hz*IUR^*w=`eNR&f7Wa!Qs1q zB1#CB^*z=>t?iN^?3aTGllLfr__GG-D(a5z6Liw8`@?1$k}#2k7GCd zD4`=?(I+?=C*0CN04igU|DoX;4<(6F`R$(X5E-mfIM56b5x<0uiwNlF*@67;&IczdZJluSQ1XfjPvO|9wR00(h$L7Z?dUY9G~k zrGVdZwGt*_Vxw=g*=X^2bj$G#{w+U&X^-ZqM>r6gq@bE1a^=P*!#_rI1(fRe@{c1{B6D0 
z=Y)O!eF*{`QFrS{pzAmxMw`_6&cyY)lnY%58FjkmL79Bh=j+j$0lg7FYj!x zI(gm#Lj*g;=qtodQ4f_h>v*jua)kcPn}aR#{?622^2*11XnU;YBWDtOzp?dp74GcM z!>`WId~%e&($upXbfCF8h*F&vemVa;FE7Gpp7~CYCj7I`15T<)X7y09^{kIEROl|^ zlT*}W$ZT^%7KKK_@_NtdL;su4I`Qqayn!#)>vUv}-i^tr&A>|FCmDCkf}4XHzTb+E ziF!L|81^CDa!uerJBt6>UH)r#`M1{#sP_M#{YHT=VNiTz?6&{6{W={Y1Gu+C_lHY+qssPtl^g|tL z94l;`Srz^;8F0iI2rs{e#0;>4l>Ukzmaki?=h8NJ2(m5#v#Q=YzWMqHSo9~p$A@HS z^NK9%!_f6l6*p1co)9!L0P#3p9RFKNtNXxRM5~4jDOa!FxdN$Eh(FkKVG7e#_y5Rs z>2|qDE~@b0vBoMJz;RWSUezR%hi-mD0l2cd{0+`s3p*EH#2PkthLYsB`@l-rb3QJJ zibjz~#p*mTU;j496>!0$6D#{uzhQiX`V7XQ((VEAIwKL$m>|Lba&9L6!H9mlD5w z3{tt%kS!#b{n?|Ygv88pV{nq)P2O193yESd%@!aOXwA1j>Qh2n@gKy}Jfp4Y8=CD< zwrsySo*T#KD}PC(bd0_j7!RF}5SL$N7$5Qi%eyog##5NyvXR`g8-^=Uc=d+>o36RND$qLQ>-BW$DiC=EM2B#dx z5yJ1?kyQVF067rL57Uhq&{>Xq&qXG`V#i$>hdyx-JTK<2MgGD$LFzx_hJ&_TBvijfSA(3NvxOK=Copb4SKTtnw@ywpbVVvsB+*MVy zveza%5)S~%!-aAf-p*4DoL-MvwN3_$=!4<*`0Zsyl{`_;kEVzStNK#Mn{;GKI9gDJ zb?YfYJ_UbFL+rELp{ z0vx<1=zOEtpkGycI0u}QmvjH{kVc#_J1$KC>Y?fIE;~yJeeKB7qZm2scbx8!1a!Uh zz+AB50f0fBUe|i@?B;eNsMMc7Lp?n9vgwVn zTW=i_(f;8>yC*$n>8Dc+=Sol&h#;%tWnm)f0EB0=+%QN!<+Vl;9PzE|TH84;d2SDa z&B?~lu>1P5j%q4>2x4*7{}eYrp5w-pXlvgJqPjJi3BZiH;p5UwMB=8j%&T&rbvGgs z{`}}JD~Pt30ejbiac4Co5Twn-9dm^wWC6K+lQ2lbv6?6`5y@-=ndsFUZC*(5jfMP5 zTUZ5!NFv(ooRjAEJI9Wf#Vz?}8_M0S#F#m+1Hi*SsL5b_-z$c-My9DP!`q&cG>MUD zdK9vL%OQsK8r`q6UktGZ3!eRbuqkNq{b&-K4pZ65HC^@o#+pnh1^%lPiMujCx~;($ zmGfMT+KIr3A#98=?xx(SBB zl^-md!4=07ZE31LzUxt4!v1ANLvKp}y;56$g<|eO@)mGo+K_&HNaWQzC(uI|X2Jf{ zB}p>y&+XVgZx2@#=C$J+s0By9m|YcoezO7g$d}UdGX^`Hbu#-!<$@miKS3|PJ2j|0 zfEBTv79gGC$8#4(=Vobfes?Wqdy@sQi>SA-^S!D~T1k>C2FojXRTPahK1+(}-JvGw z7)J;)P)=XT-4B+KfU{6aufO)w2~aJJ6sH>nV<&-!#4{0_KFKo?#<{+=O{&_KSE4JE z(DC%@8j{AbxusiTdQfvZd(pW1S$+NAs+c^)tR@Jp=`RPkg3mU8RDTgfg<5;G*uT8HE`fk7A$qaWvoWssi#Z zjWde5ztm+s&)(2-FbiCiGL@ZN1jP1L4gOsH*g%D7_weV@&xzy$eDOrDkeyz4c*0)e z0ATH^fnhcY$8~11=|2)8>uQ(YzxXhqg3FY#kT z&spxEU1fIv*VVI&ZRtCp)Jf0l@n3B1lc|xhqszE@5`14PS2kf(;AfwE{?!zIO7fg7 
za*IZrD8_SX9R9Fj<2U&<=STT6V_hLgs~qNF3iLI1X{9$z+DnzZiC|l)`vxc5D>ieE zU!KD4Q0l2O>in*}9#^0KDbW_ZpQA^_Qy3JSOC#H5SAIIK6EBe8H^ZBha!;E%0Q5s0 zdsoxYhRhUAlrcxbU&Y*_{y`2p)5h31q;Y;vxzyr7Hf`&cQ1w$w2yk^I^hAwmBSo5I zO)h`!i3ua)G86QOBIaWRAv3`KAB5nUSh`Tn*Wwe&L)U0oqsy31Tzl*m8|Q&cby3GY z4oUZQW7=t%p4tpT{J5FSW+}c{37UgKW!W4oD@(qo&a|UfJCZk$K&{&L zqg|bUwKpux=q)K$xjx9&pRawE5vSL7%KmS<+w}VjMe(;$BtrWq89RAZ50S?=c03Jg ztXl6M-e5fCW@DVj$;HZz(p67EwhEU&&xlUVX@pn|NwT;@Mk4<{3UFnSi_6C{#IDuy zsASNy=;<9%pPSgA4?1qu;tCRkBzgHUy2kg{@MTuu3dvOs=3hNw-x#N>my?7AE={0G zviKYFTbmc2WMdPtRkkRxs83V zYAIj&(oXrM(^Hn@3jS_7uj5DFau%};WNvx9GUmxPQJA%Eeu?|CxLK-LzOKo+tKoc3 zTG_aZXaSO8nt6e|{fEn8Ja}C>1xtNfuR(U#cc;_7FjOw=^PyTe32;f$6W!~0zkS4& zXYGQ|_#HRF>A1pSAc?tENwVZvrIL?GxnSirb(ZWsuc%i?CL|9vwUC)SB;FzAu|O4>Ov^ExPEVrn0wlTmmA)e5MiXUh>JkOBw|4hVUfr@?PfDj zII<9!PDN5>PX8dygO0|Ybp8CUL``qNs{JDG`Mk`m9E#F0vouEdDGoMspE2lZ+MS3narGbHaKi3k@Ybs>iX5*c^QrD?UgaQ!-fn-w#IV?g&AEn(ic?zPag=5{AFwh_OTb@5V9t-dn9vTH%a-JzL z?q2AX)xQuo>FDEl<icEm`)i%R{DXV7)buS(()XcJyQddHJQR6NE(1?2e3gHc z?tps>n6Fj8UCa>l{ZwX)SJC5FGh#Td#Jf#L9(C+_d#Y-V_QC>mWmZ0=F^ojXE0*p! zy}ZT<+nOrt$m5!&QTz3c+Bj^1pnQQVxM!(}yUS!_t`hC3d7Y|jgm|4tzEC*3{UAi` zy#o-pyLu(pzv}gu=iic_))-_TVd!;6l_gcQ!)Ivnv(14@Z^gyc=+SaSk9nHaMQPgR z=4Xt&si|T6-9t}#rMyxtgmWTps;x?czcQDh(uY_?N5ll3_ZmG4;c?=u_*2fa0rG1v zt66bZH%ux$qr1#mq#P4ik6lvOcE9nNo)AxEyyY-{#q6S8BF_zF{+r70hL&xwX}kR@ zKmo!0pS^&e4;-O^A&^hcD4h98V|}lkTnO!D(w7To1d@10E+q90Psn63y2FqHmzxm2QPWm-ZJ1RS${OrvKCu3g39P3&qu)*LVZ+ayM zDIPp`5&0J2{59vYABx@P?ME~+MC1!v{KJ#WE(Jx!S?ReMu9-2;qWo%f zD^YtSdtTgNMxm{-ue&sYA&dKMo~X5tkdTcE?O+oRE=n*4&o1X#;KMG!%F1&*v#Pvm zU%=1kWhGd?O9L!HubNbBU^8fx*T>X-1drQ~heVHsG!<^U==14U zxY?r$J)zlJ_4*($QaK;7+S}^=$=xrKhJQhlsLFo0LS38Wxkp9N?u=T9ik4KLxOco! 
zYopE^i-&4$%bFREm{%;RC}AcO{npu^Trt(H5sP)#tj4tGf_4Rd-~3h13)socAN{~f zR9wc9e7Pl_bQ7+~R%j3+h2vHq9MhXi^(}vun>QV;>-a6zFYStMyeBl&8zqf*Zz4A5 zJ;&9RV)uGi(z~m}yzpG99Z$0Rp*XdcK3*_ozsMci9EWmhw!Ya&7wa_c-ZO?eN)Hy8z?wG_DfP2#D^y* zfbsEDR9Af$*EMcB-cL^Be6mDU_k+-ns55l$Pl~ka; zK)TboQgsttxq|Uqj#m_^i)ju$)F=fBGMM%k zWqq3K2g2o30)1-oKroc|q}DN_HSVOAngnX;c_0VjoGC+Jwi71Nj*Uaw`DsT9;V9AB zz^wPt31)}!{=9Z+a)o0z+9IUq%U|XC>>xQjc{XE0lu;EOAH0{O&wA&CnHNaGqj+?6 z+!jC<(0QrIl$4z|gz1o~Fd$&#Xo>8so4>X>gcs`hl36Ot%hcJbAK7@fnU50<|0l!c zuJ^3RKc%xkB<vdH@?X>VkAAV=TE=Dm|}MMZDE;~k1N7W__Vx13{O%KTK27bzIE%zs3`S22?-=>x03XjzoNX7eHU9spvVr=%<#J4 zpa*?W@K9X`&4s`jD1fSUI;)NblRItlIL;&8>txlzU(`3`BII%6%dwO1v*gV!x+~RQ z$2}Haa?X{V>}znGoNBe5>`vf4f{%~n>4xz0c20yB|8bx^@GE5cnSrQ$Qpm0poatmA zh-0j{Nc2qODSnI)7S)FZ+auTD^r=p%R5_>tB4n*l7iDpXVn(7^{L=Q3D{BnAV{3Qbk+!uLQg<6p16- zkL%{^tm)iX0RyAMysITj;EJS*rH8W)lk)MxR8gw@sc9KAp?THh_G~UZHiRp?Uv-#Q zvIs8>Z?upL4x_dmH8N}4FHY=yZpK*{pCnKy8u8#}Qd};G88%z48k)T3KFfWKFdwjh(PrM$ zl}fycAx7QCU9gFMe^1iO*07cTfYT4co%0vr{tZVJUR&1OM$*j}y6p*#*>YKD+c)k2 z?%TT0&SurdJv?87t1qI!8NB%f*b#PRzN|6C7scF-NYNnz>Eq1_YFWu2DC?GLX|EO6 zR^>FsCn)B2#G1NxImk_LUwh+4{d&&=6){s7-|k)fs*DHQncvl)bo~>vDZ?tjSdQ1I zy4Frj<<#=W*wM+lC{7+EbhE{j1UVYjw=BcfaW`V?H_efEKkaCeF<0F>Un`R0T zuXk5(6#8uAKB!IXhF>J?B6ifSqs9}w1?t1MR?g+T6>D0eVy!heeTIo~rJLr$`U^P( z0Xe;>r7j#41}^(ulPoe2-4wC;0BZd z=zlU8?`1gJwj?2%T%Qkel^YY8&a~trX_rqWh%TiGwH$qMfV5aNlYV1@Idze0`SZp| zs;FfK1V1U8GzxrbW4dCK5E7C`>~^r|Zk!W^2-sBZ^&W8VGV)Z)X1bSlay=$HPQnoP zkz(>;1Qu8?+sMV-AOWgjdfrnRnWj1)Vf5w1Bpcex_3W3g?Y}@>YgWYMJZgP4SC7Vh z!(@GpBl5_X8%=|obNG@Dt-*jC_5Jcva5}`@zSUq;9@St2qbopIuN0VO!q7+zy>eVi zNh#tcAdA;BWe16UQ1j35;*D%GCgo8)ZB-Bs{z({DLZ~ash4^_XC0_a0yLj>Nw~pPo zZmCCS81H|68YC&2I@BNw@>-*T*RuHR7&F4-uNHhCq(caXi9B8j7Jve{G|m+9XtFOp zI4s-p)3^>fcQC6oXUKUvX=Xrf)f0kqM|9lBznOYbORFLD=24blpf{ff+^kTH_reJ}WLn%3ftsyb_-O3?gzv1^`t* zt)B;o!Z+6Hm$~Wo*6;oz;ottEcf*f=;;!E%-wxR%iIlNo16whHT7!=gd3xrl5{Hkz z%(izGsMczh)16aHGq3IY*fl?zFoozbco|icv?$FIDHM9Ty_ay8D&xE#OOdk-B4kh> 
zDt|}bOw+yz%BeDM*KuEUxr{DO)aTIvvb})uuk~r>Kg3OF38b-l>BtXxMFACpBbX`u#u5G-Zuk$a-~WB-lH##B z-UyY6cm_fLo4BDU3R#&jl>i9>RG^JBqogEVx%Gzo*SU4;XLgjWhcH+b85ZDwT|WFS zp~j$_Mcn#g)s|xh$iEgkt58!04#h0fYH#lh@>d)%k=}+)U)5^cV4zLb>fN;te%R=Q zJCEXjE^rWMT!l4VZKm1EQd7omI}aL^^HwB1w>a9?D-MGdycC z|98Iyu1OipyWf$Y3NBP67y`Ex!HE=;;JiG)`4UMCMHE9fA+Bo%NQF7(PPjgyCVY{g zofux5`r@8qC3N?eAno65whQcW-qb&$s^Zkp)zBUwv9^wo*yIY4;eUMcTd>Gg&NT{T zLF}O;P{?P|=}bXS;Zu2G()O=?43R^y4MScWPCxQ-=COoRN}9~G z3}u#0=5~cj;WSX0lTtRenQe?v=EyeFCKR9>>lN+ z>E(XJ{%Bkq?Ux|M5`ACkw>RkOCHDe~UoDd+=@eHH$U>WP4J)&@Ps#|u7}>d+gi2Z)_P!*4ZcVG z`$p_Pj>o)P?-dE*5GpPHydatZ;)qtFcHzeE`UfmdD}P=Q;@v~>u$kBQ=il=XXX%0j z#-cbAui?aQsMKr#^^qP4>pW~Z@YdlbrIhVbVUv`e-+%fyk_|tgY ziL9|;tvrrgq*&;$`>HKDCV9^0Cyh7*CzU#EI^CmI-icupJI9s#S;-eEe%JaIfAAE} zeQbB6zC%XG*Bs%o-GuWEsbg5R+;U4yE>wL*)uazpuT7x}ACy9pW7BMrhrEG=z7z0; z36w8hv?0ZmK=wa+=RwTa!jQO^v3dIZ?K^$b0aYu9S>t)izW3O%W^o({@@t>cHPBtQ zjUAFC+2=IGNZ=gi^>c)f7kwWXS$|R7X86sPeUf7!dvZJY5t0GLeCM8dCw+uLYs5zz z!_b{ApDc4ytY-JKbkIlm@wYj33LzUXOA4LDl{4o21~o6fnfugLZrWUj?&%=Sf{>UC zL_(7j<-9|r6%qDlF1^k8e^lKAr3zFEABI?_eh@xOmGboN&My2+v8rbFkht8| zPV06##A1~{E7-7DD`jugj0LHQIP52=qXwyFk1D11A)m)k@}dcs?m)`yCrG*}L$f7Q zBxQG@t}0}mq49gon%c%~kkVMwEl{#VHa;)MfygO>TKUVBPNjg(Mz>s4KJEItJjC>S z`6AIV%S10I4Y8i2riJ9u&wI3)2etJhHFfyHq|h=)?B}yEKdqPzH0PW0KcR?DgLZ~> z)15y${h*T%xM>?K5${N)i1IXZ6^SAAnGxPI$1%g8Gsp2}$Fy2*ic?opDmQmwiK!c(K|$`JA{^iZ~mX9jMUdk86BZGx5s{^7#K}O zW{SpsR9^2~g>b;J`IG=+PAGXJouEp0s5)ug2}C5kth)j()h5fX&1<$xPF*ty)-3mU z7wbx1DvY7j=`T08r{#ApN!B5I0HF%(c$BpG36U#g6EPZ^?($*vsCFb)nIKzjg;HHf zeL^iAnc^VRa{}Csow$02)|3(+Wux4&Q+jS9NDyB|lgakf)}X3{^X)m=_p5C?Q=tOJ zRbydXmQBuL&f;==!A?HGdkUfb{zKsjtXEE0X>X(2CCbG>ZJg%Z+Q?`9M0=`OyIjp! 
z)mXGpQel_&u^sQXG^jBZR`44=Fgwn!;iBtuxn+A<3+rR1BP@~^>m7~!hQCS_BzY}9 zADMwzZtuOJR1Bv2E#|EIZ3$)Zj5b}Z8>Ki`yu$X)l#D8y;2&p-3IC;{#hS%y#`$mF ze0*wU+`$_dYn+nI%b{57x7^v2zGFMaQa|YgY&M$P@y=PJ&YjE)He$BixJW{=Gx#X*Q;Qn<YKA zy;-@&V3{AM5z2ELPo&OH(bg-nsGyOy!?ulH&EAW3w5wvGpuVijx0`Xk-#N{7wqNyX z%dd>yAe1@h_8}R^iNm){mvC|S_EJ#uX}hEIoZDIh=56(>%nyy#`0*;H^$#ijbS)O~ z`ILv7R2cZaol!5MAzU8mDK#Jbqeitf_V>(aN@xVmE(>x+E&1P}PMki5($ow#+=0~ zb(rZL)R-blZ%W1wIF+MZ^r7W9+9ML?#jqhU!8|p`QGT>F#q|@x(LY+T8t|-~CZ7No zJ<(Tx{=8q=q?}jL+!%2|en_V4MHNA|dVrk|D^hag?8@vS|J{KGCzJIlQOtd!fBh2iw@mGZ4pop-wD4yNjJTk&B-?m5H3r_XjLuzX3TOQS|^ zTDju5T~TgHeMC_}$!lf!JbB)t7)$t)IDU#m$})mvF~&D*l(?WIfYDiAkSvBF!hf4s zu3;)k-lJk>yaN?;mptk)RZ_4JGCsM&KW5rRgdC zXi}@%$|J~P%HU%RtS(kA7J>HmKSA;Oix`B+>u?*QD5){Y{q+~X*&;8b>V44V1A30o zWh++dTUN_)*!qb6X%`eOC@nO+KjczXAbiZ_GkgMOuqEg7>k{wnI& z|71~dQK_)LH62;gZ-v8H_<~3-R;Gh9UZF2tXF~{*=86OV&_R9ZU~Op^)N`%gyYdXT z@O((%m=8B>AC)*A$`|pj*SkJt_zz;E(=1JW(Qm2n2&tdc8^c4M0T_?3T4qS8rD$r* zHR-HEMhX;v7>kewvAiEElr)rHrTKPdaCKH$?$CxOpa?E{`1uo|i}D9r=6{SP^TUvH zd&Jz^SS1Hwm59G{HKb%rN>WKTll2nu!h}?|uqLMs)l7*Y9os@taeQDXe>!B*GNJXC z*K|80AGW?yrjd4I6>Bl`VaNCV<#=lo#U=K?Xrf!vUtZ%*Nt@GbLgqS-5^b^hT2uZ@ zFU8chd}dtvek(TE=@MeO$oQ=q^CS$cHmf2}s0;E~mrvjZeXPOgbBn<;Z>TZ4newVy zToE?E?U%*i)qzD~d@LoNBvxI>zqT^9H7F?YsH1I#hbd6|v`Zh@v(@D!L`hiOv``WVJL=g=uvHN6L*VfIH;sT9-i{On+Y%O3$Y)&J`^-1U*JSEnHUW z2q(@xUHG|o8`iD5x)Ycsu+Tdvilx80I?S+uCe*?AXyrNs*G0f^TZF zNY+Vl&0ZEu3O%FX_m^`8PK|^BcuNxyZhT7Y50-RSKN0Ny&R1crzf;dc zTrL-}OXTO{Zo*PwegEy;+A!as3@PzW^(}VGKT?3}_h;}6Cpk$EUy-Qa&E~y@f~d!K zc*9~bCT)k^@|@#<+}few$vj)-;-p7>9-9JWR!^^#dqp*nTxzz%xS-A7q}XXxg6x89 zbl?COLOYA6L;J|9_v=Zsl5~IfG*z=Azl67?2A#2KU82)lDe>t-7dNaMpY~4ZI8<FfC$rE=X^X1&)x5`c}eZb|fE z57vJlYLq5F`XLcU6=%L5t2L-$x858WbRl@pz^8bKN~|o*FijW(xAIeF(e15e-N%7r z)C%4s6iV+)o;|b?4Wr&`P91o;Ulw8%*4|SC@6qSsoK41d{`}M47_~7_QATL zyP*;e@5xk<`?-VssH*@nfO`K9;Qt!~n8qF5lC0bL_EItiMEdccKR+Irda=HQwjfLB z)?a@vp}6ra#PD!7Bq`Va(+f2zv(inGO1?PMSchbdWjL`oJ16J49}YzfC(e8wSXlrg zhRE`s-TCM8PMRP4s=mh;w9+N&WyhEJe0t3*LOyjUU5cIoC|`pVj41s7IJ0}hx! 
z_r~*5ISKz*bF(`8nOZtp?v+F2&@I>tau@$$N#}3JWRIuk1O{%+T?Q?tF~BvXQqFaQ zMmg}8O-FGb%>Od54j;?(Fd#rQL5FK0e&E<7DgNV93BFsD-{h|}0!c%mMm9{LlC{qNU7gkDdCgneDb_gl9D71G%RYDZk;#TR36{Ds}Ei?bH2YZkYBugvS6pa(738km2Un zowTBbaTgrfzC(V20|cWb1Il4AF7NrO1VoE5Y&re4hDZp`(!McH$S(Oj^EW?~BOEH2 z8EtO{1p~8A`^*P*P|6HZ=q*!hQOPVrgy^h=?A|;?loOQZhWI_wCKUn6wM31SZ;Ye-3KKm;VDv&x6F)98=+NEh{e0?FAqpNlTEJ!Cr zQ*4oxVEKd6bv-&mX(SA)cJ-h(wj&><5V!qu%&g$qkA-_RVf#ADF#Yr^fL}( z>ziyF+1HLN7;QOSz6dl45%8nHhm9a4%glgFuN$XfuY2sVJ#U7H?5@;z7EXm~&hj7` zR@V7!DfA(QwJ_@o{W2Qw>|X;XFXHXTfnL&<;+bOUpuD`0N&7h4{%#H=oZ*o~XbR_m z_Ku%jstk&spDd;U@2iXI1C6s}vi`d?d=vc^yB1=x!CbkM0nZ7EjBEiugv?>2GTsn$ z%rh_0W0q$dw_6y|CXeOroJ-TmgY6L6?n-%eCej*&6bx)~+`9C-AS^!()q3*X38O#7 zMtSAjlFmFo`bO+g*Gz=ssyX#!{NQS~c(&8YLN|ot>yo+u_gJ=$+7h29vaS@w+8x`u z*TV|n6gN}4vg`4>yl0ZKu6?Q(2epklkTNm(U>rJAvZGk^?AVZ;T-)bYd>|J)Qptp(|boc}++rwL5ai zwk^(hH(>&%7rZI)p1%tE70KSqij|;3cTwRh@bdKK7kRPayB}UX5E|Y%3q2i!Eq1gn zaN6$T{4Tt(u&^Ur)=i_7VT^twVH?k*CBASW{XV5vN%L2xbHf)J?fb=sF8uI{Y#p~rT$nNMjn5i~7wPOuxbF0*c2cn$eWCbSSy_4f0!f26 z9H}c|9PN}WH8=DkXJ~hKs_R%9_ADjdBw%GWRTLo*^_tVc`X4OBC9{-9eA;&IARdu^ zLaVXPUp{c^7D&GI9U8HbhG~0rR_Tg?j;ciJBPm`zQNx_tZo#OO(TOLYbw~u){S~RS z4Zmyx(b@Q0)4a+*TLTH$*e7ERS9t4bW+o<77c6)U=_cYcoH)1QT^gFR&10!C`^#1~ zp0*R>WU5E~>13*mQ`n_^DuiUTs^>xI3?+)B{YM{C5Fz)e7I;c|QZt{bm>@>HZ6>qb zIlZw3lZV zKV|S6$f;?D&F(@vrAUo=wC~NzeKv_54bG?G%-EIg-32UdhOJK>mLm@o*v_hCX183x z`?*b(C@&V*N#bVxBZLIA-l(HXal1zpc9PvvSQzMP>aT~fZ2IZ@kCi>E^*gykIZ}_8 z_h;tq?5+UQ1FAC$vtMqFVKVEl?%ZeBejh2#T30)bCc&*|EGZ$x6*c$eMfi^G5uX~1 zvIO?|L#3K76FVKdW`jn8S6gbvZPNj23`viw&tUF-%U7?d45hl!uAMvhm^xJ61NBIA_tFO7OsdN1DV7uL===oY~EQeswH_qDeJ%wI3;LW z?bv=Iik)*wU%)N-GCr)8x|m(TW|$Xdw0ZXXKn*Rg$!E{#0w81>Po^+PYsDz!H%dhn z6BoCuMf9}{v{%WDXSaGf4i_AN>*{)Lfj`w^#K6wY!Du7G-hVk z6U35^IXC&fAkkC;+It1;?5jk1mZ#vIeNcijd5-!F{@rj>7`8q)-=9|$v(rznX-2}w zuT8`#x3l3JeM68$aRTv@pqZ|AT6T7@0}X-og`MVeMedxlICbAC&A)HAvKvMy6o!be z9;DfO!v;#d(vH=+mw$LCf{i(97q7V7qx;NqKeoOrtZ~opuEiNX4>Sp~PwheLOAJ<6 zH_Ki-ea3(SKiVxs=PL{WuMK)Ep}g}_Q<)!Rb>@H@+MC5jf)D#Lf;w9#KHgoBE04cF 
z49;}3b=~tzwu7`Tye>ZnS-u^PYth)15NYjeS4RYhhE=N$YGI}Dr?l4@yf`uMd_g%x zaau2zr5eavHsA(tKO5O|4SecpNQr!=wCRma5K*1D%=oq&ahhL6(`Q}$fVk)32c{0P zx+cdyuZzhF^Z-xN8+bx~U%@v*&1f4NHKuw=kqy!%#6$h?PadkV*;siU9J2(?xd40F7C^>CMl=3U!ei~%2%Fd|V#hT6`_(+1YJzT9WhfgImUKOh3fB=oL4F;FSb+u6|X8@f7&mq zQ>kA<;;i^1+03ZHZ@#9Y;tkg(6leS<=H>nO@rDkD=#%Ey)SV5nX2)D|bBo3t_^<2c z?p3+5X)^^S^;dqe{xa`-7L}pAv1iYrE`9`B>G_M^4VYb$$#A162wnu17{;?p3;vdo zZ}p+?tQ`E<`drz={Hs4#3&2zmlS}358f-WIOqLb`{No`VtB;}2(?E=@<1kk1hs5)h zHzTd7)KgFpqY4tFRv(Ld3_oV&&|sPu?jIm`kdZGe6XtAiJn6yeV}}yq$CMNb;!)*< z?Q314{wf5qz=H5Q?M%w56F_b;flVS15 zQEBO*bjzyn;ZK2!`Z}pbyE-~M%ilG>y4}3w*ZF*A=YMNxvh7;h?NF1&$iBa8GkC7s z2xcbt-;w2gy4$@2GyU)|1rZ%7_ghCCVdHhlz2I5hhi1`?Nwpb{wzkx8V+-Fp{F-Gs=%(!# ztqmf^-vtuT_$)^hX^axzvRi56-ASUG(I{V%X7;LmQrS+`_Z*+Al(A1?_UCu(>;P+g zrro9Ov94phZ(v&6+$=Po7Ps8S_KHMe-0MbqblLP~*ruX4reqPBlGnP2_pjL>jvH`7 zZTd@V&)P5e!rp)dYOXvKy!MotGxYgg{w}rE@E@8?0o?>}~jtv4e>MTsweL+scvwSlnE6bp+}wboh|D}z@tZS|1fFtn?3 zPe-hobVd%}&1lllj0)98J6i?CuA)h1v{E5!FS}8yLTL@ezp&;g)wRaKAge?3_n*hJ ztTm60-^r-Q5S=KS$63v_Cbj8d zAY|u;1lGTM>8y&`m@ay~9{_H`i0677KDai(NqYdLL|d@0jol`+9228n>&XNF$jOh1 ziNPn9Mn(NL;u`Rx5aV+@*PbekgVDV8N@9I@Zd2B@ua76T9=B$Svxit7}IB0|OhI zn)W7*zy5S2kG#)m7VENLYup+Z+ZdYrFrd4nHLk1pLpuauu$u<0;kABQ!T9h%$Pf3^Da#imCeJ7Ie&jf$46r+#-$(xD1L)is4~?Nw16FW)OkY*)PPc)okh;e$%bz;; z=1*We&!SOh*nTtjp97#tk&c&tt-d|(G=-Dw%$v^D>fY9Gva@q?V&LZHu4solef*m@S8nh}laJ>6fnbZy7@I|ZvpKr4 zv7=yPZ|@!+9i^KpfjR`DfZiM+oVucl;91U~n9I)o3!hue3Ze6G_hV$I8*L7T^7Yg| z`08F^-TK*AFUdd@tgOLrI&<=ybcw7H=+`DlWqS4$dJ=BZiX4jhdQUKw8YwWz7h5g1 zy0OgmQBp#s*MIO?c`kccm?_%fG0vS1xdC}}RUrJHAaL%_fCh1|6MnSx%Ck2j(+EX(;Q!0nu4;kvsu}%I5?asc`8W0S(G1!UE9y9_xjha zGaFXa27P`jev0>qXyN4XSrGTQh!HIUgem|c5Y8&T*C7YL9AG`Y!uQz3#QpX#it!%Q zT&*dHM+Maxbn+lF7Q74pW3Yh%DB~j2GsWi=mVXV#GNh-%jh`{C;N4bEy1Z9Ct;rl z?oBc*Pco>@b`HL8_3-K28A}Zy*+rM6gam#g&@4BHRIY5T9S~RA5&NswrVPMHliPJq zHYvmu;t~}tCZT%9HujlNR20Mt5mn;tu^^%z?ETZE-0Ro(0fL?Byulr`SIQXg@)U15 zC>JwX+;AX&lDazbNkV^q(sm3|;l&xio@qN@(EJSq#h>IVPf*s@kXiiM>^(|c++Tlx zsb{8`y=S&kIuHn>%7qZCD>^Gs>_4Q+OT#so50 
z45qHUe1U6B?CQBw+6ZoBGZz*9il6$1ulhkhjs;D6xL>mjO58M#ahb|php=P-=xvFE z`hf%?*mS{SHgtJ5wC_4yj1vIbScvSi_Ogqb7}Y*arCW+D`4u_&j=@jN9{%5+`Zgp& zTV57G8(c0widDTM@pJ;BQDOi+Sy;O&Fs69|IzZ#{=vdCY*tnz!euPGwK9|dz^dT!ZjQscIeGNcdxE%_mtZ%u z6)b_8VKz0WL~$@zVP)zch7eq>Qiwg~E_6?GVL+?*2ge@C3dv09*U_)!50M9&ctONO z9$>0vpk-znwR1Vig#I!J=9z*SLEP$pAu zD?VYd>B9)lXEk17#r{DS>sRIes2sB;S752?m!^{`%eO_ZEAC^E)ASFYoZR|2e$;*MCHyL z0`r2Y`mlyc6>$rnG67&x(yI4Z+KS= zyuOWW{O-*TbOrsR!+De%K4^0zWgNb(TJo*`G`rr6lJwSZkY}?4QX-6I2Sok#g*4C0 zzLvw9r{XZ`1z z$0II0H#a`_ef4^K|2~{}b?tQj)9cWlL8FWk<{DKQgztw!DZM_mpyVyF@*a+}P`JjF zs~D>O-up5Gm6t_}bxq9jaOp)e4EwNt0HvrJ;rT_Xn6~t{IJCgf4g$(48ovE*tYFQ} z_s)wzR1&eig&3%c-EB{teNku>W(sU7t;=vIK_(Q)KV$?USrqKK@Sf%_W*47W4!Z7v0>2U+<1+t|8SpZ zj6qL-RML-v{SM`!S%LD@nARKpjO(ibEyoUvLC)%A*g#E$M?-z8ACu)oOr>OM|J)~+ zD(O%D+`yeFgClt%nJ@ai@J~{_tgla$$`$VuPnG4qMdaAK{Pm!+0KPF~ z^9+oIVq&~Dgj4isXMw#_$H z%-*cvP^rpJHia5&zgM;e1hh74FkO1`I%@Kl{Q8 z=!XIjL6AFnAn0H#E>+l{|E^jDq5`X}fZT^68vuobE zhz|^r@288I6|a)1k*hzEO4;0E*}p_Z%f|A~iR%Z{+7P*juMBBb`1SQs|M<9YeQAi~ z`%Q9=>_*PV+MtFKF0_hais~+6xMH)@O6I#K0>K*PROPo2-1cK*q@7G$y02UOYzL;F z_x-!^UPeFUQ&YOz+BlFiB?u0`j4IjUHFt|YOx27#);?EQicVr_&VPB^%xuvjsl9n; zrDTAwM^~O6^Vj;`Vvg|QCZ0;E#a|PbTmng`8DV;dv1peMN%q!yp)>`N%o`VcCS(>9 zn%2(T9c-;vmfjy!V#yqOZ zMTMl#1nQdlX3OLu_X2>4tWBe{zWUs6ZAEP{p0Kx#pcvi6DZlnpFo@W;mJtNSHDZ}k^a zUuVY=p%OTk?~ziH)27~>^*uaVvb+@U_2%!9{cKEgGh^c2o($4$XF|Nc@j}r+ zaIO-o-}g4!Y%R_fUkc*60gr2IL5dXMfl+!M*TK-3m&!GF#<~v$(PDynKC^B_TEFhqhOEPzmF!{&?^E(tpUgG6 ztseoE^gJ^U8CEZ!-bJbcl4Oz0A2!YF?-P~Z=H^AIM_Da`A9qy9c&=pmsr>NEM`cH< zxGo4~Glf>{SMYK=;ub>iGE$xce^h|Js=Xa17ZqBH18KFHkau2I*4Igr{j_^Rm6v8M zzL92%XNoMWJr#ItVqy^|7GyS1q~QurD9m>CNTfn>*AWp9=3L~6CP3jxqD!jinhOZg zrsmUsx_}qaC@Q(FMUaPxT+-BJst$P3p_wJ#slz^G+dQhZBeM13SYLB#DY$eUK;Sa>VYi$w55(t+71& zzeX}s9gOQ&BWlFAKnlc1^b{YeozLEdCyq?K8Qefu#CB68(qS~VG(m36>bBX!niNsm z0gUUhq#ql>8%B=9NUZE6j-G7b&2At*!4{v}=M?K6@+DMHeA@`Q1YUx$C8khhEo?a? z2 zd9+7Ev9dxYMmQ@cClF!}50wTc6?ANDY;5i9XbtT@@BS8(rCS#Xe2AUf-9A+1AruJvt)O7aJPMqx+!bfKWL=CN6Ap*N%M{3&sGD^&yei)? 
zQ{rrJDeK}&B;P{^*kpz~g31?ZO@(;GMHresf8Lij-twK}qKBIsOU;e7oH$8xk_UyFTQle{H{SV78c(TjcOE=9BOG7vlGz5n{!412jqQ zvs);60s)EK4U6lF!SJ#5zkG3{v}0oPT6ckA1|iT^r-_Se#~h!wDonzb3XzSDq4vT7 z_efn}9cwi+gONeY%C3WlH&jC4H(UiT4y-+eJ_h5k?TiiI#+%Lox)AC`%d(CQi6N;H z@t@s<*B;tT!4HQlkG_+~8f@~D>H28CZdppG&~^r;A_XBoPc;BNS_Y#~E*gM!V29s_ zi}@yRH!m@k+Ai-P<)=bNLEL;qc4NR)!r>i4oiE7C!_1f|?`supd125}-YRgV;BZQ2 zY$A8;e*s)waJsIbF!@!p!cq#5?N_f}HMX_sO;?#D4}!}>2Yyg|QBe^`?x578vIzQK zmj{rQnf7V#fWt+K^I+m3#TI)M0c^c&rZVyX}!ctia4E*0%$1(P9 zPPqb|fT{kfh~aegQGJkLeA3t^cAyJ2tt2@i@ZTCRs|=W73FXJ~80lJ?ap!tl*8#!f zzrtORl;ijhf&PD!a{f(S`u{Y+izi^*4PfqB(`(m$9O~*8CZ~vef}`a%3IPlLC#c2W z6`hllGZ`~ec(4#O0|_95=`RD6obsnfEYwZIz@c_tb5(=>!!_SJ&t9ct@9bj3r z=;68lOmtJZLP5+s;1>19NZ+I%)F9~*fXTeKsfig9u$iAfe~xbpPEAe4x(zo?0zZyz z@Jx(??EB@wtHF}RP(XrEG~w%%zV8E@p*>UakLU>&N!^BwkU>c)Dfix-q9R2hlft~& zVn_gYSGMeeyYO>MMxLtEPsPk1NLJZZrMNEYH9(-`xQ2!X;YX!(@eyze*#>8Ct)8G9 zJYXoE|Cv(vcrO-dvTRUkCZBQ37bq@~Oo;4Vu!e#y#~5Zf8e~sB&5U!IO!e8SK&_`buU z(dX;m8anhdzjj4zZ3hNq`B^AM;|$5=0gz-{bUhr@dJ71Uq$^w>+TgKBYGr@tL)X&( z6t2bT4cSudmmfJ-?y|X!RIX4AQ6zo7ptfVmDpc1Pbtw#yxho&83Ftl7d}OCbN6~s&5|1h5?;nRGaIq z3EuJy8s(F3VU#>q&b#D%y-L0(y|PqJWQRE9_G)kMwQMT{Advl?YhuhPv&DrU4M9Lb z#CadMK5%^^cB=S@Pin1P8YRA{0bsz8;09~*Y7I1j)oT4`CM8_s`|0{17rNwWdKaXf z6;EfUv1Z=2!;=KqRZ!h+Co{-6&&_vAIDLU-yG=f2c555&JY0# zn7!t(g~+jhMCTYv76wGiAtV0Yv#zN?mjgemp!kS|j)zf$oFDW|?bz~+MijT-hdn{J z0m&6v80%efkQqrzD4XzKp6|2u!6WhbJt4~z%S#hbH$(b1L~ioEX9L(xIC=o8W$U%0 z!oQAkJ=F%WLDfR7C@UVtJ~m;$*>YaC2E)KJS?{SoS!f7&%)VLx?A< zP;tpyhO6`PF8*Z{!XH@b5-gFVMUgEYw)+`a93hW1@5pNFB5Q zj0)E$FECYAOt~b-TrvTDp4E>E6A8O!wiq&YAV`0ks~7vKC!aP!K!Uz;~D~dr6_BPZ@O01$IjwBv`yGJ`n*7D;Ug z`6IBU{k9Y*?(G$aCIR;SY4Lo8jJ$mDPqF&9Cf#hUw?ga`s`g9I88YuH9z7EC1MFMC zJE868iTZ)!SQBFoVfbfeY&FQY5)m#MQ13r}t@-KrHsUCad4WI*;j?cpJ%Q96QL@xj zVXBZ_SS364wO9BGkw;=TFC{)9emrL5UZt*X7QF*wndA$KhqsX`M0%}q5bGB;;rgIc zM2|vo7%NT#e^|uiC&$dC*b9o_C+oG+V1isvGNMtaTrvX6F9;F*?v5^wRAs*ivEsxl zDLj6^VJ#^wdGQMRW{QuWr~a0^Ckh;L>=Gm;oG-jCmh+X?boMxMLdTB3XXmaY#l*r9 
zf(6t%2#K;Pn&3E09cSYie3KgTVqvfD5s_uijfakWPYk9YDhX$<9$5FEs)(RT*;Qj) z2XgR!yp_YIARbHM2p zG~tmA8&KNnsx_eGM-Fq`+WBD*Hsr}fPDP-3rRmubzdO^dHfPUrw6aWhyYyR}43qbt zx6qqA<;~IgxzSfE%G|fxH(1rs+h(Tt?buh2Q>l=#XnXRN7I#z_y7+BzlIvcB0q+wQsv(eAEd@`Nxq)&YR^Bn)V*Zw0W z8uBsHVPalFm1q&P#hb+0dj>s^GGPpor;Z6@H(+`UD*f82sF6e|i#uF42mJdvdk=+gdo$e&mB#Mvs&(?L zF?{R69*dyI7V7SFMaj9(By0kqviBaO9%9AmA&xt|P;!36gqM%BXdukt9jXdNIYlT&^_;j0 zy4^cTGCU>JnE9}_V=M8UKqK%i)c?V^faGNwnXRoZ-NPUu!4iR_{*=Ew>2K1Sj`w|A z^*%cZaU~yo#<#cc>N`!;yH@B=hwE?JM<#agnWDY6Lyi=3!r4h+ek`6jPYyvMYw|l- z<(k$DXUQR^%OcvM=RYOC=HlOc# z&gI*i?Rrj_89%D{jmU;Vcm}~z0uNWukwcG4_bZ!ld{g{c)!5a=RT;u<_)3Fw+D0ET zkZd8NcH7xVX07>o2fLoL|Ft2@h9YJqFOt(e4e=VUFVQKrsUiH|e9W%4UCHdwd<)TC z$C1+;vCVuifY*q3yvVUl)bTj+8eDw*o~*1az1=TkQ>(CE757->%C?o(Z@y77#8Z2>8``Hy^+&0d@Z3fM_mX_R|db)PA8}E)Q2EUfn zBHBWBR`#>pu`}qM`|xn`OLI+mUxe=M`qWGNeX6tlU&6GSH{TUDpVQe{=2u{!(IJ`a zZK`k9ZTQ|}Vl`vxmRtPh%DI_ROg-V6akwG6LQ?S9JvG695QatK6k;FNL&p(zutTxa+JA+r~Zb%yrfhY#!fTb`TJ`6{Mswxjy%@@90lDb+?lNPrro_V&D4cc$VXMI5*Fp7HwF zQI4DdRO8@-^B}ZH4tbSAvscLdLnZgj(K7$^3raavI8Lo`O#j00*A@6g+pVG|+{{^gsouQDEs+5?9e36_R-!|2M;a2y>Or_mTd#XFpZj7z6vq1|6<&c%#z4IF5mzR{mUBu zms|b+5CfeDJ-#sGzwBXq^(qbEi51Zw%vt)*mgXX<8Y^BYE4|()+*d^ur@zG_{R+` zkrpE_FJBH(ZQefR%ey%_RW&s=AyU1g3?heZPP~0j*-ftRSr|U+PXj7JCT+F}54Ii^ zF_8)J24cuPZ>=bqBd0;pE6>zl0+Uwwy&4Ntg}~|QX#_$6ZnjRB_oFjX!+s|%O9JG{ zoYW*i@TUw4U{3xAcjG7;1<*(yh{pv-aN#LpSFk+bBBlYL69{44KCW z*@MUfH-dRKi5Czy>$j-<$zD7efORI6jrS*e4_ge}1}#kf+NVbn^yn`mJToLQpS~h& zv3F^#_ny_6!-I3=G^5YC|4B_@NMrog^~+vw@_oiAmI~jVed5@LOA>e*k@TFkDCZCY zP@`8=yRX3^r~TD7ZMu|D*>6uRdZ7|56DS7 zbEZoJrez`pqvv-SEZj$X3_MHC|Mnc)A0j_3GfuneRcMzW*vLeiqw7=D7paO~yEPc0 z{TMAK)9&ugwKE$sVxFmVZ^NJ4D?((7nA1Z7I1tU{f$M?SCP>v)B|zG;==3EDL;?A> z|7?&ywJ0bf-pS0coaM67nCS@@He{oh8;a{oyz^wg8r+UL6{W`KKZW|S0FAcS+g7je zx{Hg60v1;)S6>@qXI9nzrKMBUZz^~^QRT3tF7FIE`6oH=;P7V^stobU!UKFB@3;PG$3HpdDTYG6o>=ngV7%25+-KdLGS5T9ORW; zBLl%du|^hPsaVR1P@OM20|rBnT`uq)$RPU6PUn^wbMZhC}YXqnvAhC_MNC6 zL{s9QcpbJptW1Sx-LAZ9Tz^pllOVUt*2vaz?nlPuXyZNh(utB$xC#88!s7Ywt){ry 
z;Md_ZG6&uJJv2Mivtiu-NFN8f3|yvVAb~tMJ1OBJT`GgZWT}NB@tzUMe&m!&RK~s! z%L@(5ZfR$_dmr+_VmF!vygajG7t)<<9X}e3AoFTS8{&Om8TE3dqyveez5DMw4gVSs zUCPpRcMI5=>bqCSVA{a{HGLBHA#4(yT$^#X{h$V~?@^!0Tf9P8K}~16 zX&_}#1p^U4d8uDH5YB`6S*{sG$MLL0vg%Yzin%a$yxe*fOgvPyfeNJN>hy3MCznmN zDu`fB1SJn{-2C{&v=QQf|4czIO0>ZRh!R0?MCMmzR_$s#^1%;?b-{~k{D*rH0`)P9 z@{B&z-RIbXo^1t4c03)B0H-^xV~W!dl09sa z$5BOtNw`VAg5RN#1=;M7?wmHU+F7FVa_IP2T0=~XSgOEiL4Re%Lq3nC=?d)6C(~j% z!wvq*_ux<~aB{J=o46mr@uZH9c+TA8!p4n@e=o3{*7=4HNMO)~77&gC5>USm#fi%i zB-X^2)H2@hk{<>{7oe2}_E`4*d?n3V^*<;Q=^$%jzO?Ym91cu@V%>(A5cyYH(YuwF zrb2^+bJ|{T%m;}2?73K3*-t@CB69#KU%+UiQXC7dh}#Kes>`_9g3s(uzK0jSrn6s- zmU=r6H|774IF1!ET^ksFikG4qh#$ z#^`RblwL81dfWb)TaZZ%E0rq2xGYD2dFtK*`?QpiEZSke^R zgzg?h-0{itnyv9)69!wvSF{f+*NINd_1e}tEUvSZ zI2}S7$ci^Wvfcu9 zp>-CZEeZd&KI@tTX5ZD!m`WsGA_1F;N>%uq^{kMM6w$IKaSV3Xw~c=DYQtqMZ@Eg8 z>dHgnhn2F*Zv!nHgN04aYJkl(ABy&eDGBx8O1nnxi%UEA@u*t}-e~{U2xCUflZ!^eGAAsFaAhW0`y7)!*q9XnAby zh`%8^dS|K|aD9lVtVROKYZ^3Fng(I*n0Ju>sR0l6W}&Y_YhMl`a=85E@AgZt>CEVa zVIr$k$|9=5m(>+}=u-1%jZ7SxE{eWhL+5wINRl9`v5^$0rg_)HM7)6qB_dA4@l~Z% z-CMgBe*!w+wt4k%HkX^0O7f{oDnBXSst ze>-cqmgxDX>na=islPXM$qADDT30F1e@4lF$wRExeuPg3(2V2dS|Z~0e}dWby~&1- zF-d8NX2gNk)p+X5i>vAs>m#-y49+bzx|CsHqkNBVleZGP}gR~ze zg0NNq3;iX+-}_W8zVad9BNcBqQIghi2PgbwWyX2O1+)0T$;upXfk_Mbu1-VH@rE$( z@p=YoBM>f4y&QJa4CN0ru*nvVFo;o3J-(cw1kwLMQ(ydYX?frxZoFa#IbA1ub^6yL z6c5TTA%{4mnJ4#n4?5hecz-*@3{cP;CY+sytQ0gSS=A6lC~+2yxeB4o7s8yWcK(F` z%|iTbMt$4Vr!_YB*?QKiJ8%^~uQMCy&G1Mij$IdFLXF!0n>k40*ovSsP~HB$Y4{1v zb9h_ABd`3_=|2HJL0EyUX$ceA>1W+<|M+)Kc&(cWiN&C1NoL}Tp_p2|a6+|DMxF-r*Vv~V0Q z_Fs%G04dUS6DeFg&#m?*_B zlRhFHRV?ANHBkk@t>bTZ1s=`>_yY^rjcN#e-%P#BLVW^~+4jjNni?Gr(6J^&$y1KZ zBK{Plirg$OLl$vwIF@LEvEs|9AVF0_ZJ6kWA%E}-gW%Fes#zXd;jvYYlip8{sniRo zt!ve(>nqC6BM3huEVwxADb64^{fTN)ei68zU6kM}7;N|wO%|hlk$!V`uTr5I&BU5u zmWMIMEfeILl!(10d&mNEvvaEHsCycK*XkaDCt3Io+(#@##Dmrdr>fXmuLbmw_f222Xw z<}NT^C>7+SLz@>%;Y2VYEh~Q(MlMpM2z2OWyGq-vVg`h2xQVidKg!{iRnvRP08hZp^Rf9O(j%3ubNQate)!jIng0IIa`Db<$FySWrE?Q@v_ 
zwW5aFb5XS%XRAmv$^B@T$>i_>%7l70TO>r*wm!QxweGBqqepLk)}9Nfu-MV$IwAEe z^fzKI6L~BG#Rt$oBKvwpJBv9knZs@1ZHmYBsTN_kdK=8*tPHKoNX;lySE0X4`kPpX zFSi_F^X8G|uby%B4|;A?{whJa4Zm4T$M?q{*-!brB; z2$$0mQxaR~Ge)&`H-qpUUI962Z>v2fg7WRrc4|b?)bvt+%l4;C3N7Np!C(E{+#Zvh z+d_K&G1JAn5AWn2-+fhLN7e2d5{=OZuVmf1^Lh8a%{%Vzl=j!}jySu8V))g*9+$Jq zw>J6Ral6v;)n({=OmZ-noV9iMms2xegml97ZVrA4tuU$BotwGn0m}t1g@~V<`}$>Q z3CDZJBkwEKW?U1@l^=nma#`yxiWtK2e#=^`kKbSATJnknBCmf8#3MHJu!V=)c0ABI zVmm(y+JDiR=1{Zk45qO2W>FJac{@6&k4kGeH8|6Iyxg4a9C$&|z`6Qv!jehYKD6Fd zqPt`HwU5AU(HBeb*<+SE2HkeS5#ICy+=XJ_8geWB*{qVXx? zjkgX^y_kOfY-y+DynOI$WJ%bsO z1>Kz;%jc&Nx@mM}2pW%&xJuGD$<(tqnu;`PC_}NkI!bPOgptQhB-}PTr<$fu%ymNR zmw2a~4e?G0m2n;ouc-YrWR3-f1XIK`8L;n>D-E+CJQDCYodBgo&Vr+#5BodVSQEbT zH*xmFwxpZ11yqN@2%7+HG%1cILy~8fV1hM5J5BtgWnR$hD@P7>+`gw`vivqZ+NRTA z9ZPg;M4WUzw7o7kj`wtku=7{egs>Amk*3wJO%x~P70y6P)%o?UDP$_>x>jwe`^FU~ zn+twimDqPaFHKahE)?-N^Y%lHSxFfs6{t`UuMc|-I5SO8R82z`Ch zVpE;WppcNYU0Rs(-{nvlaaOat@?}X*b`}nyjmLYcB~=p{(>1Xk?t;0|vDt*ItT@8T z^C>a3vj*PDRy7{?2fk>{mh3!=^(c4%TNRO^AfsRg0=U$J&e)mnL8sTJ9omM6N&{d$^fZ|-=2MC_J2<-6l`CU^SQPv&6d6N#_e>qb6l1^XnI3v<{NKjLpZ%7A!! zY86md6a{b!6Rud2GtVJfimLIIYvGZ|a! 
zW;QM$<#k-IZK>NW=urnzy|*R`ia6(Q1{={Jo1_z3h4=f7SNwf2Ix4_^r0Xy(y6cyk zktbWF!T0o;Hv<`yY^Pdm`~&HVX}OCh51OT}?qm+iZvkp37uK$08+BL_pGiQ2x!h9PdUK9x;EO)55W$@OO?@rD7ZulNXk(_A&G5lDcPq4GR4l`pp9fyZqm-* z{vt`=Ar!8e`1g?Jl(A>PeN}9%S2+)86>X;Q&yAE-d<Krq%lpp8(Ukh0PSJ$>M|RV_JYu=2dFl(?dr3>oljzt;yxzz^g?J=fr;Th zestzS0FR zKhkXPZ>cx2Q0$9`l`T^)@kNIV`tAt4uv`jds>J(9>u>(*_D!))lK5%ob^>yD8$JR|G)NpM%ehIC*3&S6oGF|nW+@L!QKICA0Vf`$Q z-i4p+P&l!Db<%NV0vuwBE|P5hciv^e@AP&WuXSNm2E%o1t+6s%{m>?o93-Yt4TD;P z(cU5BXX$t2^{my1?Jz%y-tW1#cEny7K+mQPG`6k%WJ?xD0-mIAP-x|rqn4d|YAqbp)fx_BAOgQ`$BwuFKjFJ} zw0cd1{|-XH)Smk%uRL-HbG8pKL$$&WL2Gp@66tVZT7Icp(So^{XL3)_>apCBkb`^a z@Cz_<134VE_0TF#*5T==Hdrp-TqY*__R!&8Z>5)sw?Cch;(Q=7TQ@<t>$zK zZ%v@`q(JSzw)-BiMr`csH~Q!sxP*^&d5NT-#--hYgr9l^5JA!CH=2;lfBPN@5qv4|fdDK)6>?O|OrQD{Ub$vayhXse7xtZs@aL&oe2^nx2m>r&k#lwID z%3bj<5dN?~z2^lM>z2bmI1Ys?vUUt>7w(|^EqK9RmT8->+#;}iR6VP;va-_5zq9FX zFZWJhm5|V0BoM1s8dxRTjlD`o6W~7Yy&!?DF`J&)U!1UVt}zUym^E05#ie2gS!r-m?to&)Vs!Kd_OtC-#- zD*&s+^b4z`0D>FT?hTd?jVMwox4y<5X zA>&bjN4$`Tbb9$$#;8VJ#R9jVu2yWd2?AfpqDA_40V|g4NIRQ2L)0;o;u> zu%>tyA1@~+z{lqfZ#a86P$4~)QF7tNsx7;S?{h!ACyvxKf3qBKc~=h53Vk^U$8|ap z5jE24&^d+#z#uL0yCnNnOmg_h>G@;mQ9O%}>G1WnwW(Vsioz53FTZrIS};+M)k^-& zrora5FvL-lM}=A|pBzO(1YS$|QbNz_4jh98)ZWEq`>l`%P=DG}TDSk-N;v z5`<4^M+!0$*$r3HuOjXsr*MAv-=9Kfi9hzGC({e>kC2*G01lSWEp<3%7e{0(nNYZ; zZab5$#&Uu<%dH(N`pqO{_t(qwHyeg*rBLXp(&@~1-RvI@kd7Y>n%T1QtR|>I2i``k zc=R+%fUxR{&iU<)moLqq1>l?%fD}1(Z$XXxI|=inWuB+egE4d?;?Awka|0(#T0ic> z#imqAGfv+2*<>}(K56cw5(xoLyae!3?5|p?JaGD!JRPsrpJi5!<;d{gPz$h0r(Qqv z0wl8>nNrbzA6eijKHshr*OOtp2s!LDP(DlDF#=*zZcey@J?+*m#X?s?h54Recf6{@ zaLRFd*uDHw#dLpig$Gi-x=DTN;Nr%!Z6? 
z`_>KV;TM1A!6oHB&Afu$NyNfHKc>LFF!gS`+>B#3?F-@As{)p(OB2pCW7!7J6q|lf=sjPp`|8 zrZ=0*ET>Q*q}+1w7vzBP3b9iZt8ANI;XL(`oQwE7zipS105&*v5mN9R`Ft9Nge68r zh$aiD*2-Yq{H;8VZn}7wPwh%5g$V~~Oc%W9Za<*(TSG6MRJ+)l6u`g=6-6stK0>Hm zRCKZtPPlt7*f8mm`M~)8jt{*M?CCD8C8Rxv4b=DOKlBNLwrJ~{Ar(; zmU>N)LPl#1RA1*)I&>6IjCU&Fw(RHXj;Ap`7?3htUPnZ5BCp-EeZReS(!0?(>MFA% z=2=FWKLSb~67W@r(<<(3j)7b{kU<@~xe<4fUrJaIB-G%Ivj>zFSzQJUFl11FDW`a7 zX~4vV3;}SfWjwTfMfsIx=xubcP0~Q#wHEIZXvsyw{VQA+?=6w_A#FmtP;yaCO@<%0_Njq2aQtU zSIQrpwJ_Z4XKYq$r+S4EW!eS?oOUFV-u|d60C^>%+8Uv8vL-_Kc};g}#T}U0s=xvy z74L#CQ>bX?ib(F=Z3MVv#u!}`)bFbeUV0_wMz(b3$Ypzd<@Ok0jW`~VuCFS)c@YF^ z8EXVmJ|F_fCGHaGKEv=%yua|aZ;8z2&2f2)vkAQ)qCNUE=z@wZl9~{98!-m48b11D zM6R!G(sv%Vk{&$>u`6kq{-h--`Xk}^uYfXaRtl=C^Vd!l!KjZv4&dryjzFH#3NP}_7k0s5lq%N`6-En1C0e#S& z`LoFeR8%;V>q|oGxa^n%8D1N(4?K1(b?DXh#3QvMv zg9GLMldL_58Buri(p?tE!Zm@uyl7pNS!`-w;6}>DQ3Zf%eFC)2u+>ZB&U?1DV#IJR88vojAV`QP*y3lkML|lBKrdJgvC5}@NswXFuQhc^+IMy zdwyJQ9dG{?Ojb~P*HaJ6H%^Kvv;xmcA-vjKqElroZvu8wQzRLIb4-c zBu9+4?}!XfPD)GdAQ?s4imRdK9k>-S?J%kT1pKj5 z>F9j$`V+fe*kEertBST`Y}0EYAx$reKgC;#0TR-@BwN;c5w`5OA%)=W(^`d$HqE@> z8evSu!VFm#r)qe11e$_8BLkF=8|KdZWLfkqN9GCS#kW?$x#ev) z%<=nJ5}5GuCK={SS2c(4%%0Z@IhA*e!Ls3cd*tn8n%k;!VVntrm#);WK_v&}IbGpO zYlnA@|9H?NGamqJ3HPVk@txqO6>nh1mSdQ>;eaW@ujVoxd>k9R! 
zUC^E5#qrZ3L*Cx#>7;x?{_G+H+ydbJm?V+3k7%RRT4ndj+dD!idaD+}_np& z{@ z$<(TY*-Up7G262K5e5s(veF@2PLaG;_$8+#i-#NaR*uUi=&|7*l4{AN#s%=y`W~QDpw~d_37<-W3 zBw~~}xEIZ%=q#`X-5soqCIM^mYkAdY4A@Fox&4U&Ssntra78Tu15VMlFleKan?A*| zEerNTm+v;%o|!ZMWLaz8L|uoJ-|Y#)jSPR%F&Vd)VPj8tH@R`+*RS8PL*GwpAN% zKT(TfH#(Q}(!k-9?HT7Xu5?yc%dAF`9F34_XoU-fh|Xu`Sw*ITY51U%y3*6orx@rc z>l-N!p~MBO4CQfu$oUq9Hv}9s!no0O?dxJj`QiQ$$u6p`POO+<7oREVAM6}M7U!l& z;}j6m!ZS=LIq=GVy+rnOyz|z6jNpDyq`gv7Rbs?EeJ*BD$DMXu2rRS8KDxNsm5iZM zh|e!aUHXkigA87)os3I|*%qtu_@J2YkGMD*9bRhLJI|hqyX6e-TIFIc*e73->lwF| zTL^(H$z}HSS<$_16w-=XdJe`@HBQHKWXZj?F@5;Etj1=tSWl{WOWUZf^A&WJIETVO zjtkYN-wj$utSH1I3Q1ByNNH0qquFs-5gj;<`u2K#b$B!c5j++g3tqh`2M^10xTXdi zeEw=?4R8j&Rsn!j1x@R{M94YcW$Qx*^pZ_L(ZZV}7tIW`Z?3OMMv)!e&I58pqXHV7 z0TYMrrV6QPv}`Lx5x)SC)g$2Y2z(dBL<~rJM}d%-!lb`V{{K>nJN{)~5s%8hL^Uj@ z_6S^Iy{=qXB4%ywUPLj!-4d4aUwd^)+UGk z5j#I+?94X5I27;hyfEv&xG)#k^UZ!i2~i#+aAmPc(ltTrY^%nc@cb;bUwY5ofPP>r z)$t>87tgmag^wEJrvj;t`06~f`tj@v3WeL9*JxshH+{x3KuPltjBBt&R%upT95*@L z2?>M8+&dYty+rOq7QUzq*qD6F4P=okV1Zk?ecls?S!n0!Op?NGuDBw{bTV!G`}lZT z)%0U_@1CI@{cIRE* zt6E}=prKz@-r$-2qC`w~1ra{5^t67qWCd!qXW z)?`HPTf%H7a!hgB*Rhg3x%BD|de$&#BvcB?bod|-m9*q`x5Q`)QWaliUc-dZ?+?K| zNBv7Z4-32??wnqih%boD!+fHmp?HPpTvagqH$-Qlf}F>agY$*apoQ59vWba_mnLnw zx-R|a?Q@|P>;yy$pMc^=oo4oWI-2rUrv+KC&ka7^SkU8+2$HhSuo1cPrwRwvl!&T^ zgYO1kRf6~m6mm#1mRf8I4cf-FXG6FL38<&YR2iDXghdse$C z2X6d7jF5|6-X%a-pQSrvYx}3tbK>xsx!rABQ|EnYWS916NjO1=-^xh@xI~XA}zvsJB z`*bhf25o&i>;M-GLWqO?r*P`Jy`aq5)czF0E-q#r2u+WzZyN&*D=CKgqG~j0v$y4cM?mRt4LfP zAUv8?Z!Rg2KGxRAfo2dlB$geVL}x-kG7YDq-?=iDz2ie(#D{;k`{Mp^eIQ?0mffSy ztZ$VYo+C!T90g~+Fox?lF{NwO);dVtX&*>$qZ|>}>5$noH#NkJfOrYOKUgby-mZRo zBv!B^gGJKA4fMlDw1t&58HcFTZJnEcehsEYI&(WM$X`f#)9nUv*<(jvm%=gd)J>dl znCxf0&m`t(>pAwY>uvoNp2|sQB%=!IN+`epB^Q@ou#@A)kI2V8jzmk!mMn1c4~D4- zOKXHmPj67OIg&(@#%SM~Lwcg;p*!6qT^b?q?610X9%feRp2kv%GeB(vvpbloB5M=D z(!(2q1a$#pM~UDp(E$f}Ef;~~c(J89(FhlD_Fe&E?NTHV0wJ~TL_f^_mt>v*ugC>b z8-Qn3j5BowC-K@UcYlHjB3U#B0|vqb=;C3Ej^oMf^g98dxpnviQq7HU%PG5WNA|CT 
zr5z1GF0<(GU^Fm0yq%&*Zw)-cZSN<5&Sf-)UUZ!aOM5Ny*A;>U?{Nc9ae{tdhL*4s zDdGn!+fDe;|FH=PxpO1k>-Sg)io*M4n5y%${2YoZ?Lq1t-+7~psO$1te!SFvBjb;r z*Kh*aqX(Go*vx#K!sQ^LZx`yuR8KYg8Q?hE5o}&vi%w2(s@VbyWpn-(O& z{c6UUKhB-8h9D|WmxVq|N#=-e|HH0Hl0HG}xq)=5#GwfUdmMx;zJSCL?DPl$3uNz; znBrWxP=2(>ixoG1)=575}$x{X*^@4){U{eJfr74|Ud>d@M6ETMo26x??L8#N) z?tW`!Jn~0>Wz7zW5?Qn3OPKPh)4BOsQRme*Ad+Uwh-CXPQrN^g{3r&=mjqWqtqQ3) zBx4~(5tk;_Oh0W!+BT%m{m4i{TA43RE{nxweCf=iR8%t=CEK?%JDzBGM)5fLlld7W zxyln@TQIP!N;NPGIxdfR7V2}86*x42yJ1q2IPv~HTM1 zWZa%CRAx?)7vqjcKnU_z z3jlEAv~-B(AvOe_!Xm_x~XXf;!isFd5Jnu;CJ9J?~~9AnilO>t|JRVlz6^b?Ld! zK1U6l29N;wIJ3r-+bp^JcqcQNge|+z)xPhH^ioURvK!4)sK0WL(XX}wI*&o6}SsHrgnrMiq;F~Gf*7W!ovJ;q`_sh zgxxbXt?~GbBec66af&Adnh}cKZDRm85TaQ^whxH8WwfAZT9p2bo{9}X{60ia!n+$0 zJ6x?o`nsQ#d2rPos5?23`87JqlT(d3P1$AU&5_UH;m?^`9Zih8F-vU_p2rcxled&w zc_KYgS2a}}t*K^YHnpCYEiZ~tO;HeWhvZuxqQMeQKSme|EYg>Avpl51{7f3zgO}E- zS3-@*_Q=uk>EF0NAg%~Z?J#)%J(DZMMCSW2OZxjc5{zFC6O&6mv&cWq&&rXSF6@;o z=n;vdo*nPm>+FalYcFSSH6BVS%97K_L2sc5%ZiG)+8Tt> zLM&Vy2VPLbBdxHyuc==f3f9`dqQoREq$l%d#|s z#-U`ya2JD3VZ@a8sq!5#jr|!tl9+B~xEHAV`6JCB63`s){?^`XQE{cw<4dprPd|9k zS<((XFp@+Z?~2-lKs?VJG!G#%noEP>Mgn_$(`irWZSF&cc?O$ivl5pR#p;cTKQ1}e z*?t9SoQV3X+Pc;W<BrPoOS&Hw7&*%5R7JKay&a4}ItuRA`}CJwqe&YJpq zc29zIKc0kjGEyBhBiK8eHdkqKiEH;ghOi!iLW}Jb2cD)-JXo?puhu+Auy55rk|4G+ zN5muymNaIsZ}2yfgpz@{YHORy^b*X8rCAqwmCU@9YY{bk;< z4{mw{w(!^O^Taj`E`WdIKZ8och0#Oq zn|T(bMlUKVq0}27u8zU1U4VZleN3~n47ID(kGJI;JPW5`DE)R^iwMHz?_qfymlPKr zO}2R(dF6pOULT;31|sQSzJLF|y0&)YBy50BtnQ={Zbbt=?&_tjzlO#6Wb~E{yHr$M z{CnVDEVDSlbaJpST_}12thfeLZ5t3;KN#BrKIp~~zpQ6V)w8-thXi%oi1JxQJrnkX za{Y~cG--gRr7G|w6Ng~wqq`iG+eYv;!0JRLqpyYOV$O+vxAt2mCI z)lgKwh|)nZJtrwYEB5|MW#clEtS<#OS0wf1TBxq;PWS`Vzx2_ZMR*D>@7rs1uq7m# zwur9xkAYhTHh^kw$AdN#LB=~0zJN13cvrcvpXm)X_#G>AfwlY4eFd(gNpBYa>WBVt zZ3TVvR9t-Y8qjLQk z-nyu*pfcaxAr23DUyn@odifeyqd4U!@@GNJy+?7%{Hj_Q(bJRghO98(9wKRFgzt5& zYcJ$2NEE1I3}ipMtsp^bXF&Xqh~sdOA5LVG{`G&X!1tC%0BVr_Y_`sFZ+PE!y?Y3c z#PKi2=hm_QHHQ)WhZ^`Gz^8d#kY>9Bd4Xn#xP~FH)1D>Z$8Y1eOPBz6 z6*) 
zf;LRhZuL-Jj??@zIlT(Ql-#Z9adK!oe3hnut*5&YKhti0Ro#o*^(M=WU9E;PvW{=O zRLBDiAr(kJur0_!rDqJ8K3KR?Idw~Fc7A>m8ea4DwRqOdk~{t4qP~C>eO)k#h;fp+ z;LYq*hcTMiIIaZaqIM!4uwi_XkRkzJmEp1imqFdEh#EWGj>RG%e}}ZH%FLFI(ix@X zo9;c;QJ(LwN3l^M$zdL;b;{9F%#)AHm?)^u2#98GGywayVvA_5L~L{bXAm90>M359 zV}`C)0`27MP!jQ2I4_a(l=k8eX+7er+p&2&rK%s3%LA7>5Z(`iqk)}E!`?j7Dl2JK zA|tPs(3V1{S^?b>r@=(1kFe&n{ek(GYp?n0f1KZYUo;xYN7GRT|6W`|)~&4{svNa$ z0f!*RZlj_XYGpo^;v*V8weUl-WTFFe2d$tMd5*ioNb44(0dqQLKp?FiF%Q!wO33~; zmT}%Dfjk5JHehpRKKo5hhY1`WF6D@49XM;R^~@?OOE_)Q1CA-9BI^4*s7C{5KO_Rh zuGZYP`P6Uczwg|4uzg)_BtfgG-8bi~yYwbHMtjZ&qL;+$RjpqPOW4{eOD1Wx_4cYh z%@Gq^w-^u9LI#7wv-+anhWJ#TVMKU8O_Iy2U2OYFYasAc{oca1d((5fFe&?8m`4r6 zg?|iI;!_TiGo2Bv(Djb>%im}{R?m5zpM_cr@>-u8 z%nd*AUwr!Iyhdm?t3sv((TF_sAC9i_`X=208QZ%@E3fBKk@PjrI_*K%E9QI&`0KbW zuk0GO^Qa72ikm1}Zb1x`D}A(58fq#NCsqDB&`R9iYJWU}ldE#iqy`)l4A!w2hK+Ki z4R2H=b@%%_NhuOC_*Q|0y3y1At8Y7q|QZz;KF`>a@c>UCC56}*WlKzi%f{1<|X(Z6zoF6 zW}VO>t`n56Q9I~klMJUTzdLK@U$fG5Cfq$Su#nmQyuOVpuw_YEfx%#ajigUGI%sBH zY(#uUrH{k2_2boOVZ8I2-P92B3QJxO z06N?ragXvtsrF>e5Vm7Sj7_7?pZGCdwYHbF%`;cpi>2DgX&3FJJt1&m`<6Nx ztys1LO)POG%Xh+txd`$xms61ZI+iLz{XbeGqH5o}xFS&tdl z)DHFn(@mlR+G+UHc2C3+tqFNua@?mNG}aUfBt3$Sv)=zmYBJ6?66|^7e=c0?*#$o? 
zNrO>ILw~U9WwTn)qHWX*Cd58op6)U3bDPa&o;QZDk{wjJ!^XCqKU_QmqrOsdQ1vm3 zrI(v)E<6joO`1Z>xud=CL50WM(u8amE#{oh&X~Z*=NvXP_pU5hCBAL_upiBHOO~_( zjEts;uAUQ-QRRVM(oem1K&U8_5em<^JQ449Lb-5XiX+e1c0gTfMRm#)u#;b%q6swj zx=)$7h|Q0@zfGp5G4}|yqpj6k=iOWd#=d!M>8&So^L)H}xyl|OJOT?pINbbgRM>xO3*X@Qi=*b@N8>r7ZXwiA##IBt|V&Y@VWdhF*{G8wMa8+4!^ z7)|;-{(^DAJ0}|h_Pt#$#k7~>GCCe=LdF?cD0C{_e}iRYH=O(Slez}$PvDUYrp~o1 z*~=IvqUmdeo#o*2XKEwo$y}$ZsnLzW(6)wG(=Cox>~DWxuQyp3$&u@kpQUx2{kPX-$M{771<=s*N#yI*HJrr^tF;OF*xhT}kn&<@K1BfCumct_|FhiR%lqhHQ<2+?Im z8r|IKG;mzoYF8cd1xS%oXQT%t$Qjy>6w&$p-RmO8F|YR-ZNfvMQ;C(QUO z4le5WA9tlh7%aZ)p-4r1lz30`6v{TbhBy)|xVqv*wd4hVmKnG`!ER6KFLMq$6@Toq zp|d7|XYl!+ZIgZUam_9_=+1nQLL3mGdtPUUZJwskSo;xLVOi$mZTuxyx#8 z!skY6lf%H|@*_v~LMl9W zWC0duI!BOLplZ&&77e0vUYf>&P+A znW#g2Sk{~SkaRtz76x3g#4IdbOh^L=e24|KQ#f$~hv+mwaPh>^H##A2X<>++>x!1h z6Bxc?BCRfc)N|C-aeSa-cy^EEAe{ux2%*&OANIL;cNj*0?#v6PA2daYFZAfN@~YCJ z*;%OQS8>?zzwc{Yzs9Qz5o)p<90@*W%?+J)tLUc~4O|`3S+eMupBeKHICTB(Wm0gW z!9)OZrdut=?XV67LR1esLT9OmLhGS0Q5N?XA_->*0it57-q|@6P_m{FWtf_!_i@q} zq)4W~qO*4uwl+bAhSg`ECjIDGzp!TB$Fk}8GGUL75v2|q%*7dqLAaq2N}@1rQSa-! 
zW6tU4VJ@B`LjQ583vWqq8=f=tozT^RT_passzmETZxH^*5lE>J9 zD{U8QukZd-Y^tD(;>vkPJ3*=?$bdqLFHRFKsXWQJn<$oSMwo@+|mTEgTo zn)TC+V`yShwQa=`M4ZDZZxZDov)WN>;e#m~O;UiKw)(5QG94vWk+k2n|}+k^Q` zmp&e5_65p2{rW|^BkmQzw*@;64v5?LPPOl4UpV%>_xq=N4i2RQ>f&i5_j%5!5+pZc zr6uRmKQiz-m`e@tca-)Fyl8TYX`CM|614uIxw&9?DP`h5A=!0;U6FZ@tckJIwR%-t z#sT3bn$8!S1t`n7l5dRO#f`l{8Oj-|XXeyLDelCyIsTwhloP=8g?}}Yp5;$J`9M3l z=|0PX^3RX~r%=%-c5$-5Byz1@sVpvJ^McxYpX+QDbzMVlmD(e(mqTE9rRk`cu7KoK z`VRdcbe!N>04u(vdU+ zA5#?>V6-Xp#6|_9F5ygtjCl{|PPd{8YN>L=9DR9A=EHyTGf8f%OCJxErA(PQOpnG% z2D%vBm>zz-SfXTeX2crF!c9@Wv>=O}PpIc%CZn*R$7-s?(xfp?yyQpq&-X=tHTrXM zv6c`T{&ehJzA$@4`|sn7=wV397T)#=e9utVi1B-X0TEg|q)pfcQjI{$h)B;dQ4o#m zSdBq&%>LQQ0s3;tTiDg)tdl-HS_En`0dGbT{vKXrk8OkB!%E?iyzqvQ+DH~d*g2T8 zWF3|-oa>f2u#hz70(Qg#b%z0JN@&a+hC~FkW@u5*cMHdMEpk`wmY?M&JKM4c!&!0* z6JB#|tHSZ6Q(VGS26663zBz0HW6k#cNqydL3%$`ibFvE(hjp48K`%pl6c}G z79rhb4}XJ>zhW%Eao)nDDGVBu7d1o?+v~z)6Z=BCh=Jaodjt4~tc%{CCINP#?e%O? z0y)bho=QdLam>ZJaBUeMyo<0=^b@0!Z;SVLaW^_FB>ddD*&oa6m^&4IN?F~9&PHrW zQF6NFo{1Y?M7X8HYnRP!=iOS(joa$XtR^Do4C#93oGTj-Pks>7%=Y!54&FS7?>#(M zRTwI;|H;Q2yxgO4#ic!#i)st|&^AXp1%JB);LC)yvm9}!#8qJhQ@4klQ}1=IwkGHM zIj^;E1yA0AmR^7VfP+v6aUJseogarVVS6haf1Wb(WQ#r5k%$-T1brPvVxYN0bX3!} z&jX=*=Pu&{kG8_2QsT6_@aMFwfJGxG={th!bVo7XLD)%2o4n2hw5E__yZxn}^)Rvk zUCy-y&}}$1-igaFwN_C*D`;zPr$xVj6NtK$XGQf;r)*C5zqt3pL1rn6-HJS1yUfSZ z-BL9)r|J9>`qnM$B1;FJv-30cb!3on@YLJOVp?acjGAzK(6Y$N{*z%v-}GG^HqQR$ zOWHHV7QTyBz3s_gqd$S7PK3!tf@U9^YzUpK$UsVG4e4GsHzaPFF<{<1 z+FyH^9wm&MWI>irXo%C!h8TbOxng>g%RiY+b~X9T&(`M|PZSgjwi&*2f;G11x7Sjh z{X0jXYV=Tu(_}+QNYY-P*elMn)!}T}9TrBgz#{46l&1}MS@=p(@S%(=G=zF{YZJ2z zM~G9G*MN_(W|J=)kM?CMb>W%Xg!%8zTuzZNPBo?Nwu8xqCGJ(ZaY<=UMO>2nP+u^U z)2VsGYkdhbr3(VmiWuC;K#sf_2-^yD=ZxsI=$O;#nZgc~FIAV7bhXY(YP6^%#Ss%R zS4U4+&Lmt!CS*dP)``j5cxOHx%%60qeZI`%Vx-RqUeww{J>Zp3t`%x{R^!huM~P@W zM?-iY`>;&IMPTTVp@c0<9@|1}@{Z*)T~JanKyF`KQit7Ddo(UTYk+JYO8IuAY3Z>j zRtsm?b4rqzGea3^XIeuV%ZQyd$mftol}@y(CSzAJ1dMnu^mBIAdPL5A?xkq8b_p94FD55{uMUL$(o1_}~NX6$_ 
z-9u9$G(8$sPzSrQ+{BsEp20T?IMJ_0=Q=Rwyk|du|7mtFSfxINK~_;`X)+guee_k+ zGAmWBhj9u=UDKA^@qW0!#$!CCvr9c(`F-s|Yp|&JeB0O%fB2O1G2e;Z7nqOu|M)y^uQSsEt+#a&iSS~KjtF?j3!u%u(p;b17qnxzPGK&Q z?CHm8-4{`3dGs|Hd6qy~?GD27>}rvsKNb-D1j)H5vqL1iRnQk8n3T+w-FOD3p1>1> zkV_s)F_5UehW{N)(|TW=Hg%K3oo-9LuKy2NV+PDy+v(a0R!wu&Z;m};^eHLSJv!*J zmvk3~mV()_GGBTAyqu1gnY+%+_`>;$y{1d_Ovq#!^T2rGE(~2m0}aY+F1Ky!k3YzC zF8z7-24=P%h5bSGv995^sQG1$mc!oMzjNJ>Q3zajK2%E8j-ko0P^^w|Yo^%`E64T@ zDY4Nf?%d3Cnz(}^YPBI@khXu=NAOV3<{UsrQr#MP$6KbB(rtKH%yo4c^7B3z`^Wi& z3bxn@T7HqaN2;munOyj;(tQoG;zv6UeR^cJ-;?tu%Z`U4EZItZT0xxW-Ojohs>%GmR(`ec9A=sf0_4r0hTvpeJNLz13d$T_~0no9A z-7GjC#xkF-={HcaeQ)pe`yVF-1Wa1LSYHnpv5Bky(K|Onz!9LcBhhJYvNmr*jFXE? z*C>*3X*AyQ#%I9iwcB!?qA{@)@M*TLpFb7z=sP`M^=jf>txgJM7@TYkYGMu_5|cOk z>e0Q_Zr`l_V9JCOSK&N$_DyBhlyv(DeOohalla02Wq+%Umkkq6rG>Fpe7<*Z`b($n zWS6I$u#!fOb(e*(#cULgO6R5B&fvCQCV|n(Y&oi6m-&gDkw=G5MN7Gi&&mw--14jG zV$SM9Q4x+LFnJ`2t!`k}ml}1{M~_FSroWDwKggK$8G&S@fB^ouc z4@q3fyK+`#w%#~hTTAN@nOC*so@$Oz&Co_!mdWx^5pBo$JWcIM-SDNzG+J8P!&`@X z_(Vk+dmINQ)ai3fa;&{(bKlt?J)EF!Fr34aL+231{;F1Vdsa&eMQA~fkFvI#J4>dt6yx%j*Xw% zXF1gN?V;QwK7v{H-XWW&#cFZ!-srD-IZ7G}FQv%DvExxOMPbtRX5ElGRy15+tw zj3*^(gc3@~F%sC{IE9+?5%2CKaWlOZ^L^EO7J4hC_n=V*Io`j+;ws43Hj+HT7e4#| zh*|1R0L5b`PAdO(`y*kr_2JH=24ttT_Huk+f~=k60Nke5mEHQCGC=N13|Hm-D>3iv zfhLVMh2K`@dA%yyTpPh_6p@2WlQ_(uxN?vM(9rVlL9So<)c;qnB)4kDE)EV3GeALZ zoj#Bn8ucKS=W*}BTFDRhZcr_J%pLU-zmT202LgE(P7_*LNI{Ao({SI$Wii!t65mbJ zP%7b2`8P|!$gSLJS^KD%&fJGU5L{=cPGpzX@B(Rbwha$ACYNFF-o0i(MdluW?Od0( z^N70Qb#UZ6rT+N&X=h5)z(s>Ry_+YU$7T&S4_wv|l~;fCo3P*m&k1pHW->CeM1%S` ze2#<71iQ{QdYTO91^ebKD-P>^{BCe(HJFO@6k~G7rltoR1hlH(UP*+yiQGo>F^#QY z+oJPj5`8%IQJ7wQW|DS$r(;d&_QMz7Mu!n?W^`;UQ}!l#8f<@V(Z-UdyGI zlAc;nCr)p4Jp2FH`~3gEH2nU(I)C%|mg4(*ulHH7mlfxim%e&%-M&gS?$@1djbEoq z0WV0M5)QqS0?{CBtm61m7P)-x26jG~0uvA0Xd%Pk#fUjK=rpO#cc$?5alK2IF3p+q zvSPx^moGE6M#&xdacBLVV{9|zYRiF}dUw?RE-T~l$Vz>EZEYd&Jo#SJA5(V6)&EYK z^Z!BFw_Gc^l$x45d9Rm8@V-8;xuWjV6HlP(SD&})`y8KbUs0)XmwQe7p*65`Sm0iA 
zuw|*TUi;cmdUJ(Xqf&X;^WSgZZaMe=Zs|{^!xr1C-s-Sk%Ql5yr~xl2fTf7S+|0c@ zc1WB)efp-LV5a)I@V_ily>8zOjsO=)&X_s#b~bJE6vr2sQx}jt a^oPCStvgrAlyWr&An`_. +- `Mistral Client / CLI Guide `_. + +Workflow definition file validation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Validate workflow definition files before registering with Mistral. + +:: + + usage: mistral workflow-validate + +:: + + $ mistral workflow-validate create_mea.yaml + + +-------+-------+ + | Field | Value | + +-------+-------+ + | Valid | True | + | Error | None | + +-------+-------+ + + $ mistral workflow-validate create_mead.yaml + + +-------+-------+ + | Field | Value | + +-------+-------+ + | Valid | True | + | Error | None | + +-------+-------+ + + $ mistral workflow-validate delete_mea.yaml + + +-------+-------+ + | Field | Value | + +-------+-------+ + | Valid | True | + | Error | None | + +-------+-------+ + + $ mistral workflow-validate delete_mead.yaml + + +-------+-------+ + | Field | Value | + +-------+-------+ + | Valid | True | + | Error | None | + +-------+-------+ + +Registering Apmec workflows with Mistral +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To create std.create_mea, std.create_mead, std.delete_mead and +std.delete_mea workflows in Mistral. 
+ +:: + + usage: mistral workflow-create --public + +:: + + $ mistral workflow-create create_mea.yaml --public + + +--------------------------------------+----------------+----------------------------------+--------+-------+----------------------------+------------+ + | ID | Name | Project ID | Tags | Input | Created at | Updated at | + +--------------------------------------+----------------+----------------------------------+--------+-------+----------------------------+------------+ + | 445e165a-3654-4996-aad4-c6fea65e95d5 | std.create_mea | bde60e557de840a8a837733aaa96e42e | | body | 2016-07-29 15:08:45.585192 | None | + +--------------------------------------+----------------+----------------------------------+--------+-------+----------------------------+------------+ + + $ mistral workflow-create create_mead.yaml --public + + +--------------------------------------+-----------------+----------------------------------+--------+-------+----------------------------+------------+ + | ID | Name | Project ID | Tags | Input | Created at | Updated at | + +--------------------------------------+-----------------+----------------------------------+--------+-------+----------------------------+------------+ + | 926caa3e-ee59-4ca0-ac1b-cae03538e389 | std.create_mead | bde60e557de840a8a837733aaa96e42e | | body | 2016-07-29 15:08:54.933874 | None | + +--------------------------------------+-----------------+----------------------------------+--------+-------+----------------------------+------------+ + + $ mistral workflow-create delete_mead.yaml --public + + +--------------------------------------+-----------------+----------------------------------+--------+---------+----------------------------+------------+ + | ID | Name | Project ID | Tags | Input | Created at | Updated at | + +--------------------------------------+-----------------+----------------------------------+--------+---------+----------------------------+------------+ + | 
f15b7402-ce31-4369-98d4-818125191564 | std.delete_mead | bde60e557de840a8a837733aaa96e42e | | mead_id | 2016-08-14 20:01:00.135104 | None | + +--------------------------------------+-----------------+----------------------------------+--------+---------+----------------------------+------------+ + + $ mistral workflow-create delete_mea.yaml --public + +--------------------------------------+----------------+----------------------------------+--------+--------+----------------------------+------------+ + | ID | Name | Project ID | Tags | Input | Created at | Updated at | + +--------------------------------------+----------------+----------------------------------+--------+--------+----------------------------+------------+ + | d6451b4e-6448-4a26-aa33-ac5e18c7a412 | std.delete_mea | bde60e557de840a8a837733aaa96e42e | | mea_id | 2016-08-14 20:01:08.088654 | None | + +--------------------------------------+----------------+----------------------------------+--------+--------+----------------------------+------------+ + + + +MEAD resource creation with std.create_mead workflow +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +To create MEAD apmec resource based on the MEAD workflow input file. + +Create new execution for MEAD creation. + +:: + + usage: mistral execution-create [] [] + +:: + + $ mistral execution-create std.create_mead create_mead.json + + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 31f086aa-a3c9-4f44-b8b2-bec560e32653 | + | Workflow ID | 926caa3e-ee59-4ca0-ac1b-cae03538e389 | + | Workflow name | std.create_mead | + | Description | | + | Task Execution ID | | + | State | RUNNING | + | State info | None | + | Created at | 2016-07-29 15:11:19.485722 | + | Updated at | 2016-07-29 15:11:19.491694 | + +-------------------+--------------------------------------+ + +Gather execution details based on execution id. 
+ +:: + + usage: mistral execution-get + +:: + + $mistral execution-get 31f086aa-a3c9-4f44-b8b2-bec560e32653 + + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 31f086aa-a3c9-4f44-b8b2-bec560e32653 | + | Workflow ID | 926caa3e-ee59-4ca0-ac1b-cae03538e389 | + | Workflow name | std.create_mead | + | Description | | + | Task Execution ID | | + | State | SUCCESS | + | State info | None | + | Created at | 2016-07-29 15:11:19 | + | Updated at | 2016-07-29 15:11:21 | + +-------------------+--------------------------------------+ + +.. note:: Wait until execution state become as SUCCESS. + +Gather MEAD ID from execution output data. + +:: + + usage: mistral execution-get-output + +:: + + $ mistral execution-get-output 31f086aa-a3c9-4f44-b8b2-bec560e32653 + + Response: + + { + "mead_id": "fb164b77-5e24-402d-b5f4-c6596352cabe" + } + +Verify MEAD details using apmec CLI +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + $ apmec mead-show "fb164b77-5e24-402d-b5f4-c6596352cabe" + + +---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | attributes | {"mead": "tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0\n\ndescription: Demo example\n\nmetadata:\n template_name: sample-tosca- | + | | mead\n\ntopology_template:\n node_templates:\n VDU1:\n type: tosca.nodes.mec.VDU.Apmec\n properties:\n image: cirros-0.3.5-x86_64-disk\n | + | | flavor: m1.tiny\n availability_zone: nova\n mgmt_driver: noop\n config: |\n param0: key1\n param1: key2\n\n CP1:\n type: | + | | tosca.nodes.mec.CP.Apmec\n properties:\n management: true\n 
anti_spoofing_protection: false\n requirements:\n - virtualLink:\n | + | | node: VL1\n - virtualBinding:\n node: VDU1\n\n CP2:\n type: tosca.nodes.mec.CP.Apmec\n properties:\n anti_spoofing_protection: | + | | false\n requirements:\n - virtualLink:\n node: VL2\n - virtualBinding:\n node: VDU1\n\n CP3:\n type: | + | | tosca.nodes.mec.CP.Apmec\n properties:\n anti_spoofing_protection: false\n requirements:\n - virtualLink:\n node: VL3\n - | + | | virtualBinding:\n node: VDU1\n\n VL1:\n type: tosca.nodes.mec.VL\n properties:\n network_name: net_mgmt\n vendor: Apmec\n\n | + | | VL2:\n type: tosca.nodes.mec.VL\n properties:\n network_name: net0\n vendor: Apmec\n\n VL3:\n type: tosca.nodes.mec.VL\n | + | | properties:\n network_name: net1\n vendor: Apmec\n"} | + | description | Demo example | + | id | fb164b77-5e24-402d-b5f4-c6596352cabe | + | infra_driver | openstack | + | mgmt_driver | noop | + | name | apmec-create-mead | + | service_types | {"service_type": "mead", "id": "db7c5077-7bbf-4bd3-87d5-e3c52daba255"} | + | tenant_id | bde60e557de840a8a837733aaa96e42e | + +---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + +MEA resource creation with std.create_mea workflow +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Update the mead_id from the output of above execution in create_mea.json + +Create new execution for MEA creation. 
+ +:: + + $ mistral execution-create std.create_mea create_mea.json + + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 3bf2051b-ac2e-433b-8f18-23f57f32f184 | + | Workflow ID | 445e165a-3654-4996-aad4-c6fea65e95d5 | + | Workflow name | std.create_mea | + | Description | | + | Task Execution ID | | + | State | RUNNING | + | State info | None | + | Created at | 2016-07-29 15:16:13.066555 | + | Updated at | 2016-07-29 15:16:13.072436 | + +-------------------+--------------------------------------+ + +Gather execution details based on execution id. + +:: + + $ mistral execution-get 3bf2051b-ac2e-433b-8f18-23f57f32f184 + + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 3bf2051b-ac2e-433b-8f18-23f57f32f184 | + | Workflow ID | 445e165a-3654-4996-aad4-c6fea65e95d5 | + | Workflow name | std.create_mea | + | Description | | + | Task Execution ID | | + | State | SUCCESS | + | State info | None | + | Created at | 2016-07-29 15:16:13 | + | Updated at | 2016-07-29 15:16:45 | + +-------------------+--------------------------------------+ + +Gather MEA ID from execution output data. 
+ +:: + + $ mistral execution-get-output 3bf2051b-ac2e-433b-8f18-23f57f32f184 + + Response: + + { + "status": "ACTIVE", + "mgmt_url": "{\"VDU1\": \"192.168.120.7\"}", + "vim_id": "22ac5ce6-1415-460c-badf-40ffc5091f94", + "mea_id": "1c349534-a539-4d5a-b854-033f98036cd5" + } + +Verify MEA details using apmec CLI +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:: + + $ apmec mea-show "1c349534-a539-4d5a-b854-033f98036cd5" + + +----------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +----------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | attributes | {"heat_template": "heat_template_version: 2013-05-23\ndescription: 'Demo example\n\n '\nparameters: {}\nresources:\n VDU1:\n type: OS::Nova::Server\n | + | | properties:\n availability_zone: nova\n config_drive: false\n flavor: m1.tiny\n image: cirros-0.3.5-x86_64-disk\n networks:\n - port:\n | + | | get_resource: CP1\n - port:\n get_resource: CP2\n - port:\n get_resource: CP3\n user_data_format: SOFTWARE_CONFIG\n CP1:\n type: | + | | OS::Neutron::Port\n properties:\n network: net_mgmt\n port_security_enabled: false\n CP2:\n type: OS::Neutron::Port\n properties:\n network: | + | | net0\n port_security_enabled: false\n CP3:\n type: OS::Neutron::Port\n properties:\n network: net1\n port_security_enabled: false\noutputs:\n | + | | mgmt_ip-VDU1:\n value:\n get_attr: [CP1, fixed_ips, 0, ip_address]\n", "monitoring_policy": "{\"vdus\": {}}"} | + | description | Demo example | + | error_reason | | + | id | 1c349534-a539-4d5a-b854-033f98036cd5 | + | instance_id | 771c53df-9f41-454c-a719-7eccd3a4eba9 | + | mgmt_url | {"VDU1": "192.168.120.7"} | + | name | apmec-create-mea | + | placement_attr | {"vim_name": "VIM0"} | + | status | ACTIVE | + | tenant_id | 
bde60e557de840a8a837733aaa96e42e | + | vim_id | 22ac5ce6-1415-460c-badf-40ffc5091f94 | + +----------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +MEA resource deletion with std.delete_mea workflow +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Update the mea_id from the output of above execution in delete_mea.json + +Create new execution for MEA deletion. + +:: + + $ mistral execution-create std.delete_mea delete_mea.json + + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 677c7bab-18ee-4a34-b1e6-a305e98ba887 | + | Workflow ID | d6451b4e-6448-4a26-aa33-ac5e18c7a412 | + | Workflow name | std.delete_mea | + | Description | | + | Task Execution ID | | + | State | RUNNING | + | State info | None | + | Created at | 2016-08-14 20:48:00.333116 | + | Updated at | 2016-08-14 20:48:00.340124 | + +-------------------+--------------------------------------+ + +Gather execution details based on execution id. + +:: + + $ mistral execution-get 677c7bab-18ee-4a34-b1e6-a305e98ba887 + + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 677c7bab-18ee-4a34-b1e6-a305e98ba887 | + | Workflow ID | d6451b4e-6448-4a26-aa33-ac5e18c7a412 | + | Workflow name | std.delete_mea | + | Description | | + | Task Execution ID | | + | State | SUCCESS | + | State info | None | + | Created at | 2016-08-14 20:48:00 | + | Updated at | 2016-08-14 20:48:03 | + +-------------------+--------------------------------------+ + + +Gather execution output data from execution id. 
+ +:: + + $ mistral execution-get-output 677c7bab-18ee-4a34-b1e6-a305e98ba887 + + Response: + + { + "openstack": { + "project_name": "demo", + "user_id": "f39a28fa574848dfa950b50329c1309b", + "roles": [ + "anotherrole", + "Member" + ], + "auth_uri": "http://192.168.122.250:5000/v3", + "auth_cacert": null, + "auth_token": "2871049fae3643ca84f44f7e17f809a0", + "is_trust_scoped": false, + "service_catalog": "[{\"endpoints\": [{\"adminURL\": \"http://192.168.122.250/identity_v2_admin\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250/identity\", \"publicURL\": \"http://192.168.122.250/identity\"}], \"type\": \"identity\", \"name\": \"keystone\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:9292\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:9292\", \"publicURL\": \"http://192.168.122.250:9292\"}], \"type\": \"image\", \"name\": \"glance\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8774/v2.1\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8774/v2.1\", \"publicURL\": \"http://192.168.122.250:8774/v2.1\"}], \"type\": \"compute\", \"name\": \"nova\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8776/v2/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8776/v2/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8776/v2/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"volumev2\", \"name\": \"cinderv2\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8776/v1/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8776/v1/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8776/v1/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"volume\", \"name\": \"cinder\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:9494\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:9494\", 
\"publicURL\": \"http://192.168.122.250:9494\"}], \"type\": \"artifact\", \"name\": \"glare\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8004/v1/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8004/v1/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8004/v1/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"orchestration\", \"name\": \"heat\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8774/v2/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8774/v2/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8774/v2/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"compute_legacy\", \"name\": \"nova_legacy\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:9896/\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:9896/\", \"publicURL\": \"http://192.168.122.250:9896/\"}], \"type\": \"mec-orchestration\", \"name\": \"apmec\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8989/v2\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8989/v2\", \"publicURL\": \"http://192.168.122.250:8989/v2\"}], \"type\": \"workflowv2\", \"name\": \"mistral\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:9696/\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:9696/\", \"publicURL\": \"http://192.168.122.250:9696/\"}], \"type\": \"network\", \"name\": \"neutron\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8776/v3/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8776/v3/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8776/v3/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"volumev3\", \"name\": \"cinderv3\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8082\", \"region\": \"RegionOne\", 
\"internalURL\": \"http://192.168.122.250:8082\", \"publicURL\": \"http://192.168.122.250:8082\"}], \"type\": \"application-catalog\", \"name\": \"murano\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8779/v1.0/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8779/v1.0/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8779/v1.0/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"database\", \"name\": \"trove\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8000/v1\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8000/v1\", \"publicURL\": \"http://192.168.122.250:8000/v1\"}], \"type\": \"cloudformation\", \"name\": \"heat-cfn\"}]", + "project_id": "bde60e557de840a8a837733aaa96e42e", + "user_name": "demo" + }, + "mea_id": "f467e215-43a3-4083-8bbb-ce49d9c70443", + "__env": {}, + "__execution": { + "input": { + "mea_id": "f467e215-43a3-4083-8bbb-ce49d9c70443" + }, + "params": {}, + "id": "677c7bab-18ee-4a34-b1e6-a305e98ba887", + "spec": { + "tasks": { + "delete_mea": { + "action": "apmec.delete_mea mea=<% $.mea_id %>", + "version": "2.0", + "type": "direct", + "description": "Request to delete a MEA.", + "name": "delete_mea" + } + }, + "description": "Delete a MEA.\n", + "version": "2.0", + "input": [ + "mea_id" + ], + "type": "direct", + "name": "std.delete_mea" + } + } + } + + +MEAD resource deletion with std.delete_mead workflow +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Update the mead_id from the output of above execution in delete_mead.json + +Create new execution for MEA deletion. 
+ +:: + + $ mistral execution-create std.delete_mead delete_mead.json + + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 1e0340c0-bee8-4ca4-8150-ac6e5eb58c99 | + | Workflow ID | f15b7402-ce31-4369-98d4-818125191564 | + | Workflow name | std.delete_mead | + | Description | | + | Task Execution ID | | + | State | RUNNING | + | State info | None | + | Created at | 2016-08-14 20:57:06.500941 | + | Updated at | 2016-08-14 20:57:06.505780 | + +-------------------+--------------------------------------+ + +Gather execution details based on execution id. + +:: + + $ mistral execution-get 1e0340c0-bee8-4ca4-8150-ac6e5eb58c99 + + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 1e0340c0-bee8-4ca4-8150-ac6e5eb58c99 | + | Workflow ID | f15b7402-ce31-4369-98d4-818125191564 | + | Workflow name | std.delete_mead | + | Description | | + | Task Execution ID | | + | State | SUCCESS | + | State info | None | + | Created at | 2016-08-14 20:57:06 | + | Updated at | 2016-08-14 20:57:07 | + +-------------------+--------------------------------------+ + + + +Gather execution output data from execution id. 
+ +:: + + $ mistral execution-get-output 1e0340c0-bee8-4ca4-8150-ac6e5eb58c99 + + Response: + + { + "openstack": { + "project_name": "demo", + "user_id": "f39a28fa574848dfa950b50329c1309b", + "roles": [ + "anotherrole", + "Member" + ], + "auth_uri": "http://192.168.122.250:5000/v3", + "auth_cacert": null, + "auth_token": "176c9b5ebd9d40fb9fb0a8db921609eb", + "is_trust_scoped": false, + "service_catalog": "[{\"endpoints\": [{\"adminURL\": \"http://192.168.122.250/identity_v2_admin\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250/identity\", \"publicURL\": \"http://192.168.122.250/identity\"}], \"type\": \"identity\", \"name\": \"keystone\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:9292\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:9292\", \"publicURL\": \"http://192.168.122.250:9292\"}], \"type\": \"image\", \"name\": \"glance\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8774/v2.1\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8774/v2.1\", \"publicURL\": \"http://192.168.122.250:8774/v2.1\"}], \"type\": \"compute\", \"name\": \"nova\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8776/v2/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8776/v2/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8776/v2/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"volumev2\", \"name\": \"cinderv2\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8776/v1/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8776/v1/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8776/v1/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"volume\", \"name\": \"cinder\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:9494\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:9494\", 
\"publicURL\": \"http://192.168.122.250:9494\"}], \"type\": \"artifact\", \"name\": \"glare\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8004/v1/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8004/v1/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8004/v1/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"orchestration\", \"name\": \"heat\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8774/v2/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8774/v2/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8774/v2/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"compute_legacy\", \"name\": \"nova_legacy\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:9896/\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:9896/\", \"publicURL\": \"http://192.168.122.250:9896/\"}], \"type\": \"mec-orchestration\", \"name\": \"apmec\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8989/v2\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8989/v2\", \"publicURL\": \"http://192.168.122.250:8989/v2\"}], \"type\": \"workflowv2\", \"name\": \"mistral\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:9696/\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:9696/\", \"publicURL\": \"http://192.168.122.250:9696/\"}], \"type\": \"network\", \"name\": \"neutron\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8776/v3/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8776/v3/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8776/v3/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"volumev3\", \"name\": \"cinderv3\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8082\", \"region\": \"RegionOne\", 
\"internalURL\": \"http://192.168.122.250:8082\", \"publicURL\": \"http://192.168.122.250:8082\"}], \"type\": \"application-catalog\", \"name\": \"murano\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8779/v1.0/bde60e557de840a8a837733aaa96e42e\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8779/v1.0/bde60e557de840a8a837733aaa96e42e\", \"publicURL\": \"http://192.168.122.250:8779/v1.0/bde60e557de840a8a837733aaa96e42e\"}], \"type\": \"database\", \"name\": \"trove\"}, {\"endpoints\": [{\"adminURL\": \"http://192.168.122.250:8000/v1\", \"region\": \"RegionOne\", \"internalURL\": \"http://192.168.122.250:8000/v1\", \"publicURL\": \"http://192.168.122.250:8000/v1\"}], \"type\": \"cloudformation\", \"name\": \"heat-cfn\"}]", + "project_id": "bde60e557de840a8a837733aaa96e42e", + "user_name": "demo" + }, + "mead_id": "fb164b77-5e24-402d-b5f4-c6596352cabe", + "__env": {}, + "__execution": { + "input": { + "mead_id": "fb164b77-5e24-402d-b5f4-c6596352cabe" + }, + "params": {}, + "id": "1e0340c0-bee8-4ca4-8150-ac6e5eb58c99", + "spec": { + "tasks": { + "delete_mead": { + "action": "apmec.delete_mead mead=<% $.mead_id %>", + "version": "2.0", + "type": "direct", + "description": "Request to delete a MEAD.", + "name": "delete_mead" + } + }, + "description": "Delete a MEAD.\n", + "version": "2.0", + "input": [ + "mead_id" + ], + "type": "direct", + "name": "std.delete_mead" + } + } + } diff --git a/doc/source/user/alarm_monitoring_usage_guide.rst b/doc/source/user/alarm_monitoring_usage_guide.rst new file mode 100644 index 0000000..cdf26e4 --- /dev/null +++ b/doc/source/user/alarm_monitoring_usage_guide.rst @@ -0,0 +1,260 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +.. _ref-alarm_frm: + +========================== +Alarm monitoring framework +========================== + +This document describes how to use alarm-based monitoring driver in Apmec. + +Sample TOSCA with monitoring policy +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following example shows monitoring policy using TOSCA template. +The target (VDU1) of the monitoring policy in this example need to be +described firstly like other TOSCA templates in Apmec. + +.. code-block:: yaml + + policies: + - vdu1_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + resize_compute: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + metrics: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 65 + evaluations: 1 + method: avg + comparison_operator: gt + actions: [respawn] + +Alarm framework already supported the some default backend actions like +**scaling, respawn, log, and log_and_kill**. + +Apmec users could change the desired action as described in the above example. +Until now, the backend actions could be pointed to the specific policy which +is also described in TOSCA template like scaling policy. The integration between +alarming monitoring and scaling was also supported by Alarm monitor in Apmec: + +.. 
code-block:: yaml + + tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + description: Demo example + + metadata: + template_name: sample-tosca-mead + + topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: SG1} + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + VDU2: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: SG1} + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - SP1: + type: tosca.policies.apmec.Scaling + properties: + increment: 1 + cooldown: 120 + min_instances: 1 + max_instances: 3 + default_instances: 2 + targets: [VDU1,VDU2] + + - vdu_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_scaling_out: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + metrics: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: avg + comparison_operator: gt + metadata: SG1 + actions: [SP1] + + vdu_lcpu_usage_scaling_in: + targets: [VDU1, VDU2] + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + metrics: cpu_util + condition: + threshold: 10 + constraint: utilization 
less_than 10%
+              period: 600
+              evaluations: 1
+              method: avg
+              comparison_operator: lt
+            metadata: SG1
+            actions: [SP1]
+
+
+**NOTE:**
+The metadata defined in the VDU properties must match the metadata in the monitoring policy.
+
+How to setup environment
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+If OpenStack Devstack is used to test alarm monitoring in Apmec, the OpenStack
+Ceilometer and Aodh plugins will need to be enabled in local.conf:
+
+.. code-block:: ini
+
+   enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer master
+
+   enable_plugin aodh https://git.openstack.org/openstack/aodh master
+
+How to monitor MEAs via alarm triggers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+How to setup alarm configuration
+================================
+
+Firstly, a mead and a mea need to be created successfully using the
+pre-defined TOSCA template for alarm monitoring. Then, in order to know
+whether the alarm configuration defined in Apmec was successfully passed
+to Ceilometer, Apmec users can use the following CLI:
+
+.. 
code-block:: console + + $aodh alarm list + + +--------------------------------------+-----------+--------------------------------------------------------------------------------------------------------------------------------------+-------------------+----------+---------+ + | alarm_id | type | name | state | severity | enabled | + +--------------------------------------+-----------+--------------------------------------------------------------------------------------------------------------------------------------+-------------------+----------+---------+ + | 6f2336b9-e0a2-4e33-88be-bc036192b42b | threshold | apmec.mem.infra_drivers.openstack.openstack_OpenStack-a0f60b00-ad3d-4769-92ef-e8d9518da2c8-vdu_lcpu_scaling_in-smgctfnc3ql5 | insufficient data | low | True | + | e049f0d3-09a8-46c0-9b88-e61f1f524aab | threshold | apmec.mem.infra_drivers.openstack.openstack_OpenStack-a0f60b00-ad3d-4769-92ef-e8d9518da2c8-vdu_hcpu_usage_scaling_out-lubylov5g6xb | insufficient data | low | True | + +--------------------------------------+-----------+--------------------------------------------------------------------------------------------------------------------------------------+-------------------+----------+---------+ + +.. 
code-block:: console + + $aodh alarm show 6f2336b9-e0a2-4e33-88be-bc036192b42b + + +---------------------------+-------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +---------------------------+-------------------------------------------------------------------------------------------------------------------------------+ + | alarm_actions | [u'http://pinedcn:9896/v1.0/meas/a0f60b00-ad3d-4769-92ef-e8d9518da2c8/vdu_lcpu_scaling_in/SP1-in/yl7kh5qd'] | + | alarm_id | 6f2336b9-e0a2-4e33-88be-bc036192b42b | + | comparison_operator | lt | + | description | utilization less_than 10% | + | enabled | True | + | evaluation_periods | 1 | + | exclude_outliers | False | + | insufficient_data_actions | None | + | meter_name | cpu_util | + | name | apmec.mem.infra_drivers.openstack.openstack_OpenStack-a0f60b00-ad3d-4769-92ef-e8d9518da2c8-vdu_lcpu_scaling_in-smgctfnc3ql5 | + | ok_actions | None | + | period | 600 | + | project_id | 3db801789c9e4b61b14ce448c9e7fb6d | + | query | metadata.user_metadata.mea_id = a0f60b00-ad3d-4769-92ef-e8d9518da2c8 | + | repeat_actions | True | + | severity | low | + | state | insufficient data | + | state_timestamp | 2016-11-16T18:39:30.134954 | + | statistic | avg | + | threshold | 10.0 | + | time_constraints | [] | + | timestamp | 2016-11-16T18:39:30.134954 | + | type | threshold | + | user_id | a783e8a94768484fb9a43af03c6426cb | + +---------------------------+-------------------------------------------------------------------------------------------------------------------------------+ + + +How to trigger alarms: +====================== +As shown in the above Ceilometer command, alarm state is shown as +"insufficient data". Alarm is triggered by Ceilometer once alarm +state changes to "alarm". +To make MEA instance reach to the pre-defined threshold, some +simple scripts could be used. 
+
+Note: Because the Ceilometer pipeline sets the default interval to 600s (10 mins),
+in order to reduce this interval, users can edit the "interval" value
+in the **/etc/ceilometer/pipeline.yaml** file and then restart the Ceilometer service.
+
+Another way can be used to check whether the backend action is handled well in Apmec:
+
+.. code-block:: console
+
+   curl -H "Content-Type: application/json" -X POST -d '{"alarm_id": "35a80852-e24f-46ed-bd34-e2f831d00172", "current": "alarm"}' http://pinedcn:9896/v1.0/meas/a0f60b00-ad3d-4769-92ef-e8d9518da2c8/vdu_lcpu_scaling_in/SP1-in/yl7kh5qd
+
+Then, users can check Horizon to know if the mea is respawned. Please note that
+the url used in the above command can be captured from the **aodh alarm show** command as shown before.
+The "key" attribute in the request body needs to be captured from the url. The reason is that the key will be authenticated
+so that the url is requested only one time.
diff --git a/doc/source/user/enhanced_placement_awareness_usage_guide.rst b/doc/source/user/enhanced_placement_awareness_usage_guide.rst
new file mode 100644
index 0000000..99e44d2
--- /dev/null
+++ b/doc/source/user/enhanced_placement_awareness_usage_guide.rst
@@ -0,0 +1,155 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+Enhanced Placement Awareness Usage Guide
+========================================
+
+Overview
+--------
+
+OpenStack Apmec supports TOSCA MEAD templates that allow specifying
+requirements for a MEA that leverages features of a compute node such as
+NUMA topology, SR-IOV, Huge pages and CPU pinning. This allows for Enhanced
+Platform Awareness (EPA) placement of a MEA that has high performance and low
+latency requirements.
+ +Configuring compute nodes to be EPA nodes +----------------------------------------- + +The compute nodes requires configuration in the BIOS, Hypervisor and +OpenStack to enable it be an EPA compute node for deploying high performance +MEAs. + +Below table shows the configurations needed for the different features across +BIOS, Hypervisor and OpenStack. + ++----------------+------+------------+-----------+ +| | BIOS | Hypervisor | OpenStack | ++----------------+------+------------+-----------+ +| NUMA Topology | X | | X | ++----------------+------+------------+-----------+ +| SR-IOV | X | X | X | ++----------------+------+------------+-----------+ +| HyperThreading | X | | | ++----------------+------+------------+-----------+ +| Huge Pages | | X | | ++----------------+------+------------+-----------+ +| CPU Pinning | | X | X | ++----------------+------+------------+-----------+ + +**NOTE**: Consult the configuration guide from the Server and NIC vendor to +enable NUMA topology, SR-IOV and HyperThreading in BIOS. Also check the +Hypervisor documentation to verify if NUMA topology is supported. + +Below is a snippet of the /etc/default/grub file in Ubuntu that enables + +a) CPU isolation from kernel process to be used for VMs(refer keyword +*isolcpus* in the code block below) + +b) Reserving huge memory pages (refer keywords *default_hugepagesz*, +*hugepagesz* and *hugepages* in the code block below) + +c) Enabling SR-IOV Virtual functions to be exposed (refer keyword +*intel_iommu* in the code block below) + +.. code-block:: console + + GRUB_CMDLINE_LINUX_DEFAULT="quiet splash isolcpus=8-19 default_hugepagesz=1G hugepagesz=1G hugepages=24" + + GRUB_CMDLINE_LINUX="intel_iommu=on" + +**NOTE**: The above could be different based on the Hypervisor and the +hardware architecture of the Server. Please consult the Hypervisor +documentation. + +Below table shows the OpenStack related files that needs to be configured +to enable the EPA features on compute nodes. 
+ ++---------------+-----------+--------------+--------------------+ +| | nova.conf | ml2_conf.ini | ml2_conf_sriov.ini | ++---------------+-----------+--------------+--------------------+ +| NUMA Topology | X | | | ++---------------+-----------+--------------+--------------------+ +| SR-IOV | X | X | X | ++---------------+-----------+--------------+--------------------+ +| CPU Pinning | X | | | ++---------------+-----------+--------------+--------------------+ + +The NUMA Topology feature enablement on compute nodes requires the +**NUMATopologyFilter** to be added to the scheduler_default_filters in +nova.conf file. + +The SR-IOV feature enablement requires configuration on both the controller +and compute nodes. Please refer link similar to below for the appropriate +OpenStack release to setup SR-IOV: +https://docs.openstack.org/neutron/latest/admin/config-sriov.html + +The CPU Pinning feature enablement requires configuring the nova.conf on +compute nodes. It requires an entry similar to below: + +.. code-block:: console + + [DEFAULT] + vcpu_pin_set = 8-19 + cpu_allocation_ratio = 1.0 + [libvirt] + virt_type = kvm + +**NOTE**: Please refer OpenStack release documentation for configuring the +above-mentioned features. + +Creating availability zone using compute nodes +---------------------------------------------- + +Once the compute nodes have been prepared for high performance requirement +MEA deployments, the next step would be to create an 'aggregate-list' and +availability zone from the compute nodes identified for MEA deployments. +Below commands illustrates an example of creating such an aggregate-list, +availability zone and adding compute nodes. + +.. code-block:: console + + openstack aggregate create --zone MEC-AZ MEC-AGG + + openstack aggregate add host MEC-AGG + + openstack aggregate add host MEC-AGG + +**NOTE**: Consult http://docs.openstack.org/cli-reference/nova.html for +latest supported commands. 
+ +Specifying Availability Zone for VDU in MEAD template +----------------------------------------------------- + +Find below snippet of MEAD template that specifies the EPA Availability Zone +created as part of the VDU properties using **availability_zone** property. + +.. code-block:: yaml + + vdu1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 2048 MB + num_cpus: 2 + mem_page_size: large + properties: + availability_zone: MEC-AZ + image: cirros + +Deploying EPA TOSCA templates using Apmec +------------------------------------------ + +Once OpenStack/Devstack along with Apmec has been successfully installed, +deploy a sample EPA template such as tosca-mead-hugepages.yaml from location +below: +https://github.com/openstack/apmec/tree/master/samples/tosca-templates/mead + +Refer the 'Getting Started' link below on how to create a MEAD and deploy a +MEA: +https://docs.openstack.org/apmec/latest/install/getting_started.html diff --git a/doc/source/user/mea_component_usage_guide.rst b/doc/source/user/mea_component_usage_guide.rst new file mode 100644 index 0000000..f0705fa --- /dev/null +++ b/doc/source/user/mea_component_usage_guide.rst @@ -0,0 +1,58 @@ +======================== +MEA Component in Apmec +======================== + +This section will cover how to deploy `mea component` in Apmec with the +examples of how to write MEA descriptors. + + +Sample TOSCA with meac +======================= + +The following example shows meac resource using TOSCA template. +The target (VDU1) of the 'firewall_meac' in this example need to be +described firstly like other TOSCA templates in Apmec. + +.. code-block:: yaml + + topology_template: + node_templates: + firewall_meac: + type: tosca.nodes.mec.MEAC.Apmec + requirements: + - host: VDU1 + interfaces: + Standard: + create: install_meac.sh + +Every meac node must be of type 'tosca.nodes.mec.MEAC.Apmec'. 
It takes +two parameters: + +1) requirements: This node will accept list of hosts on which MEAC has to be + installed. +2) interfaces: This node will accept the absolute path of shell script to be run + on the VDUs. This shell script should reside in the machine where apmec + server is running. + + +How to setup environment +~~~~~~~~~~~~~~~~~~~~~~~~~ +To make use of MEAC in Apmec, we have to upload the image to the glance in +which heat-config and heat-config agents are installed. The installation steps +can be referred `here `_. The tool +'tools/meac/build_image.sh' can be used to generate such a kind of image. + +Currently MEAC feature works by using `heat software config `_ which +makes use of heat API. + +So the glance images which has heat-config agents installed are only to be +passed to VDU. + +Known Limitations +~~~~~~~~~~~~~~~~~ +1) Only one MEAC is supported for one VDU. Multiple MEAC per VDU will + be introduced in future. +2) The shell script for meac has to be placed in the machine where apmec + server is running. diff --git a/doc/source/user/mem_usage_guide.rst b/doc/source/user/mem_usage_guide.rst new file mode 100644 index 0000000..5f8720b --- /dev/null +++ b/doc/source/user/mem_usage_guide.rst @@ -0,0 +1,134 @@ +.. + Copyright 2014-2015 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
+ +====================== +MEA Manager User Guide +====================== + +Apmec MEA Manager (MEM) component manages the life-cycle of a Virtual Network +Function (MEA). MEM takes care of deployment, monitoring, scaling and removal +of MEAs on a Virtual Infrastructure Manager (VIM). + + +Onboarding MEA +============== + +TOSCA MEAD templates can be onboarded to Apmec MEAD Catalog using following +command: + +.. code-block:: console + + apmec mead-create --mead-file + +.. note:: + + Users can find various sample TOSCA templates at https://github.com/openstack/apmec/tree/master/samples/tosca-templates/mead + +Deploying MEA +============= + +There are two ways to create a MEA in Apmec. + +#. Using Apmec Catalog +#. Direct MEA Instantiation + +Using Apmec Catalog +-------------------- + +In this method, a TOSCA MEAD template is first onboarded into Apmec MEAD +catalog. This MEAD is then used to create MEA. This is most common way of +creating MEAs in Apmec. + + i). Onboard a TOSCA MEAD template. + +.. code-block:: console + + apmec mead-create --mead-file +.. + + ii). Create a MEA. + +.. code-block:: console + + apmec mea-create --mead-name + + +Example +~~~~~~~ + +.. code-block:: console + + apmec mead-create --mead-file sample-mead-hello-world.yaml hello-world-mead + apmec mea-create --mead-name hello-world-mead hw-mea + +Direct MEA Instantiation +------------------------ + +In this method, MEA is created directly from the TOSCA template without +onboarding the template into Apmec MEAD Catalog. + +.. code-block:: console + + apmec mea-create --mead-template + +This method is recommended when MEM Catalog is maintained outside Apmec and +Apmec is primarily used as a MEM workflow engine. + +Example +~~~~~~~ + +.. code-block:: console + + apmec mea-create --mead-template sample-mead-hello-world.yaml hw-mea + +.. note :: + + mead-list command will show only the onboarded MEADs. 
To list the MEADs + created internally for direct MEA instantiation, use + '--template-source inline' flag. To list both onboarded and inline MEADs, + use '--template-source all' flag. The default flag for mead-list command + is '--template-source onboarded'. + + .. code-block:: console + + apmec mead-list --template-source inline + apmec mead-list --template-source all + +Finding MEM Status +=================== + +Status of various MEM resources can be checked by following commands. + +.. code-block:: console + + apmec vim-list + apmec mead-list + apmec mea-list + apmec mea-show + apmec mead-show + +.. + +Deleting MEA and MEAD +===================== + +MEAs and MEADs can be deleted as shown below. + +.. code-block:: console + + apmec mea-delete + apmec mead-delete +.. diff --git a/doc/source/user/mesd_usage_guide.rst b/doc/source/user/mesd_usage_guide.rst new file mode 100644 index 0000000..c8cfe7a --- /dev/null +++ b/doc/source/user/mesd_usage_guide.rst @@ -0,0 +1,256 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +.. _ref-mesd: + +========================================================== +Orchestrating MEAs using Network Services Descriptor (MESD) +========================================================== + +To enable dynamic composition of network services, MEC introduces Network +Service Descriptors (MESDs) that specify the network service to be created. +This usage guide describes lifecycle of Network service descriptors and +services. 
+ +MESD in Ocata can be used for creating multiple (related) MEAs in one shot +using a single TOSCA template. This is a first (big) step into MESD, few +follow-on enhancements like: +1) Creating VLs / neutron networks using MESD (to support inter-MEA private VL) +2) NFYD support in MESD. + +Creating the MESD +~~~~~~~~~~~~~~~~ + +Once OpenStack along with Apmec has been successfully installed, +deploy a sample MEAD templates using mea1.yaml and mea2.yaml as mentioned in +reference section. + +:: + + apmec mead-create --mead-file mead1.yaml MEAD1 + + apmec mead-create --mead-file mead2.yaml MEAD2 + +The following code represents sample MESD which instantiates the above MEAs + +:: + + tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + imports: + - MEAD1 + - MEAD2 + topology_template: + node_templates: + MEA1: + type: tosca.nodes.mec.MEA1 + requirements: + - virtualLink1: VL1 + - virtualLink2: VL2 + MEA2: + type: tosca.nodes.mec.MEA2 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: apmec + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: apmec + +In above MESD template VL1 and VL2 are substituting the virtuallinks of MEA1. 
+To onboard the above MESD: + +:: + + apmec mesd-create --mesd-file + +Creating the MES +~~~~~~~~~~~~~~~~ + +To create a MES, you must have onboarded corresponding MESD and +MEADS(which MES is substituting) + +Apmec provides the following CLI to create MES: + +:: + + apmec mes-create --mesd-id + +Or you can create directly a MES without creating onboarded MESD before by +following CLI command: + +:: + + apmec mes-create --mesd-template + +Reference +~~~~~~~~~ + +MEA1 sample template for mesd named mead1.yaml: + +:: + + tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + description: Demo example + node_types: + tosca.nodes.mec.MEA1: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + - virtualLink2: + type: tosca.nodes.mec.VL + required: true + capabilities: + forwarder1: + type: tosca.capabilities.mec.Forwarder + forwarder2: + type: tosca.capabilities.mec.Forwarder + + topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA1 + requirements: + virtualLink1: [CP11, virtualLink] + virtualLink2: [CP14, virtualLink] + capabilities: + forwarder1: [CP11, forwarder] + forwarder2: [CP14, forwarder] + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + CP11: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + CP13: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + CP14: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true 
+ anti_spoofing_protection: false + requirements: + - virtualBinding: + node: VDU2 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + +MEA2 sample template for mesd named mead2.yaml: + +:: + + tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + description: Demo example + + node_types: + tosca.nodes.mec.MEA2: + capabilities: + forwarder1: + type: tosca.capabilities.mec.Forwarder + topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA2 + capabilities: + forwarder1: [CP21, forwarder] + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + availability_zone: nova + mgmt_driver: noop + CP22: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU2 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + diff --git a/doc/source/user/multisite_vim_usage_guide.rst b/doc/source/user/multisite_vim_usage_guide.rst new file mode 100644 index 0000000..8cb8465 --- /dev/null +++ b/doc/source/user/multisite_vim_usage_guide.rst @@ -0,0 +1,154 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +.. _ref-multisite: + +=================== +Multisite VIM Usage +=================== + +A single Apmec controller node can be used to manage multiple Openstack sites +without having the need to deploy Apmec server on each of these sites. Apmec +allows users to deploy MEAs in multiple OpenStack sites using the multisite VIM +feature. OpenStack versions starting from Kilo are supported with this feature. + + +Preparing the OpenStack site +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. Create a new 'mec' project and admin privileged 'mec' user on the remote + OpenStack site. +2. Create the required neutron networks for management, packet in and packet + out networks that will be used by MEAs. + +Register a new OpenStack VIM +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +To register a new OpenStack VIM inside Apmec. 
+ +:: + + $ apmec vim-register --description 'OpenStack Liberty' --config-file vim_config.yaml Site1 + Created a new vim: + +----------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +----------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + | auth_cred | {"username": "mec_user", "password": "***", "project_name": "mec", "user_id": "", "user_domain_name": "default", "auth_url": | + | | "http://10.18.161.165:5000/v3", "project_id": "", "project_domain_name": "default"} | + | auth_url | http://10.18.161.165:5000/v3 | + | description | OpenStack Liberty | + | id | 3f3c51c5-8bda-4bd3-adb3-5ae62eae65c3 | + | name | Site1 | + | placement_attr | {"regions": ["RegionOne", "RegionTwo"]} | + | tenant_id | 8907bae480c0414d98c3519acbad1b06 | + | type | openstack | + | vim_project | {"id": "", "name": "mec"} | + +----------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + +In the above command, config.yaml contains VIM specific parameters as below: + +:: + + auth_url: 'http://localhost:5000' + username: 'mec_user' + password: 'devstack' + project_name: 'mec' + +The parameter auth_url points to the keystone service authorization URL of the +remote OpenStack site. + +Default VIM configuration +~~~~~~~~~~~~~~~~~~~~~~~~~ + +The default vim needs to be registered. This is required when the optional +argument -vim-id is not provided during mea-create. Refer to steps described in +`manual installation`_ to register default vim. + +.. 
_manual installation: https://docs.openstack.org/apmec/latest/install/manual_installation.html#registering-default-vim + +Deploying a new MEA on registered VIM +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + $ apmec mea-create --description 'Openwrt MEA on Site1' --mead-id c3cbf0c0-a492-49e3-9541-945e49e7ed7e --vim-name Site1 openwrt_MEA + Created a new mea: + +----------------+--------------------------------------+ + | Field | Value | + +----------------+--------------------------------------+ + | description | Openwrt tosca template | + | id | 159ed8a5-a5a7-4f7a-be50-0f5f86603e3a | + | instance_id | 7b4ab046-d977-4781-9f0c-1ee9dcce01c6 | + | mgmt_url | | + | name | openwrt_MEA | + | placement_attr | {"vim_name": "Site1"} | + | status | PENDING_CREATE | + | tenant_id | 8907bae480c0414d98c3519acbad1b06 | + | vim_id | 3f3c51c5-8bda-4bd3-adb3-5ae62eae65c3 | + | mead_id | c3cbf0c0-a492-49e3-9541-945e49e7ed7e | + +----------------+--------------------------------------+ + +The --vim-id/--vim-name argument is optional during mea-create. If +--vim-id/--vim-name is not specified, the default vim will +be used to deploy MEA on the default site. We can create default vim +by specifying --is-default option with vim-register command. + +User can optionally provide --vim-region-name during mea-create to deploy the +MEA in a specify region within that VIM. + +Updating a VIM +~~~~~~~~~~~~~~ + +Apmec allows for updating VIM authorization parameters such as 'username', +'password' and 'project_name' and 'ids' after it has been registered. To update +'username' and password' for a given VIM user within Apmec: + +:: + + $apmec vim-update VIM0 --config-file update.yaml + +update.yaml in above command will contain: + +:: + + username: 'new_user' + password: 'new_pw' + +Note that 'auth_url' parameter of a VIM is not allowed to be updated as +'auth_url' uniquely identifies a given 'vim' resource. 
+
+
+Deleting a VIM
+~~~~~~~~~~~~~~
+To delete a VIM:
+
+::
+
+  $ apmec vim-delete VIM1
+  Deleted vim: VIM1
+
+Features
+~~~~~~~~
+* VIMs are shared across tenants -- As an admin operator, the user can register
+  a VIM once and allow tenants to deploy MEAs on the registered VIM.
+* Pluggable driver module framework allowing Apmec to interact with multiple
+  VIM types.
+* Compatible for OpenStack versions starting from Kilo.
+* Supports keystone versions v2.0 and v3.
+
+Limitations
+~~~~~~~~~~~
+* MEAs of all users currently land in the 'mec' project that is specified
+  during VIM registration.
+* Fernet keys for password encryption and decryption are stored on file systems.
+  This is a limitation when multiple servers are serving behind a load balancer
+  server and the keys need to be synced across apmec server systems.
diff --git a/doc/source/user/scale_usage_guide.rst b/doc/source/user/scale_usage_guide.rst
new file mode 100644
index 0000000..5f0d339
--- /dev/null
+++ b/doc/source/user/scale_usage_guide.rst
@@ -0,0 +1,186 @@
+..
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+.. _ref-scale:
+
+===========
+MEA scaling
+===========
+
+MEA resources in terms of CPU core and memory are hardcoded in MEAD template
+through image flavor settings. This results in either provisioning MEA for
+typical usage or for maximum usage. The former leads to service disruption
+when load exceeds provisioned capacity.
And the latter leads to underutilized
+resources and waste during normal system load. So apmec provides a
+way to seamlessly scale the number of MEAs on demand either manually or
+automatically.
+
+
+TOSCA schema for scaling policy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Apmec defines TOSCA schema for the scaling policy as given below:
+
+.. code-block:: yaml
+
+  tosca.policies.apmec.Scaling:
+    derived_from: tosca.policies.Scaling
+    description: Defines policy for scaling the given targets.
+    properties:
+      increment:
+        type: integer
+        required: true
+        description: Number of nodes to add or remove during the scale out/in.
+      targets:
+        type: list
+        entry_schema:
+          type: string
+        required: true
+        description: List of Scaling nodes.
+      min_instances:
+        type: integer
+        required: true
+        description: Minimum number of instances to scale in.
+      max_instances:
+        type: integer
+        required: true
+        description: Maximum number of instances to scale out.
+      default_instances:
+        type: integer
+        required: true
+        description: Initial number of instances.
+      cooldown:
+        type: integer
+        required: false
+        default: 120
+        description: Wait time (in seconds) between consecutive scaling
+        operations. During the cooldown period, scaling action will be ignored.
+
+
+Sample TOSCA with scaling policy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Following TOSCA snippet shows the scaling policy used in MEAD, in which vdu1
+and vdu2 are already defined VDUs.
+
+..
code-block:: yaml + + policies: + + sp1: + + type: tosca.policies.apmec.Scaling + + description: Simple VDU scaling + + properties: + min_instances: 1 + + max_instances: 3 + + default_instances: 2 + + increment: 1 + + targets: [vdu1, vdu2] + + +Deploying scaling TOSCA template using Apmec +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Once OpenStack/Devstack along with Apmec has been successfully installed, +deploy a sample scaling template from location given +below: +https://github.com/openstack/apmec/tree/master/samples/tosca-templates/mead + +Refer the 'Getting Started' link below on how to create a MEAD and deploy a +MEA: +https://docs.openstack.org/apmec/latest/install/getting_started.html + + +How to scale MEA using CLI +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Apmec provides following CLI for scaling. + +.. code-block::console + +**apmec mea-scale --mea-id ** + **--mea-name ** + **--scaling-policy-name ** + **--scaling-type ** + +Here, + +* scaling-policy-name - Policy name defined in scaling MEAD +* scaling-type - in or out +* mea-id - scaling MEA id +* mea-name - scaling MEA name + +For example, to scale-out policy 'sp1' defined above, this cli could be used +as below: + +.. code-block::console + +**apmec mea-scale --mea-name sample-mea** + **--scaling-policy-name sp1** + **--scaling-type out** + +How to scale MEA using REST API +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Apmec provides following REST API for scaling. + +**POST on v1.0/meas//actions** + +with body + +.. 
code-block::json + +**{"scale": { "type": "", "policy" : ""}}** + +Here, + +* scaling-policy-name - Policy name defined in scaling MEAD +* scaling-type - in or out +* mea-id - scaling MEA id + +Response http status codes: + +* 202 - Accepted the request for doing the scaling operation +* 404 - Bad request, if given scaling-policy-name and type are invalid +* 500 - Internal server error, on scaling operation failed due to an error +* 401 - Unauthorized + +MEA state transitions during scaling operation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +During the scaling operation, the MEA will be moving in below state +transformations: + +* **ACTIVE -> PENDING_SCALE_IN -> ACTIVE** +* **ACTIVE -> PENDING_SCALE_IN -> ERROR** +* **ACTIVE -> PENDING_SCALE_OUT -> ACTIVE** +* **ACTIVE -> PENDING_SCALE_OUT -> ERROR** + + +Limitations +~~~~~~~~~~~ + +Following features are not supported with scaling: + +* Auto-scaling feature is supported only with alarm monitors and it does + not work with other monitors such as ping, http_ping. +* When MEA is modelled with scaling requirement in MEAD, any config + management requirement in MEAD is not supported. +* Scaling feature does not support to selectively choose the VDU as part + of scaling. 
diff --git a/etc/apmec/README.txt b/etc/apmec/README.txt new file mode 100644 index 0000000..4547fe5 --- /dev/null +++ b/etc/apmec/README.txt @@ -0,0 +1,9 @@ +To generate the sample apmec configuration files, run the following +command from the top level of the apmec directory: + +tox -e config-gen + +If a 'tox' environment is unavailable, then you can run the following script +instead to generate the configuration files: + +./tools/generate_config_file_sample.sh diff --git a/etc/apmec/api-paste.ini b/etc/apmec/api-paste.ini new file mode 100644 index 0000000..62e0729 --- /dev/null +++ b/etc/apmec/api-paste.ini @@ -0,0 +1,33 @@ +[composite:apmec] +use = egg:Paste#urlmap +/: apmecversions +/v1.0: apmecapi_v1_0 + +[composite:apmecapi_v1_0] +use = call:apmec.auth:pipeline_factory +noauth = request_id catch_errors extensions apmecapiapp_v1_0 +keystone = request_id catch_errors alarm_receiver authtoken keystonecontext extensions apmecapiapp_v1_0 + +[filter:request_id] +paste.filter_factory = oslo_middleware:RequestId.factory + +[filter:catch_errors] +paste.filter_factory = oslo_middleware:CatchErrors.factory + +[filter:alarm_receiver] +paste.filter_factory = apmec.alarm_receiver:AlarmReceiver.factory + +[filter:keystonecontext] +paste.filter_factory = apmec.auth:ApmecKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +[filter:extensions] +paste.filter_factory = apmec.api.extensions:extension_middleware_factory + +[app:apmecversions] +paste.app_factory = apmec.api.versions:Versions.factory + +[app:apmecapiapp_v1_0] +paste.app_factory = apmec.api.v1.router:APIRouter.factory diff --git a/etc/apmec/policy.json b/etc/apmec/policy.json new file mode 100644 index 0000000..b38bc69 --- /dev/null +++ b/etc/apmec/policy.json @@ -0,0 +1,10 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", + "admin_only": "rule:context_is_admin", + "regular_user": "", + 
"shared": "field:vims:shared=True",
+    "default": "rule:admin_or_owner",
+
+    "get_vim": "rule:admin_or_owner or rule:shared"
+}
diff --git a/etc/apmec/rootwrap.conf b/etc/apmec/rootwrap.conf
new file mode 100644
index 0000000..cccf321
--- /dev/null
+++ b/etc/apmec/rootwrap.conf
@@ -0,0 +1,34 @@
+# Configuration for apmec-rootwrap
+# This file should be owned by (and only-writeable by) the root user
+
+[DEFAULT]
+# List of directories to load filter definitions from (separated by ',').
+# These directories MUST all be only writeable by root !
+filters_path=/etc/apmec/rootwrap.d,/usr/share/apmec/rootwrap
+
+# List of directories to search executables in, in case filters do not
+# explicitly specify a full path (separated by ',')
+# If not specified, defaults to system PATH environment variable.
+# These directories MUST all be only writeable by root !
+exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
+
+# Enable logging to syslog
+# Default value is False
+use_syslog=False
+
+# Which syslog facility to use.
+# Valid values include auth, authpriv, syslog, local0, local1...
+# Default value is 'syslog'
+syslog_log_facility=syslog
+
+# Which messages to log.
+# INFO means log all usage
+# ERROR means only log unsuccessful attempts
+syslog_log_level=ERROR
+
+[xenapi]
+# XenAPI configuration is only required by the L2 agent if it is to
+# target a XenServer/XCP compute host's dom0.
+xenapi_connection_url= +xenapi_connection_username=root +xenapi_connection_password= diff --git a/etc/apmec/rootwrap.d/apmec.filters b/etc/apmec/rootwrap.d/apmec.filters new file mode 100644 index 0000000..a927055 --- /dev/null +++ b/etc/apmec/rootwrap.d/apmec.filters @@ -0,0 +1,10 @@ +# apmec-rootwrap command filters for nodes on which apmec is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + diff --git a/etc/config-generator.conf b/etc/config-generator.conf new file mode 100644 index 0000000..daf02fa --- /dev/null +++ b/etc/config-generator.conf @@ -0,0 +1,27 @@ +[DEFAULT] +output_file = etc/apmec/apmec.conf.sample +wrap_width = 79 +namespace = apmec.common.config +namespace = apmec.wsgi +namespace = apmec.service +namespace = apmec.meo.meo_plugin +namespace = apmec.meo.drivers.vim.openstack_driver +namespace = apmec.keymgr +namespace = apmec.mem.monitor +namespace = apmec.mem.plugin +namespace = apmec.mem.infra_drivers.openstack.openstack +namespace = apmec.mem.mgmt_drivers.openwrt.openwrt +namespace = apmec.mem.monitor_drivers.http_ping.http_ping +namespace = apmec.mem.monitor_drivers.ping.ping +namespace = apmec.mem.monitor_drivers.ceilometer.ceilometer +namespace = apmec.apmec.policy_actions.autoscaling.autoscaling +namespace = apmec.apmec.policy_actions.respawn.respawn +namespace = apmec.apmec.policy_actions.log.log +namespace = apmec.alarm_receiver +namespace = keystonemiddleware.auth_token +namespace = oslo.middleware +namespace = oslo.messaging +namespace = oslo.db +namespace = oslo.log +namespace = oslo.policy +namespace = oslo.service.service diff --git a/etc/init.d/apmec-server b/etc/init.d/apmec-server new file mode 100644 index 0000000..d99a185 --- /dev/null +++ b/etc/init.d/apmec-server @@ -0,0 +1,68 @@ +#! 
/bin/sh +### BEGIN INIT INFO +# Provides: apmec-server +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: apmec-server +# Description: Provides the Apmec service +### END INIT INFO + +set -e + +PIDFILE=/var/run/apmec/apmec-server.pid +LOGFILE=/var/log/apmec/apmec-server.log + +DAEMON=/usr/bin/apmec-server +DAEMON_ARGS="--log-file=$LOGFILE" +DAEMON_DIR=/var/run + +ENABLED=true + +if test -f /etc/default/apmec-server; then + . /etc/default/apmec-server +fi + +mkdir -p /var/run/apmec +mkdir -p /var/log/apmec + +. /lib/lsb/init-functions + +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" +export TMPDIR=/var/lib/apmec/tmp + +if [ ! -x ${DAEMON} ] ; then + exit 0 +fi + +case "$1" in + start) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Starting apmec server" "apmec-server" + start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS + log_end_msg $? + ;; + stop) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Stopping apmec server" "apmec-server" + start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} + log_end_msg $? + ;; + restart|force-reload) + test "$ENABLED" = "true" || exit 1 + $0 stop + sleep 1 + $0 start + ;; + status) + test "$ENABLED" = "true" || exit 0 + status_of_proc -p $PIDFILE $DAEMON apmec-server && exit 0 || exit $? + ;; + *) + log_action_msg "Usage: /etc/init.d/apmec-server {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..9146fa8 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,43 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+pbr!=2.1.0,>=2.0.0 # Apache-2.0 + +Paste>=2.0.2 # MIT +PasteDeploy>=1.5.0 # MIT +Routes>=2.3.1 # MIT +anyjson>=0.3.3 # BSD +Babel!=2.4.0,>=2.3.4 # BSD +eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT +requests>=2.14.2 # Apache-2.0 +keystonemiddleware>=4.17.0 # Apache-2.0 +kombu!=4.0.2,>=4.0.0 # BSD +netaddr>=0.7.18 # BSD +SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT +WebOb>=1.7.1 # MIT +python-heatclient>=1.10.0 # Apache-2.0 +python-keystoneclient>=3.8.0 # Apache-2.0 +alembic>=0.8.10 # MIT +six>=1.9.0 # MIT +stevedore>=1.20.0 # Apache-2.0 +oslo.concurrency>=3.20.0 # Apache-2.0 +oslo.config>=4.6.0 # Apache-2.0 +oslo.context!=2.19.1,>=2.14.0 # Apache-2.0 +oslo.db>=4.27.0 # Apache-2.0 +oslo.log>=3.30.0 # Apache-2.0 +oslo.messaging>=5.29.0 # Apache-2.0 +oslo.middleware>=3.31.0 # Apache-2.0 +oslo.policy>=1.23.0 # Apache-2.0 +oslo.reports>=1.18.0 # Apache-2.0 +oslo.rootwrap>=5.8.0 # Apache-2.0 +oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 +oslo.service>=1.24.0 # Apache-2.0 +oslo.utils>=3.28.0 # Apache-2.0 +openstackdocstheme>=1.17.0 # Apache-2.0 +python-neutronclient>=6.3.0 # Apache-2.0 +python-novaclient>=9.1.0 # Apache-2.0 +cryptography!=2.0,>=1.9 # BSD/Apache-2.0 +paramiko>=2.0.0 # LGPLv2.1+ +pyroute2>=0.4.21;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2) +python-mistralclient>=3.1.0 # Apache-2.0 +python-barbicanclient!=4.5.0,!=4.5.1,>=4.0.0 # Apache-2.0 diff --git a/samples/mistral/workflows/create_mea.yaml b/samples/mistral/workflows/create_mea.yaml new file mode 100644 index 0000000..7bdf81d --- /dev/null +++ b/samples/mistral/workflows/create_mea.yaml @@ -0,0 +1,53 @@ +--- +version: '2.0' + +std.create_mea: + type: direct + + description: | + Create a MEA and waits till MEA is active. + + input: + - body + output: + mea_id: <% $.mea_id %> + vim_id: <% $.vim_id %> + mgmt_url: <% $.mgmt_url %> + status: <% $.status %> + + task-defaults: + on-error: + - delete_mea + + tasks: + create_mea: + description: Request to create a MEA. 
+      action: apmec.create_mea body=<% $.body %>
+      input:
+        body: <% $.body %>
+      publish:
+        mea_id: <% task(create_mea).result.mea.id %>
+        vim_id: <% task(create_mea).result.mea.vim_id %>
+        mgmt_url: <% task(create_mea).result.mea.mgmt_url %>
+        status: <% task(create_mea).result.mea.status %>
+      on-success:
+        - wait_mea_active
+
+    wait_mea_active:
+      description: Waits till MEA is ACTIVE.
+      action: apmec.show_mea mea=<% $.mea_id %>
+      retry:
+        count: 10
+        delay: 10
+        # Stop retrying once the MEA reaches a terminal state. A single
+        # expression is used because duplicate 'break-on' keys in one YAML
+        # mapping are invalid and the first condition would be discarded.
+        break-on: <% $.status = 'ACTIVE' or $.status = 'ERROR' %>
+        continue-on: <% $.status = 'PENDING_CREATE' %>
+      publish:
+        mgmt_url: <% task(wait_mea_active).result.mea.mgmt_url %>
+        status: <% task(wait_mea_active).result.mea.status %>
+      on-success:
+        - delete_mea: <% $.status = 'ERROR' %>
+
+    delete_mea:
+      description: Request to delete a MEA.
+      action: apmec.delete_mea mea=<% $.mea_id %>
diff --git a/samples/mistral/workflows/create_mead.yaml b/samples/mistral/workflows/create_mead.yaml
new file mode 100644
index 0000000..0415cf8
--- /dev/null
+++ b/samples/mistral/workflows/create_mead.yaml
@@ -0,0 +1,22 @@
+---
+version: '2.0'
+
+std.create_mead:
+  type: direct
+
+  description: |
+    Create a MEAD.
+
+  input:
+    - body
+  output:
+    mead_id: <% $.mead_id %>
+
+  tasks:
+    create_mead:
+      description: Request to create a MEAD.
+      action: apmec.create_mead body=<% $.body %>
+      input:
+        body: <% $.body %>
+      publish:
+        mead_id: <% task(create_mead).result.mead.id %>
diff --git a/samples/mistral/workflows/delete_mea.yaml b/samples/mistral/workflows/delete_mea.yaml
new file mode 100644
index 0000000..0fcd881
--- /dev/null
+++ b/samples/mistral/workflows/delete_mea.yaml
@@ -0,0 +1,16 @@
+---
+version: '2.0'
+
+std.delete_mea:
+  type: direct
+
+  description: |
+    Delete a MEA.
+
+  input:
+    - mea_id
+
+  tasks:
+    delete_mea:
+      description: Request to delete a MEA.
+ action: apmec.delete_mea mea=<% $.mea_id %> diff --git a/samples/mistral/workflows/delete_mead.yaml b/samples/mistral/workflows/delete_mead.yaml new file mode 100644 index 0000000..47c4438 --- /dev/null +++ b/samples/mistral/workflows/delete_mead.yaml @@ -0,0 +1,16 @@ +--- +version: '2.0' + +std.delete_mead: + type: direct + + description: | + Delete a MEAD. + + input: + - mead_id + + tasks: + delete_mead: + description: Request to delete a MEAD. + action: apmec.delete_mead mead=<% $.mead_id %> diff --git a/samples/mistral/workflows/input/create_mea.json b/samples/mistral/workflows/input/create_mea.json new file mode 100644 index 0000000..b7fa429 --- /dev/null +++ b/samples/mistral/workflows/input/create_mea.json @@ -0,0 +1,11 @@ +{ + "body": { + "mea": { + "attributes": {}, + "vim_id": "", + "description": "Sample for apmec.create_mea action", + "mead_id": "dda99d4c-f24d-4550-b104-0958fef427b3", + "name": "apmec-create-mea" + } + } +} diff --git a/samples/mistral/workflows/input/create_mead.json b/samples/mistral/workflows/input/create_mead.json new file mode 100644 index 0000000..af4c91c --- /dev/null +++ b/samples/mistral/workflows/input/create_mead.json @@ -0,0 +1,10 @@ +{ + "body":{ + "mead":{ + "attributes":{ + "mead":"tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0\n\ndescription: Demo example\n\nmetadata:\n template_name: sample-tosca-mead\n\ntopology_template:\n node_templates:\n VDU1:\n type: tosca.nodes.mec.VDU.Apmec\n properties:\n image: cirros-0.3.5-x86_64-disk\n flavor: m1.tiny\n availability_zone: nova\n mgmt_driver: noop\n config: |\n param0: key1\n param1: key2\n\n CP1:\n type: tosca.nodes.mec.CP.Apmec\n properties:\n management: true\n anti_spoofing_protection: false\n requirements:\n - virtualLink:\n node: VL1\n - virtualBinding:\n node: VDU1\n\n CP2:\n type: tosca.nodes.mec.CP.Apmec\n properties:\n anti_spoofing_protection: false\n requirements:\n - virtualLink:\n node: VL2\n - virtualBinding:\n node: VDU1\n\n CP3:\n type: 
tosca.nodes.mec.CP.Apmec\n properties:\n anti_spoofing_protection: false\n requirements:\n - virtualLink:\n node: VL3\n - virtualBinding:\n node: VDU1\n\n VL1:\n type: tosca.nodes.mec.VL\n properties:\n network_name: net_mgmt\n vendor: Apmec\n\n VL2:\n type: tosca.nodes.mec.VL\n properties:\n network_name: net0\n vendor: Apmec\n\n VL3:\n type: tosca.nodes.mec.VL\n properties:\n network_name: net1\n vendor: Apmec\n" + }, + "name":"apmec-create-mead" + } + } +} diff --git a/samples/mistral/workflows/input/delete_mea.json b/samples/mistral/workflows/input/delete_mea.json new file mode 100644 index 0000000..a3e74dd --- /dev/null +++ b/samples/mistral/workflows/input/delete_mea.json @@ -0,0 +1,3 @@ +{ + "mea_id": "d7606ee7-053a-4064-bb67-501ac704a6ed" +} diff --git a/samples/mistral/workflows/input/delete_mead.json b/samples/mistral/workflows/input/delete_mead.json new file mode 100644 index 0000000..844d028 --- /dev/null +++ b/samples/mistral/workflows/input/delete_mead.json @@ -0,0 +1,3 @@ +{ + "mead_id": "ad78e292-6b0b-47b5-80f4-3abe9e9c7e12" +} diff --git a/samples/tosca-templates/evaluation/sample-tosca-mead1.yaml b/samples/tosca-templates/evaluation/sample-tosca-mead1.yaml new file mode 100644 index 0000000..403417e --- /dev/null +++ b/samples/tosca-templates/evaluation/sample-tosca-mead1.yaml @@ -0,0 +1,88 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo Sample MEAD1 +node_types: + tosca.nodes.mec.MEA1: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA1 + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP31: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + 
requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP32: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + FIP31: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.104 + requirements: + - link: + node: CP31 + + FIP32: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.105 + requirements: + - link: + node: CP32 + diff --git a/samples/tosca-templates/evaluation/sample-tosca-mead2.yaml b/samples/tosca-templates/evaluation/sample-tosca-mead2.yaml new file mode 100644 index 0000000..e8e72de --- /dev/null +++ b/samples/tosca-templates/evaluation/sample-tosca-mead2.yaml @@ -0,0 +1,80 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +node_types: + tosca.nodes.mec.MEA2: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA2 + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP41: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: 
m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP42: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + FIP41: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP41 + + FIP42: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP42 + diff --git a/samples/tosca-templates/evaluation/sample2-tosca-10app-apmec.yaml b/samples/tosca-templates/evaluation/sample2-tosca-10app-apmec.yaml new file mode 100644 index 0000000..1b4247e --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-10app-apmec.yaml @@ -0,0 +1,355 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example MEAD4 +node_types: + tosca.nodes.mec.MEA4: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA4 + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + 
anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + 
config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU7 + + VDU8: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + + CP28: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU8 + VDU9: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP29: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU9 + + VDU10: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP210: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU10 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + 
floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.124 + requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + + FIP28: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.127 + requirements: + - link: + node: CP28 + + FIP29: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.128 + requirements: + - link: + node: CP29 + + FIP210: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.129 + requirements: + - link: + node: CP210 diff --git a/samples/tosca-templates/evaluation/sample2-tosca-10app-tacker.yaml b/samples/tosca-templates/evaluation/sample2-tosca-10app-tacker.yaml new file mode 100644 index 0000000..876f2e3 --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-10app-tacker.yaml @@ -0,0 +1,355 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Demo example VNFD4 +node_types: + tosca.nodes.nfv.VNF4: + requirements: + - virtualLink1: + type: tosca.nodes.nfv.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.nfv.VNF4 + + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + 
mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: 
VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU7 + + VDU8: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + + CP28: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU8 + VDU9: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP29: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU9 + + VDU10: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP210: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: 
true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU10 + + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net0 + vendor: Tacker + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.124 + requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + + FIP28: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.127 + requirements: + - link: + node: CP28 + + FIP29: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.128 + requirements: + - link: + node: CP29 + + FIP210: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.129 + requirements: + - link: + node: CP210 diff --git a/samples/tosca-templates/evaluation/sample2-tosca-15app-apmec.yaml 
b/samples/tosca-templates/evaluation/sample2-tosca-15app-apmec.yaml new file mode 100644 index 0000000..ee0493f --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-15app-apmec.yaml @@ -0,0 +1,524 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example MEAD4 +node_types: + tosca.nodes.mec.MEA4: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA4 + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: 
| + param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU7 + + VDU8: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + + CP28: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU8 + VDU9: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + 
availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP29: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU9 + + VDU10: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP210: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU10 + + VDU11: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP211: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU11 + + VDU12: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP212: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU12 + + VDU13: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP213: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - 
virtualBinding: + node: VDU13 + VDU14: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP214: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU14 + + VDU15: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP215: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU15 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.124 + requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: 
CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + + FIP28: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.127 + requirements: + - link: + node: CP28 + + FIP29: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.128 + requirements: + - link: + node: CP29 + + FIP210: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.129 + requirements: + - link: + node: CP210 + + FIP211: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.130 + requirements: + - link: + node: CP211 + + FIP212: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.131 + requirements: + - link: + node: CP212 + + FIP213: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.132 + requirements: + - link: + node: CP213 + + FIP214: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.133 + requirements: + - link: + node: CP214 + + FIP215: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.134 + requirements: + - link: + node: CP215 \ No newline at end of file diff --git a/samples/tosca-templates/evaluation/sample2-tosca-15app-tacker.yaml b/samples/tosca-templates/evaluation/sample2-tosca-15app-tacker.yaml new file mode 100644 index 0000000..354916e --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-15app-tacker.yaml @@ -0,0 +1,644 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Demo example VNFD4 +node_types: + 
tosca.nodes.nfv.VNF4: + requirements: + - virtualLink1: + type: tosca.nodes.nfv.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.nfv.VNF4 + + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: 
ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU7 + + VDU8: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + + CP28: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU8 + VDU9: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP29: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU9 + + 
VDU10: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP210: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU10 + + VDU11: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP211: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU11 + + VDU12: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP212: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU12 + + VDU13: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP213: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU13 + VDU14: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP214: + type: tosca.nodes.nfv.CP.Tacker + 
properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU14 + + VDU15: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP215: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU15 + + VDU16: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP216: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU16 + + VDU17: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP217: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU17 + + VDU18: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + + CP218: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU18 + VDU19: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: 
mykey + config: | + param0: key1 + param1: key2 + + CP219: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU19 + + VDU20: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP220: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU20 + + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net0 + vendor: Tacker + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.124 + requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + + FIP28: + type: 
tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.127 + requirements: + - link: + node: CP28 + + FIP29: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.128 + requirements: + - link: + node: CP29 + + FIP210: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.129 + requirements: + - link: + node: CP210 + + FIP211: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.130 + requirements: + - link: + node: CP211 + + FIP212: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.131 + requirements: + - link: + node: CP212 + + FIP213: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.132 + requirements: + - link: + node: CP213 + + FIP214: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.133 + requirements: + - link: + node: CP214 + + FIP215: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.134 + requirements: + - link: + node: CP215 diff --git a/samples/tosca-templates/evaluation/sample2-tosca-20app-apmec.yaml b/samples/tosca-templates/evaluation/sample2-tosca-20app-apmec.yaml new file mode 100644 index 0000000..407fc05 --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-20app-apmec.yaml @@ -0,0 +1,689 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example MEAD4 +node_types: + tosca.nodes.mec.MEA4: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA4 + + node_templates: + VDU1: + type: 
tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + 
anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU7 + + VDU8: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + + CP28: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU8 + VDU9: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP29: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU9 + + VDU10: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP210: + type: 
tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU10 + + VDU11: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP211: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU11 + + VDU12: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP212: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU12 + + VDU13: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP213: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU13 + VDU14: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP214: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU14 + + VDU15: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + 
mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP215: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU15 + + VDU16: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP216: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU16 + + VDU17: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP217: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU17 + + VDU18: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + + CP218: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU18 + VDU19: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP219: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU19 + + VDU20: + type: 
tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP220: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU20 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.124 + requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + + FIP28: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.127 + requirements: + - link: + node: CP28 + + FIP29: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.128 
+ requirements: + - link: + node: CP29 + + FIP210: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.129 + requirements: + - link: + node: CP210 + + FIP211: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.130 + requirements: + - link: + node: CP211 + + FIP212: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.131 + requirements: + - link: + node: CP212 + + FIP213: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.132 + requirements: + - link: + node: CP213 + + FIP214: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.133 + requirements: + - link: + node: CP214 + + FIP215: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.134 + requirements: + - link: + node: CP215 + + FIP216: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.135 + requirements: + - link: + node: CP216 + + FIP217: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.136 + requirements: + - link: + node: CP217 + + FIP218: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.137 + requirements: + - link: + node: CP218 + + FIP219: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.138 + requirements: + - link: + node: CP219 + + FIP220: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.139 + requirements: + - link: + node: CP220 diff --git 
a/samples/tosca-templates/evaluation/sample2-tosca-20app-tacker.yaml b/samples/tosca-templates/evaluation/sample2-tosca-20app-tacker.yaml new file mode 100644 index 0000000..0676f22 --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-20app-tacker.yaml @@ -0,0 +1,689 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Demo example VNFD4 +node_types: + tosca.nodes.nfv.VNF4: + requirements: + - virtualLink1: + type: tosca.nodes.nfv.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.nfv.VNF4 + + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: 
noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU7 + + VDU8: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + + CP28: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU8 + VDU9: + type: 
tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP29: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU9 + + VDU10: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP210: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU10 + + VDU11: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP211: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU11 + + VDU12: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP212: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU12 + + VDU13: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP213: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 
0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU13 + VDU14: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP214: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU14 + + VDU15: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP215: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU15 + + VDU16: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP216: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU16 + + VDU17: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP217: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU17 + + VDU18: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: 
nova + config_drive: true + + CP218: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU18 + VDU19: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP219: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU19 + + VDU20: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP220: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU20 + + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net0 + vendor: Tacker + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.124 + 
requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + + FIP28: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.127 + requirements: + - link: + node: CP28 + + FIP29: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.128 + requirements: + - link: + node: CP29 + + FIP210: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.129 + requirements: + - link: + node: CP210 + + FIP211: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.130 + requirements: + - link: + node: CP211 + + FIP212: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.131 + requirements: + - link: + node: CP212 + + FIP213: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.132 + requirements: + - link: + node: CP213 + + FIP214: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.133 + requirements: + - link: + node: CP214 + + FIP215: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.134 + requirements: + - link: + node: CP215 + + FIP216: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.135 + requirements: + - link: + node: CP216 + + FIP217: + type: tosca.nodes.network.FloatingIP + properties: + 
floating_network: external + floating_ip_address: 203.203.203.136 + requirements: + - link: + node: CP217 + + FIP218: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.137 + requirements: + - link: + node: CP218 + + FIP219: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.138 + requirements: + - link: + node: CP219 + + FIP220: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.139 + requirements: + - link: + node: CP220 diff --git a/samples/tosca-templates/evaluation/sample2-tosca-50app-tacker.yaml b/samples/tosca-templates/evaluation/sample2-tosca-50app-tacker.yaml new file mode 100644 index 0000000..ffe42cc --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-50app-tacker.yaml @@ -0,0 +1,1595 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Demo example VNFD4 +node_types: + tosca.nodes.nfv.VNF4: + requirements: + - virtualLink1: + type: tosca.nodes.nfv.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.nfv.VNF4 + + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1R + param1: key2 + + CP21: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: 
VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + 
- virtualBinding: + node: VDU7 + + VDU8: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP28: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU8 + VDU9: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP29: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU9 + + VDU10: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP210: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU10 + + VDU11: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP211: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU11 + + VDU12: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP212: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU12 + + 
VDU13: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP213: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU13 + VDU14: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP214: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU14 + + VDU15: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP215: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU15 + + VDU16: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP216: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU16 + + VDU17: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP217: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: 
VDU17 + + VDU18: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP218: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU18 + VDU19: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP219: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU19 + + VDU20: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP220: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU20 + + VDU21: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1R + param1: key2 + + CP221: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU21 + + VDU22: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP222: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU22 + + VDU23: + type: 
tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP223: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU23 + VDU24: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP224: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU24 + + VDU25: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP225: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU25 + + VDU26: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP226: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU26 + + VDU27: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP227: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU27 + + VDU28: 
+ type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP228: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU28 + VDU29: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP229: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU29 + + VDU30: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP230: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU30 + + VDU31: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP231: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU31 + + VDU32: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP232: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU32 + + VDU33: + type: 
tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP233: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU33 + VDU34: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP234: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU34 + + VDU35: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP235: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU35 + + VDU36: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP236: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU36 + + VDU37: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP237: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU37 + + VDU38: 
+ type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP238: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU38 + VDU39: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP239: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU39 + + VDU40: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP240: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU40 + + VDU41: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP241: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU41 + + VDU42: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP242: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU42 + + VDU43: + type: 
tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP243: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU43 + VDU44: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP244: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU44 + + VDU45: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP245: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU45 + + VDU46: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP246: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU46 + + VDU47: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP247: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU47 + + VDU48: 
+ type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP248: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU48 + VDU49: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP249: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU49 + + VDU50: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP250: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU50 + + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net0 + vendor: Tacker + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + 
floating_network: external + floating_ip_address: 203.203.203.124 + requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + + FIP28: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.127 + requirements: + - link: + node: CP28 + + FIP29: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.128 + requirements: + - link: + node: CP29 + + FIP210: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.129 + requirements: + - link: + node: CP210 + + FIP211: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.130 + requirements: + - link: + node: CP211 + + FIP212: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.131 + requirements: + - link: + node: CP212 + + FIP213: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.132 + requirements: + - link: + node: CP213 + + FIP214: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.133 + requirements: + - link: + node: CP214 + + FIP215: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.134 + requirements: + - link: + node: CP215 + + FIP216: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.135 + requirements: + - link: + node: CP216 
+ + FIP217: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.136 + requirements: + - link: + node: CP217 + + FIP218: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.137 + requirements: + - link: + node: CP218 + + FIP219: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.138 + requirements: + - link: + node: CP219 + + FIP220: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.139 + requirements: + - link: + node: CP220 + + FIP221: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.140 + requirements: + - link: + node: CP221 + + FIP222: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.141 + requirements: + - link: + node: CP222 + + FIP223: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.142 + requirements: + - link: + node: CP223 + + FIP224: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.143 + requirements: + - link: + node: CP224 + + FIP225: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.144 + requirements: + - link: + node: CP225 + + FIP226: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.145 + requirements: + - link: + node: CP226 + + FIP227: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.146 + requirements: + - link: + node: CP227 + + FIP228: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + 
floating_ip_address: 203.203.203.147 + requirements: + - link: + node: CP228 + + FIP229: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.148 + requirements: + - link: + node: CP229 + + FIP230: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.149 + requirements: + - link: + node: CP230 + + FIP231: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.150 + requirements: + - link: + node: CP231 + + FIP232: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.151 + requirements: + - link: + node: CP232 + + FIP233: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.152 + requirements: + - link: + node: CP233 + + FIP234: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.153 + requirements: + - link: + node: CP234 + + FIP235: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.154 + requirements: + - link: + node: CP235 + + FIP236: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.155 + requirements: + - link: + node: CP236 + + FIP237: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.156 + requirements: + - link: + node: CP237 + + FIP238: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.157 + requirements: + - link: + node: CP238 + + FIP239: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.158 + requirements: + - link: + node: CP239 + + FIP240: + type: 
tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.159 + requirements: + - link: + node: CP240 + FIP241: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.160 + requirements: + - link: + node: CP241 + + FIP242: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.161 + requirements: + - link: + node: CP242 + + FIP243: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.162 + requirements: + - link: + node: CP243 + + FIP244: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.163 + requirements: + - link: + node: CP244 + + FIP245: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.164 + requirements: + - link: + node: CP245 + + FIP246: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.165 + requirements: + - link: + node: CP246 + + FIP247: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.166 + requirements: + - link: + node: CP247 + + FIP248: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.167 + requirements: + - link: + node: CP248 + + FIP249: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.168 + requirements: + - link: + node: CP249 + + FIP250: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.169 + requirements: + - link: + node: CP250 diff --git a/samples/tosca-templates/evaluation/sample2-tosca-7app-apmec.yaml 
b/samples/tosca-templates/evaluation/sample2-tosca-7app-apmec.yaml new file mode 100644 index 0000000..5f2d0a6 --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-7app-apmec.yaml @@ -0,0 +1,259 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example MEAD4 +node_types: + tosca.nodes.mec.MEA4: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA4 + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | 
+ param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU7 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + 
properties: + floating_network: external + floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.124 + requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + diff --git a/samples/tosca-templates/evaluation/sample2-tosca-7app-tacker.yaml b/samples/tosca-templates/evaluation/sample2-tosca-7app-tacker.yaml new file mode 100644 index 0000000..f767445 --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-7app-tacker.yaml @@ -0,0 +1,260 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Demo example VNFD4 +node_types: + tosca.nodes.nfv.VNF4: + requirements: + - virtualLink1: + type: tosca.nodes.nfv.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.nfv.VNF4 + + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + 
config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: 
+ type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU7 + + + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net0 + vendor: Tacker + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.124 + requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + diff --git a/samples/tosca-templates/evaluation/sample2-tosca-nsd-mec.yaml b/samples/tosca-templates/evaluation/sample2-tosca-nsd-mec.yaml new file mode 100644 index 0000000..959b354 --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-nsd-mec.yaml @@ -0,0 +1,19 @@ 
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Import VNFDs (already on-boarded) without param +imports: + - vnfd1 + - vnfd2 + - vnfd3 + +topology_template: + node_templates: + VNF1: + type: tosca.nodes.nfv.VNF1 + + VNF2: + type: tosca.nodes.nfv.VNF2 + + VNF3: + type: tosca.nodes.nfv.VNF3 + diff --git a/samples/tosca-templates/evaluation/sample2-tosca-nsd.yaml b/samples/tosca-templates/evaluation/sample2-tosca-nsd.yaml new file mode 100644 index 0000000..5d1fa4e --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-nsd.yaml @@ -0,0 +1,23 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Import VNFDs (already on-boarded) without param +imports: + - vnfd1 + - vnfd2 + - vnfd3 + - vnfd4 + +topology_template: + node_templates: + VNF1: + type: tosca.nodes.nfv.VNF1 + + VNF2: + type: tosca.nodes.nfv.VNF2 + + VNF3: + type: tosca.nodes.nfv.VNF3 + + VNF4: + type: tosca.nodes.nfv.VNF4 + diff --git a/samples/tosca-templates/evaluation/sample2-tosca-vnfd-3app.yaml b/samples/tosca-templates/evaluation/sample2-tosca-vnfd-3app.yaml new file mode 100644 index 0000000..e6fc673 --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-vnfd-3app.yaml @@ -0,0 +1,89 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Demo example VNFD4 +node_types: + tosca.nodes.nfv.VNF4: + requirements: + - virtualLink1: + type: tosca.nodes.nfv.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.nfv.VNF4 + + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP41: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + 
VDU2: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP42: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net0 + vendor: Tacker + + + FIP41: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP41 + + FIP42: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP42 diff --git a/samples/tosca-templates/evaluation/sample2-tosca-vnfd1.yaml b/samples/tosca-templates/evaluation/sample2-tosca-vnfd1.yaml new file mode 100644 index 0000000..d8db350 --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-vnfd1.yaml @@ -0,0 +1,55 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Demo Sample VNFD1 +node_types: + tosca.nodes.nfv.VNF1: + requirements: + - virtualLink1: + type: tosca.nodes.nfv.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.nfv.VNF1 + + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + CP11: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net0 + vendor: Tacker + + FIP11: + type: 
tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.100 + requirements: + - link: + node: CP11 + diff --git a/samples/tosca-templates/evaluation/sample2-tosca-vnfd2.yaml b/samples/tosca-templates/evaluation/sample2-tosca-vnfd2.yaml new file mode 100644 index 0000000..114d4c6 --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-vnfd2.yaml @@ -0,0 +1,56 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Demo example VNFD2 +node_types: + tosca.nodes.nfv.VNF2: + requirements: + - virtualLink1: + type: tosca.nodes.nfv.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.nfv.VNF2 + + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP12: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net0 + vendor: Tacker + + + FIP12: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.102 + requirements: + - link: + node: CP12 + diff --git a/samples/tosca-templates/evaluation/sample2-tosca-vnfd3.yaml b/samples/tosca-templates/evaluation/sample2-tosca-vnfd3.yaml new file mode 100644 index 0000000..216590e --- /dev/null +++ b/samples/tosca-templates/evaluation/sample2-tosca-vnfd3.yaml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 + +description: Demo example VNFD3 +node_types: + tosca.nodes.nfv.VNF3: + requirements: + - virtualLink1: + type: tosca.nodes.nfv.VL + required: true + +topology_template: + substitution_mappings: + node_type: 
tosca.nodes.nfv.VNF3 + + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + properties: + flavor: m1.small + image: ubuntu-xenial + mgmt_driver: noop + availability_zone: nova + config_drive: true + key_name: mykey + config: | + param0: key1 + param1: key2 + + CP13: + type: tosca.nodes.nfv.CP.Tacker + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net0 + vendor: Tacker + + + FIP13: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.104 + requirements: + - link: + node: CP13 + diff --git a/samples/tosca-templates/evaluation/samples2-tosca-50app-apmec.yaml b/samples/tosca-templates/evaluation/samples2-tosca-50app-apmec.yaml new file mode 100644 index 0000000..5705be7 --- /dev/null +++ b/samples/tosca-templates/evaluation/samples2-tosca-50app-apmec.yaml @@ -0,0 +1,1594 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example MEAD4 +node_types: + tosca.nodes.mec.MEA4: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA4 + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + properties: + 
order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP23: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + VDU4: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP24: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU4 + + VDU5: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP25: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU5 + + VDU6: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP26: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU6 + + VDU7: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP27: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + 
management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU7 + + VDU8: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP28: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU8 + VDU9: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP29: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU9 + + VDU10: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP210: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU10 + + VDU11: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP211: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU11 + + VDU12: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP212: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false 
+ requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU12 + + VDU13: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP213: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU13 + VDU14: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP214: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU14 + + VDU15: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP215: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU15 + + VDU16: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP216: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU16 + + VDU17: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP217: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + 
requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU17 + + VDU18: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP218: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU18 + VDU19: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP219: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU19 + + VDU20: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP220: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU20 + + VDU21: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP221: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU21 + + VDU22: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP222: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 
+ - virtualBinding: + node: VDU22 + + VDU23: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP223: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU23 + VDU24: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP224: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU24 + + VDU25: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP225: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU25 + + VDU26: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP226: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU26 + + VDU27: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP227: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + 
- virtualBinding: + node: VDU27 + + VDU28: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP228: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU28 + VDU29: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP229: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU29 + + VDU30: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP230: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU30 + + VDU31: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP231: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU31 + + VDU32: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP232: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU32 + + VDU33: 
+ type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP233: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU33 + VDU34: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP234: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU34 + + VDU35: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP235: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU35 + + VDU36: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP236: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU36 + + VDU37: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP237: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU37 + + VDU38: + 
type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP238: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU38 + VDU39: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP239: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU39 + + VDU40: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP240: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU40 + + VDU41: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP241: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU41 + + VDU42: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP242: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU42 + + VDU43: + type: tosca.nodes.mec.VDU.Apmec + 
properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP243: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU43 + VDU44: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP244: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU44 + + VDU45: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP245: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU45 + + VDU46: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP246: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU46 + + VDU47: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP247: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU47 + + VDU48: + type: tosca.nodes.mec.VDU.Apmec + 
properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP248: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU48 + VDU49: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP249: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU49 + + VDU50: + type: tosca.nodes.mec.VDU.Apmec + properties: + flavor: m1.tiny + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + config: | + param0: key1 + param1: key2 + + CP250: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU50 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + + FIP21: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.120 + requirements: + - link: + node: CP21 + + FIP22: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.121 + requirements: + - link: + node: CP22 + + FIP23: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.122 + requirements: + - link: + node: CP23 + + FIP24: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.123 + requirements: + - link: + node: CP24 + + FIP25: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 
203.203.203.124 + requirements: + - link: + node: CP25 + + FIP26: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.125 + requirements: + - link: + node: CP26 + + FIP27: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.126 + requirements: + - link: + node: CP27 + + FIP28: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.127 + requirements: + - link: + node: CP28 + + FIP29: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.128 + requirements: + - link: + node: CP29 + + FIP210: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.129 + requirements: + - link: + node: CP210 + + FIP211: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.130 + requirements: + - link: + node: CP211 + + FIP212: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.131 + requirements: + - link: + node: CP212 + + FIP213: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.132 + requirements: + - link: + node: CP213 + + FIP214: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.133 + requirements: + - link: + node: CP214 + + FIP215: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.134 + requirements: + - link: + node: CP215 + + FIP216: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.135 + requirements: + - link: + node: CP216 + + FIP217: + type: 
tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.136 + requirements: + - link: + node: CP217 + + FIP218: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.137 + requirements: + - link: + node: CP218 + + FIP219: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.138 + requirements: + - link: + node: CP219 + + FIP220: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.139 + requirements: + - link: + node: CP220 + FIP221: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.140 + requirements: + - link: + node: CP221 + + FIP222: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.141 + requirements: + - link: + node: CP222 + + FIP223: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.142 + requirements: + - link: + node: CP223 + + FIP224: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.150 + requirements: + - link: + node: CP224 + + FIP225: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.151 + requirements: + - link: + node: CP225 + + FIP226: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.152 + requirements: + - link: + node: CP226 + + FIP227: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.153 + requirements: + - link: + node: CP227 + + FIP228: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 
203.203.203.154 + requirements: + - link: + node: CP228 + + FIP229: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.155 + requirements: + - link: + node: CP229 + + FIP230: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.156 + requirements: + - link: + node: CP230 + + FIP231: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.157 + requirements: + - link: + node: CP231 + + FIP232: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.158 + requirements: + - link: + node: CP232 + + FIP233: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.159 + requirements: + - link: + node: CP233 + + FIP234: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.160 + requirements: + - link: + node: CP234 + + FIP235: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.161 + requirements: + - link: + node: CP235 + + FIP236: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.162 + requirements: + - link: + node: CP236 + + FIP237: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.163 + requirements: + - link: + node: CP237 + + FIP238: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.164 + requirements: + - link: + node: CP238 + + FIP239: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.165 + requirements: + - link: + node: CP239 + + FIP240: + type: 
tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.166 + requirements: + - link: + node: CP240 + FIP241: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.167 + requirements: + - link: + node: CP241 + + FIP242: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.168 + requirements: + - link: + node: CP242 + + FIP243: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.169 + requirements: + - link: + node: CP243 + + FIP244: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.143 + requirements: + - link: + node: CP244 + + FIP245: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.144 + requirements: + - link: + node: CP245 + + FIP246: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.145 + requirements: + - link: + node: CP246 + + FIP247: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.146 + requirements: + - link: + node: CP247 + + FIP248: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.147 + requirements: + - link: + node: CP248 + + FIP249: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.148 + requirements: + - link: + node: CP249 + + FIP250: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: external + floating_ip_address: 203.203.203.149 + requirements: + - link: + node: CP250 diff --git a/samples/tosca-templates/evaluation/test_simple_mesd.yaml 
b/samples/tosca-templates/evaluation/test_simple_mesd.yaml new file mode 100644 index 0000000..b27acfc --- /dev/null +++ b/samples/tosca-templates/evaluation/test_simple_mesd.yaml @@ -0,0 +1,15 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Import MEADs(already on-boarded) and NSD (already on-boarded) with input parameters +imports: + meads: + - mead1 + nsds: + - nsd + +topology_template: + node_templates: + MEA4: + type: tosca.nodes.mec.MEA4 + + diff --git a/samples/tosca-templates/mead/test_tosca_meac.yaml b/samples/tosca-templates/mead/test_tosca_meac.yaml new file mode 100644 index 0000000..4a1b904 --- /dev/null +++ b/samples/tosca-templates/mead/test_tosca_meac.yaml @@ -0,0 +1,40 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +metadata: + template_name: sample-tosca-mead-for-meac + +topology_template: + node_templates: + firewall_meac: + type: tosca.nodes.mec.MEAC.Apmec + requirements: + - host: VDU1 + interfaces: + Standard: + create: install_meac.sh + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: fedora-software-config + flavor: m1.small + mgmt_driver: noop + key_name: stack_key + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/test_tosca_meac_multiple_servers.yaml b/samples/tosca-templates/mead/test_tosca_meac_multiple_servers.yaml new file mode 100644 index 0000000..2a1ac7c --- /dev/null +++ b/samples/tosca-templates/mead/test_tosca_meac_multiple_servers.yaml @@ -0,0 +1,63 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +metadata: + template_name: sample-tosca-mead-for-meac + +topology_template: + node_templates: + firewall_meac: + type: tosca.nodes.mec.MEAC.Apmec + 
requirements: + - host: VDU1 + - host: VDU2 + interfaces: + Standard: + create: /home/bharatht/install_meac.sh + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: fedora-software-config + flavor: m1.small + mgmt_driver: noop + key_name: stack_key + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: fedora-software-config + flavor: m1.small + mgmt_driver: noop + key_name: stack_key + config: | + param0: key1 + param1: key2 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: private + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-config-openwrt-vrouter.yaml b/samples/tosca-templates/mead/tosca-config-openwrt-vrouter.yaml new file mode 100644 index 0000000..0c41061 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-config-openwrt-vrouter.yaml @@ -0,0 +1,46 @@ +vdus: + VDU1: + config: + network: | + package network + + config interface 'loopback' + option ifname 'lo' + option proto 'static' + option ipaddr '127.0.0.1' + option netmask '255.0.0.0' + + config interface 'mgmt_net' + option ifname 'eth0' + option proto 'dhcp' + + config interface 'net1' + option ifname 'eth1' + option proto 'dhcp' + + config interface 'net2' + option ifname 'eth2' + option proto 'dhcp' + + firewall: | + package firewall + + config defaults + option syn_flood '1' + option input 'ACCEPT' + option output 'ACCEPT' + option forward 'ACCEPT' + + config zone + option name 'mgmt_net' + option network 'mgmt_net' + option input 'ACCEPT' + option output 'ACCEPT' + option forward 'REJECT' + + config zone + option name 'lan' + list 
network 'net1 net2' + option input 'ACCEPT' + option output 'ACCEPT' + option forward 'ACCEPT' diff --git a/samples/tosca-templates/mead/tosca-config-openwrt-with-firewall.yaml b/samples/tosca-templates/mead/tosca-config-openwrt-with-firewall.yaml new file mode 100644 index 0000000..42f071b --- /dev/null +++ b/samples/tosca-templates/mead/tosca-config-openwrt-with-firewall.yaml @@ -0,0 +1,97 @@ +vdus: + VDU1: + config: + firewall: | + package firewall + + config defaults + option syn_flood '1' + option input 'ACCEPT' + option output 'ACCEPT' + option forward 'REJECT' + + config zone + option name 'lan' + list network 'lan' + option input 'ACCEPT' + option output 'ACCEPT' + option forward 'ACCEPT' + + config zone + option name 'wan' + list network 'wan' + list network 'wan6' + option input 'REJECT' + option output 'ACCEPT' + option forward 'REJECT' + option masq '1' + option mtu_fix '1' + + config forwarding + option src 'lan' + option dest 'wan' + + config rule + option name 'Allow-DHCP-Renew' + option src 'wan' + option proto 'udp' + option dest_port '68' + option target 'ACCEPT' + option family 'ipv4' + + config rule + option name 'Allow-Ping' + option src 'wan' + option proto 'icmp' + option icmp_type 'echo-request' + option family 'ipv4' + option target 'ACCEPT' + + config rule + option name 'Allow-IGMP' + option src 'wan' + option proto 'igmp' + option family 'ipv4' + option target 'ACCEPT' + + config rule + option name 'Allow-DHCPv6' + option src 'wan' + option proto 'udp' + option src_ip 'fe80::/10' + option src_port '547' + option dest_ip 'fe80::/10' + option dest_port '546' + option family 'ipv6' + option target 'ACCEPT' + + config rule + option name 'Allow-MLD' + option src 'wan' + option proto 'icmp' + option src_ip 'fe80::/10' + list icmp_type '130/0' + list icmp_type '131/0' + list icmp_type '132/0' + list icmp_type '143/0' + option family 'ipv6' + option target 'ACCEPT' + + config rule + option name 'Allow-ICMPv6-Input' + option src 'wan' + option 
proto 'icmp' + list icmp_type 'echo-request' + list icmp_type 'echo-reply' + list icmp_type 'destination-unreachable' + list icmp_type 'packet-too-big' + list icmp_type 'time-exceeded' + list icmp_type 'bad-header' + list icmp_type 'unknown-header-type' + list icmp_type 'router-solicitation' + list icmp_type 'neighbour-solicitation' + list icmp_type 'router-advertisement' + list icmp_type 'neighbour-advertisement' + option limit '190/sec' + option family 'ipv6' + option target 'REJECT' diff --git a/samples/tosca-templates/mead/tosca-mead-alarm-multi-actions.yaml b/samples/tosca-templates/mead/tosca-mead-alarm-multi-actions.yaml new file mode 100644 index 0000000..15814c3 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-alarm-multi-actions.yaml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: VDU1} + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - vdu1_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_respawning: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: average + comparison_operator: gt + metadata: VDU1 + action: [respawn, log] diff --git 
a/samples/tosca-templates/mead/tosca-mead-alarm-respawn.yaml b/samples/tosca-templates/mead/tosca-mead-alarm-respawn.yaml new file mode 100644 index 0000000..7090eb1 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-alarm-respawn.yaml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: VDU1} + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - vdu1_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_respawning: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: average + comparison_operator: gt + metadata: VDU1 + action: [respawn] diff --git a/samples/tosca-templates/mead/tosca-mead-alarm-scale.yaml b/samples/tosca-templates/mead/tosca-mead-alarm-scale.yaml new file mode 100644 index 0000000..9f01aaa --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-alarm-scale.yaml @@ -0,0 +1,106 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + 
properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: SG1} + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + VDU2: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + metadata: {metering.mea: SG1} + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - SP1: + type: tosca.policies.apmec.Scaling + targets: [VDU1,VDU2] + properties: + increment: 1 + cooldown: 120 + min_instances: 1 + max_instances: 3 + default_instances: 2 + + - vdu_cpu_usage_monitoring_policy: + type: tosca.policies.apmec.Alarming + triggers: + vdu_hcpu_usage_scaling_out: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 50 + constraint: utilization greater_than 50% + period: 600 + evaluations: 1 + method: average + comparison_operator: gt + metadata: SG1 + action: [SP1] + + vdu_lcpu_usage_scaling_in: + event_type: + type: tosca.events.resource.utilization + implementation: ceilometer + meter_name: cpu_util + condition: + threshold: 10 + constraint: utilization less_than 10% + period: 600 + evaluations: 1 + method: average + comparison_operator: lt + metadata: SG1 + action: [SP1] diff --git a/samples/tosca-templates/mead/tosca-mead-block-attach.yaml b/samples/tosca-templates/mead/tosca-mead-block-attach.yaml new file mode 100644 index 0000000..498bc41 --- /dev/null +++ 
b/samples/tosca-templates/mead/tosca-mead-block-attach.yaml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VB1: + type: tosca.nodes.BlockStorage.Apmec + properties: + size: 1 GB + image: cirros-0.3.5-x86_64-disk + + CB1: + type: tosca.nodes.BlockStorageAttachment + properties: + location: /dev/vdb + requirements: + - virtualBinding: + node: VDU1 + - virtualAttachment: + node: VB1 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-cpu-dedicate.yaml b/samples/tosca-templates/mead/tosca-mead-cpu-dedicate.yaml new file mode 100644 index 0000000..3866d52 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-cpu-dedicate.yaml @@ -0,0 +1,38 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Dedicated CPU example + +metadata: + template_name: sample-tosca-mead-cpu-dedicate + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 2048 MB + num_cpus: 2 + cpu_allocation: + cpu_affinity: dedicated + properties: + image: cirros-0.3.5-x86_64-disk + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: 
tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-hello-world.yaml b/samples/tosca-templates/mead/tosca-mead-hello-world.yaml new file mode 100644 index 0000000..5bd8284 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-hello-world.yaml @@ -0,0 +1,76 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-http-monitor.yaml b/samples/tosca-templates/mead/tosca-mead-http-monitor.yaml new file mode 100644 index 0000000..83dba5b --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-http-monitor.yaml @@ -0,0 +1,84 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + 
template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 2 GB + disk_size: 20 GB + properties: + image: ubuntu + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + monitoring_policy: + name: http_ping + parameters: + retry: 5 + timeout: 10 + port: 8000 + actions: + failure: respawn + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-hugepages.yaml b/samples/tosca-templates/mead/tosca-mead-hugepages.yaml new file mode 100644 index 0000000..4b92063 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-hugepages.yaml @@ -0,0 +1,37 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Huge Pages example + +metadata: + template_name: sample-tosca-mead-hugepages + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 2048 MB + num_cpus: 2 + mem_page_size: large + properties: + image: cirros-0.3.5-x86_64-disk + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + 
management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-image.yaml b/samples/tosca-templates/mead/tosca-mead-image.yaml new file mode 100644 index 0000000..14cc24e --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-image.yaml @@ -0,0 +1,79 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example with auto image creation + +metadata: + template_name: sample-tosca-mead-image + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + artifacts: + MEAImage: + type: tosca.artifacts.Deployment.Image.VM + file: http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.tar.gz + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-keyname.yaml b/samples/tosca-templates/mead/tosca-mead-keyname.yaml new file 
mode 100644 index 0000000..769e424 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-keyname.yaml @@ -0,0 +1,43 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example with key_name + +metadata: + template_name: sample-tosca-mead-keyname + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + key_name: userKey + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-mac-ip.yaml b/samples/tosca-templates/mead/tosca-mead-mac-ip.yaml new file mode 100644 index 0000000..6775ec9 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-mac-ip.yaml @@ -0,0 +1,45 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example with key_name + +metadata: + template_name: sample-tosca-mead-keyname + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + key_name: userKey + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + mac_address: 6c:40:08:a0:de:0a + ip_address: 10.10.1.12 + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec 
diff --git a/samples/tosca-templates/mead/tosca-mead-monitor-multi-vdu.yaml b/samples/tosca-templates/mead/tosca-mead-monitor-multi-vdu.yaml new file mode 100644 index 0000000..91fb745 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-monitor-multi-vdu.yaml @@ -0,0 +1,193 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Monitoring for multiple vdus + +metadata: + template_name: tosca-mead-monitoir-multi-vdu + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + monitoring_policy: + name: ping + parameters: + monitoring_delay: 45 + count: 3 + interval: 1 + timeout: 2 + actions: + failure: respawn + + + CP11: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP12: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP13: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + monitoring_policy: + name: ping + parameters: + monitoring_delay: 45 + count: 3 + interval: 1 + timeout: 2 + actions: + failure: respawn + + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + CP22: + type: 
tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU2 + + CP23: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + monitoring_policy: + name: ping + parameters: + monitoring_delay: 45 + count: 3 + interval: 1 + timeout: 2 + actions: + failure: respawn + + CP31: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + + CP32: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU3 + + CP33: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU3 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-monitor.yaml b/samples/tosca-templates/mead/tosca-mead-monitor.yaml new file mode 100644 index 0000000..4e157fd --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-monitor.yaml @@ -0,0 +1,85 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + 
num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + monitoring_policy: + name: ping + parameters: + monitoring_delay: 45 + count: 3 + interval: 1 + timeout: 2 + actions: + failure: respawn + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-multi-vdu.yaml b/samples/tosca-templates/mead/tosca-mead-multi-vdu.yaml new file mode 100644 index 0000000..eaa1e40 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-multi-vdu.yaml @@ -0,0 +1,164 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP11: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + 
- virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP12: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP13: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.medium + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + CP22: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU2 + + CP23: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU2 + + VDU3: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP31: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU3 + + CP32: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU3 + + CP33: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU3 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + 
properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-network.yaml b/samples/tosca-templates/mead/tosca-mead-network.yaml new file mode 100644 index 0000000..e8e760d --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-network.yaml @@ -0,0 +1,78 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo MEAD with custom network details. + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: custom_net0 + vendor: Apmec + ip_version: 4 + cidr: '20.0.0.0/24' + start_ip: '20.0.0.50' + end_ip: '20.0.0.200' + gateway_ip: '20.0.0.1' + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: custom_net1 + vendor: Apmec + ip_version: 4 + cidr: '30.0.0.0/24' diff --git a/samples/tosca-templates/mead/tosca-mead-nova-flavor.yaml b/samples/tosca-templates/mead/tosca-mead-nova-flavor.yaml new file mode 100644 index 0000000..9346905 --- /dev/null +++ 
b/samples/tosca-templates/mead/tosca-mead-nova-flavor.yaml @@ -0,0 +1,71 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + flavor: m1.tiny + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-numacount.yaml b/samples/tosca-templates/mead/tosca-mead-numacount.yaml new file mode 100644 index 0000000..0b10852 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-numacount.yaml @@ -0,0 +1,37 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: NUMA Node Count Input example + +metadata: + template_name: sample-tosca-mead-numacount + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 2048 MB + num_cpus: 2 + numa_node_count: 2 + properties: + image: cirros-0.3.5-x86_64-disk + + CP1: + type: tosca.nodes.mec.CP.Apmec + 
properties: + management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-numadefine.yaml b/samples/tosca-templates/mead/tosca-mead-numadefine.yaml new file mode 100644 index 0000000..09d62d4 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-numadefine.yaml @@ -0,0 +1,47 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: NUMA Node Define example + +metadata: + template_name: sample-tosca-mead-numadefine + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 4096 MB + num_cpus: 6 + numa_nodes: + node0: + id: 0 + vcpus: [0, 1] + mem_size: 1024 + node1: + id: 1 + vcpus: [2,3,4,5] + mem_size: 3072 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-openwrt.yaml b/samples/tosca-templates/mead/tosca-mead-openwrt.yaml new file mode 100644 index 0000000..224bbb9 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-openwrt.yaml @@ -0,0 +1,84 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: OpenWRT with services + +metadata: + template_name: OpenWRT + +topology_template: + node_templates: + + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: OpenWRT + config: | + param0: key1 + param1: key2 + mgmt_driver: openwrt + monitoring_policy: + name: ping + 
parameters: + count: 3 + interval: 10 + actions: + failure: respawn + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 2 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec + diff --git a/samples/tosca-templates/mead/tosca-mead-param-values.yaml b/samples/tosca-templates/mead/tosca-mead-param-values.yaml new file mode 100644 index 0000000..dd73de4 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-param-values.yaml @@ -0,0 +1,10 @@ +{ + image_name: 'cirros-0.3.5-x86_64-disk', + flavor: 'm1.tiny', + zone: 'nova', + network: 'net_mgmt', + management: 'true', + pkt_in_network: 'net0', + pkt_out_network: 'net1', + vendor: 'apmec' +} diff --git a/samples/tosca-templates/mead/tosca-mead-scale.yaml b/samples/tosca-templates/mead/tosca-mead-scale.yaml new file mode 100644 index 0000000..e3c8eb8 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-scale.yaml @@ -0,0 +1,65 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: sample-tosca-mead-scaling + +metadata: + template_name: sample-tosca-mead-scaling + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + flavor: m1.tiny + + CP1: + type: 
tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + availability_zone: nova + flavor: m1.tiny + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + policies: + - SP1: + type: tosca.policies.apmec.Scaling + targets: [VDU1, VDU2] + properties: + increment: 1 + cooldown: 120 + min_instances: 1 + max_instances: 3 + default_instances: 2 diff --git a/samples/tosca-templates/mead/tosca-mead-secgroups.yaml b/samples/tosca-templates/mead/tosca-mead-secgroups.yaml new file mode 100644 index 0000000..38200ea --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-secgroups.yaml @@ -0,0 +1,45 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example with key_name + +metadata: + template_name: sample-tosca-mead-secgroups + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + key_name: userKey + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: true + security_groups: + - default + - test_secgrp + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-sriov.yaml 
b/samples/tosca-templates/mead/tosca-mead-sriov.yaml new file mode 100644 index 0000000..f03d4de --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-sriov.yaml @@ -0,0 +1,48 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: SR-IOV example + +metadata: + template_name: sample-tosca-mead-sriov + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: ubuntu + flavor: numa-sriov + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 1 + type: sriov + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: sr3010 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-userdata.yaml b/samples/tosca-templates/mead/tosca-mead-userdata.yaml new file mode 100644 index 0000000..9206644 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-userdata.yaml @@ -0,0 +1,47 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo with user-data + +metadata: + template_name: sample-mead-userdata + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + config: | + param0: key1 + param1: key2 + mgmt_driver: noop + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + df -h > /home/openwrt/diskinfo + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + 
+ VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: ACME + diff --git a/samples/tosca-templates/mead/tosca-mead-vcpu-topology.yaml b/samples/tosca-templates/mead/tosca-mead-vcpu-topology.yaml new file mode 100644 index 0000000..33b2425 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-vcpu-topology.yaml @@ -0,0 +1,40 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: vCPU Topology example + +metadata: + template_name: sample-tosca-mead-vcpu-topology + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 80 GB + mem_size: 4096 MB + num_cpus: 8 + cpu_allocation: + socket_count: 2 + thread_count: 2 + core_count: 2 + properties: + image: cirros-0.3.5-x86_64-disk + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + order: 0 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-vdu-name.yaml b/samples/tosca-templates/mead/tosca-mead-vdu-name.yaml new file mode 100644 index 0000000..d9f6a43 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-vdu-name.yaml @@ -0,0 +1,84 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +metadata: + template_name: sample-tosca-mead-vdu-name + +topology_template: + inputs: + vdu-name: + type: string + description: Vdu name + default: test-vdu + cp-name: + type: string + description: Cp name + default: test-cp + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + name: {get_input : vdu-name} + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: 
tosca.nodes.mec.CP.Apmec + properties: + name: {get_input : cp-name} + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mgmt + vendor: Apmec + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: net0 + vendor: Apmec + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-vip.yaml b/samples/tosca-templates/mead/tosca-mead-vip.yaml new file mode 100644 index 0000000..4f33aeb --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-vip.yaml @@ -0,0 +1,94 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: | + Demo example with virtural IP. + The VCP is used to grab an IP which will be used as an virtual IP as CP1 and CP2. 
+ +metadata: + template_name: sample-tosca-mead-vip + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: true + security_groups: + - default + ip_address: 10.10.1.11 + order: 0 + allowed_address_pairs: + - ip_address: 10.10.1.13 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + num_cpus: 1 + mem_size: 512 MB + disk_size: 1 GB + properties: + image: cirros-0.3.5-x86_64-disk + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: true + security_groups: + - default + ip_address: 10.10.1.12 + order: 0 + allowed_address_pairs: + - ip_address: 10.10.1.13 + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VCP: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: true + security_groups: + - default + ip_address: 10.10.1.13 + requirements: + - virtualLink: + node: VL1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec diff --git a/samples/tosca-templates/mead/tosca-mead-with-params.yaml b/samples/tosca-templates/mead/tosca-mead-with-params.yaml new file mode 100644 index 0000000..eae5c98 --- /dev/null +++ b/samples/tosca-templates/mead/tosca-mead-with-params.yaml @@ -0,0 +1,101 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: MEA TOSCA template with input parameters + +metadata: + template_name: sample-tosca-mead + 
+topology_template: + inputs: + image_name: + type: string + description: Image Name + + flavor: + type: string + description: Flavor Information + + zone: + type: string + description: Zone Information + + network: + type: string + description: management network + + management: + type: string + description: management network + + pkt_in_network: + type: string + description: In network + + pkt_out_network: + type: string + description: Out network + + vendor: + type: string + description: Vendor information + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + properties: + image: { get_input: image_name } + flavor: { get_input: flavor } + availability_zone: { get_input: zone } + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: { get_input: management } + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + CP2: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL2 + - virtualBinding: + node: VDU1 + + CP3: + type: tosca.nodes.mec.CP.Apmec + properties: + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL3 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: { get_input: network } + vendor: { get_input: vendor } + + VL2: + type: tosca.nodes.mec.VL + properties: + network_name: { get_input: pkt_in_network } + vendor: { get_input: vendor } + + VL3: + type: tosca.nodes.mec.VL + properties: + network_name: { get_input: pkt_out_network } + vendor: { get_input: vendor } diff --git a/samples/tosca-templates/mead/tosca_mead_assign_fip_to_vdu_floating_ip_address.yaml b/samples/tosca-templates/mead/tosca_mead_assign_fip_to_vdu_floating_ip_address.yaml new file mode 100644 index 0000000..aac8748 --- /dev/null +++ 
b/samples/tosca-templates/mead/tosca_mead_assign_fip_to_vdu_floating_ip_address.yaml @@ -0,0 +1,45 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Example Floating IP - Allocate specified IP from floating network and attach to CP. + +metadata: + template_name: sample-tosca-mead-test-fip-with-floating-ip-address + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 1 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec + + FIP1: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: public + floating_ip_address: 192.168.56.154 + requirements: + - link: + node: CP1 \ No newline at end of file diff --git a/samples/tosca-templates/mead/tosca_mead_assign_fip_to_vdu_floating_network.yaml b/samples/tosca-templates/mead/tosca_mead_assign_fip_to_vdu_floating_network.yaml new file mode 100644 index 0000000..6860959 --- /dev/null +++ b/samples/tosca-templates/mead/tosca_mead_assign_fip_to_vdu_floating_network.yaml @@ -0,0 +1,44 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Example Floating IP - Allocate one IP from floating network and attach to CP. 
+ +metadata: + template_name: sample-tosca-mead-test-fip-with-floating-network + +topology_template: + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 1 GB + mem_size: 512 MB + num_cpus: 1 + properties: + image: cirros-0.3.5-x86_64-disk + mgmt_driver: noop + + CP1: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net1 + vendor: Apmec + + FIP1: + type: tosca.nodes.network.FloatingIP + properties: + floating_network: public + requirements: + - link: + node: CP1 \ No newline at end of file diff --git a/samples/tosca-templates/mesd/sample-tosca-mead1.yaml b/samples/tosca-templates/mesd/sample-tosca-mead1.yaml new file mode 100644 index 0000000..ca35a97 --- /dev/null +++ b/samples/tosca-templates/mesd/sample-tosca-mead1.yaml @@ -0,0 +1,94 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo Sample MEAD1 +node_types: + tosca.nodes.mec.MEA1: + requirements: + - virtualLink1: + type: tosca.nodes.mec.VL + required: true + - virtualLink2: + type: tosca.nodes.mec.VL + required: true + +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA1 + + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: ubuntu-xenial + config_drive: true + key_name: mykey + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + mgmt_driver: noop + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + df -h > /home/openwrt/diskinfo + + CP11: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + 
node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: ubuntu-xenial + config_drive: true + key_name: mykey + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + mgmt_driver: noop + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + df -h > /home/openwrt/diskinfo + + CP12: + type: tosca.nodes.mec.CP.Apmec + properties: + order: 0 + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mes + vendor: Apmec diff --git a/samples/tosca-templates/mesd/sample-tosca-mead2.yaml b/samples/tosca-templates/mesd/sample-tosca-mead2.yaml new file mode 100644 index 0000000..95ad829 --- /dev/null +++ b/samples/tosca-templates/mesd/sample-tosca-mead2.yaml @@ -0,0 +1,89 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Demo example + +node_types: + tosca.nodes.mec.MEA2: + capabilities: + forwarder1: + type: tosca.capabilities.mec.Forwarder +topology_template: + substitution_mappings: + node_type: tosca.nodes.mec.MEA2 + capabilities: + forwarder1: [CP21, forwarder] + node_templates: + VDU1: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: ubuntu-xenial + config_drive: true + key_name: mykey + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + mgmt_driver: noop + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + df -h > /home/openwrt/diskinfo + + CP21: + type: tosca.nodes.mec.CP.Apmec + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + 
node: VDU1 + + VDU2: + type: tosca.nodes.mec.VDU.Apmec + capabilities: + mec_compute: + properties: + disk_size: 10 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: ubuntu-xenial + config_drive: true + key_name: mykey + availability_zone: nova + mgmt_driver: noop + config: | + param0: key1 + param1: key2 + mgmt_driver: noop + user_data_format: RAW + user_data: | + #!/bin/sh + echo "my hostname is `hostname`" > /tmp/hostname + df -h > /home/openwrt/diskinfo + + CP22: + type: tosca.nodes.mec.CP.Apmec + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU2 + + VL1: + type: tosca.nodes.mec.VL + properties: + network_name: net_mes + vendor: Apmec + + diff --git a/samples/tosca-templates/mesd/test_simple_mesd.yaml b/samples/tosca-templates/mesd/test_simple_mesd.yaml new file mode 100644 index 0000000..dedbc71 --- /dev/null +++ b/samples/tosca-templates/mesd/test_simple_mesd.yaml @@ -0,0 +1,20 @@ +tosca_definitions_version: tosca_simple_profile_for_mec_1_0_0 + +description: Import MEADs(already on-boarded) and NSD (already on-boarded) with input parameters +imports: + meads: + - mead1 + - mead2 + nsds: + - nsd1 + vnffgds: + - vnffgd1 + +topology_template: + node_templates: + MEA1: + type: tosca.nodes.mec.MEA1 + MEA2: + type: tosca.nodes.mec.MEA2 + + diff --git a/samples/tosca-templates/nfv/vnfd1.yaml b/samples/tosca-templates/nfv/vnfd1.yaml new file mode 100644 index 0000000..5c2568b --- /dev/null +++ b/samples/tosca-templates/nfv/vnfd1.yaml @@ -0,0 +1,41 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 +description: Demo example +metadata: + template_name: sample-tosca-vnfd +topology_template: + node_templates: + VDU1: + type: tosca.nodes.nfv.VDU.Tacker + capabilities: + nfv_compute: + properties: + disk_size: 10 GB + mem_size: 512 MB + num_cpus: 2 + properties: + image: ubuntu-xenial + mgmt_driver: noop + config_drive: true + availability_zone: nova + key_name: mykey + mgmt_driver: noop + user_data_format: RAW + 
user_data: | + #!/bin/sh + echo 1 > /proc/sys/net/ipv4/ip_forward + + CP1: + type: tosca.nodes.nfv.CP.Tacker + properties: + management: true + anti_spoofing_protection: false + requirements: + - virtualLink: + node: VL1 + - virtualBinding: + node: VDU1 + VL1: + type: tosca.nodes.nfv.VL + properties: + network_name: net_sfc + vendor: Tacker diff --git a/samples/tosca-templates/nfv/vnffgd1.yaml b/samples/tosca-templates/nfv/vnffgd1.yaml new file mode 100644 index 0000000..88bd81f --- /dev/null +++ b/samples/tosca-templates/nfv/vnffgd1.yaml @@ -0,0 +1,30 @@ +tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0 +description: Sample VNFFG template +topology_template: + description: Sample VNFFG template + node_templates: + Forwarding_path1: + type: tosca.nodes.nfv.FP.Tacker + description: creates path (CP12->CP22) + properties: + id: 51 + policy: + type: ACL + criteria: + - network_src_port_id: c94b48d5-19b0-4dbc-8b48-eff4a53eb20a + ip_proto: 1 + path: + - forwarder: VNFD1 + capability: CP1 + groups: + VNFFG1: + type: tosca.groups.nfv.VNFFG + description: HTTP to Corporate Net + properties: + vendor: tacker + version: 1.0 + number_of_endpoints: 1 + dependent_virtual_link: [VL1] + connection_point: [CP1] + constituent_vnfs: [VNFD1] + members: [Forwarding_path1] diff --git a/samples/vim/vim_config.yaml b/samples/vim/vim_config.yaml new file mode 100644 index 0000000..9bcbcc9 --- /dev/null +++ b/samples/vim/vim_config.yaml @@ -0,0 +1,6 @@ +auth_url: 'http://10.18.112.10:5000' +username: 'mec_user' +password: 'mySecretPW' +project_name: 'mec' +project_domain_name: 'Default' +user_domain_name: 'Default' diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..b4ab35e --- /dev/null +++ b/setup.cfg @@ -0,0 +1,120 @@ +[metadata] +name = apmec +summary = OpenStack MEM Orchestration +description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://docs.openstack.org/developer/apmec/ +classifier = + 
Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 3 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 3.5 + +[files] +packages = + apmec +data_files = + etc/apmec = + etc/apmec/api-paste.ini + etc/apmec/policy.json + etc/apmec/rootwrap.conf + etc/rootwrap.d = + etc/apmec/rootwrap.d/apmec.filters + etc/init.d = etc/init.d/apmec-server + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[entry_points] +console_scripts = + apmec-db-manage = apmec.db.migration.cli:main + apmec-server = apmec.cmd.eventlet.apmec_server:main + apmec-conductor = apmec.cmd.eventlet.conductor:main + apmec-rootwrap = oslo.rootwrap.cmd:main +apmec.service_plugins = + dummy = apmec.tests.unit.dummy_plugin:DummyServicePlugin + mem = apmec.mem.plugin:MEMPlugin + meo = apmec.meo.meo_plugin:MeoPlugin + commonservices = apmec.plugins.common_services.common_services_plugin:CommonServicesPlugin +apmec.meo.vim.drivers = + openstack = apmec.meo.drivers.vim.openstack_driver:OpenStack_Driver +apmec.openstack.common.cache.backends = + memory = apmec.openstack.common.cache._backends.memory:MemoryBackend +apmec.apmec.mem.drivers = + noop = apmec.mem.infra_drivers.noop:DeviceNoop + openstack = apmec.mem.infra_drivers.openstack.openstack:OpenStack +apmec.apmec.mgmt.drivers = + noop = apmec.mem.mgmt_drivers.noop:DeviceMgmtNoop + openwrt = apmec.mem.mgmt_drivers.openwrt.openwrt:DeviceMgmtOpenWRT +apmec.apmec.monitor.drivers = + ping = apmec.mem.monitor_drivers.ping.ping:MEAMonitorPing + http_ping = apmec.mem.monitor_drivers.http_ping.http_ping:MEAMonitorHTTPPing +apmec.apmec.alarm_monitor.drivers = + ceilometer = apmec.mem.monitor_drivers.ceilometer.ceilometer:MEAMonitorCeilometer +apmec.apmec.policy.actions = + autoscaling = 
apmec.mem.policy_actions.autoscaling.autoscaling:MEAActionAutoscaling + respawn = apmec.mem.policy_actions.respawn.respawn:MEAActionRespawn + log = apmec.mem.policy_actions.log.log:MEAActionLog + log_and_kill = apmec.mem.policy_actions.log.log:MEAActionLogAndKill +oslo.config.opts = + apmec.common.config = apmec.common.config:config_opts + apmec.wsgi = apmec.wsgi:config_opts + apmec.service = apmec.service:config_opts + apmec.meo.meo_plugin = apmec.meo.meo_plugin:config_opts + apmec.meo.drivers.vim.openstack_driver = apmec.meo.drivers.vim.openstack_driver:config_opts + apmec.keymgr = apmec.keymgr:config_opts + apmec.mem.monitor = apmec.mem.monitor:config_opts + apmec.mem.plugin = apmec.mem.plugin:config_opts + apmec.mem.infra_drivers.openstack.openstack= apmec.mem.infra_drivers.openstack.openstack:config_opts + apmec.mem.mgmt_drivers.openwrt.openwrt = apmec.mem.mgmt_drivers.openwrt.openwrt:config_opts + apmec.mem.monitor_drivers.http_ping.http_ping = apmec.mem.monitor_drivers.http_ping.http_ping:config_opts + apmec.mem.monitor_drivers.ping.ping = apmec.mem.monitor_drivers.ping.ping:config_opts + apmec.mem.monitor_drivers.ceilometer.ceilometer = apmec.mem.monitor_drivers.ceilometer.ceilometer:config_opts + apmec.alarm_receiver = apmec.alarm_receiver:config_opts +mistral.actions = + apmec.vim_ping_action = apmec.meo.workflows.vim_monitor.vim_ping_action:PingVimAction + + +[build_sphinx] +all_files = 1 +build-dir = doc/build +source-dir = doc/source + +[build_releasenotes] +all_files = 1 +build-dir = releasenotes/build +source-dir = releasenotes/source + +[extract_messages] +keywords = _ gettext ngettext l_ lazy_gettext +mapping_file = babel.cfg +output_file = apmec/locale/apmec.pot + +[compile_catalog] +directory = apmec/locale +domain = apmec + +[update_catalog] +domain = apmec +output_dir = apmec/locale +input_file = apmec/locale/apmec.pot + +[wheel] +universal = 1 + +[pbr] +autodoc_index_modules = True +warnerrors = True +autodoc_exclude_modules = + 
apmec.db.migration.alembic_migrations.* + apmec.tests.* + thirdparty.* diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..566d844 --- /dev/null +++ b/setup.py @@ -0,0 +1,29 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr>=2.0.0'], + pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..289907b --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,25 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +# Despite above warning added by global sync process, please use +# ascii betical order. 
+coverage!=4.4,>=4.0 # Apache-2.0 +doc8>=0.6.0 # Apache-2.0 +fixtures>=3.0.0 # Apache-2.0/BSD +hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 +mock>=2.0.0 # BSD +python-subunit>=0.0.18 # Apache-2.0/BSD +ordereddict>=1.1 # MIT +sphinx>=1.6.2 # BSD +oslotest>=1.10.0 # Apache-2.0 +os-testr>=1.0.0 # Apache-2.0 +tempest>=16.1.0 # Apache-2.0 +os-api-ref>=1.4.0 # Apache-2.0 +testrepository>=0.0.18 # Apache-2.0/BSD +testtools>=1.4.0 # MIT +WebTest>=2.0.27 # MIT +python-barbicanclient!=4.5.0,!=4.5.1,>=4.0.0 # Apache-2.0 + +# releasenotes +reno>=2.5.0 # Apache-2.0 diff --git a/tools/check_i18n.py b/tools/check_i18n.py new file mode 100644 index 0000000..ba8a877 --- /dev/null +++ b/tools/check_i18n.py @@ -0,0 +1,154 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import print_function + +import compiler +import imp +import os.path +import sys + + +def is_localized(node): + """Check message wrapped by _()""" + if isinstance(node.parent, compiler.ast.CallFunc): + if isinstance(node.parent.node, compiler.ast.Name): + if node.parent.node.name == '_': + return True + return False + + +class ASTWalker(compiler.visitor.ASTVisitor): + + def default(self, node, *args): + for child in node.getChildNodes(): + child.parent = node + compiler.visitor.ASTVisitor.default(self, node, *args) + + +class Visitor(object): + + def __init__(self, filename, i18n_msg_predicates, + msg_format_checkers, debug): + self.filename = filename + self.debug = debug + self.error = 0 + self.i18n_msg_predicates = i18n_msg_predicates + self.msg_format_checkers = msg_format_checkers + with open(filename) as f: + self.lines = f.readlines() + + def visitConst(self, node): + if not isinstance(node.value, str): + return + + if is_localized(node): + for (checker, msg) in self.msg_format_checkers: + if checker(node): + print('%s:%d %s: %s Error: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + checker.__name__, msg), + file=sys.stderr) + self.error = 1 + return + if debug: + print('%s:%d %s: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + "Pass")) + else: + for (predicate, action, msg) in self.i18n_msg_predicates: + if predicate(node): + if action == 'skip': + if debug: + print('%s:%d %s: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + "Pass")) + return + elif action == 'error': + print('%s:%d %s: %s Error: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + predicate.__name__, msg), + file=sys.stderr) + self.error = 1 + return + elif action == 'warn': + print('%s:%d %s: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + "Warn: %s" % msg)) + return + print('Predicate with wrong action!', file=sys.stderr) + + +def 
is_file_in_black_list(black_list, f): + for f in black_list: + if os.path.abspath(input_file).startswith( + os.path.abspath(f)): + return True + return False + + +def check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug): + input_mod = compiler.parseFile(input_file) + v = compiler.visitor.walk(input_mod, + Visitor(input_file, + i18n_msg_predicates, + msg_format_checkers, + debug), + ASTWalker()) + return v.error + + +if __name__ == '__main__': + input_path = sys.argv[1] + cfg_path = sys.argv[2] + try: + cfg_mod = imp.load_source('', cfg_path) + except Exception: + print("Load cfg module failed", file=sys.stderr) + sys.exit(1) + + i18n_msg_predicates = cfg_mod.i18n_msg_predicates + msg_format_checkers = cfg_mod.msg_format_checkers + black_list = cfg_mod.file_black_list + + debug = False + if len(sys.argv) > 3: + if sys.argv[3] == '-d': + debug = True + + if os.path.isfile(input_path): + sys.exit(check_i18n(input_path, + i18n_msg_predicates, + msg_format_checkers, + debug)) + + error = 0 + for dirpath, dirs, files in os.walk(input_path): + for f in files: + if not f.endswith('.py'): + continue + input_file = os.path.join(dirpath, f) + if is_file_in_black_list(black_list, input_file): + continue + if check_i18n(input_file, + i18n_msg_predicates, + msg_format_checkers, + debug): + error = 1 + sys.exit(error) diff --git a/tools/check_i18n_test_case.txt b/tools/check_i18n_test_case.txt new file mode 100644 index 0000000..3d1391d --- /dev/null +++ b/tools/check_i18n_test_case.txt @@ -0,0 +1,67 @@ +# test-case for check_i18n.py +# python check_i18n.py check_i18n.txt -d + +# message format checking +# capital checking +msg = _("hello world, error") +msg = _("hello world_var, error") +msg = _('file_list xyz, pass') +msg = _("Hello world, pass") + +# format specifier checking +msg = _("Hello %s world %d, error") +msg = _("Hello %s world, pass") +msg = _("Hello %(var1)s world %(var2)s, pass") + +# message has been localized +# is_localized +msg = _("Hello 
world, pass") +msg = _("Hello world, pass") % var +LOG.debug(_('Hello world, pass')) +LOG.info(_('Hello world, pass')) +raise x.y.Exception(_('Hello world, pass')) +raise Exception(_('Hello world, pass')) + +# message need be localized +# is_log_callfunc +LOG.debug('hello world, error') +LOG.debug('hello world, error' % xyz) +sys.append('hello world, warn') + +# is_log_i18n_msg_with_mod +LOG.debug(_('Hello world, error') % xyz) + +# default warn +msg = 'hello world, warn' +msg = 'hello world, warn' % var + +# message needn't be localized +# skip only one word +msg = '' +msg = "hello,pass" + +# skip dict +msg = {'hello world, pass': 1} + +# skip list +msg = ["hello world, pass"] + +# skip subscript +msg['hello world, pass'] + +# skip xml marker +msg = ", pass" + +# skip sql statement +msg = "SELECT * FROM xyz WHERE hello=1, pass" +msg = "select * from xyz, pass" + +# skip add statement +msg = 'hello world' + e + 'world hello, pass' + +# skip doc string +""" +Hello world, pass +""" +class Msg: + pass diff --git a/tools/clean.sh b/tools/clean.sh new file mode 100755 index 0000000..754ed84 --- /dev/null +++ b/tools/clean.sh @@ -0,0 +1,5 @@ +#!/bin/bash +rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes +rm -rf */*.deb +rm -rf ./plugins/**/build/ ./plugins/**/dist +rm -rf ./plugins/**/lib/apmec_*_plugin.egg-info ./plugins/apmec-* diff --git a/tools/generate_config_file_sample.sh b/tools/generate_config_file_sample.sh new file mode 100755 index 0000000..fc802ce --- /dev/null +++ b/tools/generate_config_file_sample.sh @@ -0,0 +1,26 @@ +#!/bin/sh +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -e + +GEN_CMD=oslo-config-generator + +if ! type "$GEN_CMD" > /dev/null; then + echo "ERROR: $GEN_CMD not installed on the system." + exit 1 +fi + +$GEN_CMD --config-file=etc/config-generator.conf + +set -x diff --git a/tools/i18n_cfg.py b/tools/i18n_cfg.py new file mode 100644 index 0000000..053b438 --- /dev/null +++ b/tools/i18n_cfg.py @@ -0,0 +1,109 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import compiler +import re + + +def is_log_callfunc(n): + """LOG.xxx('hello %s' % xyz) and LOG('hello')""" + if isinstance(n.parent, compiler.ast.Mod): + n = n.parent + if isinstance(n.parent, compiler.ast.CallFunc): + if isinstance(n.parent.node, compiler.ast.Getattr): + if isinstance(n.parent.node.getChildNodes()[0], + compiler.ast.Name): + if n.parent.node.getChildNodes()[0].name == 'LOG': + return True + return False + + +def is_log_i18n_msg_with_mod(n): + """LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)""" + if not isinstance(n.parent.parent, compiler.ast.Mod): + return False + n = n.parent.parent + if isinstance(n.parent, compiler.ast.CallFunc): + if isinstance(n.parent.node, compiler.ast.Getattr): + if isinstance(n.parent.node.getChildNodes()[0], + compiler.ast.Name): + if n.parent.node.getChildNodes()[0].name == 'LOG': + return True + return False + + +def is_wrong_i18n_format(n): + """Check _('hello %s' % xyz)""" + if isinstance(n.parent, compiler.ast.Mod): + n = n.parent + if isinstance(n.parent, compiler.ast.CallFunc): + if isinstance(n.parent.node, compiler.ast.Name): + if n.parent.node.name == '_': + return True + return False + + +""" +Used for check message need be localized or not. 
+(predicate_func, action, message) +""" +i18n_msg_predicates = [ + # Skip ['hello world', 1] + (lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''), + # Skip {'hellow world', 1} + (lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''), + # Skip msg['hello world'] + (lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''), + # Skip doc string + (lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''), + # Skip msg = "hello", in normal, message should more than one word + (lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''), + # Skip msg = 'hello world' + vars + 'world hello' + (lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''), + # Skip xml markers msg = "" + (lambda n: len(re.compile("").findall(n.value)) > 0, 'skip', ''), + # Skip sql statement + (lambda n: len( + re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0, + 'skip', ''), + # LOG.xxx() + (is_log_callfunc, 'error', 'Message must be localized'), + # _('hello %s' % xyz) should be _('hello %s') % xyz + (is_wrong_i18n_format, 'error', + ("Message format was wrong, _('hello %s' % xyz) " + "should be _('hello %s') % xyz")), + # default + (lambda n: True, 'warn', 'Message might need localized') +] + + +""" +Used for checking message format. 
(checker_func, message) +""" +msg_format_checkers = [ + # If message contain more than on format specifier, it should use + # mapping key + (lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1, + "The message shouldn't contain more than one format specifier"), + # Check capital + (lambda n: n.value.split(' ')[0].count('_') == 0 and + n.value[0].isalpha() and + n.value[0].islower(), + "First letter must be capital"), + (is_log_i18n_msg_with_mod, + 'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)') +] + + +file_black_list = ["./apmec/tests/unit", + "./apmec/openstack", + "./apmec/plugins/bigswitch/tests"] diff --git a/tools/install_venv.py b/tools/install_venv.py new file mode 100644 index 0000000..8a06a1d --- /dev/null +++ b/tools/install_venv.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2010 OpenStack Foundation. +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Installation script for Apmec's development virtualenv +""" +from __future__ import print_function + +import os +import sys + +import install_venv_common as install_venv + + +def print_help(): + help = """ + Apmec development environment setup is complete. + + Apmec development uses virtualenv to track and manage Python dependencies + while in development and testing. 
+ + To activate the Apmec virtualenv for the extent of your current shell + session you can run: + + $ . .venv/bin/activate + + Or, if you prefer, you can run commands in the virtualenv on a case by case + basis by running: + + $ tools/with_venv.sh + + Also, make test will automatically use the virtualenv. + """ + print(help) + + +def main(argv): + root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + venv = os.path.join(root, '.venv') + pip_requires = os.path.join(root, 'requirements.txt') + test_requires = os.path.join(root, 'test-requirements.txt') + py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + project = 'Apmec' + install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, + py_version, project) + options = install.parse_args(argv) + install.check_python_version() + install.check_dependencies() + install.create_virtualenv(no_site_packages=options.no_site_packages) + install.install_dependencies() + print_help() + + +if __name__ == '__main__': + main(sys.argv) diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py new file mode 100644 index 0000000..8ad0375 --- /dev/null +++ b/tools/install_venv_common.py @@ -0,0 +1,171 @@ +# Copyright 2013 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides methods needed by installation script for OpenStack development +virtual environments. 
+ +Since this script is used to bootstrap a virtualenv from the system's Python +environment, it should be kept strictly compatible with Python 2.6. + +""" + +from __future__ import print_function + +import optparse +import os +import subprocess +import sys + + +class InstallVenv(object): + + def __init__(self, root, venv, requirements, + test_requirements, py_version, + project): + self.root = root + self.venv = venv + self.requirements = requirements + self.test_requirements = test_requirements + self.py_version = py_version + self.project = project + + def die(self, message, *args): + print(message % args, file=sys.stderr) + sys.exit(1) + + def check_python_version(self): + if sys.version_info < (2, 6): + self.die("Need Python Version >= 2.6") + + def run_command_with_code(self, cmd, redirect_output=True, + check_exit_code=True): + """Runs a command in an out-of-process shell. + + Returns the output of that command. Working directory is self.root. + """ + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None + + proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) + output = proc.communicate()[0] + if check_exit_code and proc.returncode != 0: + self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return (output, proc.returncode) + + def run_command(self, cmd, redirect_output=True, check_exit_code=True): + return self.run_command_with_code(cmd, redirect_output, + check_exit_code)[0] + + def get_distro(self): + if (os.path.exists('/etc/fedora-release') or + os.path.exists('/etc/redhat-release')): + return Fedora( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + else: + return Distro( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + + def check_dependencies(self): + self.get_distro().install_virtualenv() + + def create_virtualenv(self, no_site_packages=True): + """Creates the virtual environment and installs PIP. 
+ + Creates the virtual environment and installs PIP only into the + virtual environment. + """ + if not os.path.isdir(self.venv): + print('Creating venv...', end=' ') + if no_site_packages: + self.run_command(['virtualenv', '-q', '--no-site-packages', + self.venv]) + else: + self.run_command(['virtualenv', '-q', self.venv]) + print('done.') + else: + print("venv already exists...") + pass + + def pip_install(self, *args): + self.run_command(['tools/with_venv.sh', + 'pip', 'install', '--upgrade'] + list(args), + redirect_output=False) + + def install_dependencies(self): + print('Installing dependencies with pip (this can take a while)...') + + # First things first, make sure our venv has the latest pip and + # setuptools. + self.pip_install('pip>=1.3') + self.pip_install('setuptools') + + self.pip_install('-r', self.requirements) + self.pip_install('-r', self.test_requirements) + + def parse_args(self, argv): + """Parses command-line arguments.""" + parser = optparse.OptionParser() + parser.add_option('-n', '--no-site-packages', + action='store_true', + help="Do not inherit packages from global Python " + "install") + return parser.parse_args(argv[1:])[0] + + +class Distro(InstallVenv): + + def check_cmd(self, cmd): + return bool(self.run_command(['which', cmd], + check_exit_code=False).strip()) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if self.check_cmd('easy_install'): + print('Installing virtualenv via easy_install...', end=' ') + if self.run_command(['easy_install', 'virtualenv']): + print('Succeeded') + return + else: + print('Failed') + + self.die('ERROR: virtualenv not found.\n\n%s development' + ' requires virtualenv, please install it using your' + ' favorite package management tool' % self.project) + + +class Fedora(Distro): + """This covers all Fedora-based distributions. 
+ + Includes: Fedora, RHEL, CentOS, Scientific Linux + """ + + def check_pkg(self, pkg): + return self.run_command_with_code(['rpm', '-q', pkg], + check_exit_code=False)[1] == 0 + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if not self.check_pkg('python-virtualenv'): + self.die("Please install 'python-virtualenv'.") + + super(Fedora, self).install_virtualenv() diff --git a/tools/meac/build_image.sh b/tools/meac/build_image.sh new file mode 100755 index 0000000..1011557 --- /dev/null +++ b/tools/meac/build_image.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +MEAC_IMAGE=/tmp/apmec_meac_images +rm -rf $MEAC_IMAGE +mkdir $MEAC_IMAGE + +pip install diskimage-builder +pip install dib-utils +CURRENT_DIR=`pwd` +cd $MEAC_IMAGE +git clone https://git.openstack.org/openstack/tripleo-image-elements.git +git clone https://git.openstack.org/openstack/heat-templates.git + +export ELEMENTS_PATH=tripleo-image-elements/elements:heat-templates/hot/software-config/elements +disk-image-create vm \ + fedora selinux-permissive \ + os-collect-config \ + os-refresh-config \ + os-apply-config \ + heat-config \ + heat-config-ansible \ + heat-config-cfn-init \ + heat-config-puppet \ + heat-config-salt \ + heat-config-script \ + -o fedora-software-config.qcow2 diff --git a/tools/ostestr_compat_shim.sh b/tools/ostestr_compat_shim.sh new file mode 100755 index 0000000..a483ed1 --- /dev/null +++ b/tools/ostestr_compat_shim.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +# preserve old behavior of using an arg as a regex when '--' is not present +case $@ in + (*--*) ostestr $@;; + ('') ostestr;; + (*) ostestr --regex "$@" +esac diff --git a/tools/prepare_functional_test.sh b/tools/prepare_functional_test.sh new file mode 100755 index 0000000..78a07f2 --- /dev/null +++ b/tools/prepare_functional_test.sh @@ -0,0 +1,20 @@ +# This script is used to prepare functional test env after devstack +# installation of apmec + +DEVSTACK_DIR=${DEVSTACK_DIR:-~/devstack} +APMEC_DIR=$(dirname "$0")/.. 
+PRIVATE_KEY_FILE=${PRIVATE_KEY_FILE:-/dev/null} +MEC_USER=${MEC_USER:-"mec_user"} + +# Test devstack dir setting +if [ ! -f ${DEVSTACK_DIR}/openrc ]; then + echo "Please set right DEVSTACK_DIR" + exit 1 +fi + +. $DEVSTACK_DIR/openrc admin admin +. ${APMEC_DIR}/apmec/tests/contrib/post_test_hook_lib.sh + +fixup_quota +add_key_if_not_exist +add_secgrp_if_not_exist diff --git a/tools/test-setup.sh b/tools/test-setup.sh new file mode 100755 index 0000000..07a0785 --- /dev/null +++ b/tools/test-setup.sh @@ -0,0 +1,57 @@ +#!/bin/bash -xe + +# This script will be run by OpenStack CI before unit tests are run, +# it sets up the test system as needed. +# Developers should setup their test systems in a similar way. + +# This setup needs to be run as a user that can run sudo. + +# The root password for the MySQL database; pass it in via +# MYSQL_ROOT_PW. +DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} + +# This user and its password are used by the tests, if you change it, +# your tests might fail. +DB_USER=openstack_citest +DB_PW=openstack_citest + +sudo -H mysqladmin -u root password $DB_ROOT_PW + +# It's best practice to remove anonymous users from the database. If +# a anonymous user exists, then it matches first for connections and +# other connections from that host will not work. +sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " + DELETE FROM mysql.user WHERE User=''; + FLUSH PRIVILEGES; + GRANT ALL PRIVILEGES ON *.* + TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" + +# Now create our database. +mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " + SET default_storage_engine=MYISAM; + DROP DATABASE IF EXISTS openstack_citest; + CREATE DATABASE openstack_citest CHARACTER SET utf8;" + +# Same for PostgreSQL +# The root password for the PostgreSQL database; pass it in via +# POSTGRES_ROOT_PW. 
+DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} + +# Setup user +root_roles=$(sudo -H -u postgres psql -t -c " + SELECT 'HERE' from pg_roles where rolname='$DB_USER'") +if [[ ${root_roles} == *HERE ]];then + sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" +else + sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" +fi + +# Store password for tests +cat << EOF > $HOME/.pgpass +*:*:*:$DB_USER:$DB_PW +EOF +chmod 0600 $HOME/.pgpass + +# Now create our database +psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" +createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest diff --git a/tools/with_venv.sh b/tools/with_venv.sh new file mode 100755 index 0000000..72f68f7 --- /dev/null +++ b/tools/with_venv.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +TOOLS=`dirname $0` +VENV=$TOOLS/../.venv +. 
$VENV/bin/activate && "$@" diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..dae21d6 --- /dev/null +++ b/tox.ini @@ -0,0 +1,102 @@ +[tox] +envlist = py35,py27,pep8,docs +minversion = 1.6 +skipsdist = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} +usedevelop = True +whitelist_externals = rm +install_command = + pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + rm -f .testrepository/times.dbm + {toxinidir}/tools/ostestr_compat_shim.sh {posargs} + +[testenv:functional] +setenv = OS_TEST_PATH=./apmec/tests/functional +deps = + {[testenv]deps} + +[testenv:dsvm-functional] +basepython = python2.7 +setenv = {[testenv]setenv} + {[testenv:functional]setenv} +deps = + {[testenv:functional]deps} +commands = + {toxinidir}/tools/ostestr_compat_shim.sh --concurrency 2 {posargs} + +[tox:jenkins] +sitepackages = True + +[testenv:debug] +commands = oslo_debug_helper {posargs} + +[testenv:debug-py27] +basepython = python2.7 +commands = oslo_debug_helper {posargs} + +[testenv:debug-py35] +basepython = python3.5 +commands = oslo_debug_helper {posargs} + +[testenv:pep8] +basepython = python2.7 +commands = + flake8 + doc8 -e .rst doc/source/ CONTRIBUTING.rst HACKING.rst README.rst TESTING.rst + apmec-db-manage check_migration + bash -c "find apmec -type f -regex '.*\.pot?' -print0|xargs -0 --no-run-if-empty -n 1 msgfmt --check-format -o /dev/null" +whitelist_externals = bash + +[testenv:i18n] +commands = python ./tools/check_i18n.py ./apmec ./tools/i18n_cfg.py + +[testenv:docs] +basepython = python2.7 +commands = + doc8 -e .rst doc/source/ CONTRIBUTING.rst HACKING.rst README.rst TESTING.rst + python setup.py build_sphinx + +[testenv:api-ref] +# This environment is called from CI scripts to test and publish +# the API Ref to developer.openstack.org. 
+commands = + rm -rf api-ref/build + sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html +whitelist_externals = rm + +[testenv:releasenotes] +commands = + sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html + + +[testenv:cover] +# Also do not run test_coverage_ext tests while gathering coverage as those +# tests conflict with coverage. +commands = + coverage erase + python setup.py testr --coverage --testr-args='{posargs}' + coverage report + +[testenv:venv] +commands = {posargs} + +[flake8] +# E128 continuation line under-indented for visual indent +# N320 log messages does not translate +ignore = E128,N320 +show-source = true +builtins = _ +exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,.ropeproject + +[hacking] +import_exceptions = apmec._i18n +local-check-factory = apmec.hacking.checks.factory + +[testenv:config-gen] +commands = + oslo-config-generator --config-file=etc/config-generator.conf