From c269683c0e068d571656e0e602b22a97970d7198 Mon Sep 17 00:00:00 2001
From: Tony Breeds
Date: Tue, 12 Sep 2017 15:39:45 -0600
Subject: [PATCH] Retire Packaging Deb project repos

This commit is part of a series to retire the Packaging Deb
project. Step 2 is to remove all content from the project
repos, replacing it with a README notification where to find
ongoing work, and how to recover the repo if needed at some
future point (as in
https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project).

Change-Id: Ia85425594e6fb170783a498ca41a2cb35e1ce051
---
 .gitignore | 42 -
 .gitreview | 4 -
 CONTRIBUTING.rst | 367 -----
 LICENSE | 202 ---
 README | 14 +
 README.rst | 37 -
 babel.cfg | 1 -
 config-generator.conf | 12 -
 devstack/example.local.conf | 61 -
 devstack/plugin.sh | 354 -----
 devstack/settings | 1 -
 devstack/upgrade/resources.sh | 77 -
 devstack/upgrade/settings | 4 -
 devstack/upgrade/shutdown.sh | 29 -
 devstack/upgrade/upgrade.sh | 106 --
 doc/Makefile | 159 ---
 doc/source/.gitignore | 2 -
 doc/source/admin/index.rst | 10 -
 doc/source/admin/upgrade.rst | 28 -
 doc/source/conf.py | 103 --
 doc/source/contributor/index.rst | 11 -
 doc/source/images/states.svg | 230 ---
 doc/source/index.rst | 26 -
 doc/source/install/index.rst | 361 -----
 doc/source/user/http-api.rst | 392 -----
 doc/source/user/index.rst | 34 -
 doc/source/user/troubleshooting.rst | 149 --
 doc/source/user/usage.rst | 395 -----
 doc/source/user/workflow.rst | 83 --
 example.conf | 915 ------------
 ironic-inspector.8 | 20 -
 ironic_inspector/__init__.py | 0
 ironic_inspector/alembic.ini | 38 -
 ironic_inspector/api_tools.py | 83 --
 ironic_inspector/cmd/__init__.py | 2 -
 ironic_inspector/cmd/all.py | 29 -
 ironic_inspector/common/__init__.py | 0
 ironic_inspector/common/i18n.py | 21 -
 ironic_inspector/common/ironic.py | 188 ---
 ironic_inspector/common/keystone.py | 56 -
 ironic_inspector/common/lldp_parsers.py | 365 -----
 ironic_inspector/common/lldp_tlvs.py | 366 -----
 ironic_inspector/common/service_utils.py | 35 -
 ironic_inspector/common/swift.py | 172 ---
 ironic_inspector/conf.py | 241 ----
 ironic_inspector/db.py | 197 ---
 ironic_inspector/dbsync.py | 94 --
 ironic_inspector/firewall.py | 257 ----
 ironic_inspector/introspect.py | 184 ---
 ironic_inspector/introspection_state.py | 148 --
 ironic_inspector/main.py | 324 -----
 ironic_inspector/migrations/env.py | 82 --
 ironic_inspector/migrations/script.py.mako | 32 -
 .../versions/578f84f38d_inital_db_schema.py | 63 -
 ...2d84cb1b_attribute_constraints_relaxing.py | 90 --
 ...e3f38c4_change_created_finished_at_type.py | 69 -
 ...01c8ef_introducing_node_state_attribute.py | 49 -
 .../versions/d588418040d_add_rules.py | 64 -
 ...1d88_add_invert_field_to_rule_condition.py | 33 -
 ironic_inspector/node_cache.py | 954 ------------
 ironic_inspector/plugins/__init__.py | 0
 ironic_inspector/plugins/base.py | 231 ---
 ironic_inspector/plugins/capabilities.py | 101 --
 ironic_inspector/plugins/discovery.py | 102 --
 ironic_inspector/plugins/example.py | 39 -
 ironic_inspector/plugins/extra_hardware.py | 98 --
 ironic_inspector/plugins/lldp_basic.py | 87 --
 .../plugins/local_link_connection.py | 149 --
 ironic_inspector/plugins/pci_devices.py | 86 --
 ironic_inspector/plugins/raid_device.py | 102 --
 ironic_inspector/plugins/rules.py | 153 --
 ironic_inspector/plugins/standard.py | 299 ----
 ironic_inspector/process.py | 390 -----
 ironic_inspector/pxe_filter/__init__.py | 0
 ironic_inspector/pxe_filter/base.py | 224 ---
 ironic_inspector/pxe_filter/interface.py | 64 -
 ironic_inspector/rules.py | 425 ------
ironic_inspector/test/__init__.py | 0 ironic_inspector/test/base.py | 207 --- ironic_inspector/test/functional.py | 767 ---------- .../test/inspector_tempest_plugin/README.rst | 18 - .../test/inspector_tempest_plugin/__init__.py | 0 .../test/inspector_tempest_plugin/config.py | 66 - .../inspector_tempest_plugin/exceptions.py | 25 - .../test/inspector_tempest_plugin/plugin.py | 41 - .../rules/basic_ops_rule.json | 25 - .../services/__init__.py | 0 .../services/introspection_client.py | 83 -- .../tests/__init__.py | 0 .../inspector_tempest_plugin/tests/manager.py | 244 ---- .../tests/test_basic.py | 175 --- .../tests/test_discovery.py | 149 -- ironic_inspector/test/unit/__init__.py | 0 ironic_inspector/test/unit/test_api_tools.py | 136 -- .../test/unit/test_common_ironic.py | 131 -- ironic_inspector/test/unit/test_db.py | 77 - ironic_inspector/test/unit/test_firewall.py | 444 ------ ironic_inspector/test/unit/test_introspect.py | 432 ------ ironic_inspector/test/unit/test_keystone.py | 62 - ironic_inspector/test/unit/test_main.py | 615 -------- ironic_inspector/test/unit/test_migrations.py | 498 ------- ironic_inspector/test/unit/test_node_cache.py | 1265 ----------------- .../test/unit/test_plugins_base.py | 92 -- .../test/unit/test_plugins_capabilities.py | 77 - .../test/unit/test_plugins_discovery.py | 132 -- .../test/unit/test_plugins_extra_hardware.py | 97 -- .../test/unit/test_plugins_lldp_basic.py | 329 ----- .../test_plugins_local_link_connection.py | 196 --- .../test/unit/test_plugins_pci_devices.py | 102 -- .../test/unit/test_plugins_raid_device.py | 129 -- .../test/unit/test_plugins_rules.py | 222 --- .../test/unit/test_plugins_standard.py | 412 ------ ironic_inspector/test/unit/test_process.py | 675 --------- ironic_inspector/test/unit/test_pxe_filter.py | 272 ---- ironic_inspector/test/unit/test_rules.py | 477 ------- ironic_inspector/test/unit/test_swift.py | 128 -- ironic_inspector/test/unit/test_utils.py | 164 --- .../test/unit/test_wsgi_service.py | 207 --- ironic_inspector/utils.py | 226 --- ironic_inspector/version.py | 15 - ironic_inspector/wsgi_service.py | 196 --- plugin-requirements.txt | 0 releasenotes/notes/.placeholder | 0 ...eturn_all_attributes-98a9765726c405d5.yaml | 5 - ...ly_update_started_at-8af8cf254cdf8cde.yaml | 4 - ...at-in-the-status-API-7860312102923938.yaml | 9 - .../abort-introspection-ae5cb5a9fbacd2ac.yaml | 4 - ...ctive_states_timeout-3e3ab110870483ec.yaml | 4 - ...-option-to-add-ports-f8c6c9b3e6797652.yaml | 5 - ...dd-lldp-basic-plugin-98aebcf43e60931b.yaml | 4 - .../add-lldp-plugin-4645596cb8b39fd3.yaml | 5 - ...dp-plugin-dependency-c323412654f71b3e.yaml | 6 - ...pection-api-response-85fb7f4e72ae386a.yaml | 4 - ...trospection-statuses-2a3d4379c3854894.yaml | 10 - ...long-running-ramdisk-ffee3c177c56cebb.yaml | 4 - ...ing-error-on-timeout-904aeeeb319ecb2b.yaml | 4 - ...-logging-deprecation-4ca046a64fac6f11.yaml | 4 - .../notes/capabilities-15cc2268d661f0a0.yaml | 4 - ..._at_type_to_datetime-c5617e598350970c.yaml | 11 - ...value-from-nonstring-3d851cb42ce3a0ac.yaml | 5 - ...ompact-debug-logging-b15dd9bbdd3ce27a.yaml | 4 - .../contains-matches-ee28958b08995494.yaml | 4 - .../continue-http-500-62f33d425aade9d7.yaml | 4 - releasenotes/notes/cors-5f345c65da7f5c99.yaml | 13 - ...tom-ramdisk-log-name-dac06822c38657e7.yaml | 8 - .../deprecate-rollback-dea95ac515d3189b.yaml | 4 - ...ate-root-device-hint-909d389b7efed5da.yaml | 3 - ...e-setting-ipmi-creds-1581ddc63b273811.yaml | 12 - ...ptions-removal-ocata-a44dadf3bcf8d6fc.yaml | 8 - 
.../notes/disable-dhcp-c86a3a0ee2696ee0.yaml | 7 - .../drop-maintenance-a9a87a9a2af051ad.yaml | 5 - .../edeploy-typeerror-6486e31923d91666.yaml | 5 - .../empty-condition-abc707b771be6be3.yaml | 4 - .../notes/enroll-hook-d8c32eba70848210.yaml | 6 - .../notes/extend-rules-9a9d38701e970611.yaml | 5 - ...extra-hardware-swift-aeebf299b9605bb0.yaml | 3 - .../firewall-rerun-f2d0f64cca2698ff.yaml | 4 - ...cessError-on-startup-28d9dbed85a81542.yaml | 9 - ...-when-use-postgresql-ac6c708f48f55c83.yaml | 4 - ...dlock-during-cleanup-bcb6b517ef299791.yaml | 5 - .../notes/fix-mysql-6b79049fe96edae4.yaml | 8 - ...-tasks-configuration-edd167f0146e60b5.yaml | 6 - ...es-endpoint-response-d60984c40d927c1f.yaml | 10 - ...provision-state-name-150c91c48d471bf9.yaml | 11 - .../fix_llc_hook_bugs-efeea008c2f792eb.yaml | 6 - .../fix_llc_port_assume-4ea47d26501bddc3.yaml | 3 - .../notes/flask-debug-6d2dcc2b482324dc.yaml | 4 - .../notes/futurist-557fcd18d4eaf1c1.yaml | 5 - ...googbye-patches-args-071532024b9260bd.yaml | 4 - .../notes/hook-deps-83a867c7af0300e4.yaml | 6 - .../infiniband-support-960d6846e326dec4.yaml | 8 - ...-drivers-deprecation-1d0c25b112fbd4da.yaml | 10 - .../introspection-state-03538fac198882b6.yaml | 16 - .../notes/ipa-inventory-0a1e8d644da850ff.yaml | 15 - .../notes/ipa-support-7eea800306829a49.yaml | 4 - ...-credentials-removal-0021f89424fbf7a3.yaml | 8 - .../ironic-lib-hints-20412a1c7fa796e0.yaml | 6 - .../is-empty-missing-a590d580cb62761d.yaml | 3 - ...keystoneauth-plugins-aab6cbe1d0e884bf.yaml | 17 - .../less-iptables-calls-759e89d103df504c.yaml | 3 - ...ot-found-cache-error-afbc87e80305ca5c.yaml | 5 - ...gs-collector-logging-356e56cd70a04a2b.yaml | 3 - .../lookup-all-macs-eead528c0b764ad7.yaml | 6 - .../notes/loopback-bmc-e60d64fe74bdf142.yaml | 5 - ...rations-autogenerate-4303fd496c3c2757.yaml | 3 - .../missing-pxe-mac-d9329dab85513460.yaml | 4 - ...ttribute_node_lookup-17e219ba8d3e5eb0.yaml | 17 - .../notes/names-82d9f84153a228ec.yaml | 5 - ...downgrade-migrations-514bf872d9f944ed.yaml | 5 - ...ower-off-enroll-node-e40854f6def397b8.yaml | 4 - .../no-logs-stored-data-6db52934c7f9a91a.yaml | 4 - .../no-old-ramdisk-095b05e1245131d8.yaml | 7 - .../notes/no-rollback-e15bc7fee0134545.yaml | 10 - .../no-root_device_hint-0e7676d481d503bb.yaml | 3 - .../notes/node-locking-4d135ca5b93524b1.yaml | 3 - .../optional-root-disk-9b972f504b2e6262.yaml | 5 - ...patch-head-backslash-24bcdd03ba254bf2.yaml | 4 - .../pci_devices-plugin-5b93196e0e973155.yaml | 7 - ...port-creation-plugin-c0405ec646b1051d.yaml | 7 - .../preprocessing-error-01e55b4db20fb7fc.yaml | 4 - ...sing-data-type-check-7c914339d3ab15ba.yaml | 5 - .../processing-logging-e2d27bbac95a7213.yaml | 6 - .../notes/pxe-enabled-cbc3287ebe3fcd49.yaml | 9 - ...logs-on-all-failures-24da41edf3a98400.yaml | 11 - ...eapply-introspection-5edbbfaf498dbd12.yaml | 4 - .../rollback-formatting-7d61c9af2600d42f.yaml | 7 - .../rollback-removal-a03a989e2e9f776b.yaml | 4 - .../notes/rules-invert-2585173a11db3c31.yaml | 4 - ...r-when-swift-failure-3e919ecbf9db6401.yaml | 3 - .../notes/size-hint-ea2a264468e1fcb7.yaml | 4 - .../notes/sphinx-docs-4d0a5886261e57bf.yaml | 12 - .../status-removal-fa1d9a98ffad9f60.yaml | 11 - releasenotes/source/_static/.placeholder | 0 releasenotes/source/_templates/.placeholder | 0 releasenotes/source/conf.py | 292 ---- releasenotes/source/index.rst | 12 - releasenotes/source/liberty.rst | 6 - releasenotes/source/mitaka.rst | 6 - releasenotes/source/newton.rst | 6 - releasenotes/source/ocata.rst | 6 - releasenotes/source/unreleased.rst | 5 
- requirements.txt | 32 - rootwrap.conf | 27 - rootwrap.d/ironic-inspector-firewall.filters | 6 - setup.cfg | 101 -- setup.py | 29 - test-requirements.txt | 17 - tools/states_to_dot.py | 94 -- tools/test-setup.sh | 57 - tox.ini | 69 - 230 files changed, 14 insertions(+), 21927 deletions(-) delete mode 100644 .gitignore delete mode 100644 .gitreview delete mode 100644 CONTRIBUTING.rst delete mode 100644 LICENSE create mode 100644 README delete mode 100644 README.rst delete mode 100644 babel.cfg delete mode 100644 config-generator.conf delete mode 100644 devstack/example.local.conf delete mode 100644 devstack/plugin.sh delete mode 100644 devstack/settings delete mode 100755 devstack/upgrade/resources.sh delete mode 100644 devstack/upgrade/settings delete mode 100755 devstack/upgrade/shutdown.sh delete mode 100755 devstack/upgrade/upgrade.sh delete mode 100644 doc/Makefile delete mode 100644 doc/source/.gitignore delete mode 100644 doc/source/admin/index.rst delete mode 100644 doc/source/admin/upgrade.rst delete mode 100644 doc/source/conf.py delete mode 100644 doc/source/contributor/index.rst delete mode 100644 doc/source/images/states.svg delete mode 100644 doc/source/index.rst delete mode 100644 doc/source/install/index.rst delete mode 100644 doc/source/user/http-api.rst delete mode 100644 doc/source/user/index.rst delete mode 100644 doc/source/user/troubleshooting.rst delete mode 100644 doc/source/user/usage.rst delete mode 100644 doc/source/user/workflow.rst delete mode 100644 example.conf delete mode 100644 ironic-inspector.8 delete mode 100644 ironic_inspector/__init__.py delete mode 100644 ironic_inspector/alembic.ini delete mode 100644 ironic_inspector/api_tools.py delete mode 100644 ironic_inspector/cmd/__init__.py delete mode 100644 ironic_inspector/cmd/all.py delete mode 100644 ironic_inspector/common/__init__.py delete mode 100644 ironic_inspector/common/i18n.py delete mode 100644 ironic_inspector/common/ironic.py delete mode 100644 ironic_inspector/common/keystone.py delete mode 100644 ironic_inspector/common/lldp_parsers.py delete mode 100644 ironic_inspector/common/lldp_tlvs.py delete mode 100644 ironic_inspector/common/service_utils.py delete mode 100644 ironic_inspector/common/swift.py delete mode 100644 ironic_inspector/conf.py delete mode 100644 ironic_inspector/db.py delete mode 100644 ironic_inspector/dbsync.py delete mode 100644 ironic_inspector/firewall.py delete mode 100644 ironic_inspector/introspect.py delete mode 100644 ironic_inspector/introspection_state.py delete mode 100644 ironic_inspector/main.py delete mode 100644 ironic_inspector/migrations/env.py delete mode 100644 ironic_inspector/migrations/script.py.mako delete mode 100644 ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py delete mode 100644 ironic_inspector/migrations/versions/882b2d84cb1b_attribute_constraints_relaxing.py delete mode 100644 ironic_inspector/migrations/versions/d00d6e3f38c4_change_created_finished_at_type.py delete mode 100644 ironic_inspector/migrations/versions/d2e48801c8ef_introducing_node_state_attribute.py delete mode 100644 ironic_inspector/migrations/versions/d588418040d_add_rules.py delete mode 100644 ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.py delete mode 100644 ironic_inspector/node_cache.py delete mode 100644 ironic_inspector/plugins/__init__.py delete mode 100644 ironic_inspector/plugins/base.py delete mode 100644 ironic_inspector/plugins/capabilities.py delete mode 100644 ironic_inspector/plugins/discovery.py 
delete mode 100644 ironic_inspector/plugins/example.py delete mode 100644 ironic_inspector/plugins/extra_hardware.py delete mode 100644 ironic_inspector/plugins/lldp_basic.py delete mode 100644 ironic_inspector/plugins/local_link_connection.py delete mode 100644 ironic_inspector/plugins/pci_devices.py delete mode 100644 ironic_inspector/plugins/raid_device.py delete mode 100644 ironic_inspector/plugins/rules.py delete mode 100644 ironic_inspector/plugins/standard.py delete mode 100644 ironic_inspector/process.py delete mode 100644 ironic_inspector/pxe_filter/__init__.py delete mode 100644 ironic_inspector/pxe_filter/base.py delete mode 100644 ironic_inspector/pxe_filter/interface.py delete mode 100644 ironic_inspector/rules.py delete mode 100644 ironic_inspector/test/__init__.py delete mode 100644 ironic_inspector/test/base.py delete mode 100644 ironic_inspector/test/functional.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/README.rst delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/__init__.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/config.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/exceptions.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/plugin.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/rules/basic_ops_rule.json delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/services/__init__.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/__init__.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/manager.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/test_discovery.py delete mode 100644 ironic_inspector/test/unit/__init__.py delete mode 100644 ironic_inspector/test/unit/test_api_tools.py delete mode 100644 ironic_inspector/test/unit/test_common_ironic.py delete mode 100644 ironic_inspector/test/unit/test_db.py delete mode 100644 ironic_inspector/test/unit/test_firewall.py delete mode 100644 ironic_inspector/test/unit/test_introspect.py delete mode 100644 ironic_inspector/test/unit/test_keystone.py delete mode 100644 ironic_inspector/test/unit/test_main.py delete mode 100644 ironic_inspector/test/unit/test_migrations.py delete mode 100644 ironic_inspector/test/unit/test_node_cache.py delete mode 100644 ironic_inspector/test/unit/test_plugins_base.py delete mode 100644 ironic_inspector/test/unit/test_plugins_capabilities.py delete mode 100644 ironic_inspector/test/unit/test_plugins_discovery.py delete mode 100644 ironic_inspector/test/unit/test_plugins_extra_hardware.py delete mode 100644 ironic_inspector/test/unit/test_plugins_lldp_basic.py delete mode 100644 ironic_inspector/test/unit/test_plugins_local_link_connection.py delete mode 100644 ironic_inspector/test/unit/test_plugins_pci_devices.py delete mode 100644 ironic_inspector/test/unit/test_plugins_raid_device.py delete mode 100644 ironic_inspector/test/unit/test_plugins_rules.py delete mode 100644 ironic_inspector/test/unit/test_plugins_standard.py delete mode 100644 ironic_inspector/test/unit/test_process.py delete mode 100644 ironic_inspector/test/unit/test_pxe_filter.py delete mode 100644 ironic_inspector/test/unit/test_rules.py delete mode 100644 ironic_inspector/test/unit/test_swift.py delete mode 100644 
ironic_inspector/test/unit/test_utils.py delete mode 100644 ironic_inspector/test/unit/test_wsgi_service.py delete mode 100644 ironic_inspector/utils.py delete mode 100644 ironic_inspector/version.py delete mode 100644 ironic_inspector/wsgi_service.py delete mode 100644 plugin-requirements.txt delete mode 100644 releasenotes/notes/.placeholder delete mode 100644 releasenotes/notes/Inspector_rules_API_does_not_return_all_attributes-98a9765726c405d5.yaml delete mode 100644 releasenotes/notes/Reapply_update_started_at-8af8cf254cdf8cde.yaml delete mode 100644 releasenotes/notes/UUID-started_at-finished_at-in-the-status-API-7860312102923938.yaml delete mode 100644 releasenotes/notes/abort-introspection-ae5cb5a9fbacd2ac.yaml delete mode 100644 releasenotes/notes/active_states_timeout-3e3ab110870483ec.yaml delete mode 100644 releasenotes/notes/add-disabled-option-to-add-ports-f8c6c9b3e6797652.yaml delete mode 100644 releasenotes/notes/add-lldp-basic-plugin-98aebcf43e60931b.yaml delete mode 100644 releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml delete mode 100644 releasenotes/notes/add-lldp-plugin-dependency-c323412654f71b3e.yaml delete mode 100644 releasenotes/notes/add-node-state-to-introspection-api-response-85fb7f4e72ae386a.yaml delete mode 100644 releasenotes/notes/add-support-for-listing-all-introspection-statuses-2a3d4379c3854894.yaml delete mode 100644 releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml delete mode 100644 releasenotes/notes/add-transition-starting-error-on-timeout-904aeeeb319ecb2b.yaml delete mode 100644 releasenotes/notes/bmc-logging-deprecation-4ca046a64fac6f11.yaml delete mode 100644 releasenotes/notes/capabilities-15cc2268d661f0a0.yaml delete mode 100644 releasenotes/notes/change_started_finished_at_type_to_datetime-c5617e598350970c.yaml delete mode 100644 releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml delete mode 100644 releasenotes/notes/compact-debug-logging-b15dd9bbdd3ce27a.yaml delete mode 100644 releasenotes/notes/contains-matches-ee28958b08995494.yaml delete mode 100644 releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml delete mode 100644 releasenotes/notes/cors-5f345c65da7f5c99.yaml delete mode 100644 releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml delete mode 100644 releasenotes/notes/deprecate-rollback-dea95ac515d3189b.yaml delete mode 100644 releasenotes/notes/deprecate-root-device-hint-909d389b7efed5da.yaml delete mode 100644 releasenotes/notes/deprecate-setting-ipmi-creds-1581ddc63b273811.yaml delete mode 100644 releasenotes/notes/deprecated-options-removal-ocata-a44dadf3bcf8d6fc.yaml delete mode 100644 releasenotes/notes/disable-dhcp-c86a3a0ee2696ee0.yaml delete mode 100644 releasenotes/notes/drop-maintenance-a9a87a9a2af051ad.yaml delete mode 100644 releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml delete mode 100644 releasenotes/notes/empty-condition-abc707b771be6be3.yaml delete mode 100644 releasenotes/notes/enroll-hook-d8c32eba70848210.yaml delete mode 100644 releasenotes/notes/extend-rules-9a9d38701e970611.yaml delete mode 100644 releasenotes/notes/extra-hardware-swift-aeebf299b9605bb0.yaml delete mode 100644 releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml delete mode 100644 releasenotes/notes/fix-CalledProcessError-on-startup-28d9dbed85a81542.yaml delete mode 100644 releasenotes/notes/fix-crash-when-use-postgresql-ac6c708f48f55c83.yaml delete mode 100644 releasenotes/notes/fix-deadlock-during-cleanup-bcb6b517ef299791.yaml delete mode 100644 
releasenotes/notes/fix-mysql-6b79049fe96edae4.yaml delete mode 100644 releasenotes/notes/fix-periodic-tasks-configuration-edd167f0146e60b5.yaml delete mode 100644 releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml delete mode 100644 releasenotes/notes/fix-wrong-provision-state-name-150c91c48d471bf9.yaml delete mode 100644 releasenotes/notes/fix_llc_hook_bugs-efeea008c2f792eb.yaml delete mode 100644 releasenotes/notes/fix_llc_port_assume-4ea47d26501bddc3.yaml delete mode 100644 releasenotes/notes/flask-debug-6d2dcc2b482324dc.yaml delete mode 100644 releasenotes/notes/futurist-557fcd18d4eaf1c1.yaml delete mode 100644 releasenotes/notes/googbye-patches-args-071532024b9260bd.yaml delete mode 100644 releasenotes/notes/hook-deps-83a867c7af0300e4.yaml delete mode 100644 releasenotes/notes/infiniband-support-960d6846e326dec4.yaml delete mode 100644 releasenotes/notes/introspection-delay-drivers-deprecation-1d0c25b112fbd4da.yaml delete mode 100644 releasenotes/notes/introspection-state-03538fac198882b6.yaml delete mode 100644 releasenotes/notes/ipa-inventory-0a1e8d644da850ff.yaml delete mode 100644 releasenotes/notes/ipa-support-7eea800306829a49.yaml delete mode 100644 releasenotes/notes/ipmi-credentials-removal-0021f89424fbf7a3.yaml delete mode 100644 releasenotes/notes/ironic-lib-hints-20412a1c7fa796e0.yaml delete mode 100644 releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml delete mode 100644 releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml delete mode 100644 releasenotes/notes/less-iptables-calls-759e89d103df504c.yaml delete mode 100644 releasenotes/notes/log-info-not-found-cache-error-afbc87e80305ca5c.yaml delete mode 100644 releasenotes/notes/logs-collector-logging-356e56cd70a04a2b.yaml delete mode 100644 releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml delete mode 100644 releasenotes/notes/loopback-bmc-e60d64fe74bdf142.yaml delete mode 100644 releasenotes/notes/migrations-autogenerate-4303fd496c3c2757.yaml delete mode 100644 releasenotes/notes/missing-pxe-mac-d9329dab85513460.yaml delete mode 100644 releasenotes/notes/multiattribute_node_lookup-17e219ba8d3e5eb0.yaml delete mode 100644 releasenotes/notes/names-82d9f84153a228ec.yaml delete mode 100644 releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml delete mode 100644 releasenotes/notes/no-fail-on-power-off-enroll-node-e40854f6def397b8.yaml delete mode 100644 releasenotes/notes/no-logs-stored-data-6db52934c7f9a91a.yaml delete mode 100644 releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml delete mode 100644 releasenotes/notes/no-rollback-e15bc7fee0134545.yaml delete mode 100644 releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml delete mode 100644 releasenotes/notes/node-locking-4d135ca5b93524b1.yaml delete mode 100644 releasenotes/notes/optional-root-disk-9b972f504b2e6262.yaml delete mode 100644 releasenotes/notes/patch-head-backslash-24bcdd03ba254bf2.yaml delete mode 100644 releasenotes/notes/pci_devices-plugin-5b93196e0e973155.yaml delete mode 100644 releasenotes/notes/port-creation-plugin-c0405ec646b1051d.yaml delete mode 100644 releasenotes/notes/preprocessing-error-01e55b4db20fb7fc.yaml delete mode 100644 releasenotes/notes/processing-data-type-check-7c914339d3ab15ba.yaml delete mode 100644 releasenotes/notes/processing-logging-e2d27bbac95a7213.yaml delete mode 100644 releasenotes/notes/pxe-enabled-cbc3287ebe3fcd49.yaml delete mode 100644 releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml delete mode 100644 
releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml
delete mode 100644 releasenotes/notes/rollback-formatting-7d61c9af2600d42f.yaml
delete mode 100644 releasenotes/notes/rollback-removal-a03a989e2e9f776b.yaml
delete mode 100644 releasenotes/notes/rules-invert-2585173a11db3c31.yaml
delete mode 100644 releasenotes/notes/set-node-to-error-when-swift-failure-3e919ecbf9db6401.yaml
delete mode 100644 releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml
delete mode 100644 releasenotes/notes/sphinx-docs-4d0a5886261e57bf.yaml
delete mode 100644 releasenotes/notes/status-removal-fa1d9a98ffad9f60.yaml
delete mode 100644 releasenotes/source/_static/.placeholder
delete mode 100644 releasenotes/source/_templates/.placeholder
delete mode 100644 releasenotes/source/conf.py
delete mode 100644 releasenotes/source/index.rst
delete mode 100644 releasenotes/source/liberty.rst
delete mode 100644 releasenotes/source/mitaka.rst
delete mode 100644 releasenotes/source/newton.rst
delete mode 100644 releasenotes/source/ocata.rst
delete mode 100644 releasenotes/source/unreleased.rst
delete mode 100644 requirements.txt
delete mode 100644 rootwrap.conf
delete mode 100644 rootwrap.d/ironic-inspector-firewall.filters
delete mode 100644 setup.cfg
delete mode 100644 setup.py
delete mode 100644 test-requirements.txt
delete mode 100755 tools/states_to_dot.py
delete mode 100755 tools/test-setup.sh
delete mode 100644 tox.ini

diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 82f0112..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,42 +0,0 @@
-# Compiled files
-*.py[co]
-*.a
-*.o
-*.so
-
-# Sphinx
-_build
-doc/source/contributor/api/
-
-# release notes build
-releasenotes/build
-
-# Packages/installer info
-*.egg
-*.egg-info
-dist
-build
-eggs
-parts
-var
-sdist
-develop-eggs
-.installed.cfg
-.eggs/
-
-# Other
-*.DS_Store
-.idea
-.testrepository
-.tox
-.venv
-.*.swp
-.coverage
-cover
-AUTHORS
-ChangeLog
-*.sqlite
-*~
-
-# Vagrant
-.vagrant
\ No newline at end of file
diff --git a/.gitreview b/.gitreview
deleted file mode 100644
index 20ee601..0000000
--- a/.gitreview
+++ /dev/null
@@ -1,4 +0,0 @@
-[gerrit]
-host=review.openstack.org
-port=29418
-project=openstack/ironic-inspector.git
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
deleted file mode 100644
index bdeda71..0000000
--- a/CONTRIBUTING.rst
+++ /dev/null
@@ -1,367 +0,0 @@
-=================
-How To Contribute
-=================
-
-Basics
-~~~~~~
-
-* Our source code is hosted on `OpenStack GitHub`_, but please do not send pull
-  requests there.
-
-* Please follow the usual OpenStack `Gerrit Workflow`_ to submit a patch.
-
-* Update the change log in README.rst on any significant change.
-
-* It goes without saying that any code change should be accompanied by unit
-  tests.
-
-* Note the branch you're proposing changes to. ``master`` is the current focus
-  of development; use ``stable/VERSION`` for proposing an urgent fix, where
-  ``VERSION`` is the current stable series. E.g. at the moment of writing the
-  stable branch is ``stable/1.0``.
-
-* Please file a launchpad_ blueprint for any significant code change and a bug
-  for any significant bug fix.
-
-.. _OpenStack GitHub: https://github.com/openstack/ironic-inspector
-.. _Gerrit Workflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow
-.. _launchpad: https://bugs.launchpad.net/ironic-inspector
-
-Development Environment
-~~~~~~~~~~~~~~~~~~~~~~~
-
-First of all, install the *tox* utility. It's likely to be in your distribution
-repositories under the name ``python-tox``.
-Alternatively, you can install it from PyPI.
-
-Next, check out the repository and create the environments::
-
-    git clone https://github.com/openstack/ironic-inspector.git
-    cd ironic-inspector
-    tox
-
-Repeat the *tox* command each time you need to run tests. If you don't have a
-Python interpreter for one of the supported versions (currently 2.7 and 3.4),
-use the ``-e`` flag to select only some environments, e.g.
-
-::
-
-    tox -e py27
-
-.. note::
-    Support for Python 3 is highly experimental, stay with Python 2 for the
-    production environment for now.
-
-.. note::
-    This command also runs tests for database migrations. By default the sqlite
-    backend is used. For testing with mysql or postgresql, you need to set up
-    a db named 'openstack_citest' with user 'openstack_citest' and password
-    'openstack_citest' on localhost. Use the script
-    ``tools/test-setup.sh`` to set the database up the same way as
-    done in the OpenStack CI environment.
-
-.. note::
-    Users of Fedora <= 23 will need to run "sudo dnf --releasever=24 update
-    python-virtualenv" to run unit tests.
-
-To run the functional tests, use::
-
-    tox -e func
-
-Once you have added a new state or transition to the inspection state machine,
-you should regenerate the :ref:`State machine diagram ` with::
-
-    tox -e genstates
-
-Run the service with::
-
-    .tox/py27/bin/ironic-inspector --config-file example.conf
-
-Of course you may have to modify ``example.conf`` to match your OpenStack
-environment.
-
-You can develop and test **ironic-inspector** using DevStack - see
-`Deploying Ironic Inspector with DevStack`_ for the current status.
-
-Deploying Ironic Inspector with DevStack
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-`DevStack `_ provides a way to
-quickly build a full OpenStack development environment with requested
-components. There is a plugin for installing **ironic-inspector** in DevStack.
-Installing **ironic-inspector** requires a machine running Ubuntu 14.04 (or
-later) or Fedora 23 (or later). Make sure this machine is fully up to date and
-has the latest packages installed before beginning this process.
-
-Download DevStack::
-
-    git clone https://git.openstack.org/openstack-dev/devstack.git
-    cd devstack
-
-Create a ``local.conf`` file with the minimal settings required to
-enable both **ironic** and **ironic-inspector**. You can start with the
-`Example local.conf`_ and extend it as needed.
-
-Example local.conf
-------------------
-
-.. literalinclude:: ../../../devstack/example.local.conf
-
-Notes
------
-
-* Set IRONIC_INSPECTOR_BUILD_RAMDISK to True if you want to build the ramdisk.
-  The default value is False, and the ramdisk will be downloaded instead of
-  built.
-
-* 1024 MiB of RAM is the minimum required for the default build of IPA based on
-  CoreOS. If you plan to use another operating system and build IPA with
-  diskimage-builder, 2048 MiB is recommended.
-
-* Network configuration is quite sensitive; better not to touch it
-  without a deep understanding.
-
-* This configuration disables **horizon**, **heat**, **cinder** and
-  **tempest**; adjust it if you need these services.
-
-Start the install::
-
-    ./stack.sh
-
-Usage
------
-
-After installation is complete, you can source ``openrc`` in your shell, and
-then use the OpenStack CLI to manage your DevStack::
-
-    source openrc admin demo
-
-Show DevStack screens::
-
-    screen -x stack
-
-To exit the screen session, hit ``CTRL-a d``.
-
-List baremetal nodes::
-
-    openstack baremetal node list
-
-Bring the node to the manageable state::
-
-    openstack baremetal node manage
-
-Inspect the node::
-
-    openstack baremetal node inspect
-
-.. note::
-    The deploy driver used must support the inspect interface. See also the
-    `Ironic Python Agent
-    `_.
-
-A node can also be inspected using the following command. However, this will
-not affect the provision state of the node::
-
-    openstack baremetal introspection start
-
-Check inspection status::
-
-    openstack baremetal introspection status
-
-Optionally, get the inspection data::
-
-    openstack baremetal introspection data save
-
-
-Writing a Plugin
-~~~~~~~~~~~~~~~~
-
-* **ironic-inspector** allows you to hook code into the data processing chain
-  after introspection. Inherit the ``ProcessingHook`` class defined in the
-  ironic_inspector.plugins.base_ module and override either or both of
-  the following methods (a minimal sketch follows this list item):
-
-  ``before_processing(introspection_data,**)``
-      called before any data processing, providing the raw data. Each plugin
-      in the chain can modify the data, so the order in which plugins are
-      loaded matters here. Returns nothing.
-  ``before_update(introspection_data,node_info,**)``
-      called after the node is found and its ports are created, but before
-      data is updated on the node. Please refer to the docstring for details
-      and examples.
-
-  You can optionally define the following attribute:
-
-  ``dependencies``
-      a list of entry point names of the hooks this hook depends on. These
-      hooks are expected to be enabled before the current hook.
-
-  Make your plugin a setuptools entry point under the
-  ``ironic_inspector.hooks.processing`` namespace and enable it in the
-  configuration file (``processing.processing_hooks`` option).
-
-* **ironic-inspector** allows plugins to override the action taken when a node
-  is not found in the node cache. Write a callable with the following
-  signature:
-
-  ``(introspection_data,**)``
-      called when a node is not found in the cache, providing the processed
-      data. Should return a ``NodeInfo`` class instance.
-
-  Make your plugin a setuptools entry point under the
-  ``ironic_inspector.hooks.node_not_found`` namespace and enable it in the
-  configuration file (``processing.node_not_found_hook`` option).
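To make the processing hook contract above concrete, here is a minimal
sketch of such a hook. It is illustrative only: the ``example_key`` data
item, the ``my_package.hooks`` module path and the ``example`` entry point
name are assumptions made for this sketch, not part of the project.
``NodeInfo.patch`` is the node cache API for applying a JSON patch to the
Ironic node::

    from ironic_inspector.plugins import base


    class ExampleProcessingHook(base.ProcessingHook):
        """Minimal sketch of a processing hook."""

        def before_processing(self, introspection_data, **kwargs):
            # The raw ramdisk data may be amended in place; nothing is
            # returned. Later hooks in the chain see the modified data.
            introspection_data.setdefault('example_key', 'example value')

        def before_update(self, introspection_data, node_info, **kwargs):
            # The node has been found and its ports created; store a
            # value on the Ironic node via a JSON patch.
            node_info.patch([{'op': 'add', 'path': '/extra/example',
                              'value': introspection_data['example_key']}])

The hook would then be registered in the ``setup.cfg`` of its own package
(entry point name assumed here) and enabled via the
``processing.processing_hooks`` option::

    [entry_points]
    ironic_inspector.hooks.processing =
        example = my_package.hooks:ExampleProcessingHook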
-* **ironic-inspector** allows more condition types to be added for
-  `Introspection Rules`_. Inherit the ``RuleConditionPlugin`` class defined in
-  the ironic_inspector.plugins.base_ module and override at least the
-  following method:
-
-  ``check(node_info,field,params,**)``
-      called to check that the condition holds for a given field. The field
-      value is provided as the ``field`` argument; ``params`` is a dictionary
-      defined at the time of condition creation. Returns a boolean value.
-
-  The following methods and attributes may also be overridden:
-
-  ``validate(params,**)``
-      called to validate the parameters provided during condition creation.
-      The default implementation requires the keys listed in
-      ``REQUIRED_PARAMS`` (and only them).
-
-  ``REQUIRED_PARAMS``
-      contains the set of required parameters used by the default
-      implementation of the ``validate`` method; defaults to the ``value``
-      parameter.
-
-  ``ALLOW_NONE``
-      if set to ``True``, missing fields will be passed as ``None`` values
-      instead of failing the condition. Defaults to ``False``.
-
-  Make your plugin a setuptools entry point under the
-  ``ironic_inspector.rules.conditions`` namespace.
-
-* **ironic-inspector** allows more action types to be added for `Introspection
-  Rules`_. Inherit the ``RuleActionPlugin`` class defined in the
-  ironic_inspector.plugins.base_ module and override at least the following
-  method:
-
-  ``apply(node_info,params,**)``
-      called to apply the action.
-
-  The following methods and attributes may also be overridden:
-
-  ``validate(params,**)``
-      called to validate the parameters provided during action creation.
-      The default implementation requires the keys listed in
-      ``REQUIRED_PARAMS`` (and only them).
-
-  ``REQUIRED_PARAMS``
-      contains the set of required parameters used by the default
-      implementation of the ``validate`` method; defaults to no parameters.
-
-  Make your plugin a setuptools entry point under the
-  ``ironic_inspector.rules.actions`` namespace.
-
-.. note::
-    The ``**`` argument is needed so that optional arguments can be added
-    without breaking out-of-tree plugins. Please make sure to accept and
-    ignore it.
-
-.. _ironic_inspector.plugins.base: http://docs.openstack.org/developer/ironic-inspector/api/ironic_inspector.plugins.base.html
-.. _Introspection Rules: http://docs.openstack.org/developer/ironic-inspector/usage.html#introspection-rules
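As an illustration of the conditions contract above, here is a hedged sketch
of a condition plugin that checks whether a field value starts with a given
prefix. The class name, module path and ``starts-with`` entry point name are
assumptions made for this example::

    from ironic_inspector.plugins import base


    class PrefixCondition(base.RuleConditionPlugin):
        """Sketch: does the field value start with params['value']?"""

        # The default validate() will require exactly these parameters.
        REQUIRED_PARAMS = {'value'}

        def check(self, node_info, field, params, **kwargs):
            # field carries the value extracted from the introspection
            # data; params comes from the rule definition.
            return str(field).startswith(str(params['value']))

It would then be registered under the conditions namespace::

    [entry_points]
    ironic_inspector.rules.conditions =
        starts-with = my_package.rules:PrefixCondition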
-
-Making changes to the database
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In order to make a change to the ironic-inspector database you must update the
-database models found in ironic_inspector.db_ and then create a migration to
-reflect that change.
-
-There are two ways to create a migration, described below; both of them
-generate a new migration file. In this file there is only one function:
-
-* ``upgrade`` - the function to run when
-  ``ironic-inspector-dbsync upgrade`` is run. It should be populated with
-  code to bring the database up to its new state from the state it was in
-  after the last migration.
-
-For further information on creating a migration, refer to
-`Create a Migration Script`_ from the alembic documentation.
-
-Autogenerate
-------------
-
-This is the simplest way to create a migration. Alembic will compare the
-models to an up-to-date database, and then attempt to write a migration based
-on the differences. This should generate correct migrations in most cases;
-however, there are some changes it cannot detect, and these may require
-manual modification; see `What does Autogenerate Detect (and what does it not
-detect?)`_ from the alembic documentation.
-
-::
-
-    ironic-inspector-dbsync upgrade
-    ironic-inspector-dbsync revision -m "A short description" --autogenerate
-
-Manual
-------
-
-This will generate an empty migration file, with the correct revision
-information already included. However, the upgrade function is left empty
-and must be manually populated in order to perform the correct actions on
-the database::
-
-    ironic-inspector-dbsync revision -m "A short description"
-
-.. _Create a Migration Script: http://alembic.zzzcomputing.com/en/latest/tutorial.html#create-a-migration-script
-.. _ironic_inspector.db: http://docs.openstack.org/developer/ironic-inspector/api/ironic_inspector.db.html
-.. _What does Autogenerate Detect (and what does it not detect?): http://alembic.zzzcomputing.com/en/latest/autogenerate.html#what-does-autogenerate-detect-and-what-does-it-not-detect
-
-Implementing PXE Filter Drivers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Background
-----------
-
-**inspector** in-band introspection PXE-boots the Ironic Python Agent "live"
-image to inspect the baremetal server. **ironic** also PXE-boots IPA to
-perform tasks on a node, such as deploying an image. **ironic** uses
-**neutron** to provide DHCP; however, **neutron** does not provide DHCP for
-unknown MAC addresses, so **inspector** has to use its own DHCP/TFTP stack for
-discovery and inspection.
-
-When **ironic** and **inspector** are operating in the same L2 network, there
-is a potential for the two DHCP servers to race, which could result in a node
-being deployed by **ironic** being PXE-booted by **inspector** instead.
-
-To prevent DHCP races between the **inspector** DHCP and the **ironic** DHCP,
-**inspector** has to be able to filter which nodes can get a DHCP lease from
-the **inspector** DHCP server. These filters can then be used to prevent
-nodes enrolled in the **ironic** inventory from being PXE-booted unless they
-are explicitly moved into the ``inspected`` state.
-
-Filter Interface
-----------------
-
-.. py:currentmodule:: ironic_inspector.pxe_filter.interface
-
-The contract between **inspector** and a PXE filter driver is described in the
-:class:`FilterDriver` interface. The methods a driver has to implement are:
-
-* :meth:`~FilterDriver.init_filter` called on service start to initialize
-  internal driver state
-
-* :meth:`~FilterDriver.sync` called both periodically and when a node starts
-  or finishes introspection, to whitelist or blacklist its ports' MAC
-  addresses in the driver
-
-* :meth:`~FilterDriver.tear_down_filter` called on service exit to reset the
-  internal driver state
-
-.. py:currentmodule:: ironic_inspector.pxe_filter.base
-
-Driver-specific configuration is best parsed during instantiation. There is
-also a convenience generic interface implementation, :class:`BaseFilter`, that
-provides a base locking and initialization implementation. If required, a
-driver can opt out of the periodic synchronization by overriding
-:meth:`~BaseFilter.get_periodic_sync_task`.
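To show the shape of a driver, here is a hedged sketch of a minimal filter
built on the :class:`BaseFilter` convenience class described above. It
performs no real filtering and is not an existing driver; it only marks
where each part of the contract would be implemented::

    from ironic_inspector.pxe_filter import base


    class NoopExampleFilter(base.BaseFilter):
        """Sketch: a do-nothing PXE filter driver."""

        def init_filter(self):
            # Called once on service start; a real driver would create
            # its firewall chain or dnsmasq configuration here.
            pass

        def sync(self, ironic):
            # Called periodically and when introspection starts or
            # finishes; a real driver would reconcile the MAC
            # white/blacklists here.
            pass

        def tear_down_filter(self):
            # Called on service exit; undo whatever init_filter() set up.
            pass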
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/README b/README
new file mode 100644
index 0000000..8fcd2b2
--- /dev/null
+++ b/README
@@ -0,0 +1,14 @@
+This project is no longer maintained.
+
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
+
+For ongoing work on maintaining OpenStack packages in the Debian
+distribution, please see the Debian OpenStack packaging team at
+https://wiki.debian.org/OpenStack/.
+
+For any further questions, please email
+openstack-dev@lists.openstack.org or join #openstack-dev on
+Freenode.
diff --git a/README.rst b/README.rst
deleted file mode 100644
index 694ecbd..0000000
--- a/README.rst
+++ /dev/null
@@ -1,37 +0,0 @@
-===============================================
-Hardware introspection for OpenStack Bare Metal
-===============================================
-
-Introduction
-============
-
-.. image:: http://governance.openstack.org/badges/ironic-inspector.svg
-    :target: http://governance.openstack.org/reference/tags/index.html
-
-This is an auxiliary service for discovering hardware properties for a
-node managed by `Ironic`_. Hardware introspection or hardware
-properties discovery is a process of getting hardware parameters required for
-scheduling from a bare metal node, given its power management credentials
-(e.g. IPMI address, user name and password).
-
-* Free software: Apache license
-* Source: http://git.openstack.org/cgit/openstack/ironic-inspector
-* Bugs: http://bugs.launchpad.net/ironic-inspector
-* Downloads: https://pypi.python.org/pypi/ironic-inspector
-* Documentation: http://docs.openstack.org/developer/ironic-inspector
-* Python client library and CLI tool: `python-ironic-inspector-client
-  `_
-  (`documentation
-  `_).
-
-.. _Ironic: https://wiki.openstack.org/wiki/Ironic
-
-.. note::
-    **ironic-inspector** was called *ironic-discoverd* before version 2.0.0.
-
-Release Notes
-=============
-
-For information on any current or prior version, see `the release notes`_.
-
-.. _the release notes: http://docs.openstack.org/releasenotes/ironic-inspector/
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index efceab8..0000000
--- a/babel.cfg
+++ /dev/null
@@ -1 +0,0 @@
-[python: **.py]
diff --git a/config-generator.conf b/config-generator.conf
deleted file mode 100644
index 1509509..0000000
--- a/config-generator.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-[DEFAULT]
-output_file = example.conf
-namespace = ironic_inspector
-namespace = ironic_inspector.common.ironic
-namespace = ironic_inspector.common.swift
-namespace = ironic_inspector.plugins.capabilities
-namespace = ironic_inspector.plugins.discovery
-namespace = ironic_inspector.plugins.pci_devices
-namespace = keystonemiddleware.auth_token
-namespace = oslo.db
-namespace = oslo.log
-namespace = oslo.middleware.cors
diff --git a/devstack/example.local.conf b/devstack/example.local.conf
deleted file mode 100644
index 7ea69ac..0000000
--- a/devstack/example.local.conf
+++ /dev/null
@@ -1,61 +0,0 @@
-[[local|localrc]]
-# Credentials
-# Reference: http://docs.openstack.org/developer/devstack/configuration.html
-ADMIN_PASSWORD=password
-DATABASE_PASSWORD=$ADMIN_PASSWORD
-RABBIT_PASSWORD=$ADMIN_PASSWORD
-SERVICE_PASSWORD=$ADMIN_PASSWORD
-SERVICE_TOKEN=$ADMIN_PASSWORD
-SWIFT_HASH=$ADMIN_PASSWORD
-
-# Enable Neutron, which is required by Ironic, and disable nova-network.
-disable_service n-net n-novnc
-enable_service neutron q-svc q-agt q-dhcp q-l3 q-meta
-
-# Enable Swift for agent_* drivers
-enable_service s-proxy s-object s-container s-account
-
-# Enable Ironic, Ironic Inspector plugins
-enable_plugin ironic https://github.com/openstack/ironic
-enable_plugin ironic-inspector https://github.com/openstack/ironic-inspector
-
-# Disable services
-disable_service horizon
-disable_service heat h-api h-api-cfn h-api-cw h-eng
-disable_service cinder c-sch c-api c-vol
-disable_service tempest
-
-# Swift temp URLs are required for agent_* drivers.
-SWIFT_ENABLE_TEMPURLS=True
-
-# Create 2 virtual machines to pose as Ironic's baremetal nodes.
-IRONIC_VM_COUNT=2
-IRONIC_VM_SPECS_RAM=1024
-IRONIC_VM_SPECS_DISK=10
-IRONIC_BAREMETAL_BASIC_OPS=True
-DEFAULT_INSTANCE_TYPE=baremetal
-
-# Enable Ironic drivers.
-IRONIC_ENABLED_DRIVERS=fake,agent_ipmitool,pxe_ipmitool
-
-# This driver should be in the enabled list above.
-IRONIC_DEPLOY_DRIVER=agent_ipmitool - -IRONIC_BUILD_DEPLOY_RAMDISK=False -IRONIC_INSPECTOR_BUILD_RAMDISK=False - -VIRT_DRIVER=ironic - -TEMPEST_ALLOW_TENANT_ISOLATION=False - -# By default, DevStack creates a 10.0.0.0/24 network for instances. -# If this overlaps with the hosts network, you may adjust with the -# following. -NETWORK_GATEWAY=10.1.0.1 -FIXED_RANGE=10.1.0.0/24 - -# Log all output to files -LOGDAYS=1 -LOGFILE=$HOME/logs/stack.sh.log -SCREEN_LOGDIR=$HOME/logs/screen -IRONIC_VM_LOG_DIR=$HOME/ironic-bm-logs diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100644 index 0ea8de3..0000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,354 +0,0 @@ -IRONIC_INSPECTOR_DEBUG=${IRONIC_INSPECTOR_DEBUG:-True} -IRONIC_INSPECTOR_DIR=$DEST/ironic-inspector -IRONIC_INSPECTOR_DATA_DIR=$DATA_DIR/ironic-inspector -IRONIC_INSPECTOR_BIN_DIR=$(get_python_exec_prefix) -IRONIC_INSPECTOR_BIN_FILE=$IRONIC_INSPECTOR_BIN_DIR/ironic-inspector -IRONIC_INSPECTOR_DBSYNC_BIN_FILE=$IRONIC_INSPECTOR_BIN_DIR/ironic-inspector-dbsync -IRONIC_INSPECTOR_CONF_DIR=${IRONIC_INSPECTOR_CONF_DIR:-/etc/ironic-inspector} -IRONIC_INSPECTOR_CONF_FILE=$IRONIC_INSPECTOR_CONF_DIR/inspector.conf -IRONIC_INSPECTOR_CMD="$IRONIC_INSPECTOR_BIN_FILE --config-file $IRONIC_INSPECTOR_CONF_FILE" -IRONIC_INSPECTOR_DHCP_CONF_FILE=$IRONIC_INSPECTOR_CONF_DIR/dnsmasq.conf -IRONIC_INSPECTOR_ROOTWRAP_CONF_FILE=$IRONIC_INSPECTOR_CONF_DIR/rootwrap.conf -IRONIC_INSPECTOR_ADMIN_USER=${IRONIC_INSPECTOR_ADMIN_USER:-ironic-inspector} -IRONIC_INSPECTOR_AUTH_CACHE_DIR=${IRONIC_INSPECTOR_AUTH_CACHE_DIR:-/var/cache/ironic-inspector} -IRONIC_INSPECTOR_MANAGE_FIREWALL=$(trueorfalse True IRONIC_INSPECTOR_MANAGE_FIREWALL) -IRONIC_INSPECTOR_HOST=$HOST_IP -IRONIC_INSPECTOR_PORT=5050 -IRONIC_INSPECTOR_URI="http://$IRONIC_INSPECTOR_HOST:$IRONIC_INSPECTOR_PORT" -IRONIC_INSPECTOR_BUILD_RAMDISK=$(trueorfalse False IRONIC_INSPECTOR_BUILD_RAMDISK) -IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe.vmlinuz} -IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz} -IRONIC_INSPECTOR_COLLECTORS=${IRONIC_INSPECTOR_COLLECTORS:-default,logs,pci-devices} -IRONIC_INSPECTOR_RAMDISK_LOGDIR=${IRONIC_INSPECTOR_RAMDISK_LOGDIR:-$IRONIC_INSPECTOR_DATA_DIR/ramdisk-logs} -IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS=${IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS:-True} -IRONIC_INSPECTOR_TIMEOUT=${IRONIC_INSPECTOR_TIMEOUT:-600} -IRONIC_INSPECTOR_CLEAN_UP_PERIOD=${IRONIC_INSPECTOR_CLEAN_UP_PERIOD:-} -# These should not overlap with other ranges/networks -IRONIC_INSPECTOR_INTERNAL_IP=${IRONIC_INSPECTOR_INTERNAL_IP:-172.24.42.254} -IRONIC_INSPECTOR_INTERNAL_SUBNET_SIZE=${IRONIC_INSPECTOR_INTERNAL_SUBNET_SIZE:-24} -IRONIC_INSPECTOR_DHCP_RANGE=${IRONIC_INSPECTOR_DHCP_RANGE:-172.24.42.100,172.24.42.253} -IRONIC_INSPECTOR_INTERFACE=${IRONIC_INSPECTOR_INTERFACE:-br-inspector} -IRONIC_INSPECTOR_INTERFACE_PHYSICAL=$(trueorfalse False IRONIC_INSPECTOR_INTERFACE_PHYSICAL) -IRONIC_INSPECTOR_INTERNAL_URI="http://$IRONIC_INSPECTOR_INTERNAL_IP:$IRONIC_INSPECTOR_PORT" -IRONIC_INSPECTOR_INTERNAL_IP_WITH_NET="$IRONIC_INSPECTOR_INTERNAL_IP/$IRONIC_INSPECTOR_INTERNAL_SUBNET_SIZE" -# Whether DevStack will be setup for bare metal or VMs -IRONIC_IS_HARDWARE=$(trueorfalse False IRONIC_IS_HARDWARE) -IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK=${IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK:-""} 
-IRONIC_INSPECTOR_OVS_PORT=${IRONIC_INSPECTOR_OVS_PORT:-brbm-inspector} - -GITDIR["python-ironic-inspector-client"]=$DEST/python-ironic-inspector-client -GITREPO["python-ironic-inspector-client"]=${IRONIC_INSPECTOR_CLIENT_REPO:-${GIT_BASE}/openstack/python-ironic-inspector-client.git} -GITBRANCH["python-ironic-inspector-client"]=${IRONIC_INSPECTOR_CLIENT_BRANCH:-master} - -### Utilities - -function mkdir_chown_stack { - if [[ ! -d "$1" ]]; then - sudo mkdir -p "$1" - fi - sudo chown $STACK_USER "$1" -} - -function inspector_iniset { - iniset "$IRONIC_INSPECTOR_CONF_FILE" $1 $2 $3 -} - -### Install-start-stop - -function install_inspector { - setup_develop $IRONIC_INSPECTOR_DIR -} - -function install_inspector_dhcp { - install_package dnsmasq -} - -function install_inspector_client { - if use_library_from_git python-ironic-inspector-client; then - git_clone_by_name python-ironic-inspector-client - setup_dev_lib python-ironic-inspector-client - else - pip_install_gr python-ironic-inspector-client - fi -} - -function start_inspector { - run_process ironic-inspector "$IRONIC_INSPECTOR_CMD" -} - -function start_inspector_dhcp { - # NOTE(dtantsur): USE_SYSTEMD requires an absolute path - run_process ironic-inspector-dhcp \ - "$(which dnsmasq) --conf-file=$IRONIC_INSPECTOR_DHCP_CONF_FILE" \ - "" root -} - -function stop_inspector { - stop_process ironic-inspector -} - -function stop_inspector_dhcp { - stop_process ironic-inspector-dhcp -} - -### Configuration - -function prepare_tftp { - IRONIC_INSPECTOR_IMAGE_PATH="$TOP_DIR/files/ironic-inspector" - IRONIC_INSPECTOR_KERNEL_PATH="$IRONIC_INSPECTOR_IMAGE_PATH.kernel" - IRONIC_INSPECTOR_INITRAMFS_PATH="$IRONIC_INSPECTOR_IMAGE_PATH.initramfs" - IRONIC_INSPECTOR_CALLBACK_URI="$IRONIC_INSPECTOR_INTERNAL_URI/v1/continue" - - IRONIC_INSPECTOR_KERNEL_CMDLINE="ipa-inspection-callback-url=$IRONIC_INSPECTOR_CALLBACK_URI systemd.journald.forward_to_console=yes" - IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE vga=normal console=tty0 console=ttyS0" - IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE ipa-inspection-collectors=$IRONIC_INSPECTOR_COLLECTORS" - IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE ipa-debug=1" - if [[ "$IRONIC_INSPECTOR_BUILD_RAMDISK" == "True" ]]; then - if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! -e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then - build_ipa_ramdisk "$IRONIC_INSPECTOR_KERNEL_PATH" "$IRONIC_INSPECTOR_INITRAMFS_PATH" - fi - else - # download the agent image tarball - if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! 
-e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then - if [ -e "$IRONIC_DEPLOY_KERNEL" -a -e "$IRONIC_DEPLOY_RAMDISK" ]; then - cp $IRONIC_DEPLOY_KERNEL $IRONIC_INSPECTOR_KERNEL_PATH - cp $IRONIC_DEPLOY_RAMDISK $IRONIC_INSPECTOR_INITRAMFS_PATH - else - wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_INSPECTOR_KERNEL_PATH - wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_INSPECTOR_INITRAMFS_PATH - fi - fi - fi - - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - cp $IRONIC_INSPECTOR_KERNEL_PATH $IRONIC_HTTP_DIR/ironic-inspector.kernel - cp $IRONIC_INSPECTOR_INITRAMFS_PATH $IRONIC_HTTP_DIR - - cat > "$IRONIC_HTTP_DIR/ironic-inspector.ipxe" < "$IRONIC_TFTPBOOT_DIR/pxelinux.cfg/default" <$tempfile - chmod 0640 $tempfile - sudo chown root:root $tempfile - sudo mv $tempfile /etc/sudoers.d/ironic-inspector-rootwrap - - inspector_iniset DEFAULT rootwrap_config $IRONIC_INSPECTOR_ROOTWRAP_CONF_FILE - - mkdir_chown_stack "$IRONIC_INSPECTOR_RAMDISK_LOGDIR" - inspector_iniset processing ramdisk_logs_dir "$IRONIC_INSPECTOR_RAMDISK_LOGDIR" - inspector_iniset processing always_store_ramdisk_logs "$IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS" - if [ -n "$IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK" ]; then - inspector_iniset processing node_not_found_hook "$IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK" - fi - inspector_iniset DEFAULT timeout $IRONIC_INSPECTOR_TIMEOUT - if [ -n "$IRONIC_INSPECTOR_CLEAN_UP_PERIOD" ]; then - inspector_iniset DEFAULT clean_up_period "$IRONIC_INSPECTOR_CLEAN_UP_PERIOD" - fi - get_or_create_service "ironic-inspector" "baremetal-introspection" "Ironic Inspector baremetal introspection service" - get_or_create_endpoint "baremetal-introspection" "$REGION_NAME" \ - "$IRONIC_INSPECTOR_URI" "$IRONIC_INSPECTOR_URI" "$IRONIC_INSPECTOR_URI" - -} - -function configure_inspector_swift { - inspector_configure_auth_for swift - inspector_iniset processing store_data swift -} - -function configure_inspector_dhcp { - mkdir_chown_stack "$IRONIC_INSPECTOR_CONF_DIR" - - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - cat > "$IRONIC_INSPECTOR_DHCP_CONF_FILE" < "$IRONIC_INSPECTOR_DHCP_CONF_FILE" <, which is install_inspector in our case: -# https://github.com/openstack-dev/devstack/blob/dec121114c3ea6f9e515a452700e5015d1e34704/lib/stack#L32 -stack_install_service inspector - -if [[ "$IRONIC_INSPECTOR_MANAGE_FIREWALL" == "True" ]]; then - stack_install_service inspector_dhcp -fi - -$IRONIC_INSPECTOR_DBSYNC_BIN_FILE --config-file $IRONIC_INSPECTOR_CONF_FILE upgrade - -# calls upgrade inspector for specific release -upgrade_project ironic-inspector $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH - - -start_inspector - -if [[ "$IRONIC_INSPECTOR_MANAGE_FIREWALL" == "True" ]]; then - start_inspector_dhcp -fi - -# Don't succeed unless the services come up -ensure_services_started ironic-inspector -ensure_logs_exist ironic-inspector - -if [[ "$IRONIC_INSPECTOR_MANAGE_FIREWALL" == "True" ]]; then - ensure_services_started dnsmasq - ensure_logs_exist ironic-inspector-dhcp -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End $0" -echo "*********************************************************************" diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index 5cdf0aa..0000000 --- a/doc/Makefile +++ /dev/null @@ -1,159 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# Internal variables. 
-PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " xml to make Docutils-native XML files" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Heat.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Heat.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/Heat" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Heat" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The xml files are in $(BUILDDIR)/xml." 
- -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." \ No newline at end of file diff --git a/doc/source/.gitignore b/doc/source/.gitignore deleted file mode 100644 index 8647666..0000000 --- a/doc/source/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -target/ -build/ \ No newline at end of file diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst deleted file mode 100644 index 4cd8458..0000000 --- a/doc/source/admin/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Administrator Guide -=================== - -How to upgrade Ironic Inspector -------------------------------- - -.. toctree:: - :maxdepth: 2 - - upgrade diff --git a/doc/source/admin/upgrade.rst b/doc/source/admin/upgrade.rst deleted file mode 100644 index 8bdba7c..0000000 --- a/doc/source/admin/upgrade.rst +++ /dev/null @@ -1,28 +0,0 @@ -Upgrade Guide -------------- - -The `release notes `_ -should always be read carefully when upgrading the ironic-inspector service. -Starting with the Mitaka series, specific upgrade steps and considerations are -well-documented in the release notes. - -Upgrades are only supported one series at a time, or within a series. -Only offline (with downtime) upgrades are currently supported. - -When upgrading ironic-inspector, the following steps should always be taken: - -* Update ironic-inspector code, without restarting the service yet. - -* Stop the ironic-inspector service. - -* Run database migrations:: - - ironic-inspector-dbsync --config-file upgrade - -* Start the ironic-inspector service. - -* Upgrade the ironic-python-agent image used for introspection. - -.. 
note:: - There is no implicit upgrade order between ironic and ironic-inspector, - unless the `release notes`_ say otherwise. diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index b40737f..0000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- -# - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.viewcode', - ] -try: - import openstackdocstheme - extensions.append('openstackdocstheme') -except ImportError: - openstackdocstheme = None - -repository_name = 'openstack/ironic-inspector' -bug_project = 'ironic-inspector' -bug_tag = '' -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -wsme_protocols = ['restjson'] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Ironic Inspector' -copyright = u'OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -#from ironic import version as ironic_version -# The full version, including alpha/beta/rc tags. -#release = ironic_version.version_info.release_string() -# The short X.Y version. -#version = ironic_version.version_info.version_string() - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['ironic.'] - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# NOTE(cinerama): mock out nova modules so docs can build without warnings -#import mock -#import sys -#MOCK_MODULES = ['nova', 'nova.compute', 'nova.context'] -#for module in MOCK_MODULES: -# sys.modules[module] = mock.Mock() - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -if openstackdocstheme is not None: - html_theme = 'openstackdocs' -else: - html_theme = 'default' -#html_theme_path = ["."] -#html_theme = '_theme' -#html_static_path = ['_static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). 
-latex_documents = [ - ( - 'index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', - 'manual' - ), -] - -# -- Options for seqdiag ------------------------------------------------------ - -seqdiag_html_image_format = "SVG" diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index ac9f221..0000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _contributing_link: - -.. include:: ../../../CONTRIBUTING.rst - -Python API -~~~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - api/autoindex diff --git a/doc/source/images/states.svg b/doc/source/images/states.svg deleted file mode 100644 index b2a70a2..0000000 --- a/doc/source/images/states.svg +++ /dev/null @@ -1,230 +0,0 @@ - - - - - - -Ironic Inspector states - - - -enrolling - -enrolling - - - -error - -error - - - -enrolling->error - - -error - - - -enrolling->error - - -timeout - - - -processing - -processing - - - -enrolling->processing - - -process - - - -error->error - - -abort - - - -error->error - - -error - - - -reapplying - -reapplying - - - -error->reapplying - - -reapply - - - -starting - -starting - - - -error->starting - - -start - - - -processing->error - - -error - - - -processing->error - - -timeout - - - -finished - -finished - - - -processing->finished - - -finish - - - -reapplying->error - - -error - - - -reapplying->error - - -timeout - - - -reapplying->reapplying - - -reapply - - - -reapplying->finished - - -finish - - - -starting->error - - -error - - - -starting->error - - -timeout - - - -starting->starting - - -start - - - -waiting - -waiting - - - -starting->waiting - - -wait - - - -finished->reapplying - - -reapply - - - -finished->starting - - -start - - - -finished->finished - - -finish - - - -waiting->error - - -abort - - - -waiting->error - - -timeout - - - -waiting->processing - - -process - - - -waiting->starting - - -start - - - diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index cb600ca..0000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. include:: ../../README.rst - -Using Ironic Inspector -====================== - -.. toctree:: - :maxdepth: 2 - - install/index - user/index - admin/index - -Contributor Docs -================ - -.. toctree:: - :maxdepth: 2 - - contributor/index - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index 37409cc..0000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,361 +0,0 @@ -Install Guide -============= - -Install from PyPI_ (you may want to use virtualenv to isolate your -environment):: - - pip install ironic-inspector - -Also there is a `DevStack `_ -plugin for **ironic-inspector** - see :ref:`contributing_link` for the -current status. - -Finally, some distributions (e.g. Fedora) provide **ironic-inspector** -packaged, some of them - under its old name *ironic-discoverd*. - -There are several projects you can use to set up **ironic-inspector** in -production. `puppet-ironic -`_ provides Puppet -manifests, while `bifrost `_ -provides an Ansible-based standalone installer. Refer to Configuration_ -if you plan on installing **ironic-inspector** manually. - -.. _PyPI: https://pypi.python.org/pypi/ironic-inspector - -.. note:: - Please beware of :ref:`possible DNS issues ` when installing - **ironic-inspector** on Ubuntu. 
- -Version Support Matrix ----------------------- - -**ironic-inspector** currently requires the Bare Metal API version -``1.11`` to be provided by **ironic**. This version is available starting -with the Liberty release of **ironic**. - -Here is a mapping between the ironic versions and the supported -ironic-inspector versions. The Standalone column shows which -ironic-inspector versions can be used in standalone mode with each -ironic version. The Inspection Interface column shows which -ironic-inspector versions can be used with the inspection interface in -each version of **ironic**. - -============== ============ ==================== -Ironic Version Standalone Inspection Interface -============== ============ ==================== -Juno 1.0 N/A -Kilo 1.0 - 2.2 1.0 - 1.1 -Liberty 1.1 - 2.2.7 2.0 - 2.2.7 -Mitaka 2.3 - 3.X 2.3 - 3.X -Newton 3.3 - 4.X 3.3 - 4.X -Ocata+ 5.0 - 5.X 5.0 - 5.X -============== ============ ==================== - -.. note:: - ``3.X`` means there are no specific plans to deprecate support for this - ironic version. This does not imply that it will be supported forever. - -Configuration -------------- - -Copy ``example.conf`` to some permanent place -(e.g. ``/etc/ironic-inspector/inspector.conf``). -Fill in these minimum configuration values: - -* The ``keystone_authtoken`` section - credentials to use when checking user - authentication. - -* The ``ironic`` section - credentials to use when accessing **ironic** - API. - -* ``connection`` in the ``database`` section - SQLAlchemy connection string - for the database. - -* ``dnsmasq_interface`` in the ``firewall`` section - interface on which - ``dnsmasq`` (or another DHCP service) listens for PXE boot requests - (defaults to ``br-ctlplane`` which is a sane default for **tripleo**-based - installations but is unlikely to work for other cases). - -See comments inside `example.conf -`_ -for other possible configuration options. - -.. note:: - Configuration file contains a password and thus should be owned by ``root`` - and should have access rights like ``0600``. - -Here is an example *inspector.conf* (adapted from a gate run):: - - [DEFAULT] - debug = false - rootwrap_config = /etc/ironic-inspector/rootwrap.conf - - [database] - connection = mysql+pymysql://root:@127.0.0.1/ironic_inspector?charset=utf8 - - [firewall] - dnsmasq_interface = br-ctlplane - - [ironic] - os_region = RegionOne - project_name = service - password = - username = ironic-inspector - auth_url = http://127.0.0.1/identity - auth_type = password - - [keystone_authtoken] - auth_uri = http://127.0.0.1/identity - project_name = service - password = - username = ironic-inspector - auth_url = http://127.0.0.1/identity_v2_admin - auth_type = password - - [processing] - ramdisk_logs_dir = /var/log/ironic-inspector/ramdisk - store_data = swift - - [swift] - os_region = RegionOne - project_name = service - password = - username = ironic-inspector - auth_url = http://127.0.0.1/identity - auth_type = password - -.. note:: - Set ``debug = true`` if you want to see complete logs. - -**ironic-inspector** requires root rights for managing ``iptables``. It -gets them by running ``ironic-inspector-rootwrap`` utility with ``sudo``. -To allow it, copy file ``rootwrap.conf`` and directory ``rootwrap.d`` to the -configuration directory (e.g. 
``/etc/ironic-inspector/``) and create file -``/etc/sudoers.d/ironic-inspector-rootwrap`` with the following content:: - - Defaults:stack !requiretty - stack ALL=(root) NOPASSWD: /usr/bin/ironic-inspector-rootwrap /etc/ironic-inspector/rootwrap.conf * - -.. DANGER:: - Be very careful about typos in ``/etc/sudoers.d/ironic-inspector-rootwrap`` - as any typo will break sudo for **ALL** users on the system. Especially, - make sure there is a new line at the end of this file. - -.. note:: - ``rootwrap.conf`` and all files in ``rootwrap.d`` must be writable - only by root. - -.. note:: - If you store ``rootwrap.d`` in a different location, make sure to update - the *filters_path* option in ``rootwrap.conf`` to reflect the change. - - If your ``rootwrap.conf`` is in a different location, then you need - to update the *rootwrap_config* option in ``ironic-inspector.conf`` - to point to that location. - -Replace ``stack`` with whatever user you'll be using to run -**ironic-inspector**. - -Configuring IPA -~~~~~~~~~~~~~~~ - -ironic-python-agent_ is a ramdisk developed for **ironic** and support -for **ironic-inspector** was added during the Liberty cycle. This is the -default ramdisk starting with the Mitaka release. - -.. note:: - You need at least 1.5 GiB of RAM on the machines to use IPA built with - diskimage-builder_ and at least 384 MiB to use the *TinyIPA*. - -To build an **ironic-python-agent** ramdisk, do the following: - -* Get a recent enough version of diskimage-builder_:: - - sudo pip install -U "diskimage-builder>=1.1.2" - -* Build the ramdisk:: - - disk-image-create ironic-agent fedora -o ironic-agent - - .. note:: - Replace "fedora" with your distribution of choice. - -* Use the resulting files ``ironic-agent.kernel`` and - ``ironic-agent.initramfs`` in the following instructions to set up PXE or iPXE. - -Alternatively, you can download a `prebuilt TinyIPA image -`_ or use -the `other builders -`_. - -.. _diskimage-builder: https://docs.openstack.org/developer/diskimage-builder/ -.. _ironic-python-agent: https://docs.openstack.org/developer/ironic-python-agent/ - -Configuring PXE -~~~~~~~~~~~~~~~ - -For the PXE boot environment, you'll need: - -* TFTP server running and accessible (see below for using *dnsmasq*). - Ensure ``pxelinux.0`` is present in the TFTP root. - - Copy ``ironic-agent.kernel`` and ``ironic-agent.initramfs`` to the TFTP - root as well. - -* Next, set up ``$TFTPROOT/pxelinux.cfg/default`` as follows:: - - default introspect - - label introspect - kernel ironic-agent.kernel - append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://{IP}:5050/v1/continue systemd.journald.forward_to_console=yes - - ipappend 3 - - Replace ``{IP}`` with the IP of the machine (do not use the loopback interface, as it - will be accessed by the ramdisk on a booting machine). - - .. note:: - While ``systemd.journald.forward_to_console=yes`` is not actually - required, it will substantially simplify debugging if something - goes wrong. You can also enable IPA debug logging by appending - ``ipa-debug=1``. - - IPA is pluggable: you can insert introspection plugins called - *collectors* into it. For example, to enable a very handy ``logs`` collector - (sending ramdisk logs to **ironic-inspector**), modify the ``append`` - line in ``$TFTPROOT/pxelinux.cfg/default``:: - - append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://{IP}:5050/v1/continue ipa-inspection-collectors=default,logs systemd.journald.forward_to_console=yes - - ..
note:: - You probably want to always keep the ``default`` collector, as it provides - the basic information required for introspection. - -* You need a PXE boot server (e.g. *dnsmasq*) running on **the same** machine as - **ironic-inspector**. Don't do any firewall configuration: - **ironic-inspector** will handle it for you. In the **ironic-inspector** - configuration file, set ``dnsmasq_interface`` to the interface your - PXE boot server listens on. Here is an example *dnsmasq.conf*:: - - port=0 - interface={INTERFACE} - bind-interfaces - dhcp-range={DHCP IP RANGE, e.g. 192.168.0.50,192.168.0.150} - enable-tftp - tftp-root={TFTP ROOT, e.g. /tftpboot} - dhcp-boot=pxelinux.0 - dhcp-sequential-ip - - .. note:: - ``dhcp-sequential-ip`` is used because otherwise a lot of nodes booting - simultaneously cause conflicts - the same IP address is suggested to - several nodes. - -Configuring iPXE -~~~~~~~~~~~~~~~~ - -iPXE allows better scaling as it primarily uses the HTTP protocol instead of -slow and unreliable TFTP. You still need a TFTP server as a fallback for -nodes not supporting iPXE. To use iPXE, you'll need: - -* TFTP server running and accessible (see above for using *dnsmasq*). - Ensure ``undionly.kpxe`` is present in the TFTP root. If any of your nodes - boot with UEFI, you'll also need ``ipxe.efi`` there. - -* You also need an HTTP server capable of serving static files. - Copy ``ironic-agent.kernel`` and ``ironic-agent.initramfs`` there. - -* Create a file called ``inspector.ipxe`` in the HTTP root (you can name and - place it differently, just don't forget to adjust the *dnsmasq.conf* example - below):: - - #!ipxe - - :retry_dhcp - dhcp || goto retry_dhcp - - :retry_boot - imgfree - kernel --timeout 30000 http://{IP}:8088/ironic-agent.kernel ipa-inspection-callback-url=http://{IP}:5050/v1/continue systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd=ironic-agent.initramfs || goto retry_boot - initrd --timeout 30000 http://{IP}:8088/ironic-agent.initramfs || goto retry_boot - boot - - .. note:: - Older versions of the iPXE ROM tend to misbehave on unreliable network - connections, thus we use the timeout option with retries. - - Just like with PXE, you can customize the list of collectors by appending - the ``ipa-inspection-collectors`` kernel option. For example:: - - ipa-inspection-collectors=default,logs,extra_hardware - -* Just as with PXE, you'll need a PXE boot server. The configuration, however, - will be different. Here is an example *dnsmasq.conf*:: - - port=0 - interface={INTERFACE} - bind-interfaces - dhcp-range={DHCP IP RANGE, e.g. 192.168.0.50,192.168.0.150} - enable-tftp - tftp-root={TFTP ROOT, e.g. /tftpboot} - dhcp-sequential-ip - dhcp-match=ipxe,175 - dhcp-match=set:efi,option:client-arch,7 - dhcp-boot=tag:ipxe,http://{IP}:8088/inspector.ipxe - dhcp-boot=tag:efi,ipxe.efi - dhcp-boot=undionly.kpxe,localhost.localdomain,{IP} - - First, we configure the same common parameters as with PXE. Then we define - ``ipxe`` and ``efi`` tags. Nodes already supporting iPXE are ordered to - download and execute ``inspector.ipxe``. Nodes without iPXE booted with UEFI - will get ``ipxe.efi`` firmware to execute, while the remaining ones will get - ``undionly.kpxe``. - -Managing the **ironic-inspector** Database ------------------------------------------- - -**ironic-inspector** provides a command line client for managing its -database. This client can be used for upgrading and downgrading the database -using `alembic `_ migrations.
- -If this is your first time running **ironic-inspector** to migrate the -database, simply run: -:: - - ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf upgrade - -If you have previously run a version of **ironic-inspector** earlier than -2.2.0, the safest thing is to delete the existing SQLite database and run -``upgrade`` as shown above. However, if you want to save the existing -database, to ensure your database will work with the migrations, you'll need to -run an extra step before upgrading the database. You only need to do this the -first time running version 2.2.0 or later. - -If you are upgrading from **ironic-inspector** version 2.1.0 or lower: -:: - - ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf stamp --revision 578f84f38d - ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf upgrade - -If you are upgrading from a git master install of the **ironic-inspector** -after :ref:`rules ` were introduced: -:: - - ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf stamp --revision d588418040d - ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf upgrade - -Other available commands can be discovered by running:: - - ironic-inspector-dbsync --help - -Running ------- - -:: - - ironic-inspector --config-file /etc/ironic-inspector/inspector.conf - -A good starting point for writing your own *systemd* unit should be `one used -in Fedora `_ -(note the usage of the old name). diff --git a/doc/source/user/http-api.rst b/doc/source/user/http-api.rst deleted file mode 100644 index 4a99f2c..0000000 --- a/doc/source/user/http-api.rst +++ /dev/null @@ -1,392 +0,0 @@ -HTTP API -------- - -.. _http_api: - -By default **ironic-inspector** listens on ``0.0.0.0:5050``; the port -can be changed in the configuration. The protocol is JSON over HTTP. - -Start Introspection -~~~~~~~~~~~~~~~~~~~ - -``POST /v1/introspection/`` initiate hardware introspection for node -````. All power management configuration for this node needs to be -done prior to calling the endpoint. - -Requires X-Auth-Token header with Keystone token for authentication. - -Response: - -* 202 - accepted introspection request -* 400 - bad request -* 401, 403 - missing or invalid authentication -* 404 - node cannot be found - -Get Introspection Status -~~~~~~~~~~~~~~~~~~~~~~~~ - -``GET /v1/introspection/`` get hardware introspection status. - -Requires X-Auth-Token header with Keystone token for authentication. - -Response: - -* 200 - OK -* 400 - bad request -* 401, 403 - missing or invalid authentication -* 404 - node cannot be found - -Response body: JSON dictionary with keys: - -* ``finished`` (boolean) whether introspection is finished - (``true`` on introspection completion or if it ends because of an error) -* ``state`` state of the introspection -* ``error`` error string or ``null``; ``Canceled by operator`` in - case introspection was aborted -* ``uuid`` node UUID -* ``started_at`` a UTC ISO8601 timestamp -* ``finished_at`` a UTC ISO8601 timestamp or ``null`` -* ``links`` containing a self URL - -Get All Introspection Statuses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``GET /v1/introspection`` get all hardware introspection statuses. - -Requires X-Auth-Token header with Keystone token for authentication.
- -The returned status list is sorted by the ``started_at, uuid`` attribute pair, -newer items first, and is paginated with these query string fields: - -* ``marker`` the UUID of the last node returned previously -* ``limit`` the maximum number of items to return (both the default and the - maximum value are ``CONF.api_max_limit``) - -Response: - -* 200 - OK -* 400 - bad request -* 401, 403 - missing or invalid authentication - -Response body: a JSON object containing a list of status objects:: - - { - 'introspection': [ - { - 'finished': false, - 'state': 'waiting', - 'error': null, - ... - }, - ... - ] - } - -Each status object contains these keys: - -* ``finished`` (boolean) whether introspection is finished - (``true`` on introspection completion or if it ends because of an error) -* ``state`` state of the introspection -* ``error`` error string or ``null``; ``Canceled by operator`` in - case introspection was aborted -* ``uuid`` node UUID -* ``started_at`` a UTC ISO8601 timestamp -* ``finished_at`` a UTC ISO8601 timestamp or ``null`` - - -Abort Running Introspection -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``POST /v1/introspection//abort`` abort running introspection. - -Requires X-Auth-Token header with Keystone token for authentication. - -Response: - -* 202 - accepted -* 400 - bad request -* 401, 403 - missing or invalid authentication -* 404 - node cannot be found -* 409 - inspector has locked this node for processing - - -Get Introspection Data -~~~~~~~~~~~~~~~~~~~~~~ - -``GET /v1/introspection//data`` get stored data from successful -introspection. - -Requires X-Auth-Token header with Keystone token for authentication. - -Response: - -* 200 - OK -* 400 - bad request -* 401, 403 - missing or invalid authentication -* 404 - data cannot be found or data storage not configured - -Response body: JSON dictionary with introspection data - -.. note:: - We do not provide any backward compatibility guarantees regarding the - format and contents of the stored data. Notably, it depends on the ramdisk - used and plugins enabled both in the ramdisk and in inspector itself. - -Reapply introspection on stored data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``POST /v1/introspection//data/unprocessed`` to trigger -introspection on stored unprocessed data. No data is allowed to be -sent along with the request. - -Requires X-Auth-Token header with Keystone token for authentication. -Requires the Swift store to be enabled in the ``processing`` section of the -configuration file. - -Response: - -* 202 - accepted -* 400 - bad request or store not configured -* 401, 403 - missing or invalid authentication -* 404 - node not found for Node ID -* 409 - inspector locked node for processing - -Introspection Rules -~~~~~~~~~~~~~~~~~~~ - -See :ref:`rules ` for details. - -All these API endpoints require X-Auth-Token header with Keystone token for -authentication. - -* ``POST /v1/rules`` create a new introspection rule. - - Request body: JSON dictionary with keys: - - * ``conditions`` rule conditions, see :ref:`rules ` - * ``actions`` rule actions, see :ref:`rules ` - * ``description`` (optional) human-readable description - * ``uuid`` (optional) rule UUID, autogenerated if missing - - Response - - * 200 - OK for API version < 1.6 - * 201 - OK for API version 1.6 and higher - * 400 - bad request - - Response body: JSON dictionary with introspection rule representation (the - same as above with UUID filled in). - -* ``GET /v1/rules`` list all introspection rules. - - Response - - * 200 - OK - - Response body: JSON dictionary with key ``rules`` - list of short rule - representations.
A short rule representation is a JSON dictionary with keys: - - * ``uuid`` rule UUID - * ``description`` human-readable description - * ``links`` list of HTTP links, use one with ``rel=self`` to get the full - rule details - -* ``DELETE /v1/rules`` delete all introspection rules. - - Response - - * 204 - OK - -* ``GET /v1/rules/`` get one introspection rule by its ````. - - Response - - * 200 - OK - * 404 - not found - - Response body: JSON dictionary with introspection rule representation - (see ``POST /v1/rules`` above). - -* ``DELETE /v1/rules/`` delete one introspection rule by its ````. - - Response - - * 204 - OK - * 404 - not found - -Ramdisk Callback -~~~~~~~~~~~~~~~~ - -.. _ramdisk_callback: - -``POST /v1/continue`` internal endpoint for the ramdisk to post back -discovered data. Should not be used for anything other than implementing -the ramdisk. Request body: JSON dictionary with at least these keys: - -* ``inventory`` full `hardware inventory`_ from the ironic-python-agent with at - least the following keys: - - * ``memory`` memory information containing at least key ``physical_mb`` - - physical memory size as reported by dmidecode, - - * ``cpu`` CPU information containing at least keys ``count`` (CPU count) and - ``architecture`` (CPU architecture, e.g. ``x86_64``), - - * ``bmc_address`` IP address of the node's BMC, - - * ``interfaces`` list of dictionaries with the following keys: - - * ``name`` interface name, - - * ``ipv4_address`` IPv4 address of the interface, - - * ``mac_address`` MAC (physical) address of the interface. - - * ``client_id`` InfiniBand Client-ID; for Ethernet it is None. - - * ``disks`` list of disk block devices containing at least ``name`` and - ``size`` (in bytes) keys. In case ``disks`` is not provided, - **ironic-inspector** assumes that this is a diskless node. - -* ``root_disk`` default deployment root disk as calculated by the - ironic-python-agent algorithm. - - .. note:: - The **ironic-inspector** default plugin ``root_disk_selection`` may change - ``root_disk`` based on root device hints if the node specifies hints via - the ``root_device`` key in its properties. See `Specifying the disk for deployment - root device hints`_ for more details. - -* ``boot_interface`` MAC address of the NIC that the machine PXE booted from - either in standard format ``11:22:33:44:55:66`` or in *PXELinux* ``BOOTIF`` - format ``01-11-22-33-44-55-66``. Strictly speaking, this key is optional, - but some features will not work as expected if it is not provided. - -Optionally the following keys might be provided: - -* ``error`` an error that happened during the ramdisk run, interpreted by the - ``ramdisk_error`` plugin. - -* ``logs`` base64-encoded logs from the ramdisk. - -.. note:: - This list highly depends on the enabled plugins; provided above are the - expected keys for the default set of plugins. See - :ref:`plugins ` for details. - -.. note:: - This endpoint is not expected to be versioned, though versioning will work - on it. - -Response: - -* 200 - OK -* 400 - bad request -* 403 - node is not on introspection -* 404 - node cannot be found or multiple nodes found - -Response body: JSON dictionary with ``uuid`` key. - -.. _hardware inventory: http://docs.openstack.org/developer/ironic-python-agent/#hardware-inventory -..
_Specifying the disk for deployment root device hints: - http://docs.openstack.org/project-install-guide/baremetal/draft/advanced.html#specifying-the-disk-for-deployment-root-device-hints - -Error Response -~~~~~~~~~~~~~~ - -If an error happens during request processing, **Ironic Inspector** returns -a response with an appropriate HTTP code set, e.g. 400 for bad request or -404 when something was not found (usually a node in the cache or a node in ironic). -The following JSON body is returned:: - - { - "error": { - "message": "Full error message" - } - } - -This body may be extended in the future to include details that are more error -specific. - -API Versioning -~~~~~~~~~~~~~~ - -The API supports optional API versioning. You can query for the minimum and -maximum API versions supported by the server. You can also declare a required API -version in your requests, so that the server rejects requests of an unsupported -version. - -.. note:: - Versioning was introduced in **Ironic Inspector 2.1.0**. - -All versions must be supplied as a string in the form ``X.Y``, where ``X`` is the -major version (always ``1`` for now) and ``Y`` is the minor version. - -* If the ``X-OpenStack-Ironic-Inspector-API-Version`` header is sent with a request, - the server will check if it supports this version. HTTP error 406 will be - returned for an unsupported API version. - -* All HTTP responses contain - ``X-OpenStack-Ironic-Inspector-API-Minimum-Version`` and - ``X-OpenStack-Ironic-Inspector-API-Maximum-Version`` headers with minimum - and maximum API versions supported by the server. - - .. note:: - The maximum version is the API version the server uses by default. - - -API Discovery -~~~~~~~~~~~~~ - -The API supports API discovery. You can query different parts of the API to -discover what other endpoints are available. - -* ``GET /`` List API Versions - - Response: - - * 200 - OK - - Response body: JSON dictionary containing a list of ``versions``, each - version contains: - - * ``status`` Either CURRENT or SUPPORTED - * ``id`` The version identifier - * ``links`` A list of links to this version endpoint containing: - - * ``href`` The URL - * ``rel`` The relationship between the version and the href - -* ``GET /v1`` List API v1 resources - - Response: - - * 200 - OK - - Response body: JSON dictionary containing a list of ``resources``, each - resource contains: - - * ``name`` The name of this resource - * ``links`` A list of links to this resource, containing: - - * ``href`` The URL - * ``rel`` The relationship between the resource and the href - -Version History -^^^^^^^^^^^^^^^ - -* **1.0** version of the API at the moment of introducing versioning. -* **1.1** adds an endpoint to retrieve stored introspection data. -* **1.2** adds endpoints for manipulating introspection rules. -* **1.3** adds an endpoint for canceling a running introspection. -* **1.4** adds an endpoint for reapplying the introspection over stored data. -* **1.5** adds support for Ironic node names. -* **1.6** the endpoint for rule creation returns 201 instead of 200 on success. -* **1.7** adds UUID, started_at, finished_at to the introspection status API. -* **1.8** adds support for listing all introspection statuses. -* **1.9** deactivates setting IPMI credentials; if IPMI credentials - are requested, the API returns an HTTP 400 response. -* **1.10** adds the node state to the GET /v1/introspection/ and - GET /v1/introspection API response data. -* **1.11** adds the ``invert`` and ``multiple`` fields to the rules response data. -* **1.12** this version indicates that support for setting IPMI credentials - was completely removed from the API (all versions).
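To make the versioning and discovery behaviour described above more concrete, here is a minimal Python sketch (an illustration, not part of the original documentation) that negotiates an API version and lists introspection statuses. It uses the third-party ``requests`` library; the endpoint URL and token are placeholders you would substitute for your own deployment::

    import requests

    INSPECTOR_URL = 'http://127.0.0.1:5050'   # hypothetical endpoint
    HEADERS = {
        'X-Auth-Token': 'example-keystone-token',  # placeholder token
        # Declare the API version we need; the server replies with
        # HTTP 406 if it does not support it.
        'X-OpenStack-Ironic-Inspector-API-Version': '1.8',
    }

    # Discover the supported API versions first.
    resp = requests.get(INSPECTOR_URL + '/', headers=HEADERS)
    for version in resp.json()['versions']:
        print(version['id'], version['status'])

    # List all introspection statuses (requires API version 1.8 or newer).
    resp = requests.get(INSPECTOR_URL + '/v1/introspection', headers=HEADERS)
    resp.raise_for_status()
    print('server default API version:',
          resp.headers['X-OpenStack-Ironic-Inspector-API-Maximum-Version'])
    for status in resp.json()['introspection']:
        print(status['uuid'], status['state'], status['finished'])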
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst deleted file mode 100644 index 260076b..0000000 --- a/doc/source/user/index.rst +++ /dev/null @@ -1,34 +0,0 @@ -User Guide -========== - -How Ironic Inspector Works --------------------------- - -.. toctree:: - :maxdepth: 2 - - workflow - -How to use Ironic Inspector ---------------------------- - -.. toctree:: - :maxdepth: 2 - - usage - -HTTP API Reference ------------------- - -.. toctree:: - :maxdepth: 2 - - http-api - -Troubleshooting ---------------- - -.. toctree:: - :maxdepth: 2 - - troubleshooting diff --git a/doc/source/user/troubleshooting.rst b/doc/source/user/troubleshooting.rst deleted file mode 100644 index 337564b..0000000 --- a/doc/source/user/troubleshooting.rst +++ /dev/null @@ -1,149 +0,0 @@ -Troubleshooting --------------- - -Errors when starting introspection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* *Invalid provision state "available"* - - In the Kilo release, with *python-ironicclient* 0.5.0 or newer, Ironic defaults to - reporting provision state ``AVAILABLE`` for newly enrolled nodes. - **ironic-inspector** will refuse to conduct introspection in this state, as - such nodes are supposed to be used by Nova for scheduling. See :ref:`node - states ` for instructions on how to put nodes into the correct - state. - -Introspection times out -~~~~~~~~~~~~~~~~~~~~~~~ - -There are three main reasons why introspection can time out -(the timeout defaults to 60 minutes and can be altered by the ``timeout`` configuration option): - -#. A fatal failure in the processing chain before the node was found in the local cache. - See `Troubleshooting data processing`_ for the hints. - -#. A failure to load the ramdisk on the target node. See `Troubleshooting - PXE boot`_ for the hints. - -#. A failure during the ramdisk run. See `Troubleshooting ramdisk run`_ for the - hints. - -Troubleshooting data processing -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In this case the **ironic-inspector** logs should give a good idea of what went wrong. -For example, for RDO or Fedora the following command will output the full log:: - - sudo journalctl -u openstack-ironic-inspector - -(use ``openstack-ironic-discoverd`` for version < 2.0.0). - -.. note:: - The service name and specific command might be different for other Linux - distributions (and for old versions of **ironic-inspector**). - -If the ``ramdisk_error`` plugin is enabled and the ``ramdisk_logs_dir`` configuration -option is set, **ironic-inspector** will store logs received from the ramdisk -to the ``ramdisk_logs_dir`` directory. This depends, however, on the ramdisk -implementation. - -Troubleshooting PXE boot -^^^^^^^^^^^^^^^^^^^^^^^^ - -PXE booting most often becomes a problem for bare metal environments with -several physical networks. If the hardware vendor provides a remote console -(e.g. iDRAC for Dell), use it to connect to the machine and see what is going -on. You may need to restart introspection. - -Another source of information is the DHCP and TFTP server logs. Their location -depends on how the servers were installed and run. For RDO or Fedora use:: - - $ sudo journalctl -u openstack-ironic-inspector-dnsmasq - -(use ``openstack-ironic-discoverd-dnsmasq`` for version < 2.0.0). - -The last resort is the ``tcpdump`` utility. Use something like -:: - - $ sudo tcpdump -i any port 67 or port 68 or port 69 - -to watch both DHCP and TFTP traffic going through your machine. Replace -``any`` with a specific network interface to check that DHCP and TFTP -requests really reach it.
- -If you see the node not attempting PXE boot, or attempting PXE boot on the wrong -network, reboot the machine into the BIOS settings and make sure that only one -relevant NIC is allowed to PXE boot. - -If you see the node attempting PXE boot using the correct NIC but failing, make -sure that: - -#. the network switch configuration does not prevent PXE boot requests from - propagating, - -#. there are no additional firewall rules preventing access to port 67 on the - machine where *ironic-inspector* and its DHCP server are installed. - -If you see the node receiving a DHCP address and then failing to get the kernel and/or -ramdisk or to boot them, make sure that: - -#. the TFTP server is running and accessible (use the ``tftp`` utility to verify), - -#. no firewall rules prevent access to the TFTP port, - -#. the DHCP server is correctly set to point to the TFTP server, - -#. ``pxelinux.cfg/default`` within the TFTP root contains a correct reference to the - kernel and ramdisk. - -.. note:: - If using iPXE instead of PXE, check the HTTP server logs and the iPXE - configuration instead. - -Troubleshooting ramdisk run -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -First, check if the ramdisk logs were stored locally as described in the -`Troubleshooting data processing`_ section. If not, ensure that the ramdisk -actually booted as described in the `Troubleshooting PXE boot`_ section. - -Finally, you can try connecting to the IPA ramdisk. If you have any remote -console access to the machine, you can check the logs as they appear on the -screen. Otherwise, you can rebuild the IPA image with your SSH key to be able -to log into it. Use the `dynamic-login`_ or `devuser`_ element for a DIB-based -build or put an authorized_keys file in ``/usr/share/oem/`` for a CoreOS-based -one. - -.. _devuser: http://docs.openstack.org/developer/diskimage-builder/elements/devuser/README.html -.. _dynamic-login: http://docs.openstack.org/developer/diskimage-builder/elements/dynamic-login/README.html - -Troubleshooting DNS issues on Ubuntu -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. _ubuntu-dns: - -Ubuntu uses local DNS caching, so it tries localhost for DNS results first -before calling out to an external DNS server. When DNSmasq is installed and -configured for use with ironic-inspector, it can cause problems by interfering -with the local DNS cache. To fix this issue, ensure that ``/etc/resolv.conf`` -points to your external DNS servers and not to ``127.0.0.1``. - -On Ubuntu 14.04 this can be done by editing your -``/etc/resolvconf/resolv.conf.d/head`` and adding your nameservers there. -This will ensure they will come up first when ``/etc/resolv.conf`` -is regenerated. - -Running Inspector in a VirtualBox environment -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By default VirtualBox does not expose a DMI table to the guest. This prevents -ironic-inspector from being able to discover the properties of a node. In -order to run ironic-inspector on a VirtualBox guest, the host must be configured -to expose DMI data inside the guest. To do this, run the following command on -the VirtualBox host:: - - VBoxManage setextradata {NodeName} "VBoxInternal/Devices/pcbios/0/Config/DmiExposeMemoryTable" 1 - -.. note:: - Replace `{NodeName}` with the name of the guest you wish to expose the DMI - table on. This command will need to be run once per host to enable this - functionality. diff --git a/doc/source/user/usage.rst b/doc/source/user/usage.rst deleted file mode 100644 index 7cccc23..0000000 --- a/doc/source/user/usage.rst +++ /dev/null @@ -1,395 +0,0 @@ -Usage ------ - -..
_usage_guide: - -Refer to :ref:`api ` for information on the HTTP API. -Refer to the `client documentation`_ for information on how to use the CLI and -the Python library. - -.. _client documentation: http://docs.openstack.org/developer/python-ironic-inspector-client - -Using from Ironic API -~~~~~~~~~~~~~~~~~~~~~ - -Ironic Kilo introduced support for hardware introspection under the name of -"inspection". **ironic-inspector** introspection is supported for some generic -drivers; please refer to the `Ironic inspection documentation`_ for details. - -.. _Ironic inspection documentation: http://docs.openstack.org/developer/ironic/deploy/inspection.html - -Node States -~~~~~~~~~~~ - -.. _node_states: - -* The nodes should be moved to the ``MANAGEABLE`` provision state before - introspection (requires *python-ironicclient* version 0.5.0 or newer):: - - ironic node-set-provision-state manage - -* After successful introspection, and before deploying, nodes should be made - available to Nova by moving them to the ``AVAILABLE`` state:: - - ironic node-set-provision-state provide - - .. note:: - Due to how Nova interacts with the Ironic driver, you should wait about 1 minute - after issuing this command before Nova becomes aware of the available nodes. - Use the ``nova hypervisor-stats`` command output to check it. - -Introspection Rules -~~~~~~~~~~~~~~~~~~~ - -.. _introspection_rules: - -Inspector supports a simple JSON-based DSL to define rules to run during -introspection. Inspector provides an API to manage such rules, and will run -them automatically after running all processing hooks. - -A rule consists of conditions to check and actions to run. If conditions -evaluate to true on the introspection data, then actions are run on a node. - -Available conditions and actions are defined by plugins, and can be extended; -see :ref:`contributing_link` for details. See :ref:`api ` for -specific calls to define introspection rules. - -Conditions -^^^^^^^^^^ - -A condition is represented by an object with fields: - -``op`` the type of comparison operation, default available operators include: - -* ``eq``, ``le``, ``ge``, ``ne``, ``lt``, ``gt`` - basic comparison operators; - -* ``in-net`` - checks that an IP address is in a given network; - -* ``matches`` - requires a full match against a given regular expression; - -* ``contains`` - requires a value to contain a given regular expression; - -* ``is-empty`` - checks that the field is an empty string, list, dict or - None value. - -``field`` a `JSON path `_ to the field -in the introspection data to use in comparison. - -Starting with the Mitaka release, you can also apply conditions to ironic node -fields. Prefix the field with a scheme (``data://`` or ``node://``) to distinguish -between values from the introspection data and from the node. Both schemes use JSON path:: - - {"field": "node://property.path", "op": "eq", "value": "val"} - {"field": "data://introspection.path", "op": "eq", "value": "val"} - -If the scheme (node or data) is missing, the condition is applied to the -introspection data. - -``invert`` boolean value, whether to invert the result of the comparison. - -``multiple`` how to treat situations where the ``field`` query returns multiple -results (e.g. the field contains a list), available options are: - -* ``any`` (the default) require any to match, -* ``all`` require all to match, -* ``first`` require the first to match. - -All other fields are passed to the condition plugin, e.g. numeric comparison -operations require a ``value`` field to compare against.
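As a concrete illustration of the condition syntax, the minimal Python sketch below (not taken from the original documentation) builds a rule combining several of the operators above and registers it via the ``POST /v1/rules`` call described in :ref:`api `. The endpoint URL, token and field paths are illustrative assumptions; the exact paths depend on the data your ramdisk reports::

    import json

    import requests

    INSPECTOR_URL = 'http://127.0.0.1:5050'   # hypothetical endpoint
    HEADERS = {
        'X-Auth-Token': 'example-keystone-token',  # placeholder token
        'Content-Type': 'application/json',
    }

    rule = {
        'description': 'Example rule demonstrating condition fields',
        'conditions': [
            # Plain numeric comparison against introspection data
            # (the data:// scheme is the default and could be omitted).
            {'field': 'data://inventory.memory.physical_mb',
             'op': 'ge', 'value': 4096},
            # 'invert' negates the result: ipmi_address must NOT be empty.
            {'field': 'node://driver_info.ipmi_address',
             'op': 'is-empty', 'invert': True},
            # 'multiple' controls matching when the path yields a list;
            # the interface path here is a hypothetical example.
            {'field': 'data://inventory.interfaces[*].ipv4_address',
             'op': 'in-net', 'value': '192.168.0.0/24', 'multiple': 'any'},
        ],
        'actions': [
            {'action': 'set-capability', 'name': 'example', 'value': 'true'},
        ],
    }

    resp = requests.post(INSPECTOR_URL + '/v1/rules',
                         headers=HEADERS, data=json.dumps(rule))
    resp.raise_for_status()
    print('created rule', resp.json()['uuid'])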
- -Actions -^^^^^^^ - -An action is represented by an object with fields: - -``action`` type of action. Possible values are defined by plugins. - -All other fields are passed to the action plugin. - -Default available actions include: - -* ``fail`` fail introspection. Requires a ``message`` parameter for the failure - message. - -* ``set-attribute`` sets an attribute on an Ironic node. Requires a ``path`` - field, which is the path to the attribute as used by ironic (e.g. - ``/properties/something``), and a ``value`` to set. - -* ``set-capability`` sets a capability on an Ironic node. Requires ``name`` - and ``value`` fields, which are the name and the value for a new capability - respectively. An existing value for this same capability is replaced. - -* ``extend-attribute`` the same as ``set-attribute``, but treats the existing - value as a list and appends the new value to it. If the optional ``unique`` parameter is - set to ``True``, nothing will be added if the given value is already in the list. - -Starting from the Mitaka release, the ``value`` field in actions supports fetching data -from introspection using `python string formatting notation -`_ :: - - {"action": "set-attribute", "path": "/driver_info/ipmi_address", - "value": "{data[inventory][bmc_address]}"} - -Plugins -~~~~~~~ - -.. _introspection_plugins: - -**ironic-inspector** heavily relies on plugins for data processing. Even the -standard functionality is largely based on plugins. Set the ``processing_hooks`` -option in the configuration file to change the set of plugins to be run on -introspection data. Note that order does matter in this option, especially -for hooks that have dependencies on other hooks. - -These are plugins that are enabled by default and should not be disabled, -unless you understand what you're doing: - -``scheduler`` - validates and updates basic hardware scheduling properties: CPU number and - architecture, memory and disk size. - - .. note:: - - Diskless nodes have the disk size property ``local_gb == 0``. Always use - node driver ``root_device`` hints to prevent unexpected HW failures - passing silently. - -``validate_interfaces`` validates network interfaces information. Creates new - ports, optionally deletes ports that were not present in the introspection - data. Also sets the ``pxe_enabled`` flag for the PXE-booting port and - unsets it for all the other ports to avoid **nova** picking a random port - to boot the node. - -The following plugins are enabled by default, but can be disabled if not -needed: - -``ramdisk_error`` - reports an error if the ``error`` field is set by the ramdisk; also optionally - stores logs from the ``logs`` field, see :ref:`api ` for details. -``capabilities`` - detects node capabilities: CPU, boot mode, etc. See `Capabilities - Detection`_ for more details. -``pci_devices`` - gathers the list of all PCI devices returned by the ramdisk and compares to - those defined in the ``alias`` field(s) from the ``pci_devices`` section of the - configuration file. The recognized PCI devices and their count are then - stored in the node properties. This information can later be used in nova - flavors for node scheduling. - -Here are some plugins that can be additionally enabled: - -``example`` - an example plugin logging its input and output. -``raid_device`` - gathers block devices from the ramdisk and exposes the root device in multiple - runs. -``extra_hardware`` - stores the value of the 'data' key returned by the ramdisk as a JSON - encoded string in a Swift object.
The plugin will also attempt to convert - the data into a format usable by introspection rules. If this succeeds, - the converted data is stored under the 'extra' key. The 'data' key is - then deleted from the introspection data, since without conversion it is assumed - unusable by introspection rules. -``local_link_connection`` - Processes LLDP data returned from inspection, specifically looking for the - port ID and chassis ID; if found, it configures the local link connection - information on the node's Ironic ports with that data. To enable LLDP in the - inventory from IPA, ``ipa-collect-lldp=1`` should be passed as a kernel - parameter to the IPA ramdisk. In order to avoid processing the raw LLDP - data twice, the ``lldp_basic`` plugin should also be installed and run - prior to this plugin. -``lldp_basic`` - Processes LLDP data returned from inspection and parses TLVs from the - Basic Management (802.1AB), 802.1Q, and 802.3 sets and stores the - processed data back into the Ironic Inspector data in Swift. - -Refer to :ref:`contributing_link` for information on how to write your -own plugin. - -Discovery -~~~~~~~~~ - -Starting from Mitaka, **ironic-inspector** is able to register new nodes -in Ironic. - -The existing ``node_not_found_hook`` mechanism handles what happens if -**ironic-inspector** receives inspection data from a node it cannot identify. -This can happen if a node is manually booted without registering it with -Ironic first. - -For discovery, the configuration file option ``node_not_found_hook`` should be -set to load the hook called ``enroll``. This hook will enroll the unidentified -node into Ironic using the ``fake`` driver (the driver is configurable: set -``enroll_node_driver`` in the **ironic-inspector** configuration -file to the Ironic driver you want). - -The ``enroll`` hook will also set the ``ipmi_address`` property on the new -node, if it is available in the received introspection data; -see :ref:`ramdisk_callback `. - -Once the ``enroll`` hook is finished, **ironic-inspector** will process the -introspection data in the same way it would for an identified node. It runs -the processing :ref:`plugins `, and after that it runs -introspection rules, which allow for more customisable node -configuration; see :ref:`rules `.
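As a minimal sketch of enabling discovery (the option names are taken from ``example.conf``; the ``agent_ipmitool`` value is only illustrative, the default being ``fake``), the relevant parts of the **ironic-inspector** configuration file could look like::

    [processing]
    node_not_found_hook = enroll

    [discovery]
    enroll_node_driver = agent_ipmitool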
- -A rule to set a node's Ironic driver to the ``agent_ipmitool`` driver and -populate the required driver_info for that driver would look like:: - - [{ - "description": "Set IPMI driver_info if no credentials", - "actions": [ - {"action": "set-attribute", "path": "driver", "value": "agent_ipmitool"}, - {"action": "set-attribute", "path": "driver_info/ipmi_username", - "value": "username"}, - {"action": "set-attribute", "path": "driver_info/ipmi_password", - "value": "password"} - ], - "conditions": [ - {"op": "is-empty", "field": "node://driver_info.ipmi_password"}, - {"op": "is-empty", "field": "node://driver_info.ipmi_username"} - ] - },{ - "description": "Set deploy info if not already set on node", - "actions": [ - {"action": "set-attribute", "path": "driver_info/deploy_kernel", - "value": ""}, - {"action": "set-attribute", "path": "driver_info/deploy_ramdisk", - "value": ""} - ], - "conditions": [ - {"op": "is-empty", "field": "node://driver_info.deploy_ramdisk"}, - {"op": "is-empty", "field": "node://driver_info.deploy_kernel"} - ] - }] - -All nodes discovered and enrolled via the ``enroll`` hook will contain an -``auto_discovered`` flag in the introspection data. This flag makes it -possible to distinguish between manually enrolled and auto-discovered -nodes in introspection rules using the rule condition ``eq``:: - - { - "description": "Enroll auto-discovered nodes with fake driver", - "actions": [ - {"action": "set-attribute", "path": "driver", "value": "fake"} - ], - "conditions": [ - {"op": "eq", "field": "data://auto_discovered", "value": true} - ] - } - -Reapplying introspection on stored data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To allow correcting mistakes in introspection rules, the API provides -an entry point that triggers introspection over stored data. The -data to use for processing is kept in Swift separately from the data -already processed. Reapplying introspection overwrites processed data -in the store. Updating the introspection data through the endpoint -isn't supported yet. The following preconditions are checked before -reapplying introspection: - -* no data is being sent along with the request -* the Swift store is configured and enabled -* introspection data is stored in Swift for the node UUID -* a node record is kept in the database for the UUID -* introspection is not ongoing for the node UUID - -Should a precondition fail, an immediate response is returned to the -user: - -* ``400`` if the request contains data, or if the Swift store is not - enabled in the configuration -* ``404`` if Ironic doesn't keep track of the node UUID -* ``409`` if an introspection is already ongoing for the node - -If the preconditions are met, a background task is executed to carry -out the processing, and a ``202 Accepted`` response is returned to the -user. The background task performs these steps: - -* preprocessing hooks -* post-processing hooks, storing the result in Swift -* introspection rules - -By design, the following steps are skipped: - -* the ``node_not_found_hook`` -* power operations -* roll-back actions done by hooks - -Limitations: - -* there is currently no way to update the unprocessed data
-* the unprocessed data is never cleaned from the store -* the check for stored data presence is performed in the background; - missing data still results in a ``202`` response - -Capabilities Detection -~~~~~~~~~~~~~~~~~~~~~~ - -Starting with the Newton release, **Ironic Inspector** can optionally discover -several node capabilities. A recent (Newton or newer) IPA image is required -for it to work. - -Boot mode -^^^^^^^^^ - -The current boot mode (BIOS or UEFI) can be detected and recorded as the -``boot_mode`` capability in Ironic. Some drivers change their behaviour to -account for this capability. Set the ``[capabilities]boot_mode`` -configuration option to ``True`` to enable it. - -CPU capabilities -^^^^^^^^^^^^^^^^ - -Several CPU flags are detected by default and recorded as the following -capabilities: - -* ``cpu_aes`` AES instructions. - -* ``cpu_vt`` virtualization support. - -* ``cpu_txt`` TXT support. - -* ``cpu_hugepages`` huge pages (2 MiB) support. - -* ``cpu_hugepages_1g`` huge pages (1 GiB) support. - -It is possible to define your own rules for detecting CPU capabilities. -Set the ``[capabilities]cpu_flags`` configuration option to a mapping between -a CPU flag and a capability, for example:: - - cpu_flags = aes:cpu_aes,svm:cpu_vt,vmx:cpu_vt - -See the default value of this option for a more detailed example. - -InfiniBand support -^^^^^^^^^^^^^^^^^^ -Starting with the Ocata release, **Ironic Inspector** supports detection of -InfiniBand network interfaces. A recent (Ocata or newer) IPA image is required -for that to work. When an InfiniBand network interface is discovered, -**Ironic Inspector** adds a ``client-id`` attribute to the ``extra`` attribute -of the ironic port. **Ironic Inspector** should be configured with -``firewall.ethoib_interfaces`` to indicate the Ethernet over InfiniBand (EoIB) -interfaces which are used for physical access to the DHCP network. -For example, if the **Ironic Inspector** DHCP server is using ``br-inspector`` -and ``br-inspector`` has the EoIB port ``eth0``, -then ``firewall.ethoib_interfaces`` should be set to ``eth0``. -The ``firewall.ethoib_interfaces`` option allows mapping the baremetal GUID to its -EoIB MAC based on the neighs files. This is needed to block DHCP traffic -from the nodes (MACs) which are not part of the introspection. - -The format of the ``/sys/class/net//eth/neighs`` file:: - - # EMAC= IMAC= - # For example: - IMAC=97:fe:80:00:00:00:00:00:00:7c:fe:90:03:00:29:26:52 - qp number=97:fe - lid=80:00:00:00:00:00:00 - GUID=7c:fe:90:03:00:29:26:52 - -Example of content:: - - EMAC=02:00:02:97:00:01 IMAC=97:fe:80:00:00:00:00:00:00:7c:fe:90:03:00:29:26:52 - EMAC=02:00:00:61:00:02 IMAC=61:fe:80:00:00:00:00:00:00:7c:fe:90:03:00:29:24:4f diff --git a/doc/source/user/workflow.rst b/doc/source/user/workflow.rst deleted file mode 100644 index 0cf5b10..0000000 --- a/doc/source/user/workflow.rst +++ /dev/null @@ -1,83 +0,0 @@ -How Ironic Inspector Works -========================== - -Workflow --------- - -The usual hardware introspection flow is as follows: - -* The operator enrolls nodes into Ironic_, e.g. via the ironic CLI. - Power management credentials should be provided to Ironic at this step. - -* Nodes are put in the correct state for introspection as described in - :ref:`node states `. - -* The operator sends nodes on introspection using the **ironic-inspector** API or CLI - (see :ref:`usage `).
- -* On receiving the node UUID, **ironic-inspector**: - - * validates node power credentials, current power and provisioning states, - * allows firewall access to the PXE boot service for the nodes, - * issues a reboot command for the nodes, so that they boot the ramdisk. - -* The ramdisk collects the required information and posts it back to - **ironic-inspector**. - -* On receiving data from the ramdisk, **ironic-inspector**: - - * validates the received data, - * finds the node in the Ironic database using its BMC address (MAC address in - the case of the SSH driver), - * fills missing node properties with the received data and creates missing ports. - - .. note:: - **ironic-inspector** is responsible for creating Ironic ports for some or all - NICs found on the node. **ironic-inspector** is also capable of - deleting ports that should not be present. There are two important - configuration options that affect this behavior: ``add_ports`` and - ``keep_ports`` (please refer to ``example.conf`` for a detailed explanation). - - Default values as of **ironic-inspector** 1.1.0 are ``add_ports=pxe``, - ``keep_ports=all``, which means that only one port will be added, - associated with the NIC the ramdisk PXE booted from. No ports will be deleted. - This setting ensures that deploying on introspected nodes will succeed - despite `Ironic bug 1405131 - `_. - - The Ironic inspection feature requires different settings by default: - ``add_ports=all``, ``keep_ports=present``, which means that ports will be - created for all detected NICs, and all other ports will be deleted. - Refer to the `Ironic inspection documentation`_ for details. - - Ironic inspector can also be configured to not create any ports. This is - done by setting ``add_ports=disabled``. If ``add_ports`` is set to ``disabled``, - the ``keep_ports`` option should also be set to ``all``. This ensures that - no manually added ports are deleted. - -.. _Ironic inspection documentation: http://docs.openstack.org/developer/ironic/deploy/inspection.html - -* A separate API (see :ref:`usage ` and :ref:`api `) can - be used to query introspection results for a given node. - -* Nodes are put in the correct state for deploying as described in - :ref:`node states `. - -Starting the DHCP server and configuring the PXE boot environment are not part -of this package and should be done separately. - -State machine diagram ---------------------- - -.. _state_machine_diagram: - -The diagram below shows the introspection states that the **ironic-inspector** -FSM goes through during node introspection, discovery and reprocessing. -The diagram also shows the events that trigger state transitions. - -.. figure:: ../images/states.svg - :width: 660px - :align: center - :alt: ironic-inspector state machine diagram - -.. _Ironic: https://wiki.openstack.org/wiki/Ironic diff --git a/example.conf b/example.conf deleted file mode 100644 index 822b3a9..0000000 --- a/example.conf +++ /dev/null @@ -1,915 +0,0 @@ -[DEFAULT] - -# -# From ironic_inspector -# - -# IP to listen on. (string value) -#listen_address = 0.0.0.0 - -# Port to listen on. (port value) -# Minimum value: 0 -# Maximum value: 65535 -#listen_port = 5050 - -# Authentication method used on the ironic-inspector API. Either -# "noauth" or "keystone" are currently valid options. "noauth" will -# disable all authentication. (string value) -# Allowed values: keystone, noauth -#auth_strategy = keystone - -# Timeout after which introspection is considered failed, set to 0 to -# disable.
(integer value) -#timeout = 3600 - -# DEPRECATED: For how much time (in seconds) to keep status -# information about nodes after introspection was finished for them. -# Set to 0 (the default) to disable the timeout. (integer value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#node_status_keep_time = 0 - -# Amount of time in seconds, after which repeat clean up of timed out -# nodes and old nodes status information. (integer value) -#clean_up_period = 60 - -# SSL Enabled/Disabled (boolean value) -#use_ssl = false - -# Path to SSL certificate (string value) -#ssl_cert_path = - -# Path to SSL key (string value) -#ssl_key_path = - -# The green thread pool size. (integer value) -# Minimum value: 2 -#max_concurrency = 1000 - -# Delay (in seconds) between two introspections. (integer value) -#introspection_delay = 5 - -# DEPRECATED: Only node with drivers matching this regular expression -# will be affected by introspection_delay setting. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#introspection_delay_drivers = .* - -# Ironic driver_info fields that are equivalent to ipmi_address. (list -# value) -#ipmi_address_fields = ilo_address,drac_host,drac_address,cimc_address - -# Path to the rootwrap configuration file to use for running commands -# as root (string value) -#rootwrap_config = /etc/ironic-inspector/rootwrap.conf - -# Limit the number of elements an API list-call returns (integer -# value) -# Minimum value: 1 -#api_max_limit = 1000 - -# -# From oslo.log -# - -# If set to true, the logging level will be set to DEBUG instead of -# the default INFO level. (boolean value) -# Note: This option can be changed without restarting. -#debug = false - -# The name of a logging configuration file. This file is appended to -# any existing logging configuration files. For details about logging -# configuration files, see the Python logging module documentation. -# Note that when logging configuration files are used then all logging -# configuration is set in the configuration file and other logging -# configuration options are ignored (for example, -# logging_context_format_string). (string value) -# Note: This option can be changed without restarting. -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Defines the format string for %%(asctime)s in log records. Default: -# %(default)s . This option is ignored if log_config_append is set. -# (string value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to send logging output to. If no default -# is set, logging will go to stderr as defined by use_stderr. This -# option is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative log_file paths. -# This option is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# Uses logging handler designed to watch file system. When log file is -# moved or removed this handler will open a new log file with -# specified path instantaneously. It makes sense only if log_file -# option is specified and Linux platform is used. This option is -# ignored if log_config_append is set. (boolean value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and -# will be changed later to honor RFC5424. This option is ignored if -# log_config_append is set. 
(boolean value) -#use_syslog = false - -# Enable journald for logging. If running in a systemd environment you -# may wish to enable journal support. Doing so will use the journal -# native protocol which includes structured metadata in addition to -# log messages.This option is ignored if log_config_append is set. -# (boolean value) -#use_journal = false - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Log output to standard error. This option is ignored if -# log_config_append is set. (boolean value) -#use_stderr = false - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages when context is undefined. -# (string value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Additional data to append to log message when logging level for the -# message is DEBUG. (string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. (string -# value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# Defines the format string for %(user_identity)s that is used in -# logging_context_format_string. (string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# List of package logging levels in logger=LEVEL pairs. This option is -# ignored if log_config_append is set. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO - -# Enables or disables publication of error events. (boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. -# (string value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. -# (string value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Interval, number of seconds, of log rate limiting. (integer value) -#rate_limit_interval = 0 - -# Maximum number of logged messages per rate_limit_interval. (integer -# value) -#rate_limit_burst = 0 - -# Log level name used by rate limiting: CRITICAL, ERROR, INFO, -# WARNING, DEBUG or empty string. Logs with level greater or equal to -# rate_limit_except_level are not filtered. An empty string means that -# all levels are filtered. (string value) -#rate_limit_except_level = CRITICAL - -# Enables or disables fatal status of deprecations. (boolean value) -#fatal_deprecations = false - - -[capabilities] - -# -# From ironic_inspector.plugins.capabilities -# - -# Whether to store the boot mode (BIOS or UEFI). (boolean value) -#boot_mode = false - -# Mapping between a CPU flag and a capability to set if this flag is -# present. 
(dict value) -#cpu_flags = aes:cpu_aes,pdpe1gb:cpu_hugepages_1g,pse:cpu_hugepages,smx:cpu_txt,svm:cpu_vt,vmx:cpu_vt - - -[cors] - -# -# From oslo.middleware.cors -# - -# Indicate whether this resource may be shared with the domain -# received in the requests "origin" header. Format: -# "://[:]", no trailing slash. Example: -# https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user credentials -# (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. Defaults to -# HTTP Simple Headers. (list value) -#expose_headers = - -# Maximum cache age of CORS preflight requests. (integer value) -#max_age = 3600 - -# Indicate which methods can be used during the actual request. (list -# value) -#allow_methods = GET,POST,PUT,HEAD,PATCH,DELETE,OPTIONS - -# Indicate which header field names may be used during the actual -# request. (list value) -#allow_headers = X-Auth-Token,X-OpenStack-Ironic-Inspector-API-Minimum-Version,X-OpenStack-Ironic-Inspector-API-Maximum-Version,X-OpenStack-Ironic-Inspector-API-Version - - -[cors.subdomain] - -# -# From oslo.middleware.cors -# - -# Indicate whether this resource may be shared with the domain -# received in the requests "origin" header. Format: -# "://[:]", no trailing slash. Example: -# https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user credentials -# (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. Defaults to -# HTTP Simple Headers. (list value) -#expose_headers = - -# Maximum cache age of CORS preflight requests. (integer value) -#max_age = 3600 - -# Indicate which methods can be used during the actual request. (list -# value) -#allow_methods = GET,POST,PUT,HEAD,PATCH,DELETE,OPTIONS - -# Indicate which header field names may be used during the actual -# request. (list value) -#allow_headers = X-Auth-Token,X-OpenStack-Ironic-Inspector-API-Minimum-Version,X-OpenStack-Ironic-Inspector-API-Maximum-Version,X-OpenStack-Ironic-Inspector-API-Version - - -[database] - -# -# From oslo.db -# - -# If True, SQLite uses synchronous mode. (boolean value) -#sqlite_synchronous = true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string to use to connect to the database. -# (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = - -# The SQLAlchemy connection string to use to connect to the slave -# database. (string value) -#slave_connection = - -# The SQL mode to be used for MySQL sessions. This option, including -# the default, overrides any server-set SQL mode. To use whatever SQL -# mode is set by the server configuration, set this to no value. -# Example: mysql_sql_mode= (string value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool. 
(integer -# value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool. Setting a -# value of 0 indicates no limit. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = 5 - -# Maximum number of database connection retries during startup. Set to -# -1 to specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a SQL connection. (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with SQLAlchemy. (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = 50 - -# Verbosity of SQL debugging information: 0=None, 100=Everything. -# (integer value) -# Minimum value: 0 -# Maximum value: 100 -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add Python stack traces to SQL as comment strings. (boolean value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = false - -# If set, use this value for pool_timeout with SQLAlchemy. (integer -# value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on connection -# lost. (boolean value) -#use_db_reconnect = false - -# Seconds between retries of a database transaction. (integer value) -#db_retry_interval = 1 - -# If True, increases the interval between retries of a database -# operation up to db_max_retry_interval. (boolean value) -#db_inc_retry_interval = true - -# If db_inc_retry_interval is set, the maximum seconds between retries -# of a database operation. (integer value) -#db_max_retry_interval = 10 - -# Maximum retries in case of connection error or deadlock error before -# error is raised. Set to -1 to specify an infinite retry count. -# (integer value) -#db_max_retries = 20 - - -[discovery] - -# -# From ironic_inspector.plugins.discovery -# - -# The name of the Ironic driver used by the enroll hook when creating -# a new node in Ironic. (string value) -#enroll_node_driver = fake - - -[firewall] - -# -# From ironic_inspector -# - -# Whether to manage firewall rules for PXE port. (boolean value) -#manage_firewall = true - -# Interface on which dnsmasq listens, the default is for VMs. (string -# value) -#dnsmasq_interface = br-ctlplane - -# Amount of time in seconds, after which repeat periodic update of -# firewall. (integer value) -#firewall_update_period = 15 - -# iptables chain name to use. (string value) -#firewall_chain = ironic-inspector - -# List of Ethernet over InfiniBand interfaces on the Inspector host -# which are used for physical access to the DHCP network. Multiple -# interfaces would be attached to a bond or bridge specified in -# dnsmasq_interface. The MACs of the InfiniBand nodes which are not in -# the desired state are going to be blacklisted based on the list of -# neighbor MACs on these interfaces.
(list value) -#ethoib_interfaces = - - -[ironic] - -# -# From ironic_inspector.common.ironic -# - -# Authentication URL (string value) -#auth_url = - -# Method to use for authentication: noauth or keystone. (string value) -# Allowed values: keystone, noauth -#auth_strategy = keystone - -# Authentication type to load (string value) -# Deprecated group/name - [ironic]/auth_plugin -#auth_type = - -# PEM encoded Certificate Authority to use when verifying HTTPs -# connections. (string value) -#cafile = - -# PEM encoded client certificate cert file (string value) -#certfile = - -# Optional domain ID to use with v3 and v2 parameters. It will be used -# for both the user and project domain in v3 and ignored in v2 -# authentication. (string value) -#default_domain_id = - -# Optional domain name to use with v3 API and v2 parameters. It will -# be used for both the user and project domain in v3 and ignored in v2 -# authentication. (string value) -#default_domain_name = - -# Domain ID to scope to (string value) -#domain_id = - -# Domain name to scope to (string value) -#domain_name = - -# Verify HTTPS connections. (boolean value) -#insecure = false - -# Ironic API URL, used to set Ironic API URL when auth_strategy option -# is noauth to work with standalone Ironic without keystone. (string -# value) -#ironic_url = http://localhost:6385/ - -# PEM encoded client certificate key file (string value) -#keyfile = - -# Maximum number of retries in case of conflict error (HTTP 409). -# (integer value) -#max_retries = 30 - -# Ironic endpoint type. (string value) -#os_endpoint_type = internalURL - -# Keystone region used to get Ironic endpoints. (string value) -#os_region = - -# Ironic service type. (string value) -#os_service_type = baremetal - -# User's password (string value) -#password = - -# Domain ID containing project (string value) -#project_domain_id = - -# Domain name containing project (string value) -#project_domain_name = - -# Project ID to scope to (string value) -# Deprecated group/name - [ironic]/tenant_id -#project_id = - -# Project name to scope to (string value) -# Deprecated group/name - [ironic]/tenant_name -#project_name = - -# Interval between retries in case of conflict error (HTTP 409). -# (integer value) -#retry_interval = 2 - -# Tenant ID (string value) -#tenant_id = - -# Tenant Name (string value) -#tenant_name = - -# Timeout value for http requests (integer value) -#timeout = - -# Trust ID (string value) -#trust_id = - -# User's domain id (string value) -#user_domain_id = - -# User's domain name (string value) -#user_domain_name = - -# User id (string value) -#user_id = - -# Username (string value) -# Deprecated group/name - [ironic]/user_name -#username = - - -[keystone_authtoken] - -# -# From keystonemiddleware.auth_token -# - -# Complete "public" Identity API endpoint. This endpoint should not be -# an "admin" endpoint, as it should be accessible by all end users. -# Unauthenticated clients are redirected to this endpoint to -# authenticate. Although this endpoint should ideally be unversioned, -# client support in the wild varies. If you're using a versioned v2 -# endpoint here, then this should *not* be the same endpoint the -# service user utilizes for validating tokens, because normal end -# users may not be able to reach that endpoint. (string value) -#auth_uri = - -# API version of the admin Identity API endpoint. 
(string value) -#auth_version = - -# Do not handle authorization requests within the middleware, but -# delegate the authorization decision to downstream WSGI components. -# (boolean value) -#delay_auth_decision = false - -# Request timeout value for communicating with Identity API server. -# (integer value) -#http_connect_timeout = - -# How many times are we trying to reconnect when communicating with -# Identity API Server. (integer value) -#http_request_max_retries = 3 - -# Request environment key where the Swift cache object is stored. When -# auth_token middleware is deployed with a Swift cache, use this -# option to have the middleware share a caching backend with swift. -# Otherwise, use the ``memcached_servers`` option instead. (string -# value) -#cache = - -# Required if identity server requires client certificate (string -# value) -#certfile = - -# Required if identity server requires client certificate (string -# value) -#keyfile = - -# A PEM encoded Certificate Authority to use when verifying HTTPs -# connections. Defaults to system CAs. (string value) -#cafile = - -# Verify HTTPS connections. (boolean value) -#insecure = false - -# The region in which the identity server can be found. (string value) -#region_name = - -# DEPRECATED: Directory used to cache files related to PKI tokens. -# This option has been deprecated in the Ocata release and will be -# removed in the P release. (string value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#signing_dir = - -# Optionally specify a list of memcached server(s) to use for caching. -# If left undefined, tokens will instead be cached in-process. (list -# value) -# Deprecated group/name - [keystone_authtoken]/memcache_servers -#memcached_servers = - -# In order to prevent excessive effort spent validating tokens, the -# middleware caches previously-seen tokens for a configurable duration -# (in seconds). Set to -1 to disable caching completely. (integer -# value) -#token_cache_time = 300 - -# DEPRECATED: Determines the frequency at which the list of revoked -# tokens is retrieved from the Identity service (in seconds). A high -# number of revocation events combined with a low cache duration may -# significantly reduce performance. Only valid for PKI tokens. This -# option has been deprecated in the Ocata release and will be removed -# in the P release. (integer value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#revocation_cache_time = 10 - -# (Optional) If defined, indicate whether token data should be -# authenticated or authenticated and encrypted. If MAC, token data is -# authenticated (with HMAC) in the cache. If ENCRYPT, token data is -# encrypted and authenticated in the cache. If the value is not one of -# these options or empty, auth_token will raise an exception on -# initialization. (string value) -# Allowed values: None, MAC, ENCRYPT -#memcache_security_strategy = None - -# (Optional, mandatory if memcache_security_strategy is defined) This -# string is used for key derivation. (string value) -#memcache_secret_key = - -# (Optional) Number of seconds memcached server is considered dead -# before it is tried again. (integer value) -#memcache_pool_dead_retry = 300 - -# (Optional) Maximum total number of open connections to every -# memcached server. 
(integer value) -#memcache_pool_maxsize = 10 - -# (Optional) Socket timeout in seconds for communicating with a -# memcached server. (integer value) -#memcache_pool_socket_timeout = 3 - -# (Optional) Number of seconds a connection to memcached is held -# unused in the pool before it is closed. (integer value) -#memcache_pool_unused_timeout = 60 - -# (Optional) Number of seconds that an operation will wait to get a -# memcached client connection from the pool. (integer value) -#memcache_pool_conn_get_timeout = 10 - -# (Optional) Use the advanced (eventlet safe) memcached client pool. -# The advanced pool will only work under python 2.x. (boolean value) -#memcache_use_advanced_pool = false - -# (Optional) Indicate whether to set the X-Service-Catalog header. If -# False, middleware will not ask for service catalog on token -# validation and will not set the X-Service-Catalog header. (boolean -# value) -#include_service_catalog = true - -# Used to control the use and type of token binding. Can be set to: -# "disabled" to not check token binding. "permissive" (default) to -# validate binding information if the bind type is of a form known to -# the server and ignore it if not. "strict" like "permissive" but if -# the bind type is unknown the token will be rejected. "required" any -# form of token binding is needed to be allowed. Finally the name of a -# binding method that must be present in tokens. (string value) -#enforce_token_bind = permissive - -# DEPRECATED: If true, the revocation list will be checked for cached -# tokens. This requires that PKI tokens are configured on the identity -# server. (boolean value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#check_revocations_for_cached = false - -# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may -# be a single algorithm or multiple. The algorithms are those -# supported by Python standard hashlib.new(). The hashes will be tried -# in the order given, so put the preferred one first for performance. -# The result of the first hash will be stored in the cache. This will -# typically be set to multiple values only while migrating from a less -# secure algorithm to a more secure one. Once all the old tokens are -# expired this option should be set to a single value for better -# performance. (list value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#hash_algorithms = md5 - -# A choice of roles that must be present in a service token. Service -# tokens are allowed to request that an expired token can be used and -# so this check should tightly control that only actual services -# should be sending this token. Roles here are applied as an ANY check -# so any role in this list must be present. For backwards -# compatibility reasons this currently only affects the allow_expired -# check. (list value) -#service_token_roles = service - -# For backwards compatibility reasons we must let valid service tokens -# pass that don't pass the service_token_roles check as valid. Setting -# this true will become the default in a future release and should be -# enabled if possible. 
(boolean value) -#service_token_roles_required = false - -# Authentication type to load (string value) -# Deprecated group/name - [keystone_authtoken]/auth_plugin -#auth_type = - -# Config Section from which to load plugin specific options (string -# value) -#auth_section = - - -[pci_devices] - -# -# From ironic_inspector.plugins.pci_devices -# - -# An alias for PCI device identified by 'vendor_id' and 'product_id' -# fields. Format: {"vendor_id": "1234", "product_id": "5678", "name": -# "pci_dev1"} (multi valued) -#alias = - - -[processing] - -# -# From ironic_inspector -# - -# Which MAC addresses to add as ports during introspection. Possible -# values: all (all MAC addresses), active (MAC addresses of NIC with -# IP addresses), pxe (only MAC address of NIC node PXE booted from, -# falls back to "active" if PXE MAC is not supplied by the ramdisk). -# (string value) -# Allowed values: all, active, pxe, disabled -#add_ports = pxe - -# Which ports (already present on a node) to keep after introspection. -# Possible values: all (do not delete anything), present (keep ports -# which MACs were present in introspection data), added (keep only -# MACs that we added during introspection). (string value) -# Allowed values: all, present, added -#keep_ports = all - -# Whether to overwrite existing values in node database. Disable this -# option to make introspection a non-destructive operation. (boolean -# value) -#overwrite_existing = true - -# Comma-separated list of default hooks for processing pipeline. Hook -# 'scheduler' updates the node with the minimum properties required by -# the Nova scheduler. Hook 'validate_interfaces' ensures that valid -# NIC data was provided by the ramdisk. Do not exclude these two -# unless you really know what you're doing. (string value) -#default_processing_hooks = ramdisk_error,root_disk_selection,scheduler,validate_interfaces,capabilities,pci_devices - -# Comma-separated list of enabled hooks for processing pipeline. The -# default for this is $default_processing_hooks, hooks can be added -# before or after the defaults like this: -# "prehook,$default_processing_hooks,posthook". (string value) -#processing_hooks = $default_processing_hooks - -# If set, logs from ramdisk will be stored in this directory. (string -# value) -#ramdisk_logs_dir = - -# Whether to store ramdisk logs even if it did not return an error -# message (dependent upon "ramdisk_logs_dir" option being set). -# (boolean value) -#always_store_ramdisk_logs = false - -# The name of the hook to run when inspector receives inspection -# information from a node it isn't already aware of. This hook is -# ignored by default. (string value) -#node_not_found_hook = - -# Method for storing introspection data. If set to 'none', -# introspection data will not be stored. (string value) -# Allowed values: none, swift -#store_data = none - -# Name of the key to store the location of stored data in the extra -# column of the Ironic database. (string value) -#store_data_location = - -# Whether to leave 1 GiB of disk size untouched for partitioning. Only -# has effect when used with the IPA as a ramdisk, for older ramdisk -# local_gb is calculated on the ramdisk side. (boolean value) -#disk_partitioning_spacing = true - -# DEPRECATED: Whether to log node BMC address with every message -# during processing. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#log_bmc_address = true - -# File name template for storing ramdisk logs. 
The following -# replacements can be used: {uuid} - node UUID or "unknown", {bmc} - -# node BMC address or "unknown", {dt} - current UTC date and time, -# {mac} - PXE booting MAC or "unknown". (string value) -#ramdisk_logs_filename_format = {uuid}_{dt:%Y%m%d-%H%M%S.%f}.tar.gz - -# Whether to power off a node after introspection. (boolean value) -#power_off = true - - -[pxe_filter] - -# -# From ironic_inspector -# - -# PXE boot filter driver to use, such as iptables (string value) -#driver = noop - -# Amount of time in seconds, after which repeat periodic update of the -# filter. (integer value) -# Minimum value: 0 -#sync_period = 15 - - -[swift] - -# -# From ironic_inspector.common.swift -# - -# Authentication URL (string value) -#auth_url = - -# Authentication type to load (string value) -# Deprecated group/name - [swift]/auth_plugin -#auth_type = - -# PEM encoded Certificate Authority to use when verifying HTTPs -# connections. (string value) -#cafile = - -# PEM encoded client certificate cert file (string value) -#certfile = - -# Default Swift container to use when creating objects. (string value) -#container = ironic-inspector - -# Optional domain ID to use with v3 and v2 parameters. It will be used -# for both the user and project domain in v3 and ignored in v2 -# authentication. (string value) -#default_domain_id = - -# Optional domain name to use with v3 API and v2 parameters. It will -# be used for both the user and project domain in v3 and ignored in v2 -# authentication. (string value) -#default_domain_name = - -# Number of seconds that the Swift object will last before being -# deleted. (set to 0 to never delete the object). (integer value) -#delete_after = 0 - -# Domain ID to scope to (string value) -#domain_id = - -# Domain name to scope to (string value) -#domain_name = - -# Verify HTTPS connections. (boolean value) -#insecure = false - -# PEM encoded client certificate key file (string value) -#keyfile = - -# Maximum number of times to retry a Swift request, before failing. -# (integer value) -#max_retries = 2 - -# Swift endpoint type. (string value) -#os_endpoint_type = internalURL - -# Keystone region to get endpoint for. (string value) -#os_region = - -# Swift service type. (string value) -#os_service_type = object-store - -# User's password (string value) -#password = - -# Domain ID containing project (string value) -#project_domain_id = - -# Domain name containing project (string value) -#project_domain_name = - -# Project ID to scope to (string value) -# Deprecated group/name - [swift]/tenant_id -#project_id = - -# Project name to scope to (string value) -# Deprecated group/name - [swift]/tenant_name -#project_name = - -# Tenant ID (string value) -#tenant_id = - -# Tenant Name (string value) -#tenant_name = - -# Timeout value for http requests (integer value) -#timeout = - -# Trust ID (string value) -#trust_id = - -# User's domain id (string value) -#user_domain_id = - -# User's domain name (string value) -#user_domain_name = - -# User id (string value) -#user_id = - -# Username (string value) -# Deprecated group/name - [swift]/user_name -#username = diff --git a/ironic-inspector.8 b/ironic-inspector.8 deleted file mode 100644 index c0f3e93..0000000 --- a/ironic-inspector.8 +++ /dev/null @@ -1,20 +0,0 @@ -.\" Manpage for ironic-inspector. -.TH man 8 "08 Oct 2014" "1.0" "ironic-inspector man page" -.SH NAME -ironic-inspector \- hardware introspection daemon for OpenStack Ironic. 
-.SH SYNOPSIS -ironic-inspector CONFFILE -.SH DESCRIPTION -This command starts the ironic-inspector service, which starts and finishes -hardware discovery and maintains firewall rules for nodes accessing the PXE -boot service (usually dnsmasq). -.SH OPTIONS -The ironic-inspector command does not take any options; however, you should -supply the path to the configuration file. -.SH SEE ALSO -The README page located at https://pypi.python.org/pypi/ironic-inspector -provides information about how to configure and use the service. -.SH BUGS -No known bugs. -.SH AUTHOR -Dmitry Tantsur (divius.inside@gmail.com) diff --git a/ironic_inspector/__init__.py b/ironic_inspector/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ironic_inspector/alembic.ini b/ironic_inspector/alembic.ini deleted file mode 100644 index d1831a6..0000000 --- a/ironic_inspector/alembic.ini +++ /dev/null @@ -1,38 +0,0 @@ -[alembic] -# path to migration scripts -script_location = %(here)s/migrations - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/ironic_inspector/api_tools.py b/ironic_inspector/api_tools.py deleted file mode 100644 index a8aa755..0000000 --- a/ironic_inspector/api_tools.py +++ /dev/null @@ -1,83 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Generic REST API tools.""" - -import flask -from oslo_config import cfg -from oslo_utils import uuidutils -import six - -from ironic_inspector.common.i18n import _ -from ironic_inspector import utils - -CONF = cfg.CONF - - -def raises_coercion_exceptions(fn): - """Convert coercion function exceptions to utils.Error. - - :raises: utils.Error when the coercion function raises an - AssertionError or a ValueError - """ - @six.wraps(fn) - def inner(*args, **kwargs): - try: - ret = fn(*args, **kwargs) - except (AssertionError, ValueError) as exc: - raise utils.Error(_('Bad request: %s') % exc, code=400) - return ret - return inner - - -def request_field(field_name): - """Decorate a function that coerces the specified field. - - :param field_name: name of the field to fetch - :returns: a decorator - """ - def outer(fn): - @six.wraps(fn) - def inner(*args, **kwargs): - default = kwargs.pop('default', None) - field = flask.request.args.get(field_name, default=default) - if field == default: - # field not found or the same as the default, just return - return default - return fn(field, *args, **kwargs) - return inner - return outer - - -@request_field('marker') -@raises_coercion_exceptions -def marker_field(value): - """Fetch the pagination marker field from flask.request.args.
- - :returns: an uuid - """ - assert uuidutils.is_uuid_like(value), _('Marker not UUID-like') - return value - - -@request_field('limit') -@raises_coercion_exceptions -def limit_field(value): - """Fetch the pagination limit field from flask.request.args. - - :returns: the limit - """ - # limit of zero means the default limit - value = int(value) or CONF.api_max_limit - assert value >= 0, _('Limit cannot be negative') - assert value <= CONF.api_max_limit, _('Limit over %s') % CONF.api_max_limit - return value diff --git a/ironic_inspector/cmd/__init__.py b/ironic_inspector/cmd/__init__.py deleted file mode 100644 index 882e5df..0000000 --- a/ironic_inspector/cmd/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -import eventlet # noqa -eventlet.monkey_patch() diff --git a/ironic_inspector/cmd/all.py b/ironic_inspector/cmd/all.py deleted file mode 100644 index 2475e97..0000000 --- a/ironic_inspector/cmd/all.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The Ironic Inspector service.""" - -import sys - -from ironic_inspector.common import service_utils -from ironic_inspector import wsgi_service - - -def main(args=sys.argv[1:]): - # Parse config file and command line options, then start logging - service_utils.prepare_service(args) - - server = wsgi_service.WSGIService() - server.run() - -if __name__ == '__main__': - sys.exit(main()) diff --git a/ironic_inspector/common/__init__.py b/ironic_inspector/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ironic_inspector/common/i18n.py b/ironic_inspector/common/i18n.py deleted file mode 100644 index 7ab0e45..0000000 --- a/ironic_inspector/common/i18n.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2015 NEC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import oslo_i18n - -_translators = oslo_i18n.TranslatorFactory(domain='ironic_inspector') - -# The primary translation function using the well-known name "_" -_ = _translators.primary diff --git a/ironic_inspector/common/ironic.py b/ironic_inspector/common/ironic.py deleted file mode 100644 index f61cf6e..0000000 --- a/ironic_inspector/common/ironic.py +++ /dev/null @@ -1,188 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import socket - -from ironicclient import client -from ironicclient import exceptions as ironic_exc -import netaddr -from oslo_config import cfg - -from ironic_inspector.common.i18n import _ -from ironic_inspector.common import keystone -from ironic_inspector import utils - -CONF = cfg.CONF -LOG = utils.getProcessingLogger(__name__) - -# See http://specs.openstack.org/openstack/ironic-specs/specs/kilo/new-ironic-state-machine.html # noqa -VALID_STATES = {'enroll', 'manageable', 'inspecting', 'inspect failed'} - -# 1.19 is API version, which supports port.pxe_enabled -DEFAULT_IRONIC_API_VERSION = '1.19' - -IRONIC_GROUP = 'ironic' - -IRONIC_OPTS = [ - cfg.StrOpt('os_region', - help=_('Keystone region used to get Ironic endpoints.')), - cfg.StrOpt('auth_strategy', - default='keystone', - choices=('keystone', 'noauth'), - help=_('Method to use for authentication: noauth or ' - 'keystone.')), - cfg.StrOpt('ironic_url', - default='http://localhost:6385/', - help=_('Ironic API URL, used to set Ironic API URL when ' - 'auth_strategy option is noauth to work with standalone ' - 'Ironic without keystone.')), - cfg.StrOpt('os_service_type', - default='baremetal', - help=_('Ironic service type.')), - cfg.StrOpt('os_endpoint_type', - default='internalURL', - help=_('Ironic endpoint type.')), - cfg.IntOpt('retry_interval', - default=2, - help=_('Interval between retries in case of conflict error ' - '(HTTP 409).')), - cfg.IntOpt('max_retries', - default=30, - help=_('Maximum number of retries in case of conflict error ' - '(HTTP 409).')), -] - - -CONF.register_opts(IRONIC_OPTS, group=IRONIC_GROUP) -keystone.register_auth_opts(IRONIC_GROUP) - -IRONIC_SESSION = None - - -class NotFound(utils.Error): - """Node not found in Ironic.""" - - def __init__(self, node_ident, code=404, *args, **kwargs): - msg = _('Node %s was not found in Ironic') % node_ident - super(NotFound, self).__init__(msg, code, *args, **kwargs) - - -def reset_ironic_session(): - """Reset the global session variable. - - Mostly useful for unit tests. 
- """ - global IRONIC_SESSION - IRONIC_SESSION = None - - -def get_ipmi_address(node): - ipmi_fields = ['ipmi_address'] + CONF.ipmi_address_fields - # NOTE(sambetts): IPMI Address is useless to us if bridging is enabled so - # just ignore it and return None - if node.driver_info.get("ipmi_bridging", "no") != "no": - return - for name in ipmi_fields: - value = node.driver_info.get(name) - if not value: - continue - - try: - ip = socket.gethostbyname(value) - except socket.gaierror: - msg = _('Failed to resolve the hostname (%(value)s)' - ' for node %(uuid)s') - raise utils.Error(msg % {'value': value, - 'uuid': node.uuid}, - node_info=node) - - if netaddr.IPAddress(ip).is_loopback(): - LOG.warning('Ignoring loopback BMC address %s', ip, - node_info=node) - ip = None - - return ip - - -def get_client(token=None, - api_version=DEFAULT_IRONIC_API_VERSION): # pragma: no cover - """Get Ironic client instance.""" - # NOTE: To support standalone ironic without keystone - if CONF.ironic.auth_strategy == 'noauth': - args = {'token': 'noauth', - 'endpoint': CONF.ironic.ironic_url} - else: - global IRONIC_SESSION - if not IRONIC_SESSION: - IRONIC_SESSION = keystone.get_session(IRONIC_GROUP) - if token is None: - args = {'session': IRONIC_SESSION, - 'region_name': CONF.ironic.os_region} - else: - ironic_url = IRONIC_SESSION.get_endpoint( - service_type=CONF.ironic.os_service_type, - endpoint_type=CONF.ironic.os_endpoint_type, - region_name=CONF.ironic.os_region - ) - args = {'token': token, - 'endpoint': ironic_url} - args['os_ironic_api_version'] = api_version - args['max_retries'] = CONF.ironic.max_retries - args['retry_interval'] = CONF.ironic.retry_interval - return client.Client(1, **args) - - -def check_provision_state(node): - state = node.provision_state.lower() - if state not in VALID_STATES: - msg = _('Invalid provision state for introspection: ' - '"%(state)s", valid states are "%(valid)s"') - raise utils.Error(msg % {'state': state, 'valid': list(VALID_STATES)}, - node_info=node) - - -def capabilities_to_dict(caps): - """Convert the Node's capabilities into a dictionary.""" - if not caps: - return {} - return dict([key.split(':', 1) for key in caps.split(',')]) - - -def dict_to_capabilities(caps_dict): - """Convert a dictionary into a string with the capabilities syntax.""" - return ','.join(["%s:%s" % (key, value) - for key, value in caps_dict.items() - if value is not None]) - - -def get_node(node_id, ironic=None, **kwargs): - """Get a node from Ironic. - - :param node_id: node UUID or name. - :param ironic: ironic client instance. - :param kwargs: arguments to pass to Ironic client. - :raises: Error on failure - """ - ironic = ironic if ironic is not None else get_client() - - try: - return ironic.node.get(node_id, **kwargs) - except ironic_exc.NotFound: - raise NotFound(node_id) - except ironic_exc.HttpError as exc: - raise utils.Error(_("Cannot get node %(node)s: %(exc)s") % - {'node': node_id, 'exc': exc}) - - -def list_opts(): - return keystone.add_auth_options(IRONIC_OPTS, IRONIC_GROUP) diff --git a/ironic_inspector/common/keystone.py b/ironic_inspector/common/keystone.py deleted file mode 100644 index 28bf629..0000000 --- a/ironic_inspector/common/keystone.py +++ /dev/null @@ -1,56 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy - -from keystoneauth1 import loading -from oslo_config import cfg - - -CONF = cfg.CONF - - -def register_auth_opts(group): - loading.register_session_conf_options(CONF, group) - loading.register_auth_conf_options(CONF, group) - CONF.set_default('auth_type', default='password', group=group) - - -def get_session(group): - auth = loading.load_auth_from_conf_options(CONF, group) - session = loading.load_session_from_conf_options( - CONF, group, auth=auth) - return session - - -def add_auth_options(options, group): - - def add_options(opts, opts_to_add): - for new_opt in opts_to_add: - for opt in opts: - if opt.name == new_opt.name: - break - else: - opts.append(new_opt) - - opts = copy.deepcopy(options) - opts.insert(0, loading.get_auth_common_conf_options()[0]) - # NOTE(dims): There are a lot of auth plugins, we just generate - # the config options for a few common ones - plugins = ['password', 'v2password', 'v3password'] - for name in plugins: - plugin = loading.get_plugin_loader(name) - add_options(opts, loading.get_auth_plugin_conf_options(plugin)) - add_options(opts, loading.get_session_conf_options()) - opts.sort(key=lambda x: x.name) - return [(group, opts)] diff --git a/ironic_inspector/common/lldp_parsers.py b/ironic_inspector/common/lldp_parsers.py deleted file mode 100644 index b48d222..0000000 --- a/ironic_inspector/common/lldp_parsers.py +++ /dev/null @@ -1,365 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" Names and mapping functions used to map LLDP TLVs to name/value pairs """ - -import binascii - -from construct import core -import netaddr - -from ironic_inspector.common.i18n import _ -from ironic_inspector.common import lldp_tlvs as tlv -from ironic_inspector import utils - -LOG = utils.getProcessingLogger(__name__) - - -# Names used in name/value pair from parsed TLVs -LLDP_CHASSIS_ID_NM = 'switch_chassis_id' -LLDP_PORT_ID_NM = 'switch_port_id' -LLDP_PORT_DESC_NM = 'switch_port_description' -LLDP_SYS_NAME_NM = 'switch_system_name' -LLDP_SYS_DESC_NM = 'switch_system_description' -LLDP_SWITCH_CAP_NM = 'switch_capabilities' -LLDP_CAP_SUPPORT_NM = 'switch_capabilities_support' -LLDP_CAP_ENABLED_NM = 'switch_capabilities_enabled' -LLDP_MGMT_ADDRESSES_NM = 'switch_mgmt_addresses' -LLDP_PORT_VLANID_NM = 'switch_port_untagged_vlan_id' -LLDP_PORT_PROT_NM = 'switch_port_protocol' -LLDP_PORT_PROT_VLAN_ENABLED_NM = 'switch_port_protocol_vlan_enabled' -LLDP_PORT_PROT_VLAN_SUPPORT_NM = 'switch_port_protocol_vlan_support' -LLDP_PORT_PROT_VLANIDS_NM = 'switch_port_protocol_vlan_ids' -LLDP_PORT_VLANS_NM = 'switch_port_vlans' -LLDP_PROTOCOL_IDENTITIES_NM = 'switch_protocol_identities' -LLDP_PORT_MGMT_VLANID_NM = 'switch_port_management_vlan_id' -LLDP_PORT_LINK_AGG_NM = 'switch_port_link_aggregation' -LLDP_PORT_LINK_AGG_ENABLED_NM = 'switch_port_link_aggregation_enabled' -LLDP_PORT_LINK_AGG_SUPPORT_NM = 'switch_port_link_aggregation_support' -LLDP_PORT_LINK_AGG_ID_NM = 'switch_port_link_aggregation_id' -LLDP_PORT_MAC_PHY_NM = 'switch_port_mac_phy_config' -LLDP_PORT_LINK_AUTONEG_ENABLED_NM = 'switch_port_autonegotiation_enabled' -LLDP_PORT_LINK_AUTONEG_SUPPORT_NM = 'switch_port_autonegotiation_support' -LLDP_PORT_CAPABILITIES_NM = 'switch_port_physical_capabilities' -LLDP_PORT_MAU_TYPE_NM = 'switch_port_mau_type' -LLDP_MTU_NM = 'switch_port_mtu' - - -class LLDPParser(object): - """Base class to handle parsing of LLDP TLVs - - Each class that inherits from this base class must provide a parser map. - Parser maps are used to associate a LLDP TLV with a function handler - and arguments necessary to parse the TLV and generate one or more - name/value pairs. Each LLDP TLV maps to a tuple with the following - fields: - - function - handler function to generate name/value pairs - - construct - name of construct definition for TLV - - name - user-friendly name of TLV. For TLVs that generate only one - name/value pair this is the name used - - len_check - boolean indicating if length check should be done on construct - - It's valid to have a function handler of None, this is for TLVs that - are not mapped to a name/value pair(e.g.LLDP_TLV_TTL). - """ - - def __init__(self, node_info, nv=None): - """Create LLDPParser - - :param node_info - node being introspected - :param nv - dictionary of name/value pairs to use - """ - self.nv_dict = nv or {} - self.node_info = node_info - self.parser_map = {} - - def set_value(self, name, value): - """Set name value pair in dictionary - - The value for a name should not be changed if it exists. 
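To make the parser_map contract described in the class docstring above concrete, a hypothetical subclass (not part of this module) would be wired like this:

    class ExampleParser(LLDPParser):
        """Hypothetical subclass illustrating the parser_map contract."""

        def __init__(self, node_info, nv=None):
            super(ExampleParser, self).__init__(node_info, nv)
            self.parser_map = {
                # TLV type -> (handler, construct, name, length check)
                tlv.LLDP_TLV_SYS_NAME:
                    (self.add_single_value, tlv.SysName, 'example_name',
                     False),
                # A handler of None marks the TLV as known but ignored.
                tlv.LLDP_TLV_TTL: (None, None, None, False),
            }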
- """ - self.nv_dict.setdefault(name, value) - - def append_value(self, name, value): - """Add value to a list mapped to name""" - self.nv_dict.setdefault(name, []).append(value) - - def add_single_value(self, struct, name, data): - """Add a single name/value pair the the nv dict""" - self.set_value(name, struct.value) - - def parse_tlv(self, tlv_type, data): - """Parse TLVs from mapping table - - This functions takes the TLV type and the raw data for - this TLV and gets a tuple from the parser_map. The - construct field in the tuple contains the construct lib - definition of the TLV which can be parsed to access - individual fields. Once the TLV is parsed, the handler - function for each TLV will store the individual fields as - name/value pairs in nv_dict. - - If the handler function does not exist, then no name/value pairs - will be added to nv_dict, but since the TLV was handled, - True will be returned. - - :param: tlv_type - type identifier for TLV - :param: data - raw TLV value - :returns: True if TLV in parser_map and data is valid, otherwise False. - """ - - s = self.parser_map.get(tlv_type) - if not s: - return False - - func = s[0] # handler - - if not func: - return True # TLV is handled - - try: - tlv_parser = s[1] - name = s[2] - check_len = s[3] - except KeyError as e: - LOG.warning("Key error in TLV table: %s", e, - node_info=self.node_info) - return False - - # Some constructs require a length validation to ensure the - # proper number of bytes has been provided, for example - # when a BitStruct is used. - if check_len and (tlv_parser.sizeof() != len(data)): - LOG.warning("Invalid data for %(name)s expected len %(expect)d, " - "got %(actual)d", {'name': name, - 'expect': tlv_parser.sizeof(), - 'actual': len(data)}, - node_info=self.node_info) - return False - - # Use the construct parser to parse TLV so that it's - # individual fields can be accessed - try: - struct = tlv_parser.parse(data) - except (core.RangeError, core.FieldError, core.MappingError, - netaddr.AddrFormatError) as e: - LOG.warning("TLV parse error: %s", e, - node_info=self.node_info) - return False - - # Call functions with parsed structure - try: - func(struct, name, data) - except ValueError as e: - LOG.warning("TLV value error: %s", e, - node_info=self.node_info) - return False - - return True - - def add_dot1_link_aggregation(self, struct, name, data): - """Add name/value pairs for TLV Dot1_LinkAggregationId - - This is in base class since it can be used by both dot1 and dot3. - """ - - self.set_value(LLDP_PORT_LINK_AGG_ENABLED_NM, - struct.status.enabled) - self.set_value(LLDP_PORT_LINK_AGG_SUPPORT_NM, - struct.status.supported) - self.set_value(LLDP_PORT_LINK_AGG_ID_NM, struct.portid) - - -class LLDPBasicMgmtParser(LLDPParser): - """Class to handle parsing of 802.1AB Basic Management set - - This class will also handle 802.1Q and 802.3 OUI TLVs. 
- """ - def __init__(self, nv=None): - super(LLDPBasicMgmtParser, self).__init__(nv) - - self.parser_map = { - tlv.LLDP_TLV_CHASSIS_ID: - (self.add_single_value, tlv.ChassisId, - LLDP_CHASSIS_ID_NM, False), - tlv.LLDP_TLV_PORT_ID: - (self.add_single_value, tlv.PortId, LLDP_PORT_ID_NM, False), - tlv.LLDP_TLV_TTL: (None, None, None, False), - tlv.LLDP_TLV_PORT_DESCRIPTION: - (self.add_single_value, tlv.PortDesc, LLDP_PORT_DESC_NM, - False), - tlv.LLDP_TLV_SYS_NAME: - (self.add_single_value, tlv.SysName, LLDP_SYS_NAME_NM, False), - tlv.LLDP_TLV_SYS_DESCRIPTION: - (self.add_single_value, tlv.SysDesc, LLDP_SYS_DESC_NM, False), - tlv.LLDP_TLV_SYS_CAPABILITIES: - (self.add_capabilities, tlv.SysCapabilities, - LLDP_SWITCH_CAP_NM, True), - tlv.LLDP_TLV_MGMT_ADDRESS: - (self.add_mgmt_address, tlv.MgmtAddress, - LLDP_MGMT_ADDRESSES_NM, False), - tlv.LLDP_TLV_ORG_SPECIFIC: - (self.handle_org_specific_tlv, tlv.OrgSpecific, None, False), - tlv.LLDP_TLV_END_LLDPPDU: (None, None, None, False) - } - - def add_mgmt_address(self, struct, name, data): - """Handle LLDP_TLV_MGMT_ADDRESS - - There can be multiple Mgmt Address TLVs, store in list. - """ - self.append_value(name, struct.address) - - def _get_capabilities_list(self, caps): - """Get capabilities from bit map""" - cap_map = [ - (caps.repeater, 'Repeater'), - (caps.bridge, 'Bridge'), - (caps.wlan, 'WLAN'), - (caps.router, 'Router'), - (caps.telephone, 'Telephone'), - (caps.docsis, 'DOCSIS cable device'), - (caps.station, 'Station only'), - (caps.cvlan, 'C-Vlan'), - (caps.svlan, 'S-Vlan'), - (caps.tpmr, 'TPMR')] - - return [cap for (bit, cap) in cap_map if bit] - - def add_capabilities(self, struct, name, data): - """Handle LLDP_TLV_SYS_CAPABILITIES""" - self.set_value(LLDP_CAP_SUPPORT_NM, - self._get_capabilities_list(struct.system)) - self.set_value(LLDP_CAP_ENABLED_NM, - self._get_capabilities_list(struct.enabled)) - - def handle_org_specific_tlv(self, struct, name, data): - """Handle Organizationally Unique ID TLVs - - This class supports 802.1Q and 802.3 OUI TLVs. 
- - See http://www.ieee802.org/1/pages/802.1Q-2014.html, Annex D - and http://standards.ieee.org/about/get/802/802.3.html - """ - oui = binascii.hexlify(struct.oui).decode() - subtype = struct.subtype - oui_data = data[4:] - - if oui == tlv.LLDP_802dot1_OUI: - parser = LLDPdot1Parser(self.node_info, self.nv_dict) - if parser.parse_tlv(subtype, oui_data): - LOG.debug("Handled 802.1 subtype %d", subtype) - else: - LOG.debug("Subtype %d not found for 802.1", subtype) - elif oui == tlv.LLDP_802dot3_OUI: - parser = LLDPdot3Parser(self.node_info, self.nv_dict) - if parser.parse_tlv(subtype, oui_data): - LOG.debug("Handled 802.3 subtype %d", subtype) - else: - LOG.debug("Subtype %d not found for 802.3", subtype) - else: - LOG.warning("Organizationally Unique ID %s not " - "recognized", oui, node_info=self.node_info) - - -class LLDPdot1Parser(LLDPParser): - """Class to handle parsing of 802.1Q TLVs""" - def __init__(self, node_info, nv=None): - super(LLDPdot1Parser, self).__init__(node_info, nv) - - self.parser_map = { - tlv.dot1_PORT_VLANID: - (self.add_single_value, tlv.Dot1_UntaggedVlanId, - LLDP_PORT_VLANID_NM, False), - tlv.dot1_PORT_PROTOCOL_VLANID: - (self.add_dot1_port_protocol_vlan, tlv.Dot1_PortProtocolVlan, - LLDP_PORT_PROT_NM, True), - tlv.dot1_VLAN_NAME: - (self.add_dot1_vlans, tlv.Dot1_VlanName, None, False), - tlv.dot1_PROTOCOL_IDENTITY: - (self.add_dot1_protocol_identities, tlv.Dot1_ProtocolIdentity, - LLDP_PROTOCOL_IDENTITIES_NM, False), - tlv.dot1_MANAGEMENT_VID: - (self.add_single_value, tlv.Dot1_MgmtVlanId, - LLDP_PORT_MGMT_VLANID_NM, False), - tlv.dot1_LINK_AGGREGATION: - (self.add_dot1_link_aggregation, tlv.Dot1_LinkAggregationId, - LLDP_PORT_LINK_AGG_NM, True) - } - - def add_dot1_port_protocol_vlan(self, struct, name, data): - """Handle dot1_PORT_PROTOCOL_VLANID""" - self.set_value(LLDP_PORT_PROT_VLAN_ENABLED_NM, struct.flags.enabled) - self.set_value(LLDP_PORT_PROT_VLAN_SUPPORT_NM, struct.flags.supported) - - # There can be multiple port/protocol vlans TLVs, store in list - self.append_value(LLDP_PORT_PROT_VLANIDS_NM, struct.vlanid) - - def add_dot1_vlans(self, struct, name, data): - """Handle dot1_VLAN_NAME - - There can be multiple vlan TLVs, add dictionary entry with id/vlan - to list. - """ - vlan_dict = {} - vlan_dict['name'] = struct.vlan_name - vlan_dict['id'] = struct.vlanid - self.append_value(LLDP_PORT_VLANS_NM, vlan_dict) - - def add_dot1_protocol_identities(self, struct, name, data): - """Handle dot1_PROTOCOL_IDENTITY - - There can be multiple protocol ids TLVs, store in list - """ - self.append_value(LLDP_PROTOCOL_IDENTITIES_NM, - binascii.b2a_hex(struct.protocol).decode()) - - -class LLDPdot3Parser(LLDPParser): - """Class to handle parsing of 802.3 TLVs""" - def __init__(self, node_info, nv=None): - super(LLDPdot3Parser, self).__init__(node_info, nv) - - # Note that 802.3 link Aggregation has been deprecated and moved to - # 802.1 spec, but it is in the same format. Use the same function as - # dot1 handler. 
- self.parser_map = { - tlv.dot3_MACPHY_CONFIG_STATUS: - (self.add_dot3_macphy_config, tlv.Dot3_MACPhy_Config_Status, - LLDP_PORT_MAC_PHY_NM, True), - tlv.dot3_LINK_AGGREGATION: - (self.add_dot1_link_aggregation, tlv.Dot1_LinkAggregationId, - LLDP_PORT_LINK_AGG_NM, True), - tlv.dot3_MTU: - (self.add_single_value, tlv.Dot3_MTU, LLDP_MTU_NM, False) - } - - def add_dot3_macphy_config(self, struct, name, data): - """Handle dot3_MACPHY_CONFIG_STATUS""" - - try: - mau_type = tlv.OPER_MAU_TYPES[struct.mau_type] - except KeyError: - raise ValueError(_('Invalid index for mau type')) - - self.set_value(LLDP_PORT_LINK_AUTONEG_ENABLED_NM, - struct.autoneg.enabled) - self.set_value(LLDP_PORT_LINK_AUTONEG_SUPPORT_NM, - struct.autoneg.supported) - self.set_value(LLDP_PORT_CAPABILITIES_NM, - tlv.get_autoneg_cap(struct.pmd_autoneg)) - self.set_value(LLDP_PORT_MAU_TYPE_NM, mau_type) diff --git a/ironic_inspector/common/lldp_tlvs.py b/ironic_inspector/common/lldp_tlvs.py deleted file mode 100644 index 9b85861..0000000 --- a/ironic_inspector/common/lldp_tlvs.py +++ /dev/null @@ -1,366 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" Link Layer Discovery Protocol TLVs """ - -import functools - -# See http://construct.readthedocs.io/en/latest/index.html -import construct -from construct import core -import netaddr - -from ironic_inspector import utils - -LOG = utils.getProcessingLogger(__name__) - -# Constants defined according to 802.1AB-2016 LLDP spec -# https://standards.ieee.org/findstds/standard/802.1AB-2016.html - -# TLV types -LLDP_TLV_END_LLDPPDU = 0 -LLDP_TLV_CHASSIS_ID = 1 -LLDP_TLV_PORT_ID = 2 -LLDP_TLV_TTL = 3 -LLDP_TLV_PORT_DESCRIPTION = 4 -LLDP_TLV_SYS_NAME = 5 -LLDP_TLV_SYS_DESCRIPTION = 6 -LLDP_TLV_SYS_CAPABILITIES = 7 -LLDP_TLV_MGMT_ADDRESS = 8 -LLDP_TLV_ORG_SPECIFIC = 127 - -# 802.1Q defines from http://www.ieee802.org/1/pages/802.1Q-2014.html, Annex D -LLDP_802dot1_OUI = "0080c2" -# subtypes -dot1_PORT_VLANID = 1 -dot1_PORT_PROTOCOL_VLANID = 2 -dot1_VLAN_NAME = 3 -dot1_PROTOCOL_IDENTITY = 4 -dot1_MANAGEMENT_VID = 6 -dot1_LINK_AGGREGATION = 7 - -# 802.3 defines from http://standards.ieee.org/about/get/802/802.3.html, -# section 79 -LLDP_802dot3_OUI = "00120f" -# Subtypes -dot3_MACPHY_CONFIG_STATUS = 1 -dot3_LINK_AGGREGATION = 3 # Deprecated, but still in use -dot3_MTU = 4 - - -def bytes_to_int(obj): - """Convert bytes to an integer - - :param: obj - array of bytes - """ - return functools.reduce(lambda x, y: x << 8 | y, obj) - - -def mapping_for_enum(mapping): - """Return tuple used for keys as a dict - - :param: mapping - dict with tuple as keys - """ - return dict(mapping.keys()) - - -def mapping_for_switch(mapping): - """Return dict from values - - :param: mapping - dict with tuple as keys - """ - return {key[0]: value for key, value in mapping.items()} - - -IPv4Address = core.ExprAdapter( - core.Byte[4], - encoder=lambda obj, ctx: netaddr.IPAddress(obj).words, - decoder=lambda obj, ctx: str(netaddr.IPAddress(bytes_to_int(obj))) -) - -IPv6Address = core.ExprAdapter( - 
core.Byte[16], - encoder=lambda obj, ctx: netaddr.IPAddress(obj).words, - decoder=lambda obj, ctx: str(netaddr.IPAddress(bytes_to_int(obj))) -) - -MACAddress = core.ExprAdapter( - core.Byte[6], - encoder=lambda obj, ctx: netaddr.EUI(obj).words, - decoder=lambda obj, ctx: str(netaddr.EUI(bytes_to_int(obj), - dialect=netaddr.mac_unix_expanded)) -) - -IANA_ADDRESS_FAMILY_ID_MAPPING = { - ('ipv4', 1): IPv4Address, - ('ipv6', 2): IPv6Address, - ('mac', 6): MACAddress, -} - -IANAAddress = core.Embedded(core.Struct( - 'family' / core.Enum(core.Int8ub, **mapping_for_enum( - IANA_ADDRESS_FAMILY_ID_MAPPING)), - 'value' / core.Switch(construct.this.family, mapping_for_switch( - IANA_ADDRESS_FAMILY_ID_MAPPING)))) - -# Note that 'GreedyString()' is used in cases where string len is not defined -CHASSIS_ID_MAPPING = { - ('entPhysAlias_c', 1): core.Struct('value' / core.GreedyString("utf8")), - ('ifAlias', 2): core.Struct('value' / core.GreedyString("utf8")), - ('entPhysAlias_p', 3): core.Struct('value' / core.GreedyString("utf8")), - ('mac_address', 4): core.Struct('value' / MACAddress), - ('IANA_address', 5): IANAAddress, - ('ifName', 6): core.Struct('value' / core.GreedyString("utf8")), - ('local', 7): core.Struct('value' / core.GreedyString("utf8")) -} - -# -# Basic Management Set TLV field definitions -# - -# Chassis ID value is based on the subtype -ChassisId = core.Struct( - 'subtype' / core.Enum(core.Byte, **mapping_for_enum( - CHASSIS_ID_MAPPING)), - 'value' / - core.Embedded(core.Switch(construct.this.subtype, - mapping_for_switch(CHASSIS_ID_MAPPING))) -) - -PORT_ID_MAPPING = { - ('ifAlias', 1): core.Struct('value' / core.GreedyString("utf8")), - ('entPhysicalAlias', 2): core.Struct('value' / core.GreedyString("utf8")), - ('mac_address', 3): core.Struct('value' / MACAddress), - ('IANA_address', 4): IANAAddress, - ('ifName', 5): core.Struct('value' / core.GreedyString("utf8")), - ('local', 7): core.Struct('value' / core.GreedyString("utf8")) -} - -# Port ID value is based on the subtype -PortId = core.Struct( - 'subtype' / core.Enum(core.Byte, **mapping_for_enum( - PORT_ID_MAPPING)), - 'value' / - core.Embedded(core.Switch(construct.this.subtype, - mapping_for_switch(PORT_ID_MAPPING))) -) - -PortDesc = core.Struct('value' / core.GreedyString("utf8")) - -SysName = core.Struct('value' / core.GreedyString("utf8")) - -SysDesc = core.Struct('value' / core.GreedyString("utf8")) - -MgmtAddress = core.Struct( - 'len' / core.Int8ub, - 'family' / core.Enum(core.Int8ub, **mapping_for_enum( - IANA_ADDRESS_FAMILY_ID_MAPPING)), - 'address' / core.Switch(construct.this.family, mapping_for_switch( - IANA_ADDRESS_FAMILY_ID_MAPPING)) -) - -Capabilities = core.BitStruct( - core.Padding(5), - 'tpmr' / core.Bit, - 'svlan' / core.Bit, - 'cvlan' / core.Bit, - 'station' / core.Bit, - 'docsis' / core.Bit, - 'telephone' / core.Bit, - 'router' / core.Bit, - 'wlan' / core.Bit, - 'bridge' / core.Bit, - 'repeater' / core.Bit, - core.Padding(1) -) - -SysCapabilities = core.Struct( - 'system' / Capabilities, - 'enabled' / Capabilities -) - -OrgSpecific = core.Struct( - 'oui' / core.Bytes(3), - 'subtype' / core.Int8ub -) - -# -# 802.1Q TLV field definitions -# See http://www.ieee802.org/1/pages/802.1Q-2014.html, Annex D -# - -Dot1_UntaggedVlanId = core.Struct('value' / core.Int16ub) - -Dot1_PortProtocolVlan = core.Struct( - 'flags' / core.BitStruct( - core.Padding(5), - 'enabled' / core.Flag, - 'supported' / core.Flag, - core.Padding(1), - ), - 'vlanid' / core.Int16ub -) - -Dot1_VlanName = core.Struct( - 'vlanid' / 
core.Int16ub,
-    'name_len' / core.Rebuild(core.Int8ub,
-                              construct.len_(construct.this.value)),
-    'vlan_name' / core.String(construct.this.name_len, "utf8")
-)
-
-Dot1_ProtocolIdentity = core.Struct(
-    'len' / core.Rebuild(core.Int8ub, construct.len_(construct.this.value)),
-    'protocol' / core.Bytes(construct.this.len)
-)
-
-Dot1_MgmtVlanId = core.Struct('value' / core.Int16ub)
-
-Dot1_LinkAggregationId = core.Struct(
-    'status' / core.BitStruct(
-        core.Padding(6),
-        'enabled' / core.Flag,
-        'supported' / core.Flag
-    ),
-    'portid' / core.Int32ub
-)
-
-#
-# 802.3 TLV field definitions
-# See http://standards.ieee.org/about/get/802/802.3.html,
-# section 79
-#
-
-
-def get_autoneg_cap(pmd):
-    """Get autonegotiated capability strings
-
-    This returns a list of capability strings from the Physical Media
-    Dependent (PMD) capability bits.
-
-    :param pmd: PMD bits
-    :return: Sorted list containing capability strings
-    """
-    caps_set = set()
-
-    pmd_map = [
-        (pmd._10base_t_hdx, '10BASE-T hdx'),
-        (pmd._10base_t_fdx, '10BASE-T fdx'),
-        (pmd._10base_t4, '10BASE-T4'),
-        (pmd._100base_tx_hdx, '100BASE-TX hdx'),
-        (pmd._100base_tx_fdx, '100BASE-TX fdx'),
-        (pmd._100base_t2_hdx, '100BASE-T2 hdx'),
-        (pmd._100base_t2_fdx, '100BASE-T2 fdx'),
-        (pmd.pause_fdx, 'PAUSE fdx'),
-        (pmd.asym_pause, 'Asym PAUSE fdx'),
-        (pmd.sym_pause, 'Sym PAUSE fdx'),
-        (pmd.asym_sym_pause, 'Asym and Sym PAUSE fdx'),
-        (pmd._1000base_x_hdx, '1000BASE-X hdx'),
-        (pmd._1000base_x_fdx, '1000BASE-X fdx'),
-        (pmd._1000base_t_hdx, '1000BASE-T hdx'),
-        (pmd._1000base_t_fdx, '1000BASE-T fdx')]
-
-    for bit, cap in pmd_map:
-        if bit:
-            caps_set.add(cap)
-
-    return sorted(caps_set)
-
-Dot3_MACPhy_Config_Status = core.Struct(
-    'autoneg' / core.BitStruct(
-        core.Padding(6),
-        'enabled' / core.Flag,
-        'supported' / core.Flag,
-    ),
-    # See IANAifMauAutoNegCapBits
-    # RFC 4836, Definitions of Managed Objects for IEEE 802.3
-    'pmd_autoneg' / core.BitStruct(
-        core.Padding(1),
-        '_10base_t_hdx' / core.Bit,
-        '_10base_t_fdx' / core.Bit,
-        '_10base_t4' / core.Bit,
-        '_100base_tx_hdx' / core.Bit,
-        '_100base_tx_fdx' / core.Bit,
-        '_100base_t2_hdx' / core.Bit,
-        '_100base_t2_fdx' / core.Bit,
-        'pause_fdx' / core.Bit,
-        'asym_pause' / core.Bit,
-        'sym_pause' / core.Bit,
-        'asym_sym_pause' / core.Bit,
-        '_1000base_x_hdx' / core.Bit,
-        '_1000base_x_fdx' / core.Bit,
-        '_1000base_t_hdx' / core.Bit,
-        '_1000base_t_fdx' / core.Bit
-    ),
-    'mau_type' / core.Int16ub
-)
-
-# See ifMauTypeList in
-# RFC 4836, Definitions of Managed Objects for IEEE 802.3
-OPER_MAU_TYPES = {
-    0: "Unknown",
-    1: "AUI",
-    2: "10BASE-5",
-    3: "FOIRL",
-    4: "10BASE-2",
-    5: "10BASE-T duplex mode unknown",
-    6: "10BASE-FP",
-    7: "10BASE-FB",
-    8: "10BASE-FL duplex mode unknown",
-    9: "10BROAD36",
-    10: "10BASE-T half duplex",
-    11: "10BASE-T full duplex",
-    12: "10BASE-FL half duplex",
-    13: "10BASE-FL full duplex",
-    14: "100BASE-T4",
-    15: "100BASE-TX half duplex",
-    16: "100BASE-TX full duplex",
-    17: "100BASE-FX half duplex",
-    18: "100BASE-FX full duplex",
-    19: "100BASE-T2 half duplex",
-    20: "100BASE-T2 full duplex",
-    21: "1000BASE-X half duplex",
-    22: "1000BASE-X full duplex",
-    23: "1000BASE-LX half duplex",
-    24: "1000BASE-LX full duplex",
-    25: "1000BASE-SX half duplex",
-    26: "1000BASE-SX full duplex",
-    27: "1000BASE-CX half duplex",
-    28: "1000BASE-CX full duplex",
-    29: "1000BASE-T half duplex",
-    30: "1000BASE-T full duplex",
-    31: "10GBASE-X",
-    32: "10GBASE-LX4",
-    33: "10GBASE-R",
-    34: "10GBASE-ER",
-    35: "10GBASE-LR",
-    36: "10GBASE-SR",
-    37: "10GBASE-W",
- 38: "10GBASE-EW", - 39: "10GBASE-LW", - 40: "10GBASE-SW", - 41: "10GBASE-CX4", - 42: "2BASE-TL", - 43: "10PASS-TS", - 44: "100BASE-BX10D", - 45: "100BASE-BX10U", - 46: "100BASE-LX10", - 47: "1000BASE-BX10D", - 48: "1000BASE-BX10U", - 49: "1000BASE-LX10", - 50: "1000BASE-PX10D", - 51: "1000BASE-PX10U", - 52: "1000BASE-PX20D", - 53: "1000BASE-PX20U", -} - -Dot3_MTU = core.Struct('value' / core.Int16ub) diff --git a/ironic_inspector/common/service_utils.py b/ironic_inspector/common/service_utils.py deleted file mode 100644 index f41f24d..0000000 --- a/ironic_inspector/common/service_utils.py +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log - - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -def prepare_service(args): - log.register_options(CONF) - log.set_defaults(default_log_levels=['sqlalchemy=WARNING', - 'iso8601=WARNING', - 'requests=WARNING', - 'urllib3.connectionpool=WARNING', - 'keystonemiddleware=WARNING', - 'swiftclient=WARNING', - 'keystoneauth=WARNING', - 'ironicclient=WARNING']) - CONF(args, project='ironic-inspector') - log.setup(CONF, 'ironic_inspector') - - LOG.debug("Configuration:") - CONF.log_opt_values(LOG, log.DEBUG) diff --git a/ironic_inspector/common/swift.py b/ironic_inspector/common/swift.py deleted file mode 100644 index 547a2d2..0000000 --- a/ironic_inspector/common/swift.py +++ /dev/null @@ -1,172 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Mostly copied from ironic/common/swift.py - -import json - -from oslo_config import cfg -from swiftclient import client as swift_client -from swiftclient import exceptions as swift_exceptions - -from ironic_inspector.common.i18n import _ -from ironic_inspector.common import keystone -from ironic_inspector import utils - -CONF = cfg.CONF - - -SWIFT_GROUP = 'swift' -SWIFT_OPTS = [ - cfg.IntOpt('max_retries', - default=2, - help=_('Maximum number of times to retry a Swift request, ' - 'before failing.')), - cfg.IntOpt('delete_after', - default=0, - help=_('Number of seconds that the Swift object will last ' - 'before being deleted. 
(set to 0 to never delete the '
-                      'object).')),
-    cfg.StrOpt('container',
-               default='ironic-inspector',
-               help=_('Default Swift container to use when creating '
-                      'objects.')),
-    cfg.StrOpt('os_service_type',
-               default='object-store',
-               help=_('Swift service type.')),
-    cfg.StrOpt('os_endpoint_type',
-               default='internalURL',
-               help=_('Swift endpoint type.')),
-    cfg.StrOpt('os_region',
-               help=_('Keystone region to get endpoint for.')),
-]
-
-CONF.register_opts(SWIFT_OPTS, group=SWIFT_GROUP)
-keystone.register_auth_opts(SWIFT_GROUP)
-
-OBJECT_NAME_PREFIX = 'inspector_data'
-SWIFT_SESSION = None
-
-
-def reset_swift_session():
-    """Reset the global session variable.
-
-    Mostly useful for unit tests.
-    """
-    global SWIFT_SESSION
-    SWIFT_SESSION = None
-
-
-class SwiftAPI(object):
-    """API for communicating with Swift."""
-
-    def __init__(self):
-        """Constructor for creating a SwiftAPI object.
-
-        Authentication is loaded from the config file.
-        """
-        global SWIFT_SESSION
-        if not SWIFT_SESSION:
-            SWIFT_SESSION = keystone.get_session(SWIFT_GROUP)
-
-        self.connection = swift_client.Connection(session=SWIFT_SESSION)
-
-    def create_object(self, object, data, container=CONF.swift.container,
-                      headers=None):
-        """Uploads a given string to Swift.
-
-        :param object: The name of the object in Swift
-        :param data: string data to put in the object
-        :param container: The name of the container for the object.
-        :param headers: the headers for the object to pass to Swift
-        :returns: The Swift UUID of the object
-        :raises: utils.Error, if any operation with Swift fails.
-        """
-        try:
-            self.connection.put_container(container)
-        except swift_exceptions.ClientException as e:
-            err_msg = (_('Swift failed to create container %(container)s. '
-                         'Error was: %(error)s') %
-                       {'container': container, 'error': e})
-            raise utils.Error(err_msg)
-
-        if CONF.swift.delete_after > 0:
-            headers = headers or {}
-            headers['X-Delete-After'] = CONF.swift.delete_after
-
-        try:
-            obj_uuid = self.connection.put_object(container,
-                                                  object,
-                                                  data,
-                                                  headers=headers)
-        except swift_exceptions.ClientException as e:
-            err_msg = (_('Swift failed to create object %(object)s in '
-                         'container %(container)s. Error was: %(error)s') %
-                       {'object': object, 'container': container, 'error': e})
-            raise utils.Error(err_msg)
-
-        return obj_uuid
-
-    def get_object(self, object, container=CONF.swift.container):
-        """Downloads a given object from Swift.
-
-        :param object: The name of the object in Swift
-        :param container: The name of the container for the object.
-        :returns: Swift object
-        :raises: utils.Error, if the Swift operation fails.
-        """
-        try:
-            headers, obj = self.connection.get_object(container, object)
-        except swift_exceptions.ClientException as e:
-            err_msg = (_('Swift failed to get object %(object)s in '
-                         'container %(container)s. Error was: %(error)s') %
-                       {'object': object, 'container': container, 'error': e})
-            raise utils.Error(err_msg)
-
-        return obj
-
-
-def store_introspection_data(data, uuid, suffix=None):
-    """Uploads introspection data to Swift.
-
-    :param data: data to store in Swift
-    :param uuid: UUID of the Ironic node that the data came from
-    :param suffix: optional suffix to add to the underlying swift
-                   object name
-    :returns: name of the Swift object that the data is stored in
-    """
-    swift_api = SwiftAPI()
-    swift_object_name = '%s-%s' % (OBJECT_NAME_PREFIX, uuid)
-    if suffix is not None:
-        swift_object_name = '%s-%s' % (swift_object_name, suffix)
-    swift_api.create_object(swift_object_name, json.dumps(data))
-    return swift_object_name
-
-
-def get_introspection_data(uuid, suffix=None):
-    """Downloads introspection data from Swift.
-
-    :param uuid: UUID of the Ironic node that the data came from
-    :param suffix: optional suffix to add to the underlying swift
-                   object name
-    :returns: Swift object with the introspection data
-    """
-    swift_api = SwiftAPI()
-    swift_object_name = '%s-%s' % (OBJECT_NAME_PREFIX, uuid)
-    if suffix is not None:
-        swift_object_name = '%s-%s' % (swift_object_name, suffix)
-    return swift_api.get_object(swift_object_name)
-
-
-def list_opts():
-    return keystone.add_auth_options(SWIFT_OPTS, SWIFT_GROUP)
diff --git a/ironic_inspector/conf.py b/ironic_inspector/conf.py
deleted file mode 100644
index 0f49f01..0000000
--- a/ironic_inspector/conf.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_config import cfg
-from oslo_middleware import cors
-
-from ironic_inspector.common.i18n import _
-
-
-MIN_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Minimum-Version'
-MAX_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Maximum-Version'
-VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Version'
-
-VALID_ADD_PORTS_VALUES = ('all', 'active', 'pxe', 'disabled')
-VALID_KEEP_PORTS_VALUES = ('all', 'present', 'added')
-VALID_STORE_DATA_VALUES = ('none', 'swift')
-
-
-FIREWALL_OPTS = [
-    cfg.BoolOpt('manage_firewall',
-                default=True,
-                help=_('Whether to manage firewall rules for the PXE '
-                       'port.')),
-    cfg.StrOpt('dnsmasq_interface',
-               default='br-ctlplane',
-               help=_('Interface on which dnsmasq listens, the default is '
-                      'suitable for VMs.')),
-    cfg.IntOpt('firewall_update_period',
-               default=15,
-               help=_('Amount of time in seconds after which the periodic '
-                      'update of the firewall is repeated.')),
-    cfg.StrOpt('firewall_chain',
-               default='ironic-inspector',
-               help=_('iptables chain name to use.')),
-    cfg.ListOpt('ethoib_interfaces',
-                default=[],
-                help=_('List of Ethernet Over InfiniBand interfaces '
-                       'on the Inspector host which are used for physical '
-                       'access to the DHCP network. Multiple interfaces would '
-                       'be attached to a bond or bridge specified in '
-                       'dnsmasq_interface. The MACs of the InfiniBand nodes '
-                       'which are not in the desired state are going to be '
-                       'blacklisted based on the list of neighbor MACs '
-                       'on these interfaces.')),
-]
-
-PROCESSING_OPTS = [
-    cfg.StrOpt('add_ports',
-               default='pxe',
-               help=_('Which MAC addresses to add as ports during '
-                      'introspection. 
Possible values: all ' - '(all MAC addresses), active (MAC addresses of NIC with ' - 'IP addresses), pxe (only MAC address of NIC node PXE ' - 'booted from, falls back to "active" if PXE MAC is not ' - 'supplied by the ramdisk).'), - choices=VALID_ADD_PORTS_VALUES), - cfg.StrOpt('keep_ports', - default='all', - help=_('Which ports (already present on a node) to keep after ' - 'introspection. Possible values: all (do not delete ' - 'anything), present (keep ports which MACs were present ' - 'in introspection data), added (keep only MACs that we ' - 'added during introspection).'), - choices=VALID_KEEP_PORTS_VALUES), - cfg.BoolOpt('overwrite_existing', - default=True, - help=_('Whether to overwrite existing values in node ' - 'database. Disable this option to make ' - 'introspection a non-destructive operation.')), - cfg.StrOpt('default_processing_hooks', - default='ramdisk_error,root_disk_selection,scheduler,' - 'validate_interfaces,capabilities,pci_devices', - help=_('Comma-separated list of default hooks for processing ' - 'pipeline. Hook \'scheduler\' updates the node with the ' - 'minimum properties required by the Nova scheduler. ' - 'Hook \'validate_interfaces\' ensures that valid NIC ' - 'data was provided by the ramdisk. ' - 'Do not exclude these two unless you really know what ' - 'you\'re doing.')), - cfg.StrOpt('processing_hooks', - default='$default_processing_hooks', - help=_('Comma-separated list of enabled hooks for processing ' - 'pipeline. The default for this is ' - '$default_processing_hooks, hooks can be added before ' - 'or after the defaults like this: ' - '"prehook,$default_processing_hooks,posthook".')), - cfg.StrOpt('ramdisk_logs_dir', - help=_('If set, logs from ramdisk will be stored in this ' - 'directory.')), - cfg.BoolOpt('always_store_ramdisk_logs', - default=False, - help=_('Whether to store ramdisk logs even if it did not ' - 'return an error message (dependent upon ' - '"ramdisk_logs_dir" option being set).')), - cfg.StrOpt('node_not_found_hook', - help=_('The name of the hook to run when inspector receives ' - 'inspection information from a node it isn\'t already ' - 'aware of. This hook is ignored by default.')), - cfg.StrOpt('store_data', - default='none', - choices=VALID_STORE_DATA_VALUES, - help=_('Method for storing introspection data. If set to \'none' - '\', introspection data will not be stored.')), - cfg.StrOpt('store_data_location', - help=_('Name of the key to store the location of stored data ' - 'in the extra column of the Ironic database.')), - cfg.BoolOpt('disk_partitioning_spacing', - default=True, - help=_('Whether to leave 1 GiB of disk size untouched for ' - 'partitioning. Only has effect when used with the IPA ' - 'as a ramdisk, for older ramdisk local_gb is ' - 'calculated on the ramdisk side.')), - cfg.BoolOpt('log_bmc_address', - default=True, - help=_('Whether to log node BMC address with every message ' - 'during processing.'), - deprecated_for_removal=True), - cfg.StrOpt('ramdisk_logs_filename_format', - default='{uuid}_{dt:%Y%m%d-%H%M%S.%f}.tar.gz', - help=_('File name template for storing ramdisk logs. 
The ' - 'following replacements can be used: ' - '{uuid} - node UUID or "unknown", ' - '{bmc} - node BMC address or "unknown", ' - '{dt} - current UTC date and time, ' - '{mac} - PXE booting MAC or "unknown".')), - cfg.BoolOpt('power_off', - default=True, - help=_('Whether to power off a node after introspection.')), -] - -SERVICE_OPTS = [ - cfg.StrOpt('listen_address', - default='0.0.0.0', - help=_('IP to listen on.')), - cfg.PortOpt('listen_port', - default=5050, - help=_('Port to listen on.')), - cfg.StrOpt('auth_strategy', - default='keystone', - choices=('keystone', 'noauth'), - help=_('Authentication method used on the ironic-inspector ' - 'API. Either "noauth" or "keystone" are currently valid ' - 'options. "noauth" will disable all authentication.')), - cfg.IntOpt('timeout', - default=3600, - help=_('Timeout after which introspection is considered ' - 'failed, set to 0 to disable.')), - cfg.IntOpt('node_status_keep_time', - default=0, - help=_('For how much time (in seconds) to keep status ' - 'information about nodes after introspection was ' - 'finished for them. Set to 0 (the default) ' - 'to disable the timeout.'), - deprecated_for_removal=True), - cfg.IntOpt('clean_up_period', - default=60, - help=_('Amount of time in seconds, after which repeat clean up ' - 'of timed out nodes and old nodes status information.')), - cfg.BoolOpt('use_ssl', - default=False, - help=_('SSL Enabled/Disabled')), - cfg.StrOpt('ssl_cert_path', - default='', - help=_('Path to SSL certificate')), - cfg.StrOpt('ssl_key_path', - default='', - help=_('Path to SSL key')), - cfg.IntOpt('max_concurrency', - default=1000, min=2, - help=_('The green thread pool size.')), - cfg.IntOpt('introspection_delay', - default=5, - help=_('Delay (in seconds) between two introspections.')), - cfg.StrOpt('introspection_delay_drivers', - default='.*', - help=_('Only node with drivers matching this regular ' - 'expression will be affected by introspection_delay ' - 'setting.'), - deprecated_for_removal=True), - cfg.ListOpt('ipmi_address_fields', - default=['ilo_address', 'drac_host', 'drac_address', - 'cimc_address'], - help=_('Ironic driver_info fields that are equivalent ' - 'to ipmi_address.')), - cfg.StrOpt('rootwrap_config', - default="/etc/ironic-inspector/rootwrap.conf", - help=_('Path to the rootwrap configuration file to use for ' - 'running commands as root')), - cfg.IntOpt('api_max_limit', default=1000, min=1, - help=_('Limit the number of elements an API list-call returns')) -] - -PXE_FILTER_OPTS = [ - cfg.StrOpt('driver', default='noop', - help=_('PXE boot filter driver to use, such as iptables')), - cfg.IntOpt('sync_period', default=15, min=0, - help=_('Amount of time in seconds, after which repeat periodic ' - 'update of the filter.')), -] - -cfg.CONF.register_opts(SERVICE_OPTS) -cfg.CONF.register_opts(FIREWALL_OPTS, group='firewall') -cfg.CONF.register_opts(PROCESSING_OPTS, group='processing') -cfg.CONF.register_opts(PXE_FILTER_OPTS, 'pxe_filter') - - -def list_opts(): - return [ - ('', SERVICE_OPTS), - ('firewall', FIREWALL_OPTS), - ('processing', PROCESSING_OPTS), - ('pxe_filter', PXE_FILTER_OPTS), - ] - - -def set_config_defaults(): - """This method updates all configuration default values.""" - set_cors_middleware_defaults() - - -def set_cors_middleware_defaults(): - """Update default configuration options for oslo.middleware.""" - # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ - cfg.set_defaults( - cors.CORS_OPTS, - allow_headers=['X-Auth-Token', - MIN_VERSION_HEADER, - 
MAX_VERSION_HEADER, - VERSION_HEADER], - allow_methods=['GET', 'POST', 'PUT', 'HEAD', - 'PATCH', 'DELETE', 'OPTIONS'] - ) diff --git a/ironic_inspector/db.py b/ironic_inspector/db.py deleted file mode 100644 index 99bc413..0000000 --- a/ironic_inspector/db.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright 2015 NEC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""SQLAlchemy models for inspection data and shared database code.""" - -import contextlib - -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_db import options as db_opts -from oslo_db.sqlalchemy import enginefacade -from oslo_db.sqlalchemy import models -from oslo_db.sqlalchemy import types as db_types -from sqlalchemy import (Boolean, Column, DateTime, Enum, ForeignKey, - Integer, String, Text) -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import orm - -from ironic_inspector import conf # noqa -from ironic_inspector import introspection_state as istate - - -class ModelBase(models.ModelBase): - __table_args__ = {'mysql_engine': "InnoDB", - 'mysql_charset': "utf8"} - - -Base = declarative_base(cls=ModelBase) -CONF = cfg.CONF -_DEFAULT_SQL_CONNECTION = 'sqlite:///ironic_inspector.sqlite' -_CTX_MANAGER = None - -db_opts.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION) - -_synchronized = lockutils.synchronized_with_prefix("ironic-inspector-") - - -class Node(Base): - __tablename__ = 'nodes' - uuid = Column(String(36), primary_key=True) - version_id = Column(String(36), server_default='') - state = Column(Enum(*istate.States.all()), nullable=False, - default=istate.States.finished, - server_default=istate.States.finished) - started_at = Column(DateTime, nullable=True) - finished_at = Column(DateTime, nullable=True) - error = Column(Text, nullable=True) - - # version_id is being tracked in the NodeInfo object - # for the sake of consistency. 
See also SQLAlchemy docs: - # http://docs.sqlalchemy.org/en/latest/orm/versioning.html - __mapper_args__ = { - 'version_id_col': version_id, - 'version_id_generator': False, - } - - -class Attribute(Base): - __tablename__ = 'attributes' - uuid = Column(String(36), primary_key=True) - node_uuid = Column(String(36), ForeignKey('nodes.uuid', - name='fk_node_attribute')) - name = Column(String(255), nullable=False) - value = Column(String(255), nullable=True) - - -class Option(Base): - __tablename__ = 'options' - uuid = Column(String(36), ForeignKey('nodes.uuid'), primary_key=True) - name = Column(String(255), primary_key=True) - value = Column(Text) - - -class Rule(Base): - __tablename__ = 'rules' - uuid = Column(String(36), primary_key=True) - created_at = Column(DateTime, nullable=False) - description = Column(Text) - # NOTE(dtantsur): in the future we might need to temporary disable a rule - disabled = Column(Boolean, default=False) - - conditions = orm.relationship('RuleCondition', lazy='joined', - order_by='RuleCondition.id', - cascade="all, delete-orphan") - actions = orm.relationship('RuleAction', lazy='joined', - order_by='RuleAction.id', - cascade="all, delete-orphan") - - -class RuleCondition(Base): - __tablename__ = 'rule_conditions' - id = Column(Integer, primary_key=True) - rule = Column(String(36), ForeignKey('rules.uuid')) - op = Column(String(255), nullable=False) - multiple = Column(String(255), nullable=False) - invert = Column(Boolean, default=False) - # NOTE(dtantsur): while all operations now require a field, I can also - # imagine user-defined operations that do not, thus it's nullable. - field = Column(Text) - params = Column(db_types.JsonEncodedDict) - - def as_dict(self): - res = self.params.copy() - res['op'] = self.op - res['field'] = self.field - res['multiple'] = self.multiple - res['invert'] = self.invert - return res - - -class RuleAction(Base): - __tablename__ = 'rule_actions' - id = Column(Integer, primary_key=True) - rule = Column(String(36), ForeignKey('rules.uuid')) - action = Column(String(255), nullable=False) - params = Column(db_types.JsonEncodedDict) - - def as_dict(self): - res = self.params.copy() - res['action'] = self.action - return res - - -def init(): - """Initialize the database. - - Method called on service start up, initialize transaction - context manager and try to create db session. - """ - get_writer_session() - - -def model_query(model, *args, **kwargs): - """Query helper for simpler session usage. - - :param session: if present, the session to use - """ - session = kwargs.get('session') or get_reader_session() - query = session.query(model, *args) - return query - - -@contextlib.contextmanager -def ensure_transaction(session=None): - session = session or get_writer_session() - with session.begin(subtransactions=True): - yield session - - -@_synchronized("transaction-context-manager") -def _create_context_manager(): - _ctx_mgr = enginefacade.transaction_context() - # TODO(aarefiev): enable foreign keys for SQLite once all unit - # tests with failed constraint will be fixed. - _ctx_mgr.configure(sqlite_fk=False) - - return _ctx_mgr - - -def get_context_manager(): - """Create transaction context manager lazily. - - :returns: The transaction context manager. - """ - global _CTX_MANAGER - if _CTX_MANAGER is None: - _CTX_MANAGER = _create_context_manager() - - return _CTX_MANAGER - - -def get_reader_session(): - """Help method to get reader session. - - :returns: The reader session. 
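As a usage illustration, a sketch (not code from this patch) of counting nodes in a given state with the query helpers and session helpers in this module:

    def count_nodes_in_error():
        """Sketch: run a query inside an explicit transaction."""
        session = get_writer_session()
        with ensure_transaction(session) as ses:
            return model_query(Node, session=ses).filter_by(
                state=istate.States.error).count()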
- """ - return get_context_manager().reader.get_sessionmaker()() - - -def get_writer_session(): - """Help method to get writer session. - - :returns: The writer session. - """ - return get_context_manager().writer.get_sessionmaker()() diff --git a/ironic_inspector/dbsync.py b/ironic_inspector/dbsync.py deleted file mode 100644 index 4d168bd..0000000 --- a/ironic_inspector/dbsync.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015 Cisco Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import sys - -from alembic import command as alembic_command -from alembic import config as alembic_config -from alembic import util as alembic_util -import six - -from oslo_config import cfg -from oslo_log import log - -from ironic_inspector import conf # noqa - -CONF = cfg.CONF - - -def add_alembic_command(subparsers, name): - return subparsers.add_parser( - name, help=getattr(alembic_command, name).__doc__) - - -def add_command_parsers(subparsers): - for name in ['current', 'history', 'branches', 'heads']: - parser = add_alembic_command(subparsers, name) - parser.set_defaults(func=do_alembic_command) - - for name in ['stamp', 'show', 'edit']: - parser = add_alembic_command(subparsers, name) - parser.set_defaults(func=with_revision) - parser.add_argument('--revision', nargs='?', required=True) - - parser = add_alembic_command(subparsers, 'upgrade') - parser.set_defaults(func=with_revision) - parser.add_argument('--revision', nargs='?') - - parser = add_alembic_command(subparsers, 'revision') - parser.set_defaults(func=do_revision) - parser.add_argument('-m', '--message') - parser.add_argument('--autogenerate', action='store_true') - - -command_opt = cfg.SubCommandOpt('command', - title='Command', - help='Available commands', - handler=add_command_parsers) - -CONF.register_cli_opt(command_opt) - - -def _get_alembic_config(): - return alembic_config.Config(os.path.join(os.path.dirname(__file__), - 'alembic.ini')) - - -def do_revision(config, cmd, *args, **kwargs): - do_alembic_command(config, cmd, message=CONF.command.message, - autogenerate=CONF.command.autogenerate) - - -def with_revision(config, cmd, *args, **kwargs): - revision = CONF.command.revision or 'head' - do_alembic_command(config, cmd, revision) - - -def do_alembic_command(config, cmd, *args, **kwargs): - try: - getattr(alembic_command, cmd)(config, *args, **kwargs) - except alembic_util.CommandError as e: - alembic_util.err(six.text_type(e)) - - -def main(args=sys.argv[1:]): - log.register_options(CONF) - CONF(args, project='ironic-inspector') - config = _get_alembic_config() - config.set_main_option('script_location', "ironic_inspector:migrations") - config.ironic_inspector_config = CONF - - CONF.command.func(config, CONF.command.name) diff --git a/ironic_inspector/firewall.py b/ironic_inspector/firewall.py deleted file mode 100644 index a94165b..0000000 --- a/ironic_inspector/firewall.py +++ /dev/null @@ -1,257 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use 
this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import contextlib
-import os
-import re
-
-from eventlet.green import subprocess
-from eventlet import semaphore
-from oslo_config import cfg
-from oslo_log import log
-
-from ironic_inspector.common import ironic as ir_utils
-from ironic_inspector import node_cache
-
-
-CONF = cfg.CONF
-LOG = log.getLogger("ironic_inspector.firewall")
-NEW_CHAIN = None
-CHAIN = None
-INTERFACE = None
-LOCK = semaphore.BoundedSemaphore()
-BASE_COMMAND = None
-BLACKLIST_CACHE = None
-ENABLED = True
-EMAC_REGEX = 'EMAC=([0-9a-f]{2}(:[0-9a-f]{2}){5}) IMAC=.*'
-
-
-def _iptables(*args, **kwargs):
-    # NOTE(dtantsur): -w flag makes it wait for the xtables lock
-    cmd = BASE_COMMAND + args
-    ignore = kwargs.pop('ignore', False)
-    LOG.debug('Running iptables %s', args)
-    kwargs['stderr'] = subprocess.STDOUT
-    try:
-        subprocess.check_output(cmd, **kwargs)
-    except subprocess.CalledProcessError as exc:
-        output = exc.output.replace('\n', '. ')
-        if ignore:
-            LOG.debug('Ignoring failed iptables %(args)s: %(output)s',
-                      {'args': args, 'output': output})
-        else:
-            LOG.error('iptables %(iptables)s failed: %(exc)s',
-                      {'iptables': args, 'exc': output})
-            raise
-
-
-def init():
-    """Initialize firewall management.
-
-    Must be called once on start-up.
-    """
-    if not CONF.firewall.manage_firewall:
-        return
-
-    global INTERFACE, CHAIN, NEW_CHAIN, BASE_COMMAND, BLACKLIST_CACHE
-    BLACKLIST_CACHE = None
-    INTERFACE = CONF.firewall.dnsmasq_interface
-    CHAIN = CONF.firewall.firewall_chain
-    NEW_CHAIN = CHAIN + '_temp'
-    BASE_COMMAND = ('sudo', 'ironic-inspector-rootwrap',
-                    CONF.rootwrap_config, 'iptables',)
-
-    # The -w flag makes iptables wait for the xtables lock, but it's not
-    # supported everywhere yet
-    try:
-        with open(os.devnull, 'wb') as null:
-            subprocess.check_call(BASE_COMMAND + ('-w', '-h'),
-                                  stderr=null, stdout=null)
-    except subprocess.CalledProcessError:
-        LOG.warning('iptables does not support the -w flag, please update '
-                    'it to at least version 1.4.21')
-    else:
-        BASE_COMMAND += ('-w',)
-
-    _clean_up(CHAIN)
-    # Not really needed, but helps to validate that we have access to iptables
-    _iptables('-N', CHAIN)
-
-
-def _clean_up(chain):
-    _iptables('-D', 'INPUT', '-i', INTERFACE, '-p', 'udp',
-              '--dport', '67', '-j', chain,
-              ignore=True)
-    _iptables('-F', chain, ignore=True)
-    _iptables('-X', chain, ignore=True)
-
-
-def clean_up():
-    """Clean up everything before exiting."""
-    if not CONF.firewall.manage_firewall:
-        return
-
-    _clean_up(CHAIN)
-    _clean_up(NEW_CHAIN)
-
-
-def _should_enable_dhcp():
-    """Check whether we should enable DHCP at all.
-
-    We won't even open access to DHCP if no nodes are on introspection and
-    node_not_found_hook is not set.
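Referring back to _iptables() above: with the default rootwrap_config and an iptables build that supports -w, a call such as _iptables('-N', 'ironic-inspector') composes roughly this argv (a sketch of the composition, not a logged command):

    ('sudo', 'ironic-inspector-rootwrap',
     '/etc/ironic-inspector/rootwrap.conf',
     'iptables', '-w', '-N', 'ironic-inspector')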
- """ - return (node_cache.introspection_active() or - CONF.processing.node_not_found_hook) - - -@contextlib.contextmanager -def _temporary_chain(chain, main_chain): - """Context manager to operate on a temporary chain.""" - # Clean up a bit to account for possible troubles on previous run - _clean_up(chain) - _iptables('-N', chain) - - yield - - # Swap chains - _iptables('-I', 'INPUT', '-i', INTERFACE, '-p', 'udp', - '--dport', '67', '-j', chain) - _iptables('-D', 'INPUT', '-i', INTERFACE, '-p', 'udp', - '--dport', '67', '-j', main_chain, - ignore=True) - _iptables('-F', main_chain, ignore=True) - _iptables('-X', main_chain, ignore=True) - _iptables('-E', chain, main_chain) - - -def _disable_dhcp(): - """Disable DHCP completely.""" - global ENABLED, BLACKLIST_CACHE - - if not ENABLED: - LOG.debug('DHCP is already disabled, not updating') - return - - LOG.debug('No nodes on introspection and node_not_found_hook is ' - 'not set - disabling DHCP') - BLACKLIST_CACHE = None - with _temporary_chain(NEW_CHAIN, CHAIN): - # Blacklist everything - _iptables('-A', NEW_CHAIN, '-j', 'REJECT') - - ENABLED = False - - -def update_filters(ironic=None): - """Update firewall filter rules for introspection. - - Gives access to PXE boot port for any machine, except for those, - whose MAC is registered in Ironic and is not on introspection right now. - - This function is called from both introspection initialization code and - from periodic task. This function is supposed to be resistant to unexpected - iptables state. - - ``init()`` function must be called once before any call to this function. - This function is using ``eventlet`` semaphore to serialize access from - different green threads. - - Does nothing, if firewall management is disabled in configuration. - - :param ironic: Ironic client instance, optional. - """ - global BLACKLIST_CACHE, ENABLED - - if not CONF.firewall.manage_firewall: - return - - assert INTERFACE is not None - ironic = ir_utils.get_client() if ironic is None else ironic - with LOCK: - if not _should_enable_dhcp(): - _disable_dhcp() - return - - ports_active = ironic.port.list(limit=0, fields=['address', 'extra']) - macs_active = set(p.address for p in ports_active) - to_blacklist = macs_active - node_cache.active_macs() - ib_mac_mapping = ( - _ib_mac_to_rmac_mapping(to_blacklist, ports_active)) - - if (BLACKLIST_CACHE is not None and - to_blacklist == BLACKLIST_CACHE and not ib_mac_mapping): - LOG.debug('Not updating iptables - no changes in MAC list %s', - to_blacklist) - return - - LOG.debug('Blacklisting active MAC\'s %s', to_blacklist) - # Force update on the next iteration if this attempt fails - BLACKLIST_CACHE = None - - with _temporary_chain(NEW_CHAIN, CHAIN): - # - Blacklist active macs, so that nova can boot them - for mac in to_blacklist: - mac = ib_mac_mapping.get(mac) or mac - _iptables('-A', NEW_CHAIN, '-m', 'mac', - '--mac-source', mac, '-j', 'DROP') - # - Whitelist everything else - _iptables('-A', NEW_CHAIN, '-j', 'ACCEPT') - - # Cache result of successful iptables update - ENABLED = True - BLACKLIST_CACHE = to_blacklist - - -def _ib_mac_to_rmac_mapping(blacklist_macs, ports_active): - """Mapping between host InfiniBand MAC to EthernetOverInfiniBand MAC - - On InfiniBand deployment we need to map between the baremetal host - InfiniBand MAC to the EoIB MAC. The EoIB MAC addresses are learned - automatically by the EoIB interfaces and those MACs are recorded - to the /sys/class/net//eth/neighs file. 
-    The InfiniBand GUID is taken from the ironic port client-id extra
-    attribute. The InfiniBand GUID is the last 8 bytes of the client-id.
-    The file format makes it possible to map the GUID to the EoIB MAC. The
-    firewall rules based on those MACs get applied to the dnsmasq_interface
-    by the update_filters function.
-
-    :param blacklist_macs: list of baremetal host InfiniBand MACs to
-                           blacklist
-    :param ports_active: list of active ironic ports
-    :return: mapping of baremetal InfiniBand MACs to the remote (EoIB) MACs
-    """
-    ethoib_interfaces = CONF.firewall.ethoib_interfaces
-    ib_mac_to_remote_mac = {}
-    for interface in ethoib_interfaces:
-        neighs_file = (
-            os.path.join('/sys/class/net', interface, 'eth/neighs'))
-        try:
-            with open(neighs_file, 'r') as fd:
-                data = fd.read()
-        except IOError:
-            LOG.error('Interface %s is not Ethernet Over InfiniBand; '
-                      'skipping it', interface)
-            continue
-        for port in ports_active:
-            if port.address in blacklist_macs:
-                client_id = port.extra.get('client-id')
-                if client_id:
-                    # NOTE(moshele): the last 8 bytes of the client-id are
-                    # the baremetal node InfiniBand GUID
-                    guid = client_id[-23:]
-                    p = re.compile(EMAC_REGEX + guid)
-                    match = p.search(data)
-                    if match:
-                        ib_mac_to_remote_mac[port.address] = match.group(1)
-    return ib_mac_to_remote_mac
diff --git a/ironic_inspector/introspect.py b/ironic_inspector/introspect.py
deleted file mode 100644
index cc55e23..0000000
--- a/ironic_inspector/introspect.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Handling introspection request."""
-
-import re
-import time
-
-from eventlet import semaphore
-from oslo_config import cfg
-
-from ironic_inspector.common.i18n import _
-from ironic_inspector.common import ironic as ir_utils
-from ironic_inspector import firewall
-from ironic_inspector import introspection_state as istate
-from ironic_inspector import node_cache
-from ironic_inspector import utils
-
-CONF = cfg.CONF
-
-
-LOG = utils.getProcessingLogger(__name__)
-
-_LAST_INTROSPECTION_TIME = 0
-_LAST_INTROSPECTION_LOCK = semaphore.BoundedSemaphore()
-
-
-def introspect(node_id, token=None):
-    """Initiate hardware properties introspection for a given node.
-
-    :param node_id: node UUID or name
-    :param token: authentication token
-    :raises: Error
-    """
-    ironic = ir_utils.get_client(token)
-    node = ir_utils.get_node(node_id, ironic=ironic)
-
-    ir_utils.check_provision_state(node)
-    validation = ironic.node.validate(node.uuid)
-    if not validation.power['result']:
-        msg = _('Failed validation of power interface, reason: %s')
-        raise utils.Error(msg % validation.power['reason'],
-                          node_info=node)
-
-    bmc_address = ir_utils.get_ipmi_address(node)
-    node_info = node_cache.start_introspection(node.uuid,
-                                               bmc_address=bmc_address,
-                                               ironic=ironic)
-
-    def _handle_exceptions(fut):
-        try:
-            fut.result()
-        except utils.Error as exc:
-            # Logging has already happened in Error.__init__
-            node_info.finished(error=str(exc))
-        except Exception as exc:
-            msg = _('Unexpected exception in background introspection thread')
-            LOG.exception(msg, node_info=node_info)
-            node_info.finished(error=msg)
-
-    future = utils.executor().submit(_background_introspect, ironic,
-                                     node_info)
-    future.add_done_callback(_handle_exceptions)
-
-
-def _background_introspect(ironic, node_info):
-    global _LAST_INTROSPECTION_TIME
-
-    if re.match(CONF.introspection_delay_drivers, node_info.node().driver):
-        LOG.debug('Attempting to acquire lock on last introspection time')
-        with _LAST_INTROSPECTION_LOCK:
-            delay = (_LAST_INTROSPECTION_TIME - time.time()
-                     + CONF.introspection_delay)
-            if delay > 0:
-                LOG.debug('Waiting %d seconds before sending the next '
-                          'node on introspection', delay)
-                time.sleep(delay)
-            _LAST_INTROSPECTION_TIME = time.time()
-
-    node_info.acquire_lock()
-    try:
-        _background_introspect_locked(node_info, ironic)
-    finally:
-        node_info.release_lock()
-
-
-@node_cache.fsm_transition(istate.Events.wait)
-def _background_introspect_locked(node_info, ironic):
-    # TODO(dtantsur): pagination
-    macs = list(node_info.ports())
-    if macs:
-        node_info.add_attribute(node_cache.MACS_ATTRIBUTE, macs)
-        LOG.info('Whitelisting MACs %s on the firewall', macs,
-                 node_info=node_info)
-        firewall.update_filters(ironic)
-
-    attrs = node_info.attributes
-    if CONF.processing.node_not_found_hook is None and not attrs:
-        raise utils.Error(
-            _('No lookup attributes were found, inspector won\'t '
-              'be able to find it after introspection, consider creating '
-              'ironic ports or providing an IPMI address'),
-            node_info=node_info)
-
-    LOG.info('The following attributes will be used for look up: %s',
-             attrs, node_info=node_info)
-
-    try:
-        ironic.node.set_boot_device(node_info.uuid, 'pxe',
-                                    persistent=False)
-    except Exception as exc:
-        LOG.warning('Failed to set boot device to PXE: %s',
-                    exc, node_info=node_info)
-
-    try:
-        ironic.node.set_power_state(node_info.uuid, 'reboot')
-    except Exception as exc:
-        # NOTE: the message must be %-formatted with the exception before
-        # being passed to Error, otherwise exc is consumed as the next
-        # positional argument.
-        raise utils.Error(_('Failed to power on the node, check its '
-                            'power management configuration: %s') % exc,
-                          node_info=node_info)
-    LOG.info('Introspection started successfully',
-             node_info=node_info)
-
-
-def abort(node_id, token=None):
-    """Abort running introspection.
-
-
-def abort(node_id, token=None):
-    """Abort running introspection.
-
-    :param node_id: node UUID or name
-    :param token: authentication token
-    :raises: Error
-    """
-    LOG.debug('Aborting introspection for node %s', node_id)
-    ironic = ir_utils.get_client(token)
-    node_info = node_cache.get_node(node_id, ironic=ironic, locked=False)
-
-    # check pending operations
-    locked = node_info.acquire_lock(blocking=False)
-    if not locked:
-        # Node busy --- cannot abort at the moment
-        raise utils.Error(_('Node is locked, please retry later'),
-                          node_info=node_info, code=409)
-
-    utils.executor().submit(_abort, node_info, ironic)
-
-
-@node_cache.release_lock
-@node_cache.fsm_transition(istate.Events.abort, reentrant=False)
-def _abort(node_info, ironic):
-    # runs in background
-    if node_info.finished_at is not None:
-        # introspection already finished; nothing to do
-        LOG.info('Cannot abort introspection as it is already '
-                 'finished', node_info=node_info)
-        node_info.release_lock()
-        return
-
-    # finish the introspection
-    LOG.debug('Forcing power-off', node_info=node_info)
-    try:
-        ironic.node.set_power_state(node_info.uuid, 'off')
-    except Exception as exc:
-        LOG.warning('Failed to power off node: %s', exc,
-                    node_info=node_info)
-
-    node_info.finished(error=_('Canceled by operator'))
-
-    # block this node from PXE booting the introspection image
-    try:
-        firewall.update_filters(ironic)
-    except Exception as exc:
-        # Note(mkovacik): this will be retried in the firewall update
-        # periodic task; we continue aborting
-        LOG.warning('Failed to update firewall filters: %s', exc,
-                    node_info=node_info)
-    LOG.info('Introspection aborted', node_info=node_info)
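# NOTE: illustrative sketch, not part of the original module. abort() above
# answers HTTP 409 rather than blocking because a non-blocking acquire on a
# held lock fails immediately (plain threading.Lock standing in for the
# per-node lock):

import threading

node_lock = threading.Lock()
node_lock.acquire()  # some other operation currently holds the node

if not node_lock.acquire(blocking=False):
    print('409 Conflict: node is locked, retry later')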
- -"""Introspection state.""" - -from automaton import machines - - -class States(object): - """States of an introspection.""" - # received introspection data from a nonexistent node - # active - the inspector performs an operation on the node - enrolling = 'enrolling' - # an error appeared in a previous introspection state - # passive - the inspector doesn't perform any operation on the node - error = 'error' - # introspection finished successfully - # passive - finished = 'finished' - # processing introspection data from the node - # active - processing = 'processing' - # processing stored introspection data from the node - # active - reapplying = 'reapplying' - # received a request to start node introspection - # active - starting = 'starting' - # waiting for node introspection data - # passive - waiting = 'waiting' - - @classmethod - def all(cls): - """Return a list of all states.""" - return [cls.starting, cls.waiting, cls.processing, cls.finished, - cls.error, cls.reapplying, cls.enrolling] - - -class Events(object): - """Events that change introspection state.""" - # cancel a waiting node introspection - # API, user - abort = 'abort' - # mark an introspection failed - # internal - error = 'error' - # mark an introspection finished - # internal - finish = 'finish' - # process node introspection data - # API, introspection image - process = 'process' - # process stored node introspection data - # API, user - reapply = 'reapply' - # initialize node introspection - # API, user - start = 'start' - # mark an introspection timed-out waiting for data - # internal - timeout = 'timeout' - # mark an introspection waiting for image data - # internal - wait = 'wait' - - @classmethod - def all(cls): - """Return a list of all events.""" - return [cls.process, cls.reapply, cls.timeout, cls.wait, cls.abort, - cls.error, cls.finish] - -# Error transition is allowed in any state. 
-
-# Error transition is allowed in any state.
-State_space = [
-    {
-        'name': States.enrolling,
-        'next_states': {
-            Events.error: States.error,
-            Events.process: States.processing,
-            Events.timeout: States.error,
-        },
-    },
-    {
-        'name': States.error,
-        'next_states': {
-            Events.abort: States.error,
-            Events.error: States.error,
-            Events.reapply: States.reapplying,
-            Events.start: States.starting,
-        },
-    },
-    {
-        'name': States.finished,
-        'next_states': {
-            Events.finish: States.finished,
-            Events.reapply: States.reapplying,
-            Events.start: States.starting
-        },
-    },
-    {
-        'name': States.processing,
-        'next_states': {
-            Events.error: States.error,
-            Events.finish: States.finished,
-            Events.timeout: States.error,
-        },
-    },
-    {
-        'name': States.reapplying,
-        'next_states': {
-            Events.error: States.error,
-            Events.finish: States.finished,
-            Events.reapply: States.reapplying,
-            Events.timeout: States.error,
-        },
-    },
-    {
-        'name': States.starting,
-        'next_states': {
-            Events.error: States.error,
-            Events.start: States.starting,
-            Events.wait: States.waiting,
-            Events.timeout: States.error
        },
-    },
-    {
-        'name': States.waiting,
-        'next_states': {
-            Events.abort: States.error,
-            Events.process: States.processing,
-            Events.start: States.starting,
-            Events.timeout: States.error,
-        },
-    },
-]
-
-FSM = machines.FiniteMachine.build(State_space)
-FSM.default_start_state = States.finished
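# NOTE: illustrative sketch, not part of the original module. How the machine
# built above is consumed (node_cache copies it per node); a happy-path walk
# through the states using the automaton API this file already relies on:

fsm = FSM.copy(shallow=True)
fsm.initialize(start_state=States.starting)
for event in (Events.wait, Events.process, Events.finish):
    fsm.process_event(event)  # starting -> waiting -> processing -> finished
print(fsm.current_state)      # 'finished'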
diff --git a/ironic_inspector/main.py b/ironic_inspector/main.py
deleted file mode 100644
index 665c57a..0000000
--- a/ironic_inspector/main.py
+++ /dev/null
@@ -1,324 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-import os
-import re
-
-import flask
-from oslo_config import cfg
-from oslo_utils import uuidutils
-import werkzeug
-
-from ironic_inspector import api_tools
-from ironic_inspector.common.i18n import _
-from ironic_inspector.common import ironic as ir_utils
-from ironic_inspector.common import swift
-from ironic_inspector import conf  # noqa
-from ironic_inspector import introspect
-from ironic_inspector import node_cache
-from ironic_inspector import process
-from ironic_inspector import rules
-from ironic_inspector import utils
-
-CONF = cfg.CONF
-
-
-app = flask.Flask(__name__)
-LOG = utils.getProcessingLogger(__name__)
-
-MINIMUM_API_VERSION = (1, 0)
-CURRENT_API_VERSION = (1, 12)
-DEFAULT_API_VERSION = CURRENT_API_VERSION
-_LOGGING_EXCLUDED_KEYS = ('logs',)
-
-
-def _get_version():
-    ver = flask.request.headers.get(conf.VERSION_HEADER,
-                                    _DEFAULT_API_VERSION)
-    try:
-        requested = tuple(int(x) for x in ver.split('.'))
-    except (ValueError, TypeError):
-        return error_response(_('Malformed API version: expected string '
-                                'in form of X.Y'), code=400)
-    return requested
-
-
-def _format_version(ver):
-    return '%d.%d' % ver
-
-
-_DEFAULT_API_VERSION = _format_version(DEFAULT_API_VERSION)
-
-
-def error_response(exc, code=500):
-    res = flask.jsonify(error={'message': str(exc)})
-    res.status_code = code
-    LOG.debug('Returning error to client: %s', exc)
-    return res
-
-
-def convert_exceptions(func):
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        try:
-            return func(*args, **kwargs)
-        except utils.Error as exc:
-            return error_response(exc, exc.http_code)
-        except werkzeug.exceptions.HTTPException as exc:
-            return error_response(exc, exc.code or 400)
-        except Exception as exc:
-            LOG.exception('Internal server error')
-            msg = _('Internal server error')
-            if CONF.debug:
-                msg += ' (%s): %s' % (exc.__class__.__name__, exc)
-            return error_response(msg)
-
-    return wrapper
-
-
-@app.before_request
-def check_api_version():
-    requested = _get_version()
-
-    if requested < MINIMUM_API_VERSION or requested > CURRENT_API_VERSION:
-        return error_response(_('Unsupported API version %(requested)s, '
-                                'supported range is %(min)s to %(max)s') %
-                              {'requested': _format_version(requested),
-                               'min': _format_version(MINIMUM_API_VERSION),
-                               'max': _format_version(CURRENT_API_VERSION)},
-                              code=406)
-
-
-@app.after_request
-def add_version_headers(res):
-    res.headers[conf.MIN_VERSION_HEADER] = '%s.%s' % MINIMUM_API_VERSION
-    res.headers[conf.MAX_VERSION_HEADER] = '%s.%s' % CURRENT_API_VERSION
-    return res
-
-
-def create_link_object(urls):
-    links = []
-    for url in urls:
-        links.append({"rel": "self",
-                      "href": os.path.join(flask.request.url_root, url)})
-    return links
-
-
-def generate_resource_data(resources):
-    data = []
-    for resource in resources:
-        item = {}
-        item['name'] = str(resource).split('/')[-1]
-        item['links'] = create_link_object([str(resource)[1:]])
-        data.append(item)
-    return data
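# NOTE: illustrative sketch, not part of the original module. A hypothetical
# client exercising the version negotiation implemented by _get_version() and
# add_version_headers() above; the header names are assumptions based on
# conf.VERSION_HEADER and may differ:

import requests

resp = requests.get(
    'http://127.0.0.1:5050/v1',
    headers={'X-OpenStack-Ironic-Inspector-API-Version': '1.12'})
print(resp.headers.get('X-OpenStack-Ironic-Inspector-API-Maximum-Version'))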
-
-
-def generate_introspection_status(node):
-    """Return a dict representing current node status.
-
-    :param node: a NodeInfo instance
-    :return: dictionary
-    """
-    started_at = node.started_at.isoformat()
-    finished_at = node.finished_at.isoformat() if node.finished_at else None
-
-    status = {}
-    status['uuid'] = node.uuid
-    status['finished'] = bool(node.finished_at)
-    status['state'] = node.state
-    status['started_at'] = started_at
-    status['finished_at'] = finished_at
-    status['error'] = node.error
-    status['links'] = create_link_object(
-        ["v%s/introspection/%s" % (CURRENT_API_VERSION[0], node.uuid)])
-    return status
-
-
-@app.route('/', methods=['GET'])
-@convert_exceptions
-def api_root():
-    versions = [
-        {
-            "status": "CURRENT",
-            "id": '%s.%s' % CURRENT_API_VERSION,
-        },
-    ]
-
-    for version in versions:
-        version['links'] = create_link_object(
-            ["v%s" % version['id'].split('.')[0]])
-
-    return flask.jsonify(versions=versions)
-
-
-@app.route('/<version>', methods=['GET'])
-@convert_exceptions
-def version_root(version):
-    pat = re.compile("^\/%s\/[^\/]*?$" % version)
-
-    resources = []
-    for url in app.url_map.iter_rules():
-        if pat.match(str(url)):
-            resources.append(url)
-
-    if not resources:
-        raise utils.Error(_('Version not found.'), code=404)
-
-    return flask.jsonify(resources=generate_resource_data(resources))
-
-
-@app.route('/v1/continue', methods=['POST'])
-@convert_exceptions
-def api_continue():
-    data = flask.request.get_json(force=True)
-    if not isinstance(data, dict):
-        raise utils.Error(_('Invalid data: expected a JSON object, got %s') %
-                          data.__class__.__name__)
-
-    logged_data = {k: (v if k not in _LOGGING_EXCLUDED_KEYS else '<hidden>')
-                   for k, v in data.items()}
-    LOG.debug("Received data from the ramdisk: %s", logged_data,
-              data=data)
-
-    return flask.jsonify(process.process(data))
-
-
-# TODO(sambetts) Add API discovery for this endpoint
-@app.route('/v1/introspection/<node_id>', methods=['GET', 'POST'])
-@convert_exceptions
-def api_introspection(node_id):
-    utils.check_auth(flask.request)
-
-    if flask.request.method == 'POST':
-        introspect.introspect(node_id,
-                              token=flask.request.headers.get('X-Auth-Token'))
-        return '', 202
-    else:
-        node_info = node_cache.get_node(node_id)
-        return flask.json.jsonify(generate_introspection_status(node_info))
-
-
-@app.route('/v1/introspection', methods=['GET'])
-@convert_exceptions
-def api_introspection_statuses():
-    utils.check_auth(flask.request)
-
-    nodes = node_cache.get_node_list(
-        marker=api_tools.marker_field(),
-        limit=api_tools.limit_field(default=CONF.api_max_limit)
-    )
-    data = {
-        'introspection': [generate_introspection_status(node)
-                          for node in nodes]
-    }
-    return flask.json.jsonify(data)
-
-
-@app.route('/v1/introspection/<node_id>/abort', methods=['POST'])
-@convert_exceptions
-def api_introspection_abort(node_id):
-    utils.check_auth(flask.request)
-    introspect.abort(node_id, token=flask.request.headers.get('X-Auth-Token'))
-    return '', 202
-
-
-@app.route('/v1/introspection/<node_id>/data', methods=['GET'])
-@convert_exceptions
-def api_introspection_data(node_id):
-    utils.check_auth(flask.request)
-
-    if CONF.processing.store_data == 'swift':
-        if not uuidutils.is_uuid_like(node_id):
-            node = ir_utils.get_node(node_id, fields=['uuid'])
-            node_id = node.uuid
-        res = swift.get_introspection_data(node_id)
-        return res, 200, {'Content-Type': 'application/json'}
-    else:
-        return error_response(_('Inspector is not configured to store data. '
-                                'Set the [processing] store_data '
-                                'configuration option to change this.'),
-                              code=404)
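# NOTE: illustrative sketch, not part of the original module. Fetching stored
# introspection data through the endpoint above; the node UUID and token are
# hypothetical, and [processing]store_data = swift must be configured:

import requests

uuid = '5a2b4fc6-1d5a-4c46-8a44-8e0a7e2caa1b'
resp = requests.get('http://127.0.0.1:5050/v1/introspection/%s/data' % uuid,
                    headers={'X-Auth-Token': 'hypothetical-token'})
print(resp.status_code)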
-
-
-@app.route('/v1/introspection/<node_id>/data/unprocessed', methods=['POST'])
-@convert_exceptions
-def api_introspection_reapply(node_id):
-    utils.check_auth(flask.request)
-
-    if flask.request.content_length:
-        return error_response(_('User data processing is not '
-                                'supported yet'), code=400)
-
-    if CONF.processing.store_data == 'swift':
-        process.reapply(node_id)
-        return '', 202
-    else:
-        return error_response(_('Inspector is not configured to store'
-                                ' data. Set the [processing] '
-                                'store_data configuration option to '
-                                'change this.'), code=400)
-
-
-def rule_repr(rule, short):
-    result = rule.as_dict(short=short)
-    result['links'] = [{
-        'href': flask.url_for('api_rule', uuid=result['uuid']),
-        'rel': 'self'
-    }]
-    return result
-
-
-@app.route('/v1/rules', methods=['GET', 'POST', 'DELETE'])
-@convert_exceptions
-def api_rules():
-    utils.check_auth(flask.request)
-
-    if flask.request.method == 'GET':
-        res = [rule_repr(rule, short=True) for rule in rules.get_all()]
-        return flask.jsonify(rules=res)
-    elif flask.request.method == 'DELETE':
-        rules.delete_all()
-        return '', 204
-    else:
-        body = flask.request.get_json(force=True)
-        if body.get('uuid') and not uuidutils.is_uuid_like(body['uuid']):
-            raise utils.Error(_('Invalid UUID value'), code=400)
-
-        rule = rules.create(conditions_json=body.get('conditions', []),
-                            actions_json=body.get('actions', []),
-                            uuid=body.get('uuid'),
-                            description=body.get('description'))
-
-        response_code = (200 if _get_version() < (1, 6) else 201)
-        return flask.make_response(
-            flask.jsonify(rule_repr(rule, short=False)), response_code)
-
-
-@app.route('/v1/rules/<uuid>', methods=['GET', 'DELETE'])
-@convert_exceptions
-def api_rule(uuid):
-    utils.check_auth(flask.request)
-
-    if flask.request.method == 'GET':
-        rule = rules.get(uuid)
-        return flask.jsonify(rule_repr(rule, short=False))
-    else:
-        rules.delete(uuid)
-        return '', 204
-
-
-@app.errorhandler(404)
-def handle_404(error):
-    return error_response(error, code=404)
diff --git a/ironic_inspector/migrations/env.py b/ironic_inspector/migrations/env.py
deleted file mode 100644
index 71b186f..0000000
--- a/ironic_inspector/migrations/env.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2015 Cisco Systems
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from logging.config import fileConfig
-
-from alembic import context
-
-from ironic_inspector import db
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-ironic_inspector_config = config.ironic_inspector_config
-
-# Interpret the config file for Python logging.
-# This line sets up loggers basically.
-fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = db.Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = ironic_inspector_config.database.connection - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - session = db.get_writer_session() - with session.connection() as connection: - context.configure( - connection=connection, - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/ironic_inspector/migrations/script.py.mako b/ironic_inspector/migrations/script.py.mako deleted file mode 100644 index 1c3fcb4..0000000 --- a/ironic_inspector/migrations/script.py.mako +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -def upgrade(): - ${upgrades if upgrades else "pass"} diff --git a/ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py b/ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py deleted file mode 100644 index ee052f7..0000000 --- a/ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2015 Cisco Systems, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""inital_db_schema - -Revision ID: 578f84f38d -Revises: -Create Date: 2015-09-15 14:52:22.448944 - -""" - -# revision identifiers, used by Alembic. -revision = '578f84f38d' -down_revision = None -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'nodes', - sa.Column('uuid', sa.String(36), primary_key=True), - sa.Column('started_at', sa.Float, nullable=True), - sa.Column('finished_at', sa.Float, nullable=True), - sa.Column('error', sa.Text, nullable=True), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - - op.create_table( - 'attributes', - sa.Column('name', sa.String(255), primary_key=True), - sa.Column('value', sa.String(255), primary_key=True), - sa.Column('uuid', sa.String(36), sa.ForeignKey('nodes.uuid')), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - - op.create_table( - 'options', - sa.Column('uuid', sa.String(36), sa.ForeignKey('nodes.uuid'), - primary_key=True), - sa.Column('name', sa.String(255), primary_key=True), - sa.Column('value', sa.Text), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) diff --git a/ironic_inspector/migrations/versions/882b2d84cb1b_attribute_constraints_relaxing.py b/ironic_inspector/migrations/versions/882b2d84cb1b_attribute_constraints_relaxing.py deleted file mode 100644 index 864ac12..0000000 --- a/ironic_inspector/migrations/versions/882b2d84cb1b_attribute_constraints_relaxing.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""attribute_constraints_relaxing - -Revision ID: 882b2d84cb1b -Revises: d00d6e3f38c4 -Create Date: 2017-01-13 11:27:00.053286 - -""" - -# revision identifiers, used by Alembic. 
-revision = '882b2d84cb1b' -down_revision = 'd00d6e3f38c4' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa -from sqlalchemy.engine.reflection import Inspector as insp - -from oslo_utils import uuidutils - -ATTRIBUTES = 'attributes' -NODES = 'nodes' -NAME = 'name' -VALUE = 'value' -UUID = 'uuid' -NODE_UUID = 'node_uuid' - -naming_convention = { - "pk": 'pk_%(table_name)s', - "fk": 'fk_%(table_name)s' -} - - -def upgrade(): - - connection = op.get_bind() - - inspector = insp.from_engine(connection) - - pk_constraint = (inspector.get_pk_constraint(ATTRIBUTES).get('name') - or naming_convention['pk'] % {'table_name': ATTRIBUTES}) - fk_constraint = (inspector.get_foreign_keys(ATTRIBUTES)[0].get('name') - or naming_convention['fk'] % {'table_name': ATTRIBUTES}) - - columns_meta = inspector.get_columns(ATTRIBUTES) - name_type = {meta.get('type') for meta in columns_meta - if meta['name'] == NAME}.pop() - value_type = {meta.get('type') for meta in columns_meta - if meta['name'] == VALUE}.pop() - - node_uuid_column = sa.Column(NODE_UUID, sa.String(36)) - op.add_column(ATTRIBUTES, node_uuid_column) - - attributes = sa.table(ATTRIBUTES, node_uuid_column, - sa.Column(UUID, sa.String(36))) - - with op.batch_alter_table(ATTRIBUTES, - naming_convention=naming_convention) as batch_op: - batch_op.drop_constraint(fk_constraint, type_='foreignkey') - - rows = connection.execute(sa.select([attributes.c.uuid, - attributes.c.node_uuid])) - - for row in rows: - # move uuid to node_uuid, reuse uuid as a new primary key - connection.execute( - attributes.update().where(attributes.c.uuid == row.uuid). - values(node_uuid=row.uuid, uuid=uuidutils.generate_uuid()) - ) - - with op.batch_alter_table(ATTRIBUTES, - naming_convention=naming_convention) as batch_op: - batch_op.drop_constraint(pk_constraint, type_='primary') - batch_op.create_primary_key(pk_constraint, [UUID]) - batch_op.create_foreign_key('fk_node_attribute', NODES, - [NODE_UUID], [UUID]) - batch_op.alter_column('name', nullable=False, type_=name_type) - batch_op.alter_column('value', nullable=True, type_=value_type) diff --git a/ironic_inspector/migrations/versions/d00d6e3f38c4_change_created_finished_at_type.py b/ironic_inspector/migrations/versions/d00d6e3f38c4_change_created_finished_at_type.py deleted file mode 100644 index 15fad6a..0000000 --- a/ironic_inspector/migrations/versions/d00d6e3f38c4_change_created_finished_at_type.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Change created|finished_at type to DateTime - -Revision ID: d00d6e3f38c4 -Revises: d2e48801c8ef -Create Date: 2016-12-15 17:18:10.728695 - -""" - -# revision identifiers, used by Alembic. 
-revision = 'd00d6e3f38c4' -down_revision = 'd2e48801c8ef' -branch_labels = None -depends_on = None - -import datetime - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - started_at = sa.Column('started_at', sa.types.Float, nullable=True) - finished_at = sa.Column('finished_at', sa.types.Float, nullable=True) - temp_started_at = sa.Column("temp_started_at", sa.types.DateTime, - nullable=True) - temp_finished_at = sa.Column("temp_finished_at", sa.types.DateTime, - nullable=True) - uuid = sa.Column("uuid", sa.String(36), primary_key=True) - - op.add_column("nodes", temp_started_at) - op.add_column("nodes", temp_finished_at) - - t = sa.table('nodes', started_at, finished_at, - temp_started_at, temp_finished_at, uuid) - - conn = op.get_bind() - rows = conn.execute(sa.select([t.c.started_at, t.c.finished_at, t.c.uuid])) - for row in rows: - temp_started = datetime.datetime.utcfromtimestamp(row['started_at']) - temp_finished = row['finished_at'] - # Note(milan) this is just a precaution; sa.null shouldn't happen here - if temp_finished is not None: - temp_finished = datetime.datetime.utcfromtimestamp(temp_finished) - conn.execute(t.update().where(t.c.uuid == row.uuid).values( - temp_started_at=temp_started, temp_finished_at=temp_finished)) - - with op.batch_alter_table('nodes') as batch_op: - batch_op.drop_column('started_at') - batch_op.drop_column('finished_at') - batch_op.alter_column('temp_started_at', - existing_type=sa.types.DateTime, - nullable=True, - new_column_name='started_at') - batch_op.alter_column('temp_finished_at', - existing_type=sa.types.DateTime, - nullable=True, - new_column_name='finished_at') diff --git a/ironic_inspector/migrations/versions/d2e48801c8ef_introducing_node_state_attribute.py b/ironic_inspector/migrations/versions/d2e48801c8ef_introducing_node_state_attribute.py deleted file mode 100644 index cf3fb9f..0000000 --- a/ironic_inspector/migrations/versions/d2e48801c8ef_introducing_node_state_attribute.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Introducing Node.state attribute - -Revision ID: d2e48801c8ef -Revises: e169a4a81d88 -Create Date: 2016-07-29 10:10:32.351661 - -""" - -# revision identifiers, used by Alembic. 
-revision = 'd2e48801c8ef' -down_revision = 'e169a4a81d88' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa -from sqlalchemy import sql - -from ironic_inspector import introspection_state as istate - -Node = sql.table('nodes', - sql.column('error', sa.String), - sql.column('state', sa.Enum(*istate.States.all()))) - - -def upgrade(): - op.add_column('nodes', sa.Column('version_id', sa.String(36), - server_default='')) - op.add_column('nodes', sa.Column('state', sa.Enum(*istate.States.all(), - name='node_state'), - nullable=False, - default=istate.States.finished, - server_default=istate.States.finished)) - # correct the state: finished -> error if Node.error is not null - stmt = Node.update().where(Node.c.error != sql.null()).values( - {'state': op.inline_literal(istate.States.error)}) - op.execute(stmt) diff --git a/ironic_inspector/migrations/versions/d588418040d_add_rules.py b/ironic_inspector/migrations/versions/d588418040d_add_rules.py deleted file mode 100644 index 7b79704..0000000 --- a/ironic_inspector/migrations/versions/d588418040d_add_rules.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Add Rules - -Revision ID: d588418040d -Revises: 578f84f38d -Create Date: 2015-09-21 14:31:03.048455 - -""" - -# revision identifiers, used by Alembic. 
-revision = 'd588418040d' -down_revision = '578f84f38d' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - -from oslo_db.sqlalchemy import types - - -def upgrade(): - op.create_table( - 'rules', - sa.Column('uuid', sa.String(36), primary_key=True), - sa.Column('created_at', sa.DateTime, nullable=False), - sa.Column('description', sa.Text), - sa.Column('disabled', sa.Boolean, default=False), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - - op.create_table( - 'rule_conditions', - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('rule', sa.String(36), sa.ForeignKey('rules.uuid')), - sa.Column('op', sa.String(255), nullable=False), - sa.Column('multiple', sa.String(255), nullable=False), - sa.Column('field', sa.Text), - sa.Column('params', types.JsonEncodedDict), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - - op.create_table( - 'rule_actions', - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('rule', sa.String(36), sa.ForeignKey('rules.uuid')), - sa.Column('action', sa.String(255), nullable=False), - sa.Column('params', types.JsonEncodedDict), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) diff --git a/ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.py b/ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.py deleted file mode 100644 index 001de0a..0000000 --- a/ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Add invert field to rule condition - -Revision ID: e169a4a81d88 -Revises: d588418040d -Create Date: 2016-02-16 11:19:29.715615 - -""" - -# revision identifiers, used by Alembic. -revision = 'e169a4a81d88' -down_revision = 'd588418040d' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('rule_conditions', sa.Column('invert', sa.Boolean(), - nullable=True, default=False)) diff --git a/ironic_inspector/node_cache.py b/ironic_inspector/node_cache.py deleted file mode 100644 index d0da563..0000000 --- a/ironic_inspector/node_cache.py +++ /dev/null @@ -1,954 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Cache for nodes currently under introspection.""" - -import collections -import contextlib -import copy -import datetime -import json - -from automaton import exceptions as automaton_errors -from ironicclient import exceptions -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_db.sqlalchemy import utils as db_utils -from oslo_utils import excutils -from oslo_utils import reflection -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six -from sqlalchemy.orm import exc as orm_errors -from sqlalchemy import text - -from ironic_inspector.common.i18n import _ -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import db -from ironic_inspector import introspection_state as istate -from ironic_inspector import utils - - -CONF = cfg.CONF - - -LOG = utils.getProcessingLogger(__name__) - - -MACS_ATTRIBUTE = 'mac' -_LOCK_TEMPLATE = 'node-%s' -_SEMAPHORES = lockutils.Semaphores() - - -def _get_lock(uuid): - """Get lock object for a given node UUID.""" - return lockutils.internal_lock(_LOCK_TEMPLATE % uuid, - semaphores=_SEMAPHORES) - - -def _get_lock_ctx(uuid): - """Get context manager yielding a lock object for a given node UUID.""" - return lockutils.lock(_LOCK_TEMPLATE % uuid, semaphores=_SEMAPHORES) - - -class NodeInfo(object): - """Record about a node in the cache. - - This class optionally allows to acquire a lock on a node. Note that the - class instance itself is NOT thread-safe, you need to create a new instance - for every thread. - """ - - def __init__(self, uuid, version_id=None, state=None, started_at=None, - finished_at=None, error=None, node=None, ports=None, - ironic=None, lock=None): - self.uuid = uuid - self.started_at = started_at - self.finished_at = finished_at - self.error = error - self.invalidate_cache() - self._version_id = version_id - self._state = state - self._node = node - if ports is not None and not isinstance(ports, dict): - ports = {p.address: p for p in ports} - self._ports = ports - self._attributes = None - self._ironic = ironic - # This is a lock on a node UUID, not on a NodeInfo object - self._lock = lock if lock is not None else _get_lock(uuid) - # Whether lock was acquired using this NodeInfo object - self._locked = lock is not None - self._fsm = None - - def __del__(self): - if self._locked: - LOG.warning('BUG: node lock was not released by the moment ' - 'node info object is deleted') - self._lock.release() - - def __str__(self): - """Self represented as an UUID and a state.""" - parts = [self.uuid] - if self._state: - parts += [_('state'), self._state] - return ' '.join(parts) - - def acquire_lock(self, blocking=True): - """Acquire a lock on the associated node. - - Exits with success if a lock is already acquired using this NodeInfo - object. - - :param blocking: if True, wait for lock to be acquired, otherwise - return immediately. - :returns: boolean value, whether lock was acquired successfully - """ - if self._locked: - return True - - LOG.debug('Attempting to acquire lock', node_info=self) - if self._lock.acquire(blocking): - self._locked = True - LOG.debug('Successfully acquired lock', node_info=self) - return True - else: - LOG.debug('Unable to acquire lock', node_info=self) - return False - - def release_lock(self): - """Release a lock on a node. - - Does nothing if lock was not acquired using this NodeInfo object. 
- """ - if self._locked: - LOG.debug('Successfully released lock', node_info=self) - self._lock.release() - self._locked = False - - @property - def version_id(self): - """Get the version id""" - if self._version_id is None: - row = db.model_query(db.Node).get(self.uuid) - if row is None: - raise utils.NotFoundInCacheError(_('Node not found in the ' - 'cache'), node_info=self) - self._version_id = row.version_id - return self._version_id - - def _set_version_id(self, value, session): - row = self._row(session) - row.version_id = value - row.save(session) - self._version_id = value - - def _row(self, session=None): - """Get a row from the database with self.uuid and self.version_id""" - try: - # race condition if version_id changed outside of this node_info - return db.model_query(db.Node, session=session).filter_by( - uuid=self.uuid, version_id=self.version_id).one() - except (orm_errors.NoResultFound, orm_errors.StaleDataError): - raise utils.NodeStateRaceCondition(node_info=self) - - def _commit(self, **fields): - """Commit the fields into the DB.""" - LOG.debug('Committing fields: %s', fields, node_info=self) - with db.ensure_transaction() as session: - self._set_version_id(uuidutils.generate_uuid(), session) - row = self._row(session) - row.update(fields) - - def commit(self): - """Commit current node status into the database.""" - # state and version_id are updated separately - self._commit(started_at=self.started_at, finished_at=self.finished_at, - error=self.error) - - @property - def state(self): - """State of the node_info object.""" - if self._state is None: - row = self._row() - self._state = row.state - return self._state - - def _set_state(self, value): - self._commit(state=value) - self._state = value - - def _get_fsm(self): - """Get an fsm instance initialized with self.state.""" - if self._fsm is None: - self._fsm = istate.FSM.copy(shallow=True) - self._fsm.initialize(start_state=self.state) - return self._fsm - - @contextlib.contextmanager - def _fsm_ctx(self): - fsm = self._get_fsm() - try: - yield fsm - finally: - if fsm.current_state != self.state: - LOG.info('Updating node state: %(current)s --> %(new)s', - {'current': self.state, 'new': fsm.current_state}, - node_info=self) - self._set_state(fsm.current_state) - - def fsm_event(self, event, strict=False): - """Update node_info.state based on a fsm.process_event(event) call. - - An AutomatonException triggers an error event. - If strict, node_info.finished(error=str(exc)) is called with the - AutomatonException instance and a EventError raised. 
- - :param event: an event to process by the fsm - :strict: whether to fail the introspection upon an invalid event - :raises: NodeStateInvalidEvent - """ - with self._fsm_ctx() as fsm: - LOG.debug('Executing fsm(%(state)s).process_event(%(event)s)', - {'state': fsm.current_state, 'event': event}, - node_info=self) - try: - fsm.process_event(event) - except automaton_errors.NotFound as exc: - msg = _('Invalid event: %s') % exc - if strict: - LOG.error(msg, node_info=self) - # assuming an error event is always possible - fsm.process_event(istate.Events.error) - self.finished(error=str(exc)) - else: - LOG.warning(msg, node_info=self) - raise utils.NodeStateInvalidEvent(str(exc), node_info=self) - - @property - def options(self): - """Node introspection options as a dict.""" - if self._options is None: - rows = db.model_query(db.Option).filter_by( - uuid=self.uuid) - self._options = {row.name: json.loads(row.value) - for row in rows} - return self._options - - @property - def attributes(self): - """Node look up attributes as a dict.""" - if self._attributes is None: - self._attributes = {} - rows = db.model_query(db.Attribute).filter_by( - node_uuid=self.uuid) - for row in rows: - self._attributes.setdefault(row.name, []).append(row.value) - return self._attributes - - @property - def ironic(self): - """Ironic client instance.""" - if self._ironic is None: - self._ironic = ir_utils.get_client() - return self._ironic - - def set_option(self, name, value): - """Set an option for a node.""" - encoded = json.dumps(value) - self.options[name] = value - with db.ensure_transaction() as session: - db.model_query(db.Option, session=session).filter_by( - uuid=self.uuid, name=name).delete() - db.Option(uuid=self.uuid, name=name, value=encoded).save( - session) - - def finished(self, error=None): - """Record status for this node. - - Also deletes look up attributes from the cache. - - :param error: error message - """ - self.release_lock() - - self.finished_at = timeutils.utcnow() - self.error = error - - with db.ensure_transaction() as session: - self._commit(finished_at=self.finished_at, error=self.error) - db.model_query(db.Attribute, session=session).filter_by( - node_uuid=self.uuid).delete() - db.model_query(db.Option, session=session).filter_by( - uuid=self.uuid).delete() - - def add_attribute(self, name, value, session=None): - """Store look up attribute for a node in the database. 
-
-        :param name: attribute name
-        :param value: attribute value or list of possible values
-        :param session: optional existing database session
-        """
-        if not isinstance(value, list):
-            value = [value]
-
-        with db.ensure_transaction(session) as session:
-            for v in value:
-                db.Attribute(uuid=uuidutils.generate_uuid(), name=name,
-                             value=v, node_uuid=self.uuid).save(session)
-            # Invalidate attributes so they're loaded on next usage
-            self._attributes = None
-
-    @classmethod
-    def from_row(cls, row, ironic=None, lock=None, node=None):
-        """Construct NodeInfo from a database row."""
-        fields = {key: row[key]
-                  for key in ('uuid', 'version_id', 'state', 'started_at',
-                              'finished_at', 'error')}
-        return cls(ironic=ironic, lock=lock, node=node, **fields)
-
-    def invalidate_cache(self):
-        """Clear all cached info, so that it's reloaded next time."""
-        self._options = None
-        self._node = None
-        self._ports = None
-        self._attributes = None
-        self._ironic = None
-        self._fsm = None
-        self._state = None
-        self._version_id = None
-
-    def node(self, ironic=None):
-        """Get Ironic node object associated with the cached node record."""
-        if self._node is None:
-            ironic = ironic or self.ironic
-            self._node = ir_utils.get_node(self.uuid, ironic=ironic)
-        return self._node
-
-    def create_ports(self, ports, ironic=None):
-        """Create one or several ports for this node.
-
-        :param ports: list of ports with all their attributes,
-                      e.g. [{'mac': xx, 'ip': xx, 'client_id': None},
-                      {'mac': xx, 'ip': None, 'client_id': None}].
-                      It also supports the old style of a list of MACs.
-                      A warning is issued if a port already exists on a node.
-
-        :param ironic: Ironic client to use instead of self.ironic
-        """
-        existing_macs = []
-        for port in ports:
-            mac = port
-            extra = {}
-            pxe_enabled = True
-            if isinstance(port, dict):
-                mac = port['mac']
-                client_id = port.get('client_id')
-                if client_id:
-                    extra = {'client-id': client_id}
-                pxe_enabled = port.get('pxe', True)
-
-            if mac not in self.ports():
-                self._create_port(mac, ironic=ironic, extra=extra,
-                                  pxe_enabled=pxe_enabled)
-            else:
-                existing_macs.append(mac)
-
-        if existing_macs:
-            LOG.warning('Did not create ports %s as they already exist',
-                        existing_macs, node_info=self)
-
-    def ports(self, ironic=None):
-        """Get Ironic port objects associated with the cached node record.
-
-        This value is cached as well, use invalidate_cache() to clean.
-
-        :return: dict MAC -> port object
-        """
-        if self._ports is None:
-            ironic = ironic or self.ironic
-            port_list = ironic.node.list_ports(self.uuid, limit=0, detail=True)
-            self._ports = {p.address: p for p in port_list}
-        return self._ports
-
-    def _create_port(self, mac, ironic=None, **kwargs):
-        ironic = ironic or self.ironic
-        try:
-            port = ironic.port.create(
-                node_uuid=self.uuid, address=mac, **kwargs)
-            LOG.info('Port %(uuid)s was created successfully, MAC: %(mac)s, '
-                     'attributes: %(attrs)s',
-                     {'uuid': port.uuid, 'mac': port.address,
-                      'attrs': kwargs},
-                     node_info=self)
-        except exceptions.Conflict:
-            LOG.warning('Port %s already exists, skipping',
-                        mac, node_info=self)
-            # NOTE(dtantsur): we didn't get a port object back, so we have to
-            # reload ports on next access
-            self._ports = None
-        else:
-            self._ports[mac] = port
-
-    def patch(self, patches, ironic=None):
-        """Apply JSON patches to a node.
-
-        Refreshes cached node instance.
- - :param patches: JSON patches to apply - :param ironic: Ironic client to use instead of self.ironic - :raises: ironicclient exceptions - """ - ironic = ironic or self.ironic - # NOTE(aarefiev): support path w/o ahead forward slash - # as Ironic cli does - for patch in patches: - if patch.get('path') and not patch['path'].startswith('/'): - patch['path'] = '/' + patch['path'] - - LOG.debug('Updating node with patches %s', patches, node_info=self) - self._node = ironic.node.update(self.uuid, patches) - - def patch_port(self, port, patches, ironic=None): - """Apply JSON patches to a port. - - :param port: port object or its MAC - :param patches: JSON patches to apply - :param ironic: Ironic client to use instead of self.ironic - """ - ironic = ironic or self.ironic - ports = self.ports() - if isinstance(port, six.string_types): - port = ports[port] - - LOG.debug('Updating port %(mac)s with patches %(patches)s', - {'mac': port.address, 'patches': patches}, - node_info=self) - new_port = ironic.port.update(port.uuid, patches) - ports[port.address] = new_port - - def update_properties(self, ironic=None, **props): - """Update properties on a node. - - :param props: properties to update - :param ironic: Ironic client to use instead of self.ironic - """ - ironic = ironic or self.ironic - patches = [{'op': 'add', 'path': '/properties/%s' % k, 'value': v} - for k, v in props.items()] - self.patch(patches, ironic) - - def update_capabilities(self, ironic=None, **caps): - """Update capabilities on a node. - - :param caps: capabilities to update - :param ironic: Ironic client to use instead of self.ironic - """ - existing = ir_utils.capabilities_to_dict( - self.node().properties.get('capabilities')) - existing.update(caps) - self.update_properties( - ironic=ironic, - capabilities=ir_utils.dict_to_capabilities(existing)) - - def delete_port(self, port, ironic=None): - """Delete port. - - :param port: port object or its MAC - :param ironic: Ironic client to use instead of self.ironic - """ - ironic = ironic or self.ironic - ports = self.ports() - if isinstance(port, six.string_types): - port = ports[port] - - ironic.port.delete(port.uuid) - del ports[port.address] - - def get_by_path(self, path): - """Get field value by ironic-style path (e.g. /extra/foo). - - :param path: path to a field - :returns: field value - :raises: KeyError if field was not found - """ - path = path.strip('/') - try: - if '/' in path: - prop, key = path.split('/', 1) - return getattr(self.node(), prop)[key] - else: - return getattr(self.node(), path) - except AttributeError: - raise KeyError(path) - - def replace_field(self, path, func, **kwargs): - """Replace a field on ironic node. - - :param path: path to a field as used by the ironic client - :param func: function accepting an old value and returning a new one - :param kwargs: if 'default' value is passed here, it will be used when - no existing value is found. 
- :raises: KeyError if value is not found and default is not set - :raises: everything that patch() may raise - """ - ironic = kwargs.pop("ironic", None) or self.ironic - try: - value = self.get_by_path(path) - op = 'replace' - except KeyError: - if 'default' in kwargs: - value = kwargs['default'] - op = 'add' - else: - raise - - ref_value = copy.deepcopy(value) - value = func(value) - if value != ref_value: - self.patch([{'op': op, 'path': path, 'value': value}], ironic) - - -def triggers_fsm_error_transition(errors=(Exception,), - no_errors=(utils.NodeStateInvalidEvent, - utils.NodeStateRaceCondition)): - """Trigger an fsm error transition upon certain errors. - - It is assumed the first function arg of the decorated function is always a - NodeInfo instance. - - :param errors: a tuple of exceptions upon which an error - event is triggered. Re-raised. - :param no_errors: a tuple of exceptions that won't trigger the - error event. - """ - def outer(func): - @six.wraps(func) - def inner(node_info, *args, **kwargs): - ret = None - try: - ret = func(node_info, *args, **kwargs) - except no_errors as exc: - LOG.debug('Not processing error event for the ' - 'exception: %(exc)s raised by %(func)s', - {'exc': exc, - 'func': reflection.get_callable_name(func)}, - node_info=node_info) - except errors as exc: - with excutils.save_and_reraise_exception(): - LOG.error('Processing the error event because of an ' - 'exception %(exc_type)s: %(exc)s raised by ' - '%(func)s', - {'exc_type': type(exc), 'exc': exc, - 'func': reflection.get_callable_name(func)}, - node_info=node_info) - # an error event should be possible from all states - node_info.fsm_event(istate.Events.error) - return ret - return inner - return outer - - -def fsm_event_before(event, strict=False): - """Trigger an fsm event before the function execution. - - It is assumed the first function arg of the decorated function is always a - NodeInfo instance. - - :param event: the event to process before the function call - :param strict: make an invalid fsm event trigger an error event - """ - def outer(func): - @six.wraps(func) - def inner(node_info, *args, **kwargs): - LOG.debug('Processing event %(event)s before calling ' - '%(func)s', {'event': event, 'func': func}, - node_info=node_info) - node_info.fsm_event(event, strict=strict) - return func(node_info, *args, **kwargs) - return inner - return outer - - -def fsm_event_after(event, strict=False): - """Trigger an fsm event after the function execution. - - It is assumed the first function arg of the decorated function is always a - NodeInfo instance. - - :param event: the event to process after the function call - :param strict: make an invalid fsm event trigger an error event - """ - def outer(func): - @six.wraps(func) - def inner(node_info, *args, **kwargs): - ret = func(node_info, *args, **kwargs) - LOG.debug('Processing event %(event)s after calling ' - '%(func)s', {'event': event, 'func': func}, - node_info=node_info) - node_info.fsm_event(event, strict=strict) - return ret - return inner - return outer - - -def fsm_transition(event, reentrant=True, **exc_kwargs): - """Decorate a function to perform a (non-)reentrant transition. - - If True, reentrant transition will be performed at the end of a function - call. If False, the transition will be performed before the function call. - The function is decorated with the triggers_fsm_error_transition decorator - as well. - - :param event: the event to bind the transition to. - :param reentrant: whether the transition is reentrant. 
- :param exc_kwargs: passed on to the triggers_fsm_error_transition decorator - """ - def outer(func): - inner = triggers_fsm_error_transition(**exc_kwargs)(func) - if not reentrant: - return fsm_event_before(event, strict=True)(inner) - return fsm_event_after(event)(inner) - return outer - - -def release_lock(func): - """Decorate a node_info-function to release the node_info lock. - - Assumes the first parameter of the function func is always a NodeInfo - instance. - - """ - @six.wraps(func) - def inner(node_info, *args, **kwargs): - try: - return func(node_info, *args, **kwargs) - finally: - # FIXME(milan) hacking the test cases to work - # with release_lock.assert_called_once... - if node_info._locked: - node_info.release_lock() - return inner - - -def start_introspection(uuid, **kwargs): - """Start the introspection of a node. - - If a node_info record exists in the DB, a start transition is used rather - than dropping the record in order to check for the start transition - validity in particular node state. - - :param uuid: Ironic node UUID - :param kwargs: passed on to add_node() - :raises: NodeStateInvalidEvent in case the start transition is invalid in - the current node state - :raises: NodeStateRaceCondition if a mismatch was detected between the - node_info cache and the DB - :returns: NodeInfo - """ - with db.ensure_transaction(): - node_info = NodeInfo(uuid) - # check that the start transition is possible - try: - node_info.fsm_event(istate.Events.start) - except utils.NotFoundInCacheError: - # node not found while in the fsm_event handler - LOG.debug('Node missing in the cache; adding it now', - node_info=node_info) - state = istate.States.starting - else: - state = node_info.state - return add_node(uuid, state, **kwargs) - - -def add_node(uuid, state, **attributes): - """Store information about a node under introspection. - - All existing information about this node is dropped. - Empty values are skipped. - - :param uuid: Ironic node UUID - :param state: The initial state of the node - :param attributes: attributes known about this node (like macs, BMC etc); - also ironic client instance may be passed under 'ironic' - :returns: NodeInfo - """ - started_at = timeutils.utcnow() - with db.ensure_transaction() as session: - _delete_node(uuid) - db.Node(uuid=uuid, state=state, started_at=started_at).save(session) - - node_info = NodeInfo(uuid=uuid, state=state, started_at=started_at, - ironic=attributes.pop('ironic', None)) - for (name, value) in attributes.items(): - if not value: - continue - node_info.add_attribute(name, value, session=session) - - return node_info - - -def delete_nodes_not_in_list(uuids): - """Delete nodes which don't exist in Ironic node UUIDs. - - :param uuids: Ironic node UUIDs - """ - inspector_uuids = _list_node_uuids() - for uuid in inspector_uuids - uuids: - LOG.warning('Node %s was deleted from Ironic, dropping from Ironic ' - 'Inspector database', uuid) - with _get_lock_ctx(uuid): - _delete_node(uuid) - - -def _delete_node(uuid, session=None): - """Delete information about a node. 
-
-    :param uuid: Ironic node UUID
-    :param session: optional existing database session
-    """
-    with db.ensure_transaction(session) as session:
-        db.model_query(db.Attribute, session=session).filter_by(
-            node_uuid=uuid).delete()
-        for model in (db.Option, db.Node):
-            db.model_query(model,
-                           session=session).filter_by(uuid=uuid).delete()
-
-
-def introspection_active():
-    """Check if introspection is active for at least one node."""
-    # FIXME(dtantsur): is there a better way to express it?
-    return (db.model_query(db.Node.uuid).filter_by(finished_at=None).first()
-            is not None)
-
-
-def active_macs():
-    """List all MACs that are on introspection right now."""
-    return ({x.value for x in db.model_query(db.Attribute.value).
-             filter_by(name=MACS_ATTRIBUTE)})
-
-
-def _list_node_uuids():
-    """Get all nodes' uuid from cache.
-
-    :returns: set of node UUIDs
-    """
-    return {x.uuid for x in db.model_query(db.Node.uuid)}
-
-
-def get_node(node_id, ironic=None, locked=False):
-    """Get node from cache.
-
-    :param node_id: node UUID or name.
-    :param ironic: optional ironic client instance
-    :param locked: if True, get a lock on node before fetching its data
-    :returns: structure NodeInfo.
-    """
-    if uuidutils.is_uuid_like(node_id):
-        node = None
-        uuid = node_id
-    else:
-        node = ir_utils.get_node(node_id, ironic=ironic)
-        uuid = node.uuid
-
-    if locked:
-        lock = _get_lock(uuid)
-        lock.acquire()
-    else:
-        lock = None
-
-    try:
-        row = db.model_query(db.Node).filter_by(uuid=uuid).first()
-        if row is None:
-            raise utils.Error(_('Could not find node %s in cache') % uuid,
-                              code=404)
-        return NodeInfo.from_row(row, ironic=ironic, lock=lock, node=node)
-    except Exception:
-        with excutils.save_and_reraise_exception():
-            if lock is not None:
-                lock.release()
-
-
-def find_node(**attributes):
-    """Find node in cache.
-
-    Looks up a node based on attributes in a best-match fashion.
-    This function acquires a lock on a node.
- - :param attributes: attributes known about this node (like macs, BMC etc) - also ironic client instance may be passed under 'ironic' - :returns: structure NodeInfo with attributes ``uuid`` and ``created_at`` - :raises: Error if node is not found or multiple nodes match the attributes - """ - ironic = attributes.pop('ironic', None) - # NOTE(dtantsur): sorting is not required, but gives us predictability - found = collections.Counter() - - for (name, value) in sorted(attributes.items()): - if not value: - LOG.debug('Empty value for attribute %s', name) - continue - if not isinstance(value, list): - value = [value] - - LOG.debug('Trying to use %s of value %s for node look up', - name, value) - value_list = [] - for v in value: - value_list.append("name='%s' AND value='%s'" % (name, v)) - stmt = ('select distinct node_uuid from attributes where ' + - ' OR '.join(value_list)) - rows = (db.model_query(db.Attribute.node_uuid).from_statement( - text(stmt)).all()) - found.update(row.node_uuid for row in rows) - - if not found: - raise utils.NotFoundInCacheError(_( - 'Could not find a node for attributes %s') % attributes) - - most_common = found.most_common() - LOG.debug('The following nodes match the attributes: %(attributes)s, ' - 'scoring: %(most_common)s', - {'most_common': ', '.join('%s: %d' % tpl for tpl in most_common), - 'attributes': ', '.join('%s=%s' % tpl for tpl in - attributes.items())}) - - # NOTE(milan) most_common is sorted, higher scores first - highest_score = most_common[0][1] - found = [item[0] for item in most_common if highest_score == item[1]] - if len(found) > 1: - raise utils.Error(_( - 'Multiple nodes match the same number of attributes ' - '%(attr)s: %(found)s') - % {'attr': attributes, 'found': found}, code=404) - - uuid = found.pop() - node_info = NodeInfo(uuid=uuid, ironic=ironic) - node_info.acquire_lock() - - try: - row = (db.model_query(db.Node.started_at, db.Node.finished_at). - filter_by(uuid=uuid).first()) - - if not row: - raise utils.Error(_( - 'Could not find node %s in introspection cache, ' - 'probably it\'s not on introspection now') % uuid, code=404) - - if row.finished_at: - raise utils.Error(_( - 'Introspection for node %(node)s already finished on ' - '%(finish)s') % {'node': uuid, 'finish': row.finished_at}) - - node_info.started_at = row.started_at - return node_info - except Exception: - with excutils.save_and_reraise_exception(): - node_info.release_lock() - - -def clean_up(): - """Clean up the cache. - - * Finish introspection for timed out nodes. - * Drop outdated node status information. 
-
-    :return: list of timed out node UUIDs
-    """
-    if CONF.node_status_keep_time > 0:
-        status_keep_threshold = (timeutils.utcnow() - datetime.timedelta(
-            seconds=CONF.node_status_keep_time))
-        with db.ensure_transaction() as session:
-            db.model_query(db.Node, session=session).filter(
-                db.Node.finished_at.isnot(None),
-                db.Node.finished_at < status_keep_threshold).delete()
-
-    timeout = CONF.timeout
-    if timeout <= 0:
-        return []
-    threshold = timeutils.utcnow() - datetime.timedelta(seconds=timeout)
-    uuids = [row.uuid for row in
-             db.model_query(db.Node.uuid).filter(
-                 db.Node.started_at < threshold,
-                 db.Node.finished_at.is_(None)).all()]
-
-    if not uuids:
-        return []
-
-    LOG.error('Introspection for nodes %s has timed out', uuids)
-    for u in uuids:
-        node_info = get_node(u, locked=True)
-        try:
-            if node_info.finished_at or node_info.started_at > threshold:
-                continue
-            if node_info.state != istate.States.waiting:
-                LOG.error('Something went wrong, timeout occurred '
-                          'while introspection was in "%s" state',
-                          node_info.state,
-                          node_info=node_info)
-            node_info.fsm_event(istate.Events.timeout)
-            node_info.finished(error='Introspection timeout')
-        finally:
-            node_info.release_lock()
-
-    return uuids
-
-
-def create_node(driver, ironic=None, **attributes):
-    """Create ironic node and cache it.
-
-    * Create a new node in ironic.
-    * Cache it in inspector.
-    * Set the node_info state to enrolling.
-
-    :param driver: driver for the Ironic node.
-    :param ironic: ironic client instance.
-    :param attributes: dict, additional keyword arguments to pass
-                       to the ironic client on node creation.
-    :return: NodeInfo, or None if an error occurred.
-    """
-    if ironic is None:
-        ironic = ir_utils.get_client()
-    try:
-        node = ironic.node.create(driver=driver, **attributes)
-    except exceptions.InvalidAttribute as e:
-        LOG.error('Failed to create new node: %s', e)
-    else:
-        LOG.info('Node %s was created successfully', node.uuid)
-        return add_node(node.uuid, istate.States.enrolling, ironic=ironic)
-
-
-def get_node_list(ironic=None, marker=None, limit=None):
-    """Get node list from the cache.
-
-    The list of the nodes is ordered based on the (started_at, uuid)
-    attribute pair, newer items first.
-
-    :param ironic: optional ironic client instance
-    :param marker: pagination marker (a UUID or None)
-    :param limit: pagination limit; None for the default CONF.api_max_limit
-    :returns: a list of NodeInfo instances.
-    """
-    if marker is not None:
-        # uuid marker -> row marker for pagination; keep the original UUID
-        # around so the error message below does not print None
-        marker_row = db.model_query(db.Node).get(marker)
-        if marker_row is None:
-            raise utils.Error(_('Node not found for marker: %s') % marker,
-                              code=404)
-        marker = marker_row
-
-    rows = db.model_query(db.Node)
-    # ordered based on (started_at, uuid); newer first
-    rows = db_utils.paginate_query(rows, db.Node, limit,
-                                   ('started_at', 'uuid'),
-                                   marker=marker, sort_dir='desc')
-    return [NodeInfo.from_row(row, ironic=ironic) for row in rows]
diff --git a/ironic_inspector/plugins/__init__.py b/ironic_inspector/plugins/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/ironic_inspector/plugins/base.py b/ironic_inspector/plugins/base.py
deleted file mode 100644
index bfd8322..0000000
--- a/ironic_inspector/plugins/base.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Base code for plugins support.""" - -import abc - -from oslo_config import cfg -from oslo_log import log -import six -import stevedore - -from ironic_inspector.common.i18n import _ - - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class ProcessingHook(object): # pragma: no cover - """Abstract base class for introspection data processing hooks.""" - - dependencies = [] - """An ordered list of hooks that must be enabled before this one. - - The items here should be entry point names, not classes. - """ - - def before_processing(self, introspection_data, **kwargs): - """Hook to run before any other data processing. - - This hook is run even before sanity checks. - - :param introspection_data: raw information sent by the ramdisk, - may be modified by the hook. - :param kwargs: used for extensibility without breaking existing hooks - :returns: nothing. - """ - - def before_update(self, introspection_data, node_info, **kwargs): - """Hook to run before Ironic node update. - - This hook is run after node is found and ports are created, - just before the node is updated with the data. - - :param introspection_data: processed data from the ramdisk. - :param node_info: NodeInfo instance. - :param kwargs: used for extensibility without breaking existing hooks. - :returns: nothing. - - [RFC 6902] - http://tools.ietf.org/html/rfc6902 - """ - - -class WithValidation(object): - REQUIRED_PARAMS = set() - """Set with names of required parameters.""" - - OPTIONAL_PARAMS = set() - """Set with names of optional parameters.""" - - def validate(self, params, **kwargs): - """Validate params passed during creation. - - Default implementation checks for presence of fields from - REQUIRED_PARAMS and fails for unexpected fields (not from - REQUIRED_PARAMS + OPTIONAL_PARAMS). - - :param params: params as a dictionary - :param kwargs: used for extensibility without breaking existing plugins - :raises: ValueError on validation failure - """ - passed = {k for k, v in params.items() if v is not None} - missing = self.REQUIRED_PARAMS - passed - unexpected = passed - self.REQUIRED_PARAMS - self.OPTIONAL_PARAMS - - msg = [] - if missing: - msg.append(_('missing required parameter(s): %s') - % ', '.join(missing)) - if unexpected: - msg.append(_('unexpected parameter(s): %s') - % ', '.join(unexpected)) - - if msg: - raise ValueError('; '.join(msg)) - - -@six.add_metaclass(abc.ABCMeta) -class RuleConditionPlugin(WithValidation): # pragma: no cover - """Abstract base class for rule condition plugins.""" - - REQUIRED_PARAMS = {'value'} - - ALLOW_NONE = False - """Whether this condition accepts None when field is not found.""" - - @abc.abstractmethod - def check(self, node_info, field, params, **kwargs): - """Check if condition holds for a given field. 
- - :param node_info: NodeInfo object - :param field: field value - :param params: parameters as a dictionary, changing it here will change - what will be stored in database - :param kwargs: used for extensibility without breaking existing plugins - :raises ValueError: on unacceptable field value - :returns: True if check succeeded, otherwise False - """ - - -@six.add_metaclass(abc.ABCMeta) -class RuleActionPlugin(WithValidation): # pragma: no cover - """Abstract base class for rule action plugins.""" - - FORMATTED_PARAMS = [] - """List of params will be formatted with python format.""" - - @abc.abstractmethod - def apply(self, node_info, params, **kwargs): - """Run action on successful rule match. - - :param node_info: NodeInfo object - :param params: parameters as a dictionary - :param kwargs: used for extensibility without breaking existing plugins - :raises: utils.Error on failure - """ - - -_HOOKS_MGR = None -_NOT_FOUND_HOOK_MGR = None -_CONDITIONS_MGR = None -_ACTIONS_MGR = None - - -def missing_entrypoints_callback(names): - """Raise MissingHookError with comma-separated list of missing hooks""" - error = _('The following hook(s) are missing or failed to load: %s') - raise RuntimeError(error % ', '.join(names)) - - -def processing_hooks_manager(*args): - """Create a Stevedore extension manager for processing hooks. - - :param args: arguments to pass to the hooks constructor. - """ - global _HOOKS_MGR - if _HOOKS_MGR is None: - names = [x.strip() - for x in CONF.processing.processing_hooks.split(',') - if x.strip()] - _HOOKS_MGR = stevedore.NamedExtensionManager( - 'ironic_inspector.hooks.processing', - names=names, - invoke_on_load=True, - invoke_args=args, - on_missing_entrypoints_callback=missing_entrypoints_callback, - name_order=True) - return _HOOKS_MGR - - -def validate_processing_hooks(): - """Validate the enabled processing hooks. - - :raises: MissingHookError on missing or failed to load hooks - :raises: RuntimeError on validation failure - :returns: the list of hooks passed validation - """ - hooks = [ext for ext in processing_hooks_manager()] - enabled = set() - errors = [] - for hook in hooks: - deps = getattr(hook.obj, 'dependencies', ()) - missing = [d for d in deps if d not in enabled] - if missing: - errors.append('Hook %(hook)s requires the following hooks to be ' - 'enabled before it: %(deps)s. The following hooks ' - 'are missing: %(missing)s.' 
% - {'hook': hook.name, - 'deps': ', '.join(deps), - 'missing': ', '.join(missing)}) - enabled.add(hook.name) - - if errors: - raise RuntimeError("Some hooks failed to load due to dependency " - "problems:\n%s" % "\n".join(errors)) - - return hooks - - -def node_not_found_hook_manager(*args): - global _NOT_FOUND_HOOK_MGR - if _NOT_FOUND_HOOK_MGR is None: - name = CONF.processing.node_not_found_hook - if name: - _NOT_FOUND_HOOK_MGR = stevedore.DriverManager( - 'ironic_inspector.hooks.node_not_found', - name=name) - - return _NOT_FOUND_HOOK_MGR - - -def rule_conditions_manager(): - """Create a Stevedore extension manager for conditions in rules.""" - global _CONDITIONS_MGR - if _CONDITIONS_MGR is None: - _CONDITIONS_MGR = stevedore.ExtensionManager( - 'ironic_inspector.rules.conditions', - invoke_on_load=True) - return _CONDITIONS_MGR - - -def rule_actions_manager(): - """Create a Stevedore extension manager for actions in rules.""" - global _ACTIONS_MGR - if _ACTIONS_MGR is None: - _ACTIONS_MGR = stevedore.ExtensionManager( - 'ironic_inspector.rules.actions', - invoke_on_load=True) - return _ACTIONS_MGR diff --git a/ironic_inspector/plugins/capabilities.py b/ironic_inspector/plugins/capabilities.py deleted file mode 100644 index a75171d..0000000 --- a/ironic_inspector/plugins/capabilities.py +++ /dev/null @@ -1,101 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
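
For context, here is a minimal sketch of a custom processing hook written against the interfaces defined in plugins/base.py above. It is not part of this tree: the class name and the capability key are invented for illustration, while 'ramdisk_error' is one of the standard in-tree hooks, used here only to show the `dependencies` contract checked by validate_processing_hooks().

# Hypothetical out-of-tree hook (illustrative sketch only).
from ironic_inspector.plugins import base


class DiskCountHook(base.ProcessingHook):

    dependencies = ['ramdisk_error']  # entry point names, not classes

    def before_update(self, introspection_data, node_info, **kwargs):
        # Store the number of disks reported by the ramdisk as a
        # node capability.
        disks = introspection_data.get('inventory', {}).get('disks', [])
        node_info.update_capabilities(disk_count=str(len(disks)))

Such a hook would be registered under the ironic_inspector.hooks.processing entry point and enabled through [processing]processing_hooks, which is exactly the list processing_hooks_manager() loads.
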
- -"""Gather capabilities from inventory.""" - -from oslo_config import cfg - -from ironic_inspector.common.i18n import _ -from ironic_inspector.plugins import base -from ironic_inspector import utils - - -DEFAULT_CPU_FLAGS_MAPPING = { - 'vmx': 'cpu_vt', - 'svm': 'cpu_vt', - 'aes': 'cpu_aes', - 'pse': 'cpu_hugepages', - 'pdpe1gb': 'cpu_hugepages_1g', - 'smx': 'cpu_txt', -} - -CAPABILITIES_OPTS = [ - cfg.BoolOpt('boot_mode', - default=False, - help=_('Whether to store the boot mode (BIOS or UEFI).')), - cfg.DictOpt('cpu_flags', - default=DEFAULT_CPU_FLAGS_MAPPING, - help=_('Mapping between a CPU flag and a capability to set ' - 'if this flag is present.')), -] - - -def list_opts(): - return [ - ('capabilities', CAPABILITIES_OPTS) - ] - -CONF = cfg.CONF -CONF.register_opts(CAPABILITIES_OPTS, group='capabilities') -LOG = utils.getProcessingLogger(__name__) - - -class CapabilitiesHook(base.ProcessingHook): - """Processing hook for detecting capabilities.""" - - def _detect_boot_mode(self, inventory, node_info, data=None): - boot_mode = inventory.get('boot', {}).get('current_boot_mode') - if boot_mode is not None: - LOG.info('Boot mode was %s', boot_mode, - data=data, node_info=node_info) - return {'boot_mode': boot_mode} - else: - LOG.warning('No boot mode information available', - data=data, node_info=node_info) - return {} - - def _detect_cpu_flags(self, inventory, node_info, data=None): - flags = inventory['cpu'].get('flags') - if not flags: - LOG.warning('No CPU flags available, please update your ' - 'introspection ramdisk', - data=data, node_info=node_info) - return {} - - flags = set(flags) - caps = {} - for flag, name in CONF.capabilities.cpu_flags.items(): - if flag in flags: - caps[name] = 'true' - - LOG.info('CPU capabilities: %s', list(caps), - data=data, node_info=node_info) - return caps - - def before_update(self, introspection_data, node_info, **kwargs): - inventory = utils.get_inventory(introspection_data) - caps = {} - if CONF.capabilities.boot_mode: - caps.update(self._detect_boot_mode(inventory, node_info, - introspection_data)) - - caps.update(self._detect_cpu_flags(inventory, node_info, - introspection_data)) - - if caps: - LOG.debug('New capabilities: %s', caps, node_info=node_info, - data=introspection_data) - node_info.update_capabilities(**caps) - else: - LOG.debug('No new capabilities detected', node_info=node_info, - data=introspection_data) diff --git a/ironic_inspector/plugins/discovery.py b/ironic_inspector/plugins/discovery.py deleted file mode 100644 index 0b32bc6..0000000 --- a/ironic_inspector/plugins/discovery.py +++ /dev/null @@ -1,102 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Enroll node not found hook hook.""" - -from oslo_config import cfg - -from ironic_inspector.common.i18n import _ -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import node_cache -from ironic_inspector import utils - - -DISCOVERY_OPTS = [ - cfg.StrOpt('enroll_node_driver', - default='fake', - help=_('The name of the Ironic driver used by the enroll ' - 'hook when creating a new node in Ironic.')), -] - - -def list_opts(): - return [ - ('discovery', DISCOVERY_OPTS) - ] - -CONF = cfg.CONF -CONF.register_opts(DISCOVERY_OPTS, group='discovery') - -LOG = utils.getProcessingLogger(__name__) - - -def _extract_node_driver_info(introspection_data): - node_driver_info = {} - ipmi_address = utils.get_ipmi_address_from_data(introspection_data) - if ipmi_address: - node_driver_info['ipmi_address'] = ipmi_address - else: - LOG.warning('No BMC address provided, discovered node will be ' - 'created without ipmi address') - return node_driver_info - - -def _check_existing_nodes(introspection_data, node_driver_info, ironic): - macs = utils.get_valid_macs(introspection_data) - if macs: - # verify existing ports - for mac in macs: - ports = ironic.port.list(address=mac) - if not ports: - continue - raise utils.Error( - _('Port %(mac)s already exists, uuid: %(uuid)s') % - {'mac': mac, 'uuid': ports[0].uuid}, data=introspection_data) - else: - LOG.warning('No suitable interfaces found for discovered node. ' - 'Check that validate_interfaces hook is listed in ' - '[processing]default_processing_hooks config option') - - # verify existing node with discovered ipmi address - ipmi_address = node_driver_info.get('ipmi_address') - if ipmi_address: - # FIXME(aarefiev): it's not effective to fetch all nodes, and may - # impact on performance on big clusters - nodes = ironic.node.list(fields=('uuid', 'driver_info'), limit=0) - for node in nodes: - if ipmi_address == ir_utils.get_ipmi_address(node): - raise utils.Error( - _('Node %(uuid)s already has BMC address ' - '%(ipmi_address)s, not enrolling') % - {'ipmi_address': ipmi_address, 'uuid': node.uuid}, - data=introspection_data) - - -def enroll_node_not_found_hook(introspection_data, **kwargs): - node_attr = {} - ironic = ir_utils.get_client() - - node_driver_info = _extract_node_driver_info(introspection_data) - node_attr['driver_info'] = node_driver_info - - node_driver = CONF.discovery.enroll_node_driver - - _check_existing_nodes(introspection_data, node_driver_info, ironic) - LOG.debug('Creating discovered node with driver %(driver)s and ' - 'attributes: %(attr)s', - {'driver': node_driver, 'attr': node_attr}, - data=introspection_data) - # NOTE(aarefiev): This flag allows to distinguish enrolled manually - # and auto-discovered nodes in the introspection rules. - introspection_data['auto_discovered'] = True - return node_cache.create_node(node_driver, ironic=ironic, **node_attr) diff --git a/ironic_inspector/plugins/example.py b/ironic_inspector/plugins/example.py deleted file mode 100644 index e4f7496..0000000 --- a/ironic_inspector/plugins/example.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Example plugin."""
-
-from oslo_log import log
-
-from ironic_inspector.plugins import base
-
-
-LOG = log.getLogger('ironic_inspector.plugins.example')
-
-
-class ExampleProcessingHook(base.ProcessingHook):  # pragma: no cover
-    def before_processing(self, introspection_data, **kwargs):
-        LOG.debug('before_processing: %s', introspection_data)
-
-    def before_update(self, introspection_data, node_info, **kwargs):
-        LOG.debug('before_update: %s (node %s)', introspection_data,
-                  node_info.uuid)
-
-
-def example_not_found_hook(introspection_data, **kwargs):
-    LOG.debug('Processing node not found %s', introspection_data)
-
-
-class ExampleRuleAction(base.RuleActionPlugin):  # pragma: no cover
-    def apply(self, node_info, params, **kwargs):
-        LOG.debug('apply action to %s: %s', node_info.uuid, params)
diff --git a/ironic_inspector/plugins/extra_hardware.py b/ironic_inspector/plugins/extra_hardware.py
deleted file mode 100644
index a0cb676..0000000
--- a/ironic_inspector/plugins/extra_hardware.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Plugin to store extra hardware information in Swift.
-
-Stores the value of the 'data' key returned by the ramdisk as a JSON encoded
-string in a Swift object. The object is named 'extra_hardware-<node uuid>'
-and is stored in the 'inspector' container.
-"""
-
-import json
-
-from ironic_inspector.common import swift
-from ironic_inspector.plugins import base
-from ironic_inspector import utils
-
-LOG = utils.getProcessingLogger(__name__)
-EDEPLOY_ITEM_SIZE = 4
-
-
-class ExtraHardwareHook(base.ProcessingHook):
-    """Processing hook for saving extra hardware information in Swift."""
-
-    def _store_extra_hardware(self, name, data):
-        """Handle storing the extra hardware data from the ramdisk."""
-        swift_api = swift.SwiftAPI()
-        swift_api.create_object(name, data)
-
-    def before_update(self, introspection_data, node_info, **kwargs):
-        """Store the 'data' key from introspection_data in Swift.
-
-        If the 'data' key exists, update the 'hardware_swift_object' key in
-        the Ironic extra column to the name of the Swift object, and store
-        the data in the 'inspector' container in Swift.
-
-        Otherwise, do nothing.
-        """
-        if 'data' not in introspection_data:
-            LOG.warning('No extra hardware information was received from '
-                        'the ramdisk', node_info=node_info,
-                        data=introspection_data)
-            return
-        data = introspection_data['data']
-
-        name = 'extra_hardware-%s' % node_info.uuid
-        self._store_extra_hardware(name, json.dumps(data))
-
-        # NOTE(sambetts) If the data is in eDeploy format, convert it to
-        # dicts for rules processing and store the converted data in
-        # introspection_data['extra']. Delete introspection_data['data'],
-        # as it is assumed unusable by rules.
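# (Annotation, not part of the original file: an eDeploy item is a
# 4-element list [category, item, key, value], for example
# ['disk', 'sda', 'size', '100'] -- sample values only. The helper
# _convert_edeploy_data() below nests such items into
# {'disk': {'sda': {'size': 100}}}, which lands in
# introspection_data['extra'] for rule processing.)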
- if self._is_edeploy_data(data): - LOG.debug('Extra hardware data is in eDeploy format, ' - 'converting to usable format', - node_info=node_info, data=introspection_data) - introspection_data['extra'] = self._convert_edeploy_data(data) - else: - LOG.warning('Extra hardware data was not in a recognised ' - 'format (eDeploy), and will not be forwarded to ' - 'introspection rules', node_info=node_info, - data=introspection_data) - - LOG.debug('Deleting \"data\" key from introspection data as it is ' - 'assumed unusable by introspection rules. Raw data is ' - 'stored in swift', - node_info=node_info, data=introspection_data) - del introspection_data['data'] - - node_info.patch([{'op': 'add', 'path': '/extra/hardware_swift_object', - 'value': name}]) - - def _is_edeploy_data(self, data): - return all(isinstance(item, list) and len(item) == EDEPLOY_ITEM_SIZE - for item in data) - - def _convert_edeploy_data(self, data): - converted = {} - for item in data: - converted_0 = converted.setdefault(item[0], {}) - converted_1 = converted_0.setdefault(item[1], {}) - - try: - item[3] = int(item[3]) - except (ValueError, TypeError): - pass - - converted_1[item[2]] = item[3] - return converted diff --git a/ironic_inspector/plugins/lldp_basic.py b/ironic_inspector/plugins/lldp_basic.py deleted file mode 100644 index 2b697a3..0000000 --- a/ironic_inspector/plugins/lldp_basic.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""LLDP Processing Hook for basic TLVs""" - -import binascii - -from ironic_inspector.common import lldp_parsers -from ironic_inspector.plugins import base -from ironic_inspector import utils - -LOG = utils.getProcessingLogger(__name__) - - -class LLDPBasicProcessingHook(base.ProcessingHook): - """Process mandatory and optional LLDP packet fields - - Loop through raw LLDP TLVs and parse those from the - basic management, 802.1, and 802.3 TLV sets. - Store parsed data back to the ironic-inspector database. - """ - - def _parse_lldp_tlvs(self, tlvs, node_info): - """Parse LLDP TLVs into dictionary of name/value pairs - - :param tlvs: list of raw TLVs - :param node_info: node being introspected - :returns nv: dictionary of name/value pairs. The - LLDP user-friendly names, e.g. - "switch_port_id" are the keys - """ - - # Generate name/value pairs for each TLV supported by this plugin. 
- parser = lldp_parsers.LLDPBasicMgmtParser(node_info) - - for tlv_type, tlv_value in tlvs: - try: - data = bytearray(binascii.a2b_hex(tlv_value)) - except TypeError as e: - LOG.warning( - "TLV value for TLV type %(tlv_type)d not in correct " - "format, value must be in hexadecimal: %(msg)s", - {'tlv_type': tlv_type, 'msg': e}, node_info=node_info) - continue - - if parser.parse_tlv(tlv_type, data): - LOG.debug("Handled TLV type %d", - tlv_type, node_info=node_info) - else: - LOG.debug("LLDP TLV type %d not handled", - tlv_type, node_info=node_info) - - return parser.nv_dict - - def before_update(self, introspection_data, node_info, **kwargs): - """Process LLDP data and update all_interfaces with processed data""" - - inventory = utils.get_inventory(introspection_data) - - for iface in inventory['interfaces']: - if_name = iface['name'] - - tlvs = iface.get('lldp') - if tlvs is None: - LOG.warning("No LLDP Data found for interface %s", - if_name, node_info=node_info) - continue - - LOG.debug("Processing LLDP Data for interface %s", - if_name, node_info=node_info) - - nv = self._parse_lldp_tlvs(tlvs, node_info) - - if nv: - # Store lldp data per interface in "all_interfaces" - iface_to_update = introspection_data['all_interfaces'][if_name] - iface_to_update['lldp_processed'] = nv diff --git a/ironic_inspector/plugins/local_link_connection.py b/ironic_inspector/plugins/local_link_connection.py deleted file mode 100644 index 7350015..0000000 --- a/ironic_inspector/plugins/local_link_connection.py +++ /dev/null @@ -1,149 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generic LLDP Processing Hook""" - -import binascii - -from construct import core -import netaddr -from oslo_config import cfg - -from ironic_inspector.common import lldp_parsers -from ironic_inspector.common import lldp_tlvs as tlv -from ironic_inspector.plugins import base -from ironic_inspector import utils - -LOG = utils.getProcessingLogger(__name__) - -CONF = cfg.CONF - -PORT_ID_ITEM_NAME = "port_id" -SWITCH_ID_ITEM_NAME = "switch_id" - -LLDP_PROC_DATA_MAPPING =\ - {lldp_parsers.LLDP_CHASSIS_ID_NM: SWITCH_ID_ITEM_NAME, - lldp_parsers.LLDP_PORT_ID_NM: PORT_ID_ITEM_NAME} - - -class GenericLocalLinkConnectionHook(base.ProcessingHook): - """Process mandatory LLDP packet fields - - Non-vendor specific LLDP packet fields processed for each NIC found for a - baremetal node, port ID and chassis ID. These fields if found and if valid - will be saved into the local link connection info port id and switch id - fields on the Ironic port that represents that NIC. 
- """ - - def _get_local_link_patch(self, tlv_type, tlv_value, port, node_info): - try: - data = bytearray(binascii.unhexlify(tlv_value)) - except TypeError: - LOG.warning("TLV value for TLV type %d not in correct" - "format, ensure TLV value is in " - "hexidecimal format when sent to " - "inspector", tlv_type, node_info=node_info) - return - - item = value = None - if tlv_type == tlv.LLDP_TLV_PORT_ID: - try: - port_id = tlv.PortId.parse(data) - except (core.MappingError, netaddr.AddrFormatError) as e: - LOG.warning("TLV parse error for Port ID: %s", e, - node_info=node_info) - return - - item = PORT_ID_ITEM_NAME - value = port_id.value - elif tlv_type == tlv.LLDP_TLV_CHASSIS_ID: - try: - chassis_id = tlv.ChassisId.parse(data) - except (core.MappingError, netaddr.AddrFormatError) as e: - LOG.warning("TLV parse error for Chassis ID: %s", e, - node_info=node_info) - return - - # Only accept mac address for chassis ID - if 'mac_address' in chassis_id.subtype: - item = SWITCH_ID_ITEM_NAME - value = chassis_id.value - - if item and value: - if (not CONF.processing.overwrite_existing and - item in port.local_link_connection): - return - return {'op': 'add', - 'path': '/local_link_connection/%s' % item, - 'value': value} - - def _get_lldp_processed_patch(self, name, item, lldp_proc_data, port): - - if 'lldp_processed' not in lldp_proc_data: - return - - value = lldp_proc_data['lldp_processed'].get(name) - - if value: - if (not CONF.processing.overwrite_existing and - item in port.local_link_connection): - return - return {'op': 'add', - 'path': '/local_link_connection/%s' % item, - 'value': value} - - def before_update(self, introspection_data, node_info, **kwargs): - """Process LLDP data and patch Ironic port local link connection""" - inventory = utils.get_inventory(introspection_data) - - ironic_ports = node_info.ports() - - for iface in inventory['interfaces']: - if iface['name'] not in introspection_data['all_interfaces']: - continue - - mac_address = iface['mac_address'] - port = ironic_ports.get(mac_address) - if not port: - LOG.debug("Skipping LLC processing for interface %s, matching " - "port not found in Ironic.", mac_address, - node_info=node_info, data=introspection_data) - continue - - lldp_data = iface.get('lldp') - if lldp_data is None: - LOG.warning("No LLDP Data found for interface %s", - mac_address, node_info=node_info, - data=introspection_data) - continue - - patches = [] - # First check if lldp data was already processed by lldp_basic - # plugin which stores data in 'all_interfaces' - proc_data = introspection_data['all_interfaces'][iface['name']] - - for name, item in LLDP_PROC_DATA_MAPPING.items(): - patch = self._get_lldp_processed_patch(name, item, - proc_data, port) - if patch is not None: - patches.append(patch) - - # If no processed lldp data was available then parse raw lldp data - if not patches: - for tlv_type, tlv_value in lldp_data: - patch = self._get_local_link_patch(tlv_type, tlv_value, - port, node_info) - if patch is not None: - patches.append(patch) - - node_info.patch_port(port, patches) diff --git a/ironic_inspector/plugins/pci_devices.py b/ironic_inspector/plugins/pci_devices.py deleted file mode 100644 index cffd8be..0000000 --- a/ironic_inspector/plugins/pci_devices.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Gather and distinguish PCI devices from inventory.""" - -import collections -import json - -from oslo_config import cfg - -from ironic_inspector.common.i18n import _ -from ironic_inspector.plugins import base -from ironic_inspector import utils - -PCI_DEVICES_OPTS = [ - cfg.MultiStrOpt('alias', - default=[], - help=_('An alias for PCI device identified by ' - '\'vendor_id\' and \'product_id\' fields. Format: ' - '{"vendor_id": "1234", "product_id": "5678", ' - '"name": "pci_dev1"}')), -] - - -def list_opts(): - return [ - ('pci_devices', PCI_DEVICES_OPTS) - ] - -CONF = cfg.CONF -CONF.register_opts(PCI_DEVICES_OPTS, group='pci_devices') - -LOG = utils.getProcessingLogger(__name__) - - -def _parse_pci_alias_entry(): - parsed_pci_devices = [] - for pci_alias_entry in CONF.pci_devices.alias: - try: - parsed_entry = json.loads(pci_alias_entry) - if set(parsed_entry) != {'vendor_id', 'product_id', 'name'}: - raise KeyError("The 'alias' entry should contain " - "exactly 'vendor_id', 'product_id' and " - "'name' keys") - parsed_pci_devices.append(parsed_entry) - except (ValueError, KeyError) as ex: - LOG.error("Error parsing 'alias' option: %s", ex) - return {(dev['vendor_id'], dev['product_id']): dev['name'] - for dev in parsed_pci_devices} - - -class PciDevicesHook(base.ProcessingHook): - """Processing hook for counting and distinguishing various PCI devices. - - That information can be later used by nova for node scheduling. - """ - aliases = _parse_pci_alias_entry() - - def _found_pci_devices_count(self, found_pci_devices): - return collections.Counter([(dev['vendor_id'], dev['product_id']) - for dev in found_pci_devices - if (dev['vendor_id'], dev['product_id']) - in self.aliases]) - - def before_update(self, introspection_data, node_info, **kwargs): - if 'pci_devices' not in introspection_data: - if CONF.pci_devices.alias: - LOG.warning('No PCI devices information was received from ' - 'the ramdisk.') - return - alias_count = {self.aliases[id_pair]: count for id_pair, count in - self._found_pci_devices_count( - introspection_data['pci_devices']).items()} - if alias_count: - node_info.update_capabilities(**alias_count) - LOG.info('Found the following PCI devices: %s', alias_count) diff --git a/ironic_inspector/plugins/raid_device.py b/ironic_inspector/plugins/raid_device.py deleted file mode 100644 index 58eea40..0000000 --- a/ironic_inspector/plugins/raid_device.py +++ /dev/null @@ -1,102 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
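
As an illustration of the alias option above, the sketch below mirrors _parse_pci_alias_entry() and _found_pci_devices_count(); the alias JSON follows the format from the option's help string, and the device list is invented sample data.

import collections
import json

# Alias format copied from the [pci_devices]/alias help string; the
# vendor/product IDs are placeholders, not real devices.
aliases_raw = ['{"vendor_id": "1234", "product_id": "5678", '
               '"name": "pci_dev1"}']
aliases = {(d['vendor_id'], d['product_id']): d['name']
           for d in map(json.loads, aliases_raw)}

# Shape matches introspection_data['pci_devices'] as consumed above.
found = [{'vendor_id': '1234', 'product_id': '5678'},
         {'vendor_id': '1234', 'product_id': '5678'},
         {'vendor_id': 'ffff', 'product_id': '0001'}]  # not aliased, ignored

counts = collections.Counter((d['vendor_id'], d['product_id'])
                             for d in found
                             if (d['vendor_id'], d['product_id']) in aliases)
print({aliases[k]: v for k, v in counts.items()})  # {'pci_dev1': 2}
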
- -"""Gather root device hint from recognized block devices.""" - -from ironic_inspector.plugins import base -from ironic_inspector import utils - - -LOG = utils.getProcessingLogger(__name__) - - -class RaidDeviceDetection(base.ProcessingHook): - """Processing hook for learning the root device after RAID creation. - - The plugin can figure out the root device in 2 runs. First, it saves the - discovered block device serials in node.extra. The second run will check - the difference between the recently discovered block devices and the - previously saved ones. After saving the root device in node.properties, it - will delete the temporarily saved block device serials in node.extra. - - This way, it helps to figure out the root device hint in cases when - otherwise Ironic doesn't have enough information to do so. Such a usecase - is DRAC RAID configuration where the BMC doesn't provide any useful - information about the created RAID disks. Using this plugin immediately - before and after creating the root RAID device will solve the issue of root - device hints. - - In cases where there's no RAID volume on the node, the standard plugin will - fail due to the missing local_gb value. This plugin fakes the missing - value, until it's corrected during later runs. Note, that for this to work - the plugin needs to take precedence over the standard plugin. - """ - - def _get_serials(self, data): - if 'inventory' in data: - return [x['serial'] for x in data['inventory'].get('disks', ()) - if x.get('serial')] - elif 'block_devices' in data: - return data['block_devices'].get('serials', ()) - - def before_processing(self, introspection_data, **kwargs): - """Adds fake local_gb value if it's missing from introspection_data.""" - if not introspection_data.get('local_gb'): - LOG.info('No volume is found on the node. 
Adding a fake ' - 'value for "local_gb"', data=introspection_data) - introspection_data['local_gb'] = 1 - - def before_update(self, introspection_data, node_info, **kwargs): - current_devices = self._get_serials(introspection_data) - if not current_devices: - LOG.warning('No block device was received from ramdisk', - node_info=node_info, data=introspection_data) - return - - node = node_info.node() - - if 'root_device' in node.properties: - LOG.info('Root device is already known for the node', - node_info=node_info, data=introspection_data) - return - - if 'block_devices' in node.extra: - # Compare previously discovered devices with the current ones - previous_devices = node.extra['block_devices']['serials'] - new_devices = [device for device in current_devices - if device not in previous_devices] - - if len(new_devices) > 1: - LOG.warning('Root device cannot be identified because ' - 'multiple new devices were found', - node_info=node_info, data=introspection_data) - return - elif len(new_devices) == 0: - LOG.warning('No new devices were found', - node_info=node_info, data=introspection_data) - return - - node_info.patch([ - {'op': 'remove', - 'path': '/extra/block_devices'}, - {'op': 'add', - 'path': '/properties/root_device', - 'value': {'serial': new_devices[0]}} - ]) - - else: - # No previously discovered devices - save the inspector block - # devices in node.extra - node_info.patch([{'op': 'add', - 'path': '/extra/block_devices', - 'value': {'serials': current_devices}}]) diff --git a/ironic_inspector/plugins/rules.py b/ironic_inspector/plugins/rules.py deleted file mode 100644 index adc1942..0000000 --- a/ironic_inspector/plugins/rules.py +++ /dev/null @@ -1,153 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
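
A condensed sketch of the two-run comparison RaidDeviceDetection performs above; the serial numbers are invented sample data.

# First run stored these serials in node.extra['block_devices']['serials'].
previous = ['WD-0001', 'WD-0002']
# Second run, after RAID creation, reports one extra device.
current = ['WD-0001', 'WD-0002', 'RAID-9999']

new_devices = [serial for serial in current if serial not in previous]
if len(new_devices) == 1:
    root_device_hint = {'serial': new_devices[0]}
    print(root_device_hint)  # {'serial': 'RAID-9999'} -> /properties/root_device
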
- -"""Standard plugins for rules API.""" - -import operator -import re - -import netaddr - -from ironic_inspector.common.i18n import _ -from ironic_inspector.plugins import base -from ironic_inspector import utils - - -def coerce(value, expected): - if isinstance(expected, float): - return float(value) - elif isinstance(expected, int): - return int(value) - else: - return value - - -class SimpleCondition(base.RuleConditionPlugin): - op = None - - def check(self, node_info, field, params, **kwargs): - value = params['value'] - return self.op(coerce(field, value), value) - - -class EqCondition(SimpleCondition): - op = operator.eq - - -class LtCondition(SimpleCondition): - op = operator.lt - - -class GtCondition(SimpleCondition): - op = operator.gt - - -class LeCondition(SimpleCondition): - op = operator.le - - -class GeCondition(SimpleCondition): - op = operator.ge - - -class NeCondition(SimpleCondition): - op = operator.ne - - -class EmptyCondition(base.RuleConditionPlugin): - REQUIRED_PARAMS = set() - ALLOW_NONE = True - - def check(self, node_info, field, params, **kwargs): - return field in ('', None, [], {}) - - -class NetCondition(base.RuleConditionPlugin): - def validate(self, params, **kwargs): - super(NetCondition, self).validate(params, **kwargs) - # Make sure it does not raise - try: - netaddr.IPNetwork(params['value']) - except netaddr.AddrFormatError as exc: - raise ValueError('invalid value: %s' % exc) - - def check(self, node_info, field, params, **kwargs): - network = netaddr.IPNetwork(params['value']) - return netaddr.IPAddress(field) in network - - -class ReCondition(base.RuleConditionPlugin): - def validate(self, params, **kwargs): - try: - re.compile(params['value']) - except re.error as exc: - raise ValueError(_('invalid regular expression: %s') % exc) - - -class MatchesCondition(ReCondition): - def check(self, node_info, field, params, **kwargs): - regexp = params['value'] - if regexp[-1] != '$': - regexp += '$' - return re.match(regexp, str(field)) is not None - - -class ContainsCondition(ReCondition): - def check(self, node_info, field, params, **kwargs): - return re.search(params['value'], str(field)) is not None - - -class FailAction(base.RuleActionPlugin): - REQUIRED_PARAMS = {'message'} - - def apply(self, node_info, params, **kwargs): - raise utils.Error(params['message'], node_info=node_info) - - -class SetAttributeAction(base.RuleActionPlugin): - REQUIRED_PARAMS = {'path', 'value'} - # TODO(dtantsur): proper validation of path - - FORMATTED_PARAMS = ['value'] - - def apply(self, node_info, params, **kwargs): - node_info.patch([{'op': 'add', 'path': params['path'], - 'value': params['value']}]) - - -class SetCapabilityAction(base.RuleActionPlugin): - REQUIRED_PARAMS = {'name'} - OPTIONAL_PARAMS = {'value'} - - FORMATTED_PARAMS = ['value'] - - def apply(self, node_info, params, **kwargs): - node_info.update_capabilities( - **{params['name']: params.get('value')}) - - -class ExtendAttributeAction(base.RuleActionPlugin): - REQUIRED_PARAMS = {'path', 'value'} - OPTIONAL_PARAMS = {'unique'} - # TODO(dtantsur): proper validation of path - - FORMATTED_PARAMS = ['value'] - - def apply(self, node_info, params, **kwargs): - def _replace(values): - value = params['value'] - if not params.get('unique') or value not in values: - values.append(value) - return values - - node_info.replace_field(params['path'], _replace, default=[]) diff --git a/ironic_inspector/plugins/standard.py b/ironic_inspector/plugins/standard.py deleted file mode 100644 index 37089e7..0000000 --- 
a/ironic_inspector/plugins/standard.py +++ /dev/null @@ -1,299 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Standard set of plugins.""" - - -from ironic_lib import utils as il_utils -import netaddr -from oslo_config import cfg -from oslo_utils import netutils -from oslo_utils import units -import six - -from ironic_inspector.common.i18n import _ -from ironic_inspector.plugins import base -from ironic_inspector import utils - -CONF = cfg.CONF - - -LOG = utils.getProcessingLogger('ironic_inspector.plugins.standard') - - -class RootDiskSelectionHook(base.ProcessingHook): - """Smarter root disk selection using Ironic root device hints. - - This hook must always go before SchedulerHook, otherwise root_disk field - might not be updated. - """ - - def before_update(self, introspection_data, node_info, **kwargs): - """Detect root disk from root device hints and IPA inventory.""" - hints = node_info.node().properties.get('root_device') - if not hints: - LOG.debug('Root device hints are not provided', - node_info=node_info, data=introspection_data) - return - - inventory = utils.get_inventory(introspection_data, - node_info=node_info) - try: - device = il_utils.match_root_device_hints(inventory['disks'], - hints) - except (TypeError, ValueError) as e: - raise utils.Error( - _('No disks could be found using the root device hints ' - '%(hints)s because they failed to validate. 
'
-                'Error: %(error)s') % {'hints': hints, 'error': e},
-                node_info=node_info, data=introspection_data)
-
-        if not device:
-            raise utils.Error(_('No disks satisfied root device hints'),
-                              node_info=node_info, data=introspection_data)
-
-        LOG.debug('Disk %(disk)s of size %(size)s satisfies '
-                  'root device hints',
-                  {'disk': device.get('name'), 'size': device['size']},
-                  node_info=node_info, data=introspection_data)
-        introspection_data['root_disk'] = device
-
-
-class SchedulerHook(base.ProcessingHook):
-    """Nova scheduler required properties."""
-
-    KEYS = ('cpus', 'cpu_arch', 'memory_mb', 'local_gb')
-
-    def before_update(self, introspection_data, node_info, **kwargs):
-        """Update node with scheduler properties."""
-        inventory = utils.get_inventory(introspection_data,
-                                        node_info=node_info)
-        errors = []
-
-        root_disk = introspection_data.get('root_disk')
-        if root_disk:
-            introspection_data['local_gb'] = root_disk['size'] // units.Gi
-            if CONF.processing.disk_partitioning_spacing:
-                introspection_data['local_gb'] -= 1
-        else:
-            introspection_data['local_gb'] = 0
-
-        try:
-            introspection_data['cpus'] = int(inventory['cpu']['count'])
-            introspection_data['cpu_arch'] = six.text_type(
-                inventory['cpu']['architecture'])
-        except (KeyError, ValueError, TypeError):
-            errors.append(_('malformed or missing CPU information: %s') %
-                          inventory.get('cpu'))
-
-        try:
-            introspection_data['memory_mb'] = int(
-                inventory['memory']['physical_mb'])
-        except (KeyError, ValueError, TypeError):
-            errors.append(_('malformed or missing memory information: %s; '
-                            'introspection requires physical memory size '
-                            'from dmidecode') % inventory.get('memory'))
-
-        if errors:
-            raise utils.Error(_('The following problems were encountered: '
-                                '%s') % '; '.join(errors),
-                              node_info=node_info, data=introspection_data)
-
-        LOG.info('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, '
-                 'memory %(memory_mb)s MiB, disk %(local_gb)s GiB',
-                 {key: introspection_data.get(key) for key in self.KEYS},
-                 node_info=node_info, data=introspection_data)
-
-        overwrite = CONF.processing.overwrite_existing
-        properties = {key: str(introspection_data[key])
-                      for key in self.KEYS if overwrite or
-                      not node_info.node().properties.get(key)}
-        node_info.update_properties(**properties)
-
-
-class ValidateInterfacesHook(base.ProcessingHook):
-    """Hook to validate network interfaces."""
-
-    def __init__(self):
-        # Some configuration checks
-        if (CONF.processing.add_ports == 'disabled' and
-                CONF.processing.keep_ports == 'added'):
-            msg = _("Configuration error: add_ports set to disabled "
-                    "and keep_ports set to added. Please change keep_ports "
-                    "to all.")
-            raise utils.Error(msg)
-
-    def _get_interfaces(self, data=None):
-        """Convert inventory to a dict with interfaces.
-
-        :return: dict interface name -> dict with keys 'mac' and 'ip'
-        """
-        result = {}
-        inventory = utils.get_inventory(data)
-
-        pxe_mac = utils.get_pxe_mac(data)
-
-        for iface in inventory['interfaces']:
-            name = iface.get('name')
-            mac = iface.get('mac_address')
-            ip = iface.get('ipv4_address')
-            client_id = iface.get('client_id')
-
-            if not name:
-                LOG.error('Malformed interface record: %s',
-                          iface, data=data)
-                continue
-
-            if not mac:
-                LOG.debug('Skipping interface %s without link information',
-                          name, data=data)
-                continue
-
-            if not netutils.is_valid_mac(mac):
-                LOG.warning('MAC %(mac)s for interface %(name)s is '
-                            'not valid, skipping',
-                            {'mac': mac, 'name': name},
-                            data=data)
-                continue
-
-            mac = mac.lower()
-
-            LOG.debug('Found interface %(name)s with MAC "%(mac)s", '
-                      'IP address "%(ip)s" and client_id "%(client_id)s"',
-                      {'name': name, 'mac': mac, 'ip': ip,
-                       'client_id': client_id}, data=data)
-            result[name] = {'ip': ip, 'mac': mac, 'client_id': client_id,
-                            'pxe': (mac == pxe_mac)}
-
-        return result
-
-    def _validate_interfaces(self, interfaces, data=None):
-        """Validate interfaces for correctness and suitability.
-
-        :return: dict interface name -> dict with keys 'mac' and 'ip'
-        """
-        if not interfaces:
-            raise utils.Error(_('No interfaces supplied by the ramdisk'),
-                              data=data)
-
-        pxe_mac = utils.get_pxe_mac(data)
-        if not pxe_mac and CONF.processing.add_ports == 'pxe':
-            LOG.warning('No boot interface provided in the introspection '
-                        'data, will add all ports with IP addresses')
-
-        result = {}
-
-        for name, iface in interfaces.items():
-            ip = iface.get('ip')
-            pxe = iface.get('pxe', True)
-
-            if name == 'lo' or (ip and netaddr.IPAddress(ip).is_loopback()):
-                LOG.debug('Skipping local interface %s', name, data=data)
-                continue
-
-            if CONF.processing.add_ports == 'pxe' and pxe_mac and not pxe:
-                LOG.debug('Skipping interface %s as it was not PXE booting',
-                          name, data=data)
-                continue
-            elif CONF.processing.add_ports != 'all' and not ip:
-                LOG.debug('Skipping interface %s as it did not have '
-                          'an IP address assigned during the ramdisk run',
-                          name, data=data)
-                continue
-
-            result[name] = iface
-
-        if not result:
-            raise utils.Error(_('No suitable interfaces found in %s') %
-                              interfaces, data=data)
-        return result
-
-    def before_processing(self, introspection_data, **kwargs):
-        """Validate information about network interfaces."""
-
-        bmc_address = utils.get_ipmi_address_from_data(introspection_data)
-        if bmc_address:
-            introspection_data['ipmi_address'] = bmc_address
-        else:
-            LOG.debug('No BMC address provided in introspection data, '
-                      'assuming a virtual environment',
-                      data=introspection_data)
-
-        all_interfaces = self._get_interfaces(introspection_data)
-
-        interfaces = self._validate_interfaces(all_interfaces,
-                                               introspection_data)
-
-        LOG.info('Using network interface(s): %s',
-                 ', '.join('%s %s' % (name, items)
-                           for (name, items) in interfaces.items()),
-                 data=introspection_data)
-
-        introspection_data['all_interfaces'] = all_interfaces
-        introspection_data['interfaces'] = interfaces
-        valid_macs = [iface['mac'] for iface in interfaces.values()]
-        introspection_data['macs'] = valid_macs
-
-    def before_update(self, introspection_data, node_info, **kwargs):
-        """Create new ports and drop ports that are not present in the data."""
-        interfaces = introspection_data.get('interfaces')
-        if CONF.processing.add_ports != 'disabled':
-            node_info.create_ports(list(interfaces.values()))
-
-        if CONF.processing.keep_ports == 'present':
-            expected_macs = {
-                iface['mac']
-                for iface in introspection_data['all_interfaces'].values()
-            }
-        elif CONF.processing.keep_ports == 'added':
-            expected_macs = set(introspection_data['macs'])
-
-        if CONF.processing.keep_ports != 'all':
-            # list is required as we modify the underlying dict
-            for port in list(node_info.ports().values()):
-                if port.address not in expected_macs:
-                    LOG.info("Deleting port %(port)s as its MAC %(mac)s is "
-                             "not in the expected MAC list %(expected)s",
-                             {'port': port.uuid,
-                              'mac': port.address,
-                              'expected': list(sorted(expected_macs))},
-                             node_info=node_info, data=introspection_data)
-                    node_info.delete_port(port)
-
-        if CONF.processing.overwrite_existing:
-            # Make sure pxe_enabled is up-to-date
-            ports = node_info.ports()
-            for iface in introspection_data['interfaces'].values():
-                try:
-                    port = ports[iface['mac']]
-                except KeyError:
-                    continue
-
-                real_pxe = iface.get('pxe', True)
-                if port.pxe_enabled != real_pxe:
-                    LOG.info('Fixing pxe_enabled=%(val)s on port %(port)s '
-                             'to match introspected data',
-                             {'port': port.address, 'val': real_pxe},
-                             node_info=node_info, data=introspection_data)
-                    node_info.patch_port(port, [{'op': 'replace',
-                                                 'path': '/pxe_enabled',
-                                                 'value': real_pxe}])
-
-
-class RamdiskErrorHook(base.ProcessingHook):
-    """Hook to process errors sent from the ramdisk."""
-
-    def before_processing(self, introspection_data, **kwargs):
-        error = introspection_data.get('error')
-        if error:
-            raise utils.Error(_('Ramdisk reported error: %s') % error,
-                              data=introspection_data)
diff --git a/ironic_inspector/process.py b/ironic_inspector/process.py
deleted file mode 100644
index 068070d..0000000
--- a/ironic_inspector/process.py
+++ /dev/null
@@ -1,390 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
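
The keep_ports branches shown above reduce to a small decision function; the following sketch (helper name and sample data invented) mirrors that logic.

def expected_port_macs(introspection_data, keep_ports):
    # Mirrors ValidateInterfacesHook.before_update: which MACs survive.
    if keep_ports == 'present':
        return {iface['mac']
                for iface in introspection_data['all_interfaces'].values()}
    if keep_ports == 'added':
        return set(introspection_data['macs'])
    return None  # 'all': nothing is deleted


data = {'all_interfaces': {'eth0': {'mac': 'aa:bb:cc:dd:ee:f0'},
                           'eth1': {'mac': 'aa:bb:cc:dd:ee:f1'}},
        'macs': ['aa:bb:cc:dd:ee:f0']}  # only the PXE-booting NIC was added
print(expected_port_macs(data, 'present'))  # both MACs kept
print(expected_port_macs(data, 'added'))    # only the added MAC kept
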
- -"""Handling introspection data from the ramdisk.""" - -import copy -import datetime -import json -import os - -from oslo_config import cfg -from oslo_serialization import base64 -from oslo_utils import excutils -from oslo_utils import timeutils - -from ironic_inspector.common.i18n import _ -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector.common import swift -from ironic_inspector import firewall -from ironic_inspector import introspection_state as istate -from ironic_inspector import node_cache -from ironic_inspector.plugins import base as plugins_base -from ironic_inspector import rules -from ironic_inspector import utils - -CONF = cfg.CONF - -LOG = utils.getProcessingLogger(__name__) - -_STORAGE_EXCLUDED_KEYS = {'logs'} -_UNPROCESSED_DATA_STORE_SUFFIX = 'UNPROCESSED' - - -def _store_logs(introspection_data, node_info): - logs = introspection_data.get('logs') - if not logs: - LOG.warning('No logs were passed by the ramdisk', - data=introspection_data, node_info=node_info) - return - - if not CONF.processing.ramdisk_logs_dir: - LOG.warning('Failed to store logs received from the ramdisk ' - 'because ramdisk_logs_dir configuration option ' - 'is not set', - data=introspection_data, node_info=node_info) - return - - fmt_args = { - 'uuid': node_info.uuid if node_info is not None else 'unknown', - 'mac': (utils.get_pxe_mac(introspection_data) or - 'unknown').replace(':', ''), - 'dt': datetime.datetime.utcnow(), - 'bmc': (utils.get_ipmi_address_from_data(introspection_data) or - 'unknown') - } - - file_name = CONF.processing.ramdisk_logs_filename_format.format(**fmt_args) - - try: - if not os.path.exists(CONF.processing.ramdisk_logs_dir): - os.makedirs(CONF.processing.ramdisk_logs_dir) - with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name), - 'wb') as fp: - fp.write(base64.decode_as_bytes(logs)) - except EnvironmentError: - LOG.exception('Could not store the ramdisk logs', - data=introspection_data, node_info=node_info) - else: - LOG.info('Ramdisk logs were stored in file %s', file_name, - data=introspection_data, node_info=node_info) - - -def _find_node_info(introspection_data, failures): - try: - return node_cache.find_node( - bmc_address=introspection_data.get('ipmi_address'), - mac=utils.get_valid_macs(introspection_data)) - except utils.NotFoundInCacheError as exc: - not_found_hook = plugins_base.node_not_found_hook_manager() - if not_found_hook is None: - failures.append(_('Look up error: %s') % exc) - return - - LOG.debug('Running node_not_found_hook %s', - CONF.processing.node_not_found_hook, - data=introspection_data) - - # NOTE(sambetts): If not_found_hook is not none it means that we were - # unable to find the node in the node cache and there is a node not - # found hook defined so we should try to send the introspection data - # to that hook to generate the node info before bubbling up the error. 
-        try:
-            node_info = not_found_hook.driver(introspection_data)
-            if node_info:
-                return node_info
-            failures.append(_("Node not found hook returned nothing"))
-        except Exception as exc:
-            failures.append(_("Node not found hook failed: %s") % exc)
-    except utils.Error as exc:
-        failures.append(_('Look up error: %s') % exc)
-
-
-def _run_pre_hooks(introspection_data, failures):
-    hooks = plugins_base.processing_hooks_manager()
-    for hook_ext in hooks:
-        LOG.debug('Running pre-processing hook %s', hook_ext.name,
-                  data=introspection_data)
-        # NOTE(dtantsur): catch exceptions, so that we have a chance to
-        # update the node introspection status after look up
-        try:
-            hook_ext.obj.before_processing(introspection_data)
-        except utils.Error as exc:
-            LOG.error('Hook %(hook)s failed, delaying error report '
-                      'until node look up: %(error)s',
-                      {'hook': hook_ext.name, 'error': exc},
-                      data=introspection_data)
-            failures.append('Preprocessing hook %(hook)s: %(error)s' %
-                            {'hook': hook_ext.name, 'error': exc})
-        except Exception as exc:
-            LOG.exception('Hook %(hook)s failed, delaying error report '
-                          'until node look up: %(error)s',
-                          {'hook': hook_ext.name, 'error': exc},
-                          data=introspection_data)
-            failures.append(_('Unexpected exception %(exc_class)s during '
-                              'preprocessing in hook %(hook)s: %(error)s') %
-                            {'hook': hook_ext.name,
-                             'exc_class': exc.__class__.__name__,
-                             'error': exc})
-
-
-def _filter_data_excluded_keys(data):
-    return {k: v for k, v in data.items()
-            if k not in _STORAGE_EXCLUDED_KEYS}
-
-
-def _store_data(node_info, data, suffix=None):
-    if CONF.processing.store_data != 'swift':
-        LOG.debug("Swift support is disabled, introspection data "
-                  "won't be stored", node_info=node_info)
-        return
-
-    swift_object_name = swift.store_introspection_data(
-        _filter_data_excluded_keys(data),
-        node_info.uuid,
-        suffix=suffix
-    )
-    LOG.info('Introspection data was stored in Swift in object '
-             '%s', swift_object_name, node_info=node_info)
-    if CONF.processing.store_data_location:
-        node_info.patch([{'op': 'add', 'path': '/extra/%s' %
-                          CONF.processing.store_data_location,
-                          'value': swift_object_name}])
-
-
-def _store_unprocessed_data(node_info, data):
-    # runs in background
-    try:
-        _store_data(node_info, data,
-                    suffix=_UNPROCESSED_DATA_STORE_SUFFIX)
-    except Exception:
-        LOG.exception('Encountered exception saving unprocessed '
-                      'introspection data', node_info=node_info,
-                      data=data)
-
-
-def _get_unprocessed_data(uuid):
-    if CONF.processing.store_data == 'swift':
-        LOG.debug('Fetching unprocessed introspection data from '
-                  'Swift for %s', uuid)
-        return json.loads(
-            swift.get_introspection_data(
-                uuid,
-                suffix=_UNPROCESSED_DATA_STORE_SUFFIX
-            )
-        )
-    else:
-        raise utils.Error(_('Swift support is disabled'), code=400)
-
-
-def process(introspection_data):
-    """Process data from the ramdisk.
-
-    This function relies heavily on the hooks to do the actual data
-    processing.
-    """
-    unprocessed_data = copy.deepcopy(introspection_data)
-    failures = []
-    _run_pre_hooks(introspection_data, failures)
-    node_info = _find_node_info(introspection_data, failures)
-    if node_info:
-        # Locking is already done in find_node() but may be not done in a
-        # node_not_found hook
-        node_info.acquire_lock()
-
-    if failures or node_info is None:
-        msg = _('The following failures happened during running '
-                'pre-processing hooks:\n%s') % '\n'.join(failures)
-        if node_info is not None:
-            node_info.finished(error='\n'.join(failures))
-        _store_logs(introspection_data, node_info)
-        raise utils.Error(msg, node_info=node_info, data=introspection_data)
-
-    LOG.info('Matching node is %s', node_info.uuid,
-             node_info=node_info, data=introspection_data)
-
-    if node_info.finished_at is not None:
-        # race condition or introspection canceled
-        raise utils.Error(_('Node processing already finished with '
-                            'error: %s') % node_info.error,
-                          node_info=node_info, code=400)
-
-    # NOTE(mkovacik): store the data now, when we're sure that a background
-    # thread won't race with another process() or introspect.abort() call
-    utils.executor().submit(_store_unprocessed_data, node_info,
-                            unprocessed_data)
-
-    try:
-        node = node_info.node()
-    except ir_utils.NotFound as exc:
-        with excutils.save_and_reraise_exception():
-            node_info.finished(error=str(exc))
-            _store_logs(introspection_data, node_info)
-
-    try:
-        result = _process_node(node_info, node, introspection_data)
-    except utils.Error as exc:
-        node_info.finished(error=str(exc))
-        with excutils.save_and_reraise_exception():
-            _store_logs(introspection_data, node_info)
-    except Exception as exc:
-        LOG.exception('Unexpected exception during processing')
-        msg = _('Unexpected exception %(exc_class)s during processing: '
-                '%(error)s') % {'exc_class': exc.__class__.__name__,
-                                'error': exc}
-        node_info.finished(error=msg)
-        _store_logs(introspection_data, node_info)
-        raise utils.Error(msg, node_info=node_info, data=introspection_data,
-                          code=500)
-
-    if CONF.processing.always_store_ramdisk_logs:
-        _store_logs(introspection_data, node_info)
-    return result
-
-
-def _run_post_hooks(node_info, introspection_data):
-    hooks = plugins_base.processing_hooks_manager()
-
-    for hook_ext in hooks:
-        LOG.debug('Running post-processing hook %s', hook_ext.name,
-                  node_info=node_info, data=introspection_data)
-        hook_ext.obj.before_update(introspection_data, node_info)
-
-
-@node_cache.fsm_transition(istate.Events.process, reentrant=False)
-def _process_node(node_info, node, introspection_data):
-    # NOTE(dtantsur): repeat the check in case something changed
-    ir_utils.check_provision_state(node)
-    _run_post_hooks(node_info, introspection_data)
-    _store_data(node_info, introspection_data)
-
-    ironic = ir_utils.get_client()
-    firewall.update_filters(ironic)
-
-    node_info.invalidate_cache()
-    rules.apply(node_info, introspection_data)
-
-    resp = {'uuid': node.uuid}
-
-    utils.executor().submit(_finish, node_info, ironic, introspection_data,
-                            power_off=CONF.processing.power_off)
-
-    return resp
-
-
-@node_cache.fsm_transition(istate.Events.finish)
-def _finish(node_info, ironic, introspection_data, power_off=True):
-    if power_off:
-        LOG.debug('Forcing power off of node %s', node_info.uuid)
-        try:
-            ironic.node.set_power_state(node_info.uuid, 'off')
-        except Exception as exc:
-            if node_info.node().provision_state == 'enroll':
-                LOG.info("Failed to power off the node in 'enroll' "
-                         "state, ignoring; error was %s", exc,
-                         node_info=node_info, data=introspection_data)
-
else: - msg = (_('Failed to power off node %(node)s, check ' - 'its power management configuration: ' - '%(exc)s') % {'node': node_info.uuid, 'exc': - exc}) - node_info.finished(error=msg) - raise utils.Error(msg, node_info=node_info, - data=introspection_data) - LOG.info('Node powered-off', node_info=node_info, - data=introspection_data) - - node_info.finished() - LOG.info('Introspection finished successfully', - node_info=node_info, data=introspection_data) - - -def reapply(node_ident): - """Re-apply introspection steps. - - Re-apply preprocessing, postprocessing and introspection rules on - stored data. - - :param node_ident: node UUID or name - :raises: utils.Error - - """ - - LOG.debug('Processing re-apply introspection request for node ' - 'UUID: %s', node_ident) - node_info = node_cache.get_node(node_ident, locked=False) - if not node_info.acquire_lock(blocking=False): - # Note (mkovacik): it should be sufficient to check data - # presence & locking. If either introspection didn't start - # yet, was in waiting state or didn't finish yet, either data - # won't be available or locking would fail - raise utils.Error(_('Node locked, please, try again later'), - node_info=node_info, code=409) - - utils.executor().submit(_reapply, node_info) - - -def _reapply(node_info): - # runs in background - try: - node_info.started_at = timeutils.utcnow() - node_info.commit() - introspection_data = _get_unprocessed_data(node_info.uuid) - except Exception as exc: - LOG.exception('Encountered exception while fetching ' - 'stored introspection data', - node_info=node_info) - msg = (_('Unexpected exception %(exc_class)s while fetching ' - 'unprocessed introspection data from Swift: %(error)s') % - {'exc_class': exc.__class__.__name__, 'error': exc}) - node_info.finished(error=msg) - return - - try: - ironic = ir_utils.get_client() - except Exception as exc: - msg = _('Encountered an exception while getting the Ironic client: ' - '%s') % exc - LOG.error(msg, node_info=node_info, data=introspection_data) - node_info.fsm_event(istate.Events.error) - node_info.finished(error=msg) - return - - try: - _reapply_with_data(node_info, introspection_data) - except Exception as exc: - node_info.finished(error=str(exc)) - return - - _finish(node_info, ironic, introspection_data, - power_off=False) - - LOG.info('Successfully reapplied introspection on stored ' - 'data', node_info=node_info, data=introspection_data) - - -@node_cache.fsm_event_before(istate.Events.reapply) -@node_cache.triggers_fsm_error_transition() -def _reapply_with_data(node_info, introspection_data): - failures = [] - _run_pre_hooks(introspection_data, failures) - if failures: - raise utils.Error(_('Pre-processing failures detected reapplying ' - 'introspection on stored data:\n%s') % - '\n'.join(failures), node_info=node_info) - - _run_post_hooks(node_info, introspection_data) - _store_data(node_info, introspection_data) - node_info.invalidate_cache() - rules.apply(node_info, introspection_data) diff --git a/ironic_inspector/pxe_filter/__init__.py b/ironic_inspector/pxe_filter/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ironic_inspector/pxe_filter/base.py b/ironic_inspector/pxe_filter/base.py deleted file mode 100644 index e49fc4c..0000000 --- a/ironic_inspector/pxe_filter/base.py +++ /dev/null @@ -1,224 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Base code for PXE boot filtering.""" - -import contextlib -import functools - -from automaton import exceptions as automaton_errors -from automaton import machines -from eventlet import semaphore -from futurist import periodics -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log -import stevedore - -from ironic_inspector.common.i18n import _ -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector.pxe_filter import interface - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - -_STEVEDORE_DRIVER_NAMESPACE = 'ironic_inspector.pxe_filter' - - -class InvalidFilterDriverState(RuntimeError): - """The fsm of the filter driver raised an error.""" - - -class States(object): - """PXE filter driver states.""" - uninitialized = 'uninitialized' - initialized = 'initialized' - - -class Events(object): - """PXE filter driver transitions.""" - initialize = 'initialize' - sync = 'sync' - reset = 'reset' - - -# a reset is always possible -State_space = [ - { - 'name': States.uninitialized, - 'next_states': { - Events.initialize: States.initialized, - Events.reset: States.uninitialized, - }, - }, - { - 'name': States.initialized, - 'next_states': { - Events.sync: States.initialized, - Events.reset: States.uninitialized, - }, - }, -] - - -def locked_driver_event(event): - """Call driver method having processed the fsm event.""" - def outer(method): - @functools.wraps(method) - def inner(self, *args, **kwargs): - with self.lock, self.fsm_reset_on_error() as fsm: - fsm.process_event(event) - return method(self, *args, **kwargs) - return inner - return outer - - -class BaseFilter(interface.FilterDriver): - """The generic PXE boot filtering interface implementation. - - This driver doesn't do anything but provides a basic synchronization and - initialization logic for some drivers to reuse. Subclasses have to provide - a custom sync() method. - """ - - fsm = machines.FiniteMachine.build(State_space) - fsm.default_start_state = States.uninitialized - - def __init__(self): - super(BaseFilter, self).__init__() - self.lock = semaphore.BoundedSemaphore() - self.fsm.initialize(start_state=States.uninitialized) - - def __str__(self): - return '%(driver)s, state=%(state)s' % { - 'driver': type(self).__name__, 'state': self.state} - - @property - def state(self): - """Current driver state.""" - return self.fsm.current_state - - def reset(self): - """Reset internal driver state. - - This method is called by the fsm_context manager upon exception as well - as by the tear_down_filter method. A subclass might wish to override as - necessary, though must not lock the driver. The overriding subclass - should up-call. - - :returns: nothing. - """ - LOG.debug('Resetting the PXE filter driver %s', self) - # a reset event is always possible - self.fsm.process_event(Events.reset) - - @contextlib.contextmanager - def fsm_reset_on_error(self): - """Reset the filter driver upon generic exception. - - The context is self.fsm. The automaton.exceptions.NotFound error is - cast to the InvalidFilterDriverState error. 
Other exceptions trigger - self.reset() - - :raises: InvalidFilterDriverState - :returns: nothing. - """ - LOG.debug('The PXE filter driver %s enters the fsm_reset_on_error ' - 'context', self) - try: - yield self.fsm - except automaton_errors.NotFound as e: - raise InvalidFilterDriverState(_('The PXE filter driver %(driver)s' - ': my fsm encountered an ' - 'exception: %(error)s') % { - 'driver': self, 'error': e}) - except Exception as e: - LOG.exception('The PXE filter %(filter)s encountered an ' - 'exception: %(error)s; resetting the filter', - {'filter': self, 'error': e}) - self.reset() - raise - finally: - LOG.debug('The PXE filter driver %s left the fsm_reset_on_error ' - 'context', self) - - @locked_driver_event(Events.initialize) - def init_filter(self): - """Base driver initialization logic. Locked. - - :raises: InvalidFilterDriverState - :returns: nothing. - """ - LOG.debug('Initializing the PXE filter driver %s', self) - - def tear_down_filter(self): - """Base driver tear down logic. Locked. - - :returns: nothing. - """ - LOG.debug('Tearing down the PXE filter driver %s', self) - with self.lock: - self.reset() - - @locked_driver_event(Events.sync) - def sync(self, ironic): - """Base driver sync logic. Locked. - - :param ironic: obligatory ironic client instance - :returns: nothing. - """ - LOG.debug('Syncing the PXE filter driver %s', self) - - def get_periodic_sync_task(self): - """Get periodic sync task for the filter. - - :returns: a periodic task to be run in the background. - """ - ironic = ir_utils.get_client() - return periodics.periodic( - # NOTE(milan): the periodic decorator doesn't support 0 as - # a spacing value of (a switched off) periodic - spacing=CONF.pxe_filter.sync_period or float('inf'), - enabled=bool(CONF.pxe_filter.sync_period))( - lambda: self.sync(ironic)) - - -class NoopFilter(BaseFilter): - """A trivial PXE boot filter.""" - - -_DRIVER_MANAGER = None - - -@lockutils.synchronized(__name__) -def _driver_manager(): - """Create a Stevedore driver manager for filtering drivers. Locked.""" - global _DRIVER_MANAGER - - name = CONF.pxe_filter.driver - if _DRIVER_MANAGER is None: - _DRIVER_MANAGER = stevedore.driver.DriverManager( - _STEVEDORE_DRIVER_NAMESPACE, - name=name, - invoke_on_load=True - ) - - return _DRIVER_MANAGER - - -def driver(): - """Get the driver for the PXE filter. - - :returns: the singleton PXE filter driver object. - """ - return _driver_manager().driver diff --git a/ironic_inspector/pxe_filter/interface.py b/ironic_inspector/pxe_filter/interface.py deleted file mode 100644 index ec19502..0000000 --- a/ironic_inspector/pxe_filter/interface.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""The code of the PXE boot filtering interface.""" - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class FilterDriver(object): - """The PXE boot filtering interface.""" - - @abc.abstractmethod - def init_filter(self): - """Initialize the internal driver state. 
- - This method should be idempotent and may perform system-wide filter - state changes. Can be synchronous. - - :returns: nothing. - """ - - @abc.abstractmethod - def sync(self, ironic): - """Synchronize the filter with ironic and inspector. - - To be called both periodically and as needed by inspector. The filter - should tear down its internal state if the sync method raises in order - to "propagate" filtering exception between periodic and on-demand sync - call. To this end, a driver should raise from the sync call if its - internal state isn't properly initialized. - - :param ironic: an ironic client instance. - :returns: nothing. - """ - - @abc.abstractmethod - def tear_down_filter(self): - """Reset the filter. - - This method should be idempotent and may perform system-wide filter - state changes. Can be synchronous. - - :returns: nothing. - """ - - @abc.abstractmethod - def get_periodic_sync_task(self): - """Get periodic sync task for the filter. - - :returns: a periodic task to be run in the background. - """ diff --git a/ironic_inspector/rules.py b/ironic_inspector/rules.py deleted file mode 100644 index 22bb879..0000000 --- a/ironic_inspector/rules.py +++ /dev/null @@ -1,425 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
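The module that follows implements inspector's introspection-rules engine: JSON-schema validation of rule conditions and actions, database CRUD, and application of stored rules to freshly processed introspection data. For orientation, a minimal illustrative rule that the schemas built by conditions_schema() and actions_schema() below would accept; the 'ge' operator and 'set-attribute' action are stock plugins also exercised by the functional tests later in this patch, while the field values are made up for illustration::

    # A sketch of a rule document as accepted by rules.create().
    example_rule = {
        'description': 'Record that a node has plenty of RAM',
        'conditions': [
            # 'op' must name a registered condition plugin; 'field' is a
            # JSON path, by default into the introspection data
            # ('data://'), or into the node record with 'node://'.
            {'op': 'ge', 'field': 'memory_mb', 'value': 8192},
        ],
        'actions': [
            # 'action' must name a registered action plugin; any extra
            # keys are validated by the plugin itself.
            {'action': 'set-attribute', 'path': '/extra/large_ram',
             'value': 'yes'},
        ],
    }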
- -"""Support for introspection rules.""" - -import jsonpath_rw as jsonpath -import jsonschema -from oslo_db import exception as db_exc -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six -from sqlalchemy import orm - -from ironic_inspector.common.i18n import _ -from ironic_inspector import db -from ironic_inspector.plugins import base as plugins_base -from ironic_inspector import utils - - -LOG = utils.getProcessingLogger(__name__) -_CONDITIONS_SCHEMA = None -_ACTIONS_SCHEMA = None - - -def conditions_schema(): - global _CONDITIONS_SCHEMA - if _CONDITIONS_SCHEMA is None: - condition_plugins = [x.name for x in - plugins_base.rule_conditions_manager()] - _CONDITIONS_SCHEMA = { - "title": "Inspector rule conditions schema", - "type": "array", - # we can have rules that always apply - "minItems": 0, - "items": { - "type": "object", - # field might become optional in the future, but not right now - "required": ["op", "field"], - "properties": { - "op": { - "description": "condition operator", - "enum": condition_plugins - }, - "field": { - "description": "JSON path to field for matching", - "type": "string" - }, - "multiple": { - "description": "how to treat multiple values", - "enum": ["all", "any", "first"] - }, - "invert": { - "description": "whether to invert the result", - "type": "boolean" - }, - }, - # other properties are validated by plugins - "additionalProperties": True - } - } - - return _CONDITIONS_SCHEMA - - -def actions_schema(): - global _ACTIONS_SCHEMA - if _ACTIONS_SCHEMA is None: - action_plugins = [x.name for x in - plugins_base.rule_actions_manager()] - _ACTIONS_SCHEMA = { - "title": "Inspector rule actions schema", - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "required": ["action"], - "properties": { - "action": { - "description": "action to take", - "enum": action_plugins - }, - }, - # other properties are validated by plugins - "additionalProperties": True - } - } - - return _ACTIONS_SCHEMA - - -class IntrospectionRule(object): - """High-level class representing an introspection rule.""" - - def __init__(self, uuid, conditions, actions, description): - """Create rule object from database data.""" - self._uuid = uuid - self._conditions = conditions - self._actions = actions - self._description = description - - def as_dict(self, short=False): - result = { - 'uuid': self._uuid, - 'description': self._description, - } - - if not short: - result['conditions'] = [c.as_dict() for c in self._conditions] - result['actions'] = [a.as_dict() for a in self._actions] - - return result - - @property - def description(self): - return self._description or self._uuid - - def check_conditions(self, node_info, data): - """Check if conditions are true for a given node. 
- - :param node_info: a NodeInfo object - :param data: introspection data - :returns: True if conditions match, otherwise False - """ - LOG.debug('Checking rule "%s"', self.description, - node_info=node_info, data=data) - ext_mgr = plugins_base.rule_conditions_manager() - for cond in self._conditions: - scheme, path = _parse_path(cond.field) - - if scheme == 'node': - source_data = node_info.node().to_dict() - elif scheme == 'data': - source_data = data - - field_values = jsonpath.parse(path).find(source_data) - field_values = [x.value for x in field_values] - cond_ext = ext_mgr[cond.op].obj - - if not field_values: - if cond_ext.ALLOW_NONE: - LOG.debug('Field with JSON path %s was not found in data', - cond.field, node_info=node_info, data=data) - field_values = [None] - else: - LOG.info('Field with JSON path %(path)s was not found ' - 'in data, rule "%(rule)s" will not ' - 'be applied', - {'path': cond.field, 'rule': self.description}, - node_info=node_info, data=data) - return False - - for value in field_values: - result = cond_ext.check(node_info, value, cond.params) - if cond.invert: - result = not result - - if (cond.multiple == 'first' - or (cond.multiple == 'all' and not result) - or (cond.multiple == 'any' and result)): - break - - if not result: - LOG.info('Rule "%(rule)s" will not be applied: condition ' - '%(field)s %(op)s %(params)s failed', - {'rule': self.description, 'field': cond.field, - 'op': cond.op, 'params': cond.params}, - node_info=node_info, data=data) - return False - - LOG.info('Rule "%s" will be applied', self.description, - node_info=node_info, data=data) - return True - - def apply_actions(self, node_info, data=None): - """Run actions on a node. - - :param node_info: NodeInfo instance - :param data: introspection data - """ - LOG.debug('Running actions for rule "%s"', self.description, - node_info=node_info, data=data) - - ext_mgr = plugins_base.rule_actions_manager() - for act in self._actions: - ext = ext_mgr[act.action].obj - - for formatted_param in ext.FORMATTED_PARAMS: - value = act.params.get(formatted_param) - if not value or not isinstance(value, six.string_types): - continue - - # NOTE(aarefiev): verify provided value with introspection - # data format specifications. - # TODO(aarefiev): simple verify on import rule time. - try: - act.params[formatted_param] = value.format(data=data) - except KeyError as e: - raise utils.Error(_('Invalid formatting variable key ' - 'provided: %s') % e, - node_info=node_info, data=data) - - LOG.debug('Running action `%(action)s %(params)s`', - {'action': act.action, 'params': act.params}, - node_info=node_info, data=data) - ext.apply(node_info, act.params) - - LOG.debug('Successfully applied actions', - node_info=node_info, data=data) - - -def _parse_path(path): - """Parse path, extract scheme and path. - - Parse path with 'node' and 'data' scheme, which links on - introspection data and node info respectively. If scheme is - missing in path, default is 'data'. - - :param path: data or node path - :return: tuple (scheme, path) - """ - try: - index = path.index('://') - except ValueError: - scheme = 'data' - path = path - else: - scheme = path[:index] - path = path[index + 3:] - return scheme, path - - -def create(conditions_json, actions_json, uuid=None, - description=None): - """Create a new rule in database. - - :param conditions_json: list of dicts with the following keys: - * op - operator - * field - JSON path to field to compare - Other keys are stored as is. 
- :param actions_json: list of dicts with the following keys: - * action - action type - Other keys are stored as is. - :param uuid: rule UUID, will be generated if empty - :param description: human-readable rule description - :returns: new IntrospectionRule object - :raises: utils.Error on failure - """ - uuid = uuid or uuidutils.generate_uuid() - LOG.debug('Creating rule %(uuid)s with description "%(descr)s", ' - 'conditions %(conditions)s and actions %(actions)s', - {'uuid': uuid, 'descr': description, - 'conditions': conditions_json, 'actions': actions_json}) - - try: - jsonschema.validate(conditions_json, conditions_schema()) - except jsonschema.ValidationError as exc: - raise utils.Error(_('Validation failed for conditions: %s') % exc) - - try: - jsonschema.validate(actions_json, actions_schema()) - except jsonschema.ValidationError as exc: - raise utils.Error(_('Validation failed for actions: %s') % exc) - - cond_mgr = plugins_base.rule_conditions_manager() - act_mgr = plugins_base.rule_actions_manager() - - conditions = [] - reserved_params = {'op', 'field', 'multiple', 'invert'} - for cond_json in conditions_json: - field = cond_json['field'] - - scheme, path = _parse_path(field) - - if scheme not in ('node', 'data'): - raise utils.Error(_('Unsupported scheme for field: %s, valid ' - 'values are node:// or data://') % scheme) - # verify field as JSON path - try: - jsonpath.parse(path) - except Exception as exc: - raise utils.Error(_('Unable to parse field JSON path %(field)s: ' - '%(error)s') % {'field': field, 'error': exc}) - - plugin = cond_mgr[cond_json['op']].obj - params = {k: v for k, v in cond_json.items() - if k not in reserved_params} - try: - plugin.validate(params) - except ValueError as exc: - raise utils.Error(_('Invalid parameters for operator %(op)s: ' - '%(error)s') % - {'op': cond_json['op'], 'error': exc}) - - conditions.append((cond_json['field'], - cond_json['op'], - cond_json.get('multiple', 'any'), - cond_json.get('invert', False), - params)) - - actions = [] - for action_json in actions_json: - plugin = act_mgr[action_json['action']].obj - params = {k: v for k, v in action_json.items() if k != 'action'} - try: - plugin.validate(params) - except ValueError as exc: - raise utils.Error(_('Invalid parameters for action %(act)s: ' - '%(error)s') % - {'act': action_json['action'], 'error': exc}) - - actions.append((action_json['action'], params)) - - try: - with db.ensure_transaction() as session: - rule = db.Rule(uuid=uuid, description=description, - disabled=False, created_at=timeutils.utcnow()) - - for field, op, multiple, invert, params in conditions: - rule.conditions.append(db.RuleCondition(op=op, - field=field, - multiple=multiple, - invert=invert, - params=params)) - - for action, params in actions: - rule.actions.append(db.RuleAction(action=action, - params=params)) - - rule.save(session) - except db_exc.DBDuplicateEntry as exc: - LOG.error('Database integrity error %s when ' - 'creating a rule', exc) - raise utils.Error(_('Rule with UUID %s already exists') % uuid, - code=409) - - LOG.info('Created rule %(uuid)s with description "%(descr)s"', - {'uuid': uuid, 'descr': description}) - return IntrospectionRule(uuid=uuid, - conditions=rule.conditions, - actions=rule.actions, - description=description) - - -def get(uuid): - """Get a rule by its UUID.""" - try: - rule = db.model_query(db.Rule).filter_by(uuid=uuid).one() - except orm.exc.NoResultFound: - raise utils.Error(_('Rule %s was not found') % uuid, code=404) - - return 
IntrospectionRule(uuid=rule.uuid, actions=rule.actions,
-                             conditions=rule.conditions,
-                             description=rule.description)
-
-
-def get_all():
-    """List all rules."""
-    query = db.model_query(db.Rule).order_by(db.Rule.created_at)
-    return [IntrospectionRule(uuid=rule.uuid, actions=rule.actions,
-                              conditions=rule.conditions,
-                              description=rule.description)
-            for rule in query]
-
-
-def delete(uuid):
-    """Delete a rule by its UUID."""
-    with db.ensure_transaction() as session:
-        db.model_query(db.RuleAction,
-                       session=session).filter_by(rule=uuid).delete()
-        db.model_query(db.RuleCondition,
-                       session=session).filter_by(rule=uuid).delete()
-        count = (db.model_query(db.Rule, session=session)
-                 .filter_by(uuid=uuid).delete())
-        if not count:
-            raise utils.Error(_('Rule %s was not found') % uuid, code=404)
-
-    LOG.info('Introspection rule %s was deleted', uuid)
-
-
-def delete_all():
-    """Delete all rules."""
-    with db.ensure_transaction() as session:
-        db.model_query(db.RuleAction, session=session).delete()
-        db.model_query(db.RuleCondition, session=session).delete()
-        db.model_query(db.Rule, session=session).delete()
-
-    LOG.info('All introspection rules were deleted')
-
-
-def apply(node_info, data):
-    """Apply rules to a node."""
-    rules = get_all()
-    if not rules:
-        LOG.debug('No custom introspection rules to apply',
-                  node_info=node_info, data=data)
-        return
-
-    LOG.debug('Applying custom introspection rules',
-              node_info=node_info, data=data)
-
-    to_apply = []
-    for rule in rules:
-        if rule.check_conditions(node_info, data):
-            to_apply.append(rule)
-
-    if to_apply:
-        LOG.debug('Running actions', node_info=node_info, data=data)
-        for rule in to_apply:
-            rule.apply_actions(node_info, data=data)
-    else:
-        LOG.debug('No actions to apply', node_info=node_info, data=data)
-
-    LOG.info('Successfully applied custom introspection rules',
-             node_info=node_info, data=data)
diff --git a/ironic_inspector/test/__init__.py b/ironic_inspector/test/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/ironic_inspector/test/base.py b/ironic_inspector/test/base.py
deleted file mode 100644
index 9142ef5..0000000
--- a/ironic_inspector/test/base.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
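The shared test fixtures that follow are layered: BaseTest wires up an in-memory SQLite database and a synchronous executor, InventoryTest adds a realistic ramdisk inventory, NodeTest adds a mocked ironic node plus a NodeInfo cache entry, and NodeStateTest seeds the node's FSM state row. A hypothetical unit test building on them might look like::

    class ExampleTest(NodeTest):
        # Illustrative only; not part of the original test suite.
        def test_fixtures_are_wired(self):
            # NodeTest pre-creates a mocked node in the 'inspecting'
            # provision state and a NodeInfo wrapper around it.
            self.assertEqual('inspecting', self.node.provision_state)
            self.assertEqual(self.uuid, self.node_info.uuid)
            # InventoryTest marks eth1 as the PXE-booting interface.
            self.assertIn(self.pxe_iface_name, self.valid_interfaces)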
- -import datetime -import time - -import fixtures -import futurist -import mock -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_config import fixture as config_fixture -from oslo_log import log -from oslo_utils import units -from oslo_utils import uuidutils -from oslotest import base as test_base - -from ironic_inspector.common import i18n -# Import configuration options -from ironic_inspector import conf # noqa -from ironic_inspector import db -from ironic_inspector import introspection_state as istate -from ironic_inspector import node_cache -from ironic_inspector.plugins import base as plugins_base -from ironic_inspector import utils - -CONF = cfg.CONF - - -class BaseTest(test_base.BaseTestCase): - - IS_FUNCTIONAL = False - - def setUp(self): - super(BaseTest, self).setUp() - if not self.IS_FUNCTIONAL: - self.init_test_conf() - self.session = db.get_writer_session() - engine = self.session.get_bind() - db.Base.metadata.create_all(engine) - engine.connect() - self.addCleanup(engine.dispose) - plugins_base._HOOKS_MGR = None - node_cache._SEMAPHORES = lockutils.Semaphores() - patch = mock.patch.object(i18n, '_', lambda s: s) - patch.start() - # 'p=patch' magic is due to how closures work - self.addCleanup(lambda p=patch: p.stop()) - utils._EXECUTOR = futurist.SynchronousExecutor(green=True) - - def init_test_conf(self): - CONF.reset() - log.register_options(CONF) - self.cfg = self.useFixture(config_fixture.Config(CONF)) - self.cfg.set_default('connection', "sqlite:///", group='database') - self.cfg.set_default('slave_connection', None, group='database') - self.cfg.set_default('max_retries', 10, group='database') - - def assertPatchEqual(self, expected, actual): - expected = sorted(expected, key=lambda p: p['path']) - actual = sorted(actual, key=lambda p: p['path']) - self.assertEqual(expected, actual) - - def assertCalledWithPatch(self, expected, mock_call): - def _get_patch_param(call): - try: - if isinstance(call[0][1], list): - return call[0][1] - except IndexError: - pass - return call[0][0] - - actual = sum(map(_get_patch_param, mock_call.call_args_list), []) - self.assertPatchEqual(actual, expected) - - -class InventoryTest(BaseTest): - def setUp(self): - super(InventoryTest, self).setUp() - # Prepare some realistic inventory - # https://github.com/openstack/ironic-inspector/blob/master/HTTP-API.rst # noqa - self.bmc_address = '1.2.3.4' - self.macs = ( - ['11:22:33:44:55:66', '66:55:44:33:22:11', '7c:fe:90:29:26:52']) - self.ips = ['1.2.1.2', '1.2.1.1', '1.2.1.3'] - self.inactive_mac = '12:12:21:12:21:12' - self.pxe_mac = self.macs[0] - self.all_macs = self.macs + [self.inactive_mac] - self.pxe_iface_name = 'eth1' - self.client_id = ( - 'ff:00:00:00:00:00:02:00:00:02:c9:00:7c:fe:90:03:00:29:26:52') - self.valid_interfaces = { - self.pxe_iface_name: {'ip': self.ips[0], 'mac': self.macs[0], - 'client_id': None, 'pxe': True}, - 'ib0': {'ip': self.ips[2], 'mac': self.macs[2], - 'client_id': self.client_id, 'pxe': False} - } - self.data = { - 'boot_interface': '01-' + self.pxe_mac.replace(':', '-'), - 'inventory': { - 'interfaces': [ - {'name': 'eth1', 'mac_address': self.macs[0], - 'ipv4_address': self.ips[0], - 'lldp': [ - [1, "04112233aabbcc"], - [2, "07373334"], - [3, "003c"]]}, - {'name': 'eth2', 'mac_address': self.inactive_mac}, - {'name': 'eth3', 'mac_address': self.macs[1], - 'ipv4_address': self.ips[1]}, - {'name': 'ib0', 'mac_address': self.macs[2], - 'ipv4_address': self.ips[2], - 'client_id': self.client_id} - ], - 'disks': [ - {'name': 
'/dev/sda', 'model': 'Big Data Disk', - 'size': 1000 * units.Gi}, - {'name': '/dev/sdb', 'model': 'Small OS Disk', - 'size': 20 * units.Gi}, - ], - 'cpu': { - 'count': 4, - 'architecture': 'x86_64' - }, - 'memory': { - 'physical_mb': 12288 - }, - 'bmc_address': self.bmc_address - }, - 'root_disk': {'name': '/dev/sda', 'model': 'Big Data Disk', - 'size': 1000 * units.Gi, - 'wwn': None}, - 'interfaces': self.valid_interfaces, - } - self.inventory = self.data['inventory'] - self.all_interfaces = { - 'eth1': {'mac': self.macs[0], 'ip': self.ips[0], - 'client_id': None, 'pxe': True}, - 'eth2': {'mac': self.inactive_mac, 'ip': None, - 'client_id': None, 'pxe': False}, - 'eth3': {'mac': self.macs[1], 'ip': self.ips[1], - 'client_id': None, 'pxe': False}, - 'ib0': {'mac': self.macs[2], 'ip': self.ips[2], - 'client_id': self.client_id, 'pxe': False} - } - self.active_interfaces = { - name: data - for (name, data) in self.all_interfaces.items() - if data.get('ip') - } - self.pxe_interfaces = { - self.pxe_iface_name: self.all_interfaces[self.pxe_iface_name] - } - - -class NodeTest(InventoryTest): - def setUp(self): - super(NodeTest, self).setUp() - self.uuid = uuidutils.generate_uuid() - fake_node = { - 'driver': 'pxe_ipmitool', - 'driver_info': {'ipmi_address': self.bmc_address}, - 'properties': {'cpu_arch': 'i386', 'local_gb': 40}, - 'uuid': self.uuid, - 'power_state': 'power on', - 'provision_state': 'inspecting', - 'extra': {}, - 'instance_uuid': None, - 'maintenance': False - } - mock_to_dict = mock.Mock(return_value=fake_node) - - self.node = mock.Mock(**fake_node) - self.node.to_dict = mock_to_dict - - self.ports = [] - self.node_info = node_cache.NodeInfo( - uuid=self.uuid, - started_at=datetime.datetime(1, 1, 1), - node=self.node, ports=self.ports) - self.node_info.node = mock.Mock(return_value=self.node) - self.sleep_fixture = self.useFixture( - fixtures.MockPatchObject(time, 'sleep', autospec=True)) - - -class NodeStateTest(NodeTest): - def setUp(self): - super(NodeStateTest, self).setUp() - self.node_info._version_id = uuidutils.generate_uuid() - self.node_info._state = istate.States.starting - self.db_node = db.Node(uuid=self.node_info.uuid, - version_id=self.node_info._version_id, - state=self.node_info._state, - started_at=self.node_info.started_at, - finished_at=self.node_info.finished_at, - error=self.node_info.error) - self.db_node.save(self.session) diff --git a/ironic_inspector/test/functional.py b/ironic_inspector/test/functional.py deleted file mode 100644 index 3ce0baa..0000000 --- a/ironic_inspector/test/functional.py +++ /dev/null @@ -1,767 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
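The functional suite below works differently from the unit tests: it monkey-patches eventlet, writes a throwaway config file backed by SQLite, launches a real inspector service on 127.0.0.1:5050 with a mocked ironic client, and drives the public HTTP API end to end. Judging from the __main__ block at the bottom of the module, it is intended to be run directly rather than through the unit-test runner, along these lines::

    $ python -m ironic_inspector.test.functional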
- -import eventlet # noqa -eventlet.monkey_patch() - -import contextlib -import copy -import datetime -import json -import os -import tempfile -import time -import unittest - -import fixtures -import mock -from oslo_config import cfg -from oslo_config import fixture as config_fixture -from oslo_utils import timeutils -from oslo_utils import uuidutils -import pytz -import requests -import six -from six.moves import urllib - -from ironic_inspector.cmd import all as inspector_cmd -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector.common import swift -from ironic_inspector import db -from ironic_inspector import dbsync -from ironic_inspector import introspection_state as istate -from ironic_inspector import main -from ironic_inspector import node_cache -from ironic_inspector import rules -from ironic_inspector.test import base -from ironic_inspector.test.unit import test_rules - - -CONF = """ -[ironic] -os_auth_url = http://url -os_username = user -os_password = password -os_tenant_name = tenant -[firewall] -manage_firewall = False -[DEFAULT] -debug = True -auth_strategy = noauth -introspection_delay = 0 -[database] -connection = sqlite:///%(db_file)s -[processing] -processing_hooks=$default_processing_hooks,lldp_basic -""" - - -DEFAULT_SLEEP = 2 -TEST_CONF_FILE = None - - -def get_test_conf_file(): - global TEST_CONF_FILE - if not TEST_CONF_FILE: - d = tempfile.mkdtemp() - TEST_CONF_FILE = os.path.join(d, 'test.conf') - db_file = os.path.join(d, 'test.db') - with open(TEST_CONF_FILE, 'wb') as fp: - content = CONF % {'db_file': db_file} - fp.write(content.encode('utf-8')) - return TEST_CONF_FILE - - -def get_error(response): - return response.json()['error']['message'] - - -def _query_string(*field_names): - def outer(func): - @six.wraps(func) - def inner(*args, **kwargs): - queries = [] - for field_name in field_names: - field = kwargs.pop(field_name, None) - if field is not None: - queries.append('%s=%s' % (field_name, field)) - - query_string = '&'.join(queries) - if query_string: - query_string = '?' 
+ query_string - return func(*args, query_string=query_string, **kwargs) - return inner - return outer - - -class Base(base.NodeTest): - ROOT_URL = 'http://127.0.0.1:5050' - IS_FUNCTIONAL = True - - def setUp(self): - super(Base, self).setUp() - rules.delete_all() - - self.cli_fixture = self.useFixture( - fixtures.MockPatchObject(ir_utils, 'get_client')) - self.cli = self.cli_fixture.mock.return_value - self.cli.node.get.return_value = self.node - self.cli.node.update.return_value = self.node - self.cli.node.list.return_value = [self.node] - - self.patch = [ - {'op': 'add', 'path': '/properties/cpus', 'value': '4'}, - {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'}, - {'path': '/properties/local_gb', 'value': '999', 'op': 'add'} - ] - self.patch_root_hints = [ - {'op': 'add', 'path': '/properties/cpus', 'value': '4'}, - {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'}, - {'path': '/properties/local_gb', 'value': '19', 'op': 'add'} - ] - - self.node.power_state = 'power off' - - self.cfg = self.useFixture(config_fixture.Config()) - conf_file = get_test_conf_file() - self.cfg.set_config_files([conf_file]) - - def tearDown(self): - super(Base, self).tearDown() - node_cache._delete_node(self.uuid) - - def call(self, method, endpoint, data=None, expect_error=None, - api_version=None): - if data is not None: - data = json.dumps(data) - endpoint = self.ROOT_URL + endpoint - headers = {'X-Auth-Token': 'token'} - if api_version: - headers[main._VERSION_HEADER] = '%d.%d' % api_version - res = getattr(requests, method.lower())(endpoint, data=data, - headers=headers) - if expect_error: - self.assertEqual(expect_error, res.status_code) - else: - if res.status_code >= 400: - msg = ('%(meth)s %(url)s failed with code %(code)s: %(msg)s' % - {'meth': method.upper(), 'url': endpoint, - 'code': res.status_code, 'msg': get_error(res)}) - raise AssertionError(msg) - return res - - def call_introspect(self, uuid, **kwargs): - endpoint = '/v1/introspection/%s' % uuid - return self.call('post', endpoint, **kwargs) - - def call_get_status(self, uuid, **kwargs): - return self.call('get', '/v1/introspection/%s' % uuid, **kwargs).json() - - @_query_string('marker', 'limit') - def call_get_statuses(self, query_string='', **kwargs): - path = '/v1/introspection' - return self.call('get', path + query_string, **kwargs).json() - - def call_abort_introspect(self, uuid, **kwargs): - return self.call('post', '/v1/introspection/%s/abort' % uuid, **kwargs) - - def call_reapply(self, uuid, **kwargs): - return self.call('post', '/v1/introspection/%s/data/unprocessed' % - uuid, **kwargs) - - def call_continue(self, data, **kwargs): - return self.call('post', '/v1/continue', data=data, **kwargs).json() - - def call_add_rule(self, data, **kwargs): - return self.call('post', '/v1/rules', data=data, **kwargs).json() - - def call_list_rules(self, **kwargs): - return self.call('get', '/v1/rules', **kwargs).json()['rules'] - - def call_delete_rules(self, **kwargs): - self.call('delete', '/v1/rules', **kwargs) - - def call_delete_rule(self, uuid, **kwargs): - self.call('delete', '/v1/rules/' + uuid, **kwargs) - - def call_get_rule(self, uuid, **kwargs): - return self.call('get', '/v1/rules/' + uuid, **kwargs).json() - - def _fake_status(self, finished=mock.ANY, state=mock.ANY, error=mock.ANY, - started_at=mock.ANY, finished_at=mock.ANY, - links=mock.ANY): - return 
{'uuid': self.uuid, 'finished': finished, 'error': error, - 'state': state, 'finished_at': finished_at, - 'started_at': started_at, - 'links': [{u'href': u'%s/v1/introspection/%s' % (self.ROOT_URL, - self.uuid), - u'rel': u'self'}]} - - def check_status(self, status, finished, state, error=None): - self.assertEqual( - self._fake_status(finished=finished, - state=state, - finished_at=finished and mock.ANY or None, - error=error), - status - ) - curr_time = datetime.datetime.fromtimestamp( - time.time(), tz=pytz.timezone(time.tzname[0])) - started_at = timeutils.parse_isotime(status['started_at']) - self.assertLess(started_at, curr_time) - if finished: - finished_at = timeutils.parse_isotime(status['finished_at']) - self.assertLess(started_at, finished_at) - self.assertLess(finished_at, curr_time) - else: - self.assertIsNone(status['finished_at']) - - def db_row(self): - """return database row matching self.uuid.""" - return db.model_query(db.Node).get(self.uuid) - - -class Test(Base): - def test_bmc(self): - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=False, state=istate.States.waiting) - - res = self.call_continue(self.data) - self.assertEqual({'uuid': self.uuid}, res) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - - self.cli.node.update.assert_called_once_with(self.uuid, mock.ANY) - self.assertCalledWithPatch(self.patch, self.cli.node.update) - self.cli.port.create.assert_called_once_with( - node_uuid=self.uuid, address='11:22:33:44:55:66', extra={}, - pxe_enabled=True) - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=True, state=istate.States.finished) - - def test_port_creation_update_and_deletion(self): - cfg.CONF.set_override('add_ports', 'active', 'processing') - cfg.CONF.set_override('keep_ports', 'added', 'processing') - - uuid_to_delete = uuidutils.generate_uuid() - uuid_to_update = uuidutils.generate_uuid() - # Two ports already exist: one with incorrect pxe_enabled, the other - # should be deleted. - self.cli.node.list_ports.return_value = [ - mock.Mock(address=self.macs[1], uuid=uuid_to_update, - node_uuid=self.uuid, extra={}, pxe_enabled=True), - mock.Mock(address='foobar', uuid=uuid_to_delete, - node_uuid=self.uuid, extra={}, pxe_enabled=True), - ] - # Two more ports are created, one with client_id. Make sure the - # returned object has the same properties as requested in create(). 
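-        # (Passing the Mock class itself as side_effect makes every
-        # create(**kwargs) call return a fresh Mock(**kwargs), i.e. an
-        # object whose attributes echo the arguments passed to create().)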
- self.cli.port.create.side_effect = mock.Mock - - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=False, state=istate.States.waiting) - - res = self.call_continue(self.data) - self.assertEqual({'uuid': self.uuid}, res) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - - self.cli.node.update.assert_called_once_with(self.uuid, mock.ANY) - self.assertCalledWithPatch(self.patch, self.cli.node.update) - calls = [ - mock.call(node_uuid=self.uuid, address=self.macs[0], - extra={}, pxe_enabled=True), - mock.call(node_uuid=self.uuid, address=self.macs[2], - extra={'client-id': self.client_id}, pxe_enabled=False), - ] - self.cli.port.create.assert_has_calls(calls, any_order=True) - self.cli.port.delete.assert_called_once_with(uuid_to_delete) - self.cli.port.update.assert_called_once_with( - uuid_to_update, - [{'op': 'replace', 'path': '/pxe_enabled', 'value': False}]) - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=True, state=istate.States.finished) - - def test_introspection_statuses(self): - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - - # NOTE(zhenguo): only test finished=False here, as we don't know - # other nodes status in this thread. - statuses = self.call_get_statuses().get('introspection') - self.assertIn(self._fake_status(finished=False), statuses) - - # check we've got 1 status with a limit of 1 - statuses = self.call_get_statuses(limit=1).get('introspection') - self.assertEqual(1, len(statuses)) - - all_statuses = self.call_get_statuses().get('introspection') - marker_statuses = self.call_get_statuses( - marker=self.uuid, limit=1).get('introspection') - marker_index = all_statuses.index(self.call_get_status(self.uuid)) - # marker is the last row on previous page - self.assertEqual(all_statuses[marker_index+1:marker_index+2], - marker_statuses) - - self.call_continue(self.data) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=True, state=istate.States.finished) - - # fetch all statuses and db nodes to assert pagination - statuses = self.call_get_statuses().get('introspection') - nodes = db.model_query(db.Node).order_by( - db.Node.started_at.desc()).all() - - # assert ordering - self.assertEqual([node.uuid for node in nodes], - [status_.get('uuid') for status_ in statuses]) - - # assert pagination - half = len(nodes) // 2 - marker = nodes[half].uuid - statuses = self.call_get_statuses(marker=marker).get('introspection') - self.assertEqual([node.uuid for node in nodes[half + 1:]], - [status_.get('uuid') for status_ in statuses]) - - # assert status links work - self.assertEqual([self.call_get_status(status_.get('uuid')) - for status_ in statuses], - [self.call('GET', urllib.parse.urlparse( - status_.get('links')[0].get('href')).path).json() - for status_ in statuses]) - - def test_rules_api(self): - res = self.call_list_rules() - self.assertEqual([], res) - - rule = { - 'conditions': [ - {'op': 'eq', 'field': 'memory_mb', 'value': 1024}, - ], - 'actions': [{'action': 'fail', 'message': 'boom'}], - 'description': 'Cool actions' - } - - res = self.call_add_rule(rule) - self.assertTrue(res['uuid']) - rule['uuid'] = res['uuid'] - rule['links'] = res['links'] - rule['conditions'] = [ - test_rules.BaseTest.condition_defaults(rule['conditions'][0]), - ] - 
self.assertEqual(rule, res) - - res = self.call('get', rule['links'][0]['href']).json() - self.assertEqual(rule, res) - - res = self.call_list_rules() - self.assertEqual(rule['links'], res[0].pop('links')) - self.assertEqual([{'uuid': rule['uuid'], - 'description': 'Cool actions'}], - res) - - res = self.call_get_rule(rule['uuid']) - self.assertEqual(rule, res) - - self.call_delete_rule(rule['uuid']) - res = self.call_list_rules() - self.assertEqual([], res) - - links = rule.pop('links') - del rule['uuid'] - for _ in range(3): - self.call_add_rule(rule) - - res = self.call_list_rules() - self.assertEqual(3, len(res)) - - self.call_delete_rules() - res = self.call_list_rules() - self.assertEqual([], res) - - self.call('get', links[0]['href'], expect_error=404) - self.call('delete', links[0]['href'], expect_error=404) - - def test_introspection_rules(self): - self.node.extra['bar'] = 'foo' - rules = [ - { - 'conditions': [ - {'field': 'memory_mb', 'op': 'eq', 'value': 12288}, - {'field': 'local_gb', 'op': 'gt', 'value': 998}, - {'field': 'local_gb', 'op': 'lt', 'value': 1000}, - {'field': 'local_gb', 'op': 'matches', 'value': '[0-9]+'}, - {'field': 'cpu_arch', 'op': 'contains', 'value': '[0-9]+'}, - {'field': 'root_disk.wwn', 'op': 'is-empty'}, - {'field': 'inventory.interfaces[*].ipv4_address', - 'op': 'contains', 'value': r'127\.0\.0\.1', - 'invert': True, 'multiple': 'all'}, - {'field': 'i.do.not.exist', 'op': 'is-empty'}, - ], - 'actions': [ - {'action': 'set-attribute', 'path': '/extra/foo', - 'value': 'bar'} - ] - }, - { - 'conditions': [ - {'field': 'memory_mb', 'op': 'ge', 'value': 100500}, - ], - 'actions': [ - {'action': 'set-attribute', 'path': '/extra/bar', - 'value': 'foo'}, - {'action': 'fail', 'message': 'boom'} - ] - } - ] - for rule in rules: - self.call_add_rule(rule) - - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.call_continue(self.data) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - - self.cli.node.update.assert_any_call( - self.uuid, - [{'op': 'add', 'path': '/extra/foo', 'value': 'bar'}]) - - def test_conditions_scheme_actions_path(self): - rules = [ - { - 'conditions': [ - {'field': 'node://properties.local_gb', 'op': 'eq', - 'value': 40}, - {'field': 'node://driver_info.ipmi_address', 'op': 'eq', - 'value': self.bmc_address}, - ], - 'actions': [ - {'action': 'set-attribute', 'path': '/extra/foo', - 'value': 'bar'} - ] - }, - { - 'conditions': [ - {'field': 'data://inventory.cpu.count', 'op': 'eq', - 'value': self.data['inventory']['cpu']['count']}, - ], - 'actions': [ - {'action': 'set-attribute', - 'path': '/driver_info/ipmi_address', - 'value': '{data[inventory][bmc_address]}'} - ] - } - ] - for rule in rules: - self.call_add_rule(rule) - - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.call_continue(self.data) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - - self.cli.node.update.assert_any_call( - self.uuid, - [{'op': 'add', 'path': '/extra/foo', 'value': 'bar'}]) - - self.cli.node.update.assert_any_call( - self.uuid, - [{'op': 'add', 'path': '/driver_info/ipmi_address', - 'value': self.data['inventory']['bmc_address']}]) - - def test_root_device_hints(self): - self.node.properties['root_device'] = {'size': 20} - - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=False, state=istate.States.waiting) - - res = 
self.call_continue(self.data)
-        self.assertEqual({'uuid': self.uuid}, res)
-        eventlet.greenthread.sleep(DEFAULT_SLEEP)
-
-        self.assertCalledWithPatch(self.patch_root_hints, self.cli.node.update)
-        self.cli.port.create.assert_called_once_with(
-            node_uuid=self.uuid, address='11:22:33:44:55:66', extra={},
-            pxe_enabled=True)
-
-        status = self.call_get_status(self.uuid)
-        self.check_status(status, finished=True, state=istate.States.finished)
-
-    def test_abort_introspection(self):
-        self.call_introspect(self.uuid)
-        eventlet.greenthread.sleep(DEFAULT_SLEEP)
-        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
-                                                              'reboot')
-        status = self.call_get_status(self.uuid)
-        self.check_status(status, finished=False, state=istate.States.waiting)
-
-        res = self.call_abort_introspect(self.uuid)
-        eventlet.greenthread.sleep(DEFAULT_SLEEP)
-
-        self.assertEqual(202, res.status_code)
-        status = self.call_get_status(self.uuid)
-        self.assertTrue(status['finished'])
-        self.assertEqual('Canceled by operator', status['error'])
-
-        # NOTE(mkovacik): we're only checking that this doesn't pass OK,
-        # as there might be either a race condition (hard to test) that
-        # yields a 'Node already finished.' error, or an attribute-based
-        # look-up error from some pre-processing hooks, because
-        # node_info.finished() deletes the look-up attributes only
-        # after releasing the node lock
-        self.call('post', '/v1/continue', self.data, expect_error=400)
-
-    @mock.patch.object(swift, 'store_introspection_data', autospec=True)
-    @mock.patch.object(swift, 'get_introspection_data', autospec=True)
-    def test_stored_data_processing(self, get_mock, store_mock):
-        cfg.CONF.set_override('store_data', 'swift', 'processing')
-
-        # ramdisk data copy
-        # please mind the data is changed during processing
-        ramdisk_data = json.dumps(copy.deepcopy(self.data))
-        get_mock.return_value = ramdisk_data
-
-        self.call_introspect(self.uuid)
-        eventlet.greenthread.sleep(DEFAULT_SLEEP)
-        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
-                                                              'reboot')
-
-        res = self.call_continue(self.data)
-        self.assertEqual({'uuid': self.uuid}, res)
-        eventlet.greenthread.sleep(DEFAULT_SLEEP)
-
-        status = self.call_get_status(self.uuid)
-        inspect_started_at = timeutils.parse_isotime(status['started_at'])
-        self.check_status(status, finished=True, state=istate.States.finished)
-
-        res = self.call_reapply(self.uuid)
-        self.assertEqual(202, res.status_code)
-        self.assertEqual('', res.text)
-        eventlet.greenthread.sleep(DEFAULT_SLEEP)
-
-        status = self.call_get_status(self.uuid)
-        self.check_status(status, finished=True, state=istate.States.finished)
-
-        # check that the started_at time updated in the DB is correct
-        reapply_started_at = timeutils.parse_isotime(status['started_at'])
-        self.assertLess(inspect_started_at, reapply_started_at)
-
-        # reapply request data
-        get_mock.assert_called_once_with(self.uuid,
-                                         suffix='UNPROCESSED')
-
-        # store ramdisk data, store processing result data, store
-        # reapply processing result data; the ordering isn't
-        # guaranteed, as storing the ramdisk data runs in a background
-        # thread; however, the last call always has to be the reapply
-        # processing result data
-        store_ramdisk_call = mock.call(mock.ANY, self.uuid,
-                                       suffix='UNPROCESSED')
-        store_processing_call = mock.call(mock.ANY, self.uuid,
-                                          suffix=None)
-        self.assertEqual(3, len(store_mock.call_args_list))
-        self.assertIn(store_ramdisk_call,
-                      store_mock.call_args_list[0:2])
-        self.assertIn(store_processing_call,
-                      store_mock.call_args_list[0:2])
-        self.assertEqual(store_processing_call,
-
store_mock.call_args_list[2]) - - # second reapply call - get_mock.return_value = ramdisk_data - res = self.call_reapply(self.uuid) - self.assertEqual(202, res.status_code) - self.assertEqual('', res.text) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - - # reapply saves the result - self.assertEqual(4, len(store_mock.call_args_list)) - self.assertEqual(store_processing_call, - store_mock.call_args_list[-1]) - - @mock.patch.object(swift, 'store_introspection_data', autospec=True) - @mock.patch.object(swift, 'get_introspection_data', autospec=True) - def test_edge_state_transitions(self, get_mock, store_mock): - """Assert state transitions work as expected in edge conditions.""" - cfg.CONF.set_override('store_data', 'swift', 'processing') - - # ramdisk data copy - # please mind the data is changed during processing - ramdisk_data = json.dumps(copy.deepcopy(self.data)) - get_mock.return_value = ramdisk_data - - # multiple introspect calls - self.call_introspect(self.uuid) - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - status = self.call_get_status(self.uuid) - self.check_status(status, finished=False, state=istate.States.waiting) - - # an error -start-> starting state transition is possible - self.call_abort_introspect(self.uuid) - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - status = self.call_get_status(self.uuid) - self.check_status(status, finished=False, state=istate.States.waiting) - - # double abort works - self.call_abort_introspect(self.uuid) - status = self.call_get_status(self.uuid) - error = status['error'] - self.check_status(status, finished=True, state=istate.States.error, - error=error) - self.call_abort_introspect(self.uuid) - status = self.call_get_status(self.uuid) - self.check_status(status, finished=True, state=istate.States.error, - error=error) - - # preventing stale data race condition - # waiting -> processing is a strict state transition - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - row = self.db_row() - row.state = istate.States.processing - with db.ensure_transaction() as session: - row.save(session) - self.call_continue(self.data, expect_error=400) - status = self.call_get_status(self.uuid) - self.check_status(status, finished=True, state=istate.States.error, - error=mock.ANY) - self.assertIn('no defined transition', status['error']) - # multiple reapply calls - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.call_continue(self.data) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.call_reapply(self.uuid) - status = self.call_get_status(self.uuid) - self.check_status(status, finished=True, state=istate.States.finished, - error=None) - self.call_reapply(self.uuid) - # assert an finished -reapply-> reapplying -> finished state transition - status = self.call_get_status(self.uuid) - self.check_status(status, finished=True, state=istate.States.finished, - error=None) - - def test_without_root_disk(self): - del self.data['root_disk'] - self.inventory['disks'] = [] - self.patch[-1] = {'path': '/properties/local_gb', - 'value': '0', 'op': 'add'} - - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=False, state=istate.States.waiting) - - res = self.call_continue(self.data) - self.assertEqual({'uuid': self.uuid}, res) - 
eventlet.greenthread.sleep(DEFAULT_SLEEP) - - self.cli.node.update.assert_called_once_with(self.uuid, mock.ANY) - self.assertCalledWithPatch(self.patch, self.cli.node.update) - self.cli.port.create.assert_called_once_with( - node_uuid=self.uuid, extra={}, address='11:22:33:44:55:66', - pxe_enabled=True) - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=True, state=istate.States.finished) - - @mock.patch.object(swift, 'store_introspection_data', autospec=True) - @mock.patch.object(swift, 'get_introspection_data', autospec=True) - def test_lldp_plugin(self, get_mock, store_mock): - cfg.CONF.set_override('store_data', 'swift', 'processing') - - ramdisk_data = json.dumps(copy.deepcopy(self.data)) - get_mock.return_value = ramdisk_data - - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=False, state=istate.States.waiting) - - res = self.call_continue(self.data) - self.assertEqual({'uuid': self.uuid}, res) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - - status = self.call_get_status(self.uuid) - self.check_status(status, finished=True, state=istate.States.finished) - - # Verify that the lldp_processed data is written to swift - # as expected by the lldp plugin - updated_data = store_mock.call_args[0][0] - lldp_out = updated_data['all_interfaces']['eth1'] - - expected_chassis_id = "11:22:33:aa:bb:cc" - expected_port_id = "734" - self.assertEqual(expected_chassis_id, - lldp_out['lldp_processed']['switch_chassis_id']) - self.assertEqual(expected_port_id, - lldp_out['lldp_processed']['switch_port_id']) - - -@contextlib.contextmanager -def mocked_server(): - conf_file = get_test_conf_file() - dbsync.main(args=['--config-file', conf_file, 'upgrade']) - - cfg.CONF.reset() - cfg.CONF.unregister_opt(dbsync.command_opt) - - eventlet.greenthread.spawn_n(inspector_cmd.main, - args=['--config-file', conf_file]) - eventlet.greenthread.sleep(1) - # Wait up to 30 seconds for the service to start - for i in range(10): - try: - requests.get('http://127.0.0.1:5050/v1') - except requests.ConnectionError: - if i == 9: - raise - print('Service did not start yet') - eventlet.greenthread.sleep(3) - else: - break - # start testing - yield - # Make sure all processes finished executing - eventlet.greenthread.sleep(1) - - -if __name__ == '__main__': - with mocked_server(): - unittest.main(verbosity=2) diff --git a/ironic_inspector/test/inspector_tempest_plugin/README.rst b/ironic_inspector/test/inspector_tempest_plugin/README.rst deleted file mode 100644 index 5ccb57d..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/README.rst +++ /dev/null @@ -1,18 +0,0 @@ -======================================= -Tempest Integration of ironic-inspector -======================================= - -This directory contains Tempest tests to cover the ironic-inspector project. - -It uses the Tempest plugin mechanism to automatically load these tests into Tempest.
More -information about Tempest plugins can be found here: -`Plugin `_ - -The legacy method of running Tempest is to treat the Tempest source code -as a Python unittest: -`Run tests `_ - -There is also a tox configuration for Tempest; use the following regex to run -introspection tests:: - - $ tox -e all-plugin -- inspector_tempest_plugin diff --git a/ironic_inspector/test/inspector_tempest_plugin/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ironic_inspector/test/inspector_tempest_plugin/config.py b/ironic_inspector/test/inspector_tempest_plugin/config.py deleted file mode 100644 index e586900..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/config.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -service_option = cfg.BoolOpt("ironic-inspector", - default=True, - help="Whether or not ironic-inspector is expected" - " to be available") - -baremetal_introspection_group = cfg.OptGroup( - name="baremetal_introspection", - title="Baremetal introspection service options", - help="When enabling baremetal introspection tests, " - "Ironic must be configured.") - -BaremetalIntrospectionGroup = [ - cfg.StrOpt('catalog_type', - default='baremetal-introspection', - help="Catalog type of the baremetal introspection service"), - cfg.StrOpt('endpoint_type', - default='publicURL', - choices=['public', 'admin', 'internal', - 'publicURL', 'adminURL', 'internalURL'], - help="The endpoint type to use for the baremetal introspection" - " service"), - cfg.IntOpt('introspection_sleep', - default=30, - help="Time to sleep between introspection status checks"), - cfg.IntOpt('introspection_timeout', - default=600, - help="Introspection timeout"), - cfg.IntOpt('hypervisor_update_sleep', - default=60, - help="Time to sleep between checks while waiting for nova " - "to become aware of bare metal instances"), - cfg.IntOpt('hypervisor_update_timeout', - default=300, - help="Timeout for waiting until nova becomes aware of " - "bare metal instances"), - # NOTE(aarefiev): status_check_period default is 60s, but checking - # the node state takes some time (an API call), so races appear here; - # 80s is enough to make one more check. - cfg.IntOpt('ironic_sync_timeout', - default=80, - help="Time it might take for the Ironic-Inspector " - "sync to happen"), - cfg.IntOpt('discovery_timeout', - default=300, - help="Time to wait until the new node is enrolled in " - "ironic"), - cfg.BoolOpt('auto_discovery_feature', - default=False, - help="Whether the auto-discovery feature is enabled. The "
"enroll hook should be specified via the " - "node_not_found_hook option in the processing " - "section of inspector.conf"), -] diff --git a/ironic_inspector/test/inspector_tempest_plugin/exceptions.py b/ironic_inspector/test/inspector_tempest_plugin/exceptions.py deleted file mode 100644 index ac08d54..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/exceptions.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest.lib import exceptions - - -class IntrospectionFailed(exceptions.TempestException): - message = "Introspection failed" - - -class IntrospectionTimeout(exceptions.TempestException): - message = "Introspection timed out" - - -class HypervisorUpdateTimeout(exceptions.TempestException): - message = "Hypervisor stats update timed out" diff --git a/ironic_inspector/test/inspector_tempest_plugin/plugin.py b/ironic_inspector/test/inspector_tempest_plugin/plugin.py deleted file mode 100644 index 0428c7d..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/plugin.py +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
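The config module above only declares the option group and its options; they become readable once a consumer registers them, which is what plugin.py below does. A minimal sketch of that flow, assuming only the module paths from this tree (the asserted values are the defaults declared above)::

    from oslo_config import cfg

    from ironic_inspector.test.inspector_tempest_plugin import config

    CONF = cfg.CONF
    CONF.register_opt(config.service_option, group='service_available')
    CONF.register_group(config.baremetal_introspection_group)
    CONF.register_opts(config.BaremetalIntrospectionGroup,
                       group='baremetal_introspection')

    # After registration the options are plain attributes on CONF;
    # dashes in option names become underscores.
    assert CONF.baremetal_introspection.introspection_timeout == 600
    assert CONF.service_available.ironic_inspector is True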
- - -import os - -from tempest.test_discover import plugins - -from ironic_inspector.test.inspector_tempest_plugin import config - - -class InspectorTempestPlugin(plugins.TempestPlugin): - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(__file__)))[0] - test_dir = "inspector_tempest_plugin/tests" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - conf.register_opt(config.service_option, - group='service_available') - conf.register_group(config.baremetal_introspection_group) - conf.register_opts(config.BaremetalIntrospectionGroup, - group="baremetal_introspection") - - def get_opt_lists(self): - return [ - (config.baremetal_introspection_group.name, - config.BaremetalIntrospectionGroup), - ('service_available', [config.service_option]) - ] diff --git a/ironic_inspector/test/inspector_tempest_plugin/rules/basic_ops_rule.json b/ironic_inspector/test/inspector_tempest_plugin/rules/basic_ops_rule.json deleted file mode 100644 index f1cfb0b..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/rules/basic_ops_rule.json +++ /dev/null @@ -1,25 +0,0 @@ -[ - { - "description": "Successful Rule", - "conditions": [ - {"op": "ge", "field": "memory_mb", "value": 256}, - {"op": "ge", "field": "local_gb", "value": 1} - ], - "actions": [ - {"action": "set-attribute", "path": "/extra/rule_success", - "value": "yes"} - ] - }, - { - "description": "Failing Rule", - "conditions": [ - {"op": "lt", "field": "memory_mb", "value": 42}, - {"op": "eq", "field": "local_gb", "value": 0} - ], - "actions": [ - {"action": "set-attribute", "path": "/extra/rule_success", - "value": "no"}, - {"action": "fail", "message": "This rule should not have run"} - ] - } -] diff --git a/ironic_inspector/test/inspector_tempest_plugin/services/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/services/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py b/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py deleted file mode 100644 index 3b1a75b..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py +++ /dev/null @@ -1,83 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
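The first rule from basic_ops_rule.json above can equally be built as a Python literal and handed to the introspection client defined below; a hedged sketch (the commented call refers to that client's create_rules(), which accepts a single rule or a list)::

    # The "Successful Rule" from basic_ops_rule.json as a Python dict.
    rule = {
        "description": "Successful Rule",
        "conditions": [
            # all conditions must hold for the actions to run
            {"op": "ge", "field": "memory_mb", "value": 256},
            {"op": "ge", "field": "local_gb", "value": 1},
        ],
        "actions": [
            # on a match, set node.extra['rule_success'] = 'yes'
            {"action": "set-attribute", "path": "/extra/rule_success",
             "value": "yes"},
        ],
    }
    # client.create_rules(rule)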
- -from ironic_tempest_plugin.services.baremetal import base -from tempest import clients -from tempest.common import credentials_factory as common_creds -from tempest import config - - -CONF = config.CONF -ADMIN_CREDS = common_creds.get_configured_admin_credentials() - - -class Manager(clients.Manager): - def __init__(self, - credentials=ADMIN_CREDS, - api_microversions=None): - super(Manager, self).__init__(credentials) - self.introspection_client = BaremetalIntrospectionClient( - self.auth_provider, - CONF.baremetal_introspection.catalog_type, - CONF.identity.region, - endpoint_type=CONF.baremetal_introspection.endpoint_type) - - -class BaremetalIntrospectionClient(base.BaremetalClient): - """Base Tempest REST client for Ironic Inspector API v1.""" - version = '1' - uri_prefix = 'v1' - - @base.handle_errors - def purge_rules(self): - """Purge all existing rules.""" - return self._delete_request('rules', uuid=None) - - @base.handle_errors - def create_rules(self, rules): - """Create introspection rules.""" - if not isinstance(rules, list): - rules = [rules] - for rule in rules: - self._create_request('rules', rule) - - @base.handle_errors - def get_status(self, uuid): - """Get introspection status for a node.""" - return self._show_request('introspection', uuid=uuid) - - @base.handle_errors - def get_data(self, uuid): - """Get introspection data for a node.""" - return self._show_request('introspection', uuid=uuid, - uri='/%s/introspection/%s/data' % - (self.uri_prefix, uuid)) - - @base.handle_errors - def start_introspection(self, uuid): - """Start introspection for a node.""" - resp, _body = self.post(url=('/%s/introspection/%s' % - (self.uri_prefix, uuid)), - body=None) - self.expected_success(202, resp.status) - - return resp - - @base.handle_errors - def abort_introspection(self, uuid): - """Abort introspection for a node.""" - resp, _body = self.post(url=('/%s/introspection/%s/abort' % - (self.uri_prefix, uuid)), - body=None) - self.expected_success(202, resp.status) - - return resp diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py b/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py deleted file mode 100644 index 343ac9f..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py +++ /dev/null @@ -1,244 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
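A hedged usage sketch of the client above; the node UUID is a placeholder, and the (response, body) tuple unpacking follows the tempest base-client convention that the manager below relies on::

    from ironic_inspector.test.inspector_tempest_plugin.services import \
        introspection_client

    client = introspection_client.Manager().introspection_client

    node_uuid = '<node-uuid>'  # placeholder
    client.start_introspection(node_uuid)  # POST, expects HTTP 202

    resp, status = client.get_status(node_uuid)
    if status['finished'] and not status['error']:
        resp, data = client.get_data(node_uuid)  # stored introspection data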
- -import json -import os -import time - -import six -import tempest -from tempest import config -from tempest.lib.common.api_version_utils import LATEST_MICROVERSION -from tempest.lib.common.utils import test_utils -from tempest.lib import exceptions as lib_exc - -from ironic_inspector.test.inspector_tempest_plugin import exceptions -from ironic_inspector.test.inspector_tempest_plugin.services import \ - introspection_client -from ironic_tempest_plugin.tests.api.admin.api_microversion_fixture import \ - APIMicroversionFixture as IronicMicroversionFixture -from ironic_tempest_plugin.tests.scenario.baremetal_manager import \ - BaremetalProvisionStates -from ironic_tempest_plugin.tests.scenario.baremetal_manager import \ - BaremetalScenarioTest - - -CONF = config.CONF - - -class InspectorScenarioTest(BaremetalScenarioTest): - """Provide a harness for Inspector scenario tests.""" - - wait_provisioning_state_interval = 15 - - credentials = ['primary', 'admin'] - - ironic_api_version = LATEST_MICROVERSION - - @classmethod - def setup_clients(cls): - super(InspectorScenarioTest, cls).setup_clients() - inspector_manager = introspection_client.Manager() - cls.introspection_client = inspector_manager.introspection_client - - def setUp(self): - super(InspectorScenarioTest, self).setUp() - # we rely on the 'available' provision_state; using latest - # microversion - self.useFixture(IronicMicroversionFixture(self.ironic_api_version)) - self.flavor = self.baremetal_flavor() - self.node_ids = {node['uuid'] for node in - self.node_filter(filter=lambda node: - node['provision_state'] == - BaremetalProvisionStates.AVAILABLE)} - self.rule_purge() - - def item_filter(self, list_method, show_method, - filter=lambda item: True, items=None): - if items is None: - items = [show_method(item['uuid']) for item in - list_method()] - return [item for item in items if filter(item)] - - def node_list(self): - return self.baremetal_client.list_nodes()[1]['nodes'] - - def node_port_list(self, node_uuid): - return self.baremetal_client.list_node_ports(node_uuid)[1]['ports'] - - def node_update(self, uuid, patch): - return self.baremetal_client.update_node(uuid, **patch) - - def node_show(self, uuid): - return self.baremetal_client.show_node(uuid)[1] - - def node_delete(self, uuid): - return self.baremetal_client.delete_node(uuid) - - def node_filter(self, filter=lambda node: True, nodes=None): - return self.item_filter(self.node_list, self.node_show, - filter=filter, items=nodes) - - def node_set_power_state(self, uuid, state): - self.baremetal_client.set_node_power_state(uuid, state) - - def node_set_provision_state(self, uuid, state): - self.baremetal_client.set_node_provision_state(uuid, state) - - def hypervisor_stats(self): - return (self.admin_manager.hypervisor_client.
- show_hypervisor_statistics()) - - def server_show(self, uuid): - self.servers_client.show_server(uuid) - - def rule_purge(self): - self.introspection_client.purge_rules() - - def rule_import(self, rule_path): - with open(rule_path, 'r') as fp: - rules = json.load(fp) - self.introspection_client.create_rules(rules) - - def rule_import_from_dict(self, rules): - self.introspection_client.create_rules(rules) - - def introspection_status(self, uuid): - return self.introspection_client.get_status(uuid)[1] - - def introspection_data(self, uuid): - return self.introspection_client.get_data(uuid)[1] - - def introspection_start(self, uuid): - return self.introspection_client.start_introspection(uuid) - - def introspection_abort(self, uuid): - return self.introspection_client.abort_introspection(uuid) - - def baremetal_flavor(self): - flavor_id = CONF.compute.flavor_ref - flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] - flavor['properties'] = self.flavors_client.list_flavor_extra_specs( - flavor_id)['extra_specs'] - return flavor - - def get_rule_path(self, rule_file): - base_path = os.path.split( - os.path.dirname(os.path.abspath(__file__)))[0] - base_path = os.path.split(base_path)[0] - return os.path.join(base_path, "inspector_tempest_plugin", - "rules", rule_file) - - def boot_instance(self): - return super(InspectorScenarioTest, self).boot_instance() - - def terminate_instance(self, instance): - return super(InspectorScenarioTest, self).terminate_instance(instance) - - def wait_for_node(self, node_name): - def check_node(): - try: - self.node_show(node_name) - except lib_exc.NotFound: - return False - return True - - if not test_utils.call_until_true( - check_node, - duration=CONF.baremetal_introspection.discovery_timeout, - sleep_for=20): - msg = ("Timed out waiting for node %s" % node_name) - raise lib_exc.TimeoutException(msg) - - inspected_node = self.node_show(self.node_info['name']) - self.wait_for_introspection_finished(inspected_node['uuid']) - - # TODO(aarefiev): switch to call_until_true - def wait_for_introspection_finished(self, node_ids): - """Wait for introspection of baremetal nodes to finish.""" - if isinstance(node_ids, six.text_type): - node_ids = [node_ids] - start = int(time.time()) - not_introspected = {node_id for node_id in node_ids} - - while not_introspected: - time.sleep(CONF.baremetal_introspection.introspection_sleep) - for node_id in node_ids: - status = self.introspection_status(node_id) - if status['finished']: - if status['error']: - message = ('Node %(node_id)s introspection failed ' - 'with %(error)s.' % - {'node_id': node_id, - 'error': status['error']}) - raise exceptions.IntrospectionFailed(message) - not_introspected = not_introspected - {node_id} - - if (int(time.time()) - start >= - CONF.baremetal_introspection.introspection_timeout): - message = ('Introspection timed out for nodes: %s' % - not_introspected) - raise exceptions.IntrospectionTimeout(message) - - def wait_for_nova_aware_of_bvms(self): - start = int(time.time()) - while True: - time.sleep(CONF.baremetal_introspection.hypervisor_update_sleep) - stats = self.hypervisor_stats() - expected_cpus = self.baremetal_flavor()['vcpus'] - if int(stats['hypervisor_statistics']['vcpus']) >= expected_cpus: - break - - timeout = CONF.baremetal_introspection.hypervisor_update_timeout - if (int(time.time()) - start >= timeout): - message = ( - 'Timed out waiting for nova hypervisor stats to update: ' - 'last stats %(stats)s after waiting %(timeout)s s.'
% - {'stats': stats, - 'timeout': timeout}) - raise exceptions.HypervisorUpdateTimeout(message) - - def node_cleanup(self, node_id): - if (self.node_show(node_id)['provision_state'] == - BaremetalProvisionStates.AVAILABLE): - return - # if introspection failed, we need to set the provision state - # to 'manage' to make the transition to 'provide' possible - if self.node_show(node_id)['provision_state'] == 'inspect failed': - self.baremetal_client.set_node_provision_state(node_id, 'manage') - - try: - self.baremetal_client.set_node_provision_state(node_id, 'provide') - except tempest.lib.exceptions.RestClientException: - # the node may already be cleaning or available - pass - - self.wait_provisioning_state( - node_id, [BaremetalProvisionStates.AVAILABLE, - BaremetalProvisionStates.NOSTATE], - timeout=CONF.baremetal.unprovision_timeout, - interval=self.wait_provisioning_state_interval) - - def introspect_node(self, node_id, remove_props=True): - if remove_props: - # remove any properties that are already set - patch = {('properties/%s' % key): None for key in - self.node_show(node_id)['properties']} - # reset any previous rule result - patch['extra/rule_success'] = None - self.node_update(node_id, patch) - - self.baremetal_client.set_node_provision_state(node_id, 'manage') - self.baremetal_client.set_node_provision_state(node_id, 'inspect') - self.addCleanup(self.node_cleanup, node_id) diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py b/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py deleted file mode 100644 index bae615e..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py +++ /dev/null @@ -1,175 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
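The TODO above suggests switching wait_for_introspection_finished to test_utils.call_until_true; one possible sketch of that rewrite, reusing the plugin's exceptions and config options (not the actual implementation)::

    from tempest.lib.common.utils import test_utils

    def wait_for_introspection(self, node_id):
        def introspection_done():
            status = self.introspection_status(node_id)
            if status['finished'] and status['error']:
                # surface a failed introspection immediately
                raise exceptions.IntrospectionFailed(
                    'Node %(node_id)s introspection failed with %(error)s.'
                    % {'node_id': node_id, 'error': status['error']})
            return status['finished']

        if not test_utils.call_until_true(
                introspection_done,
                duration=CONF.baremetal_introspection.introspection_timeout,
                sleep_for=CONF.baremetal_introspection.introspection_sleep):
            raise exceptions.IntrospectionTimeout(
                'Introspection timed out for node %s' % node_id)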
- -from tempest.config import CONF -from tempest.lib import decorators -from tempest import test # noqa - -from ironic_inspector.test.inspector_tempest_plugin.tests import manager -from ironic_tempest_plugin.tests.scenario import baremetal_manager - - -class InspectorBasicTest(manager.InspectorScenarioTest): - - def verify_node_introspection_data(self, node): - self.assertEqual('yes', node['extra']['rule_success']) - data = self.introspection_data(node['uuid']) - self.assertEqual(data['cpu_arch'], - self.flavor['properties']['cpu_arch']) - self.assertEqual(int(data['memory_mb']), - int(self.flavor['ram'])) - self.assertEqual(int(data['cpus']), int(self.flavor['vcpus'])) - - def verify_node_flavor(self, node): - expected_cpus = self.flavor['vcpus'] - expected_memory_mb = self.flavor['ram'] - expected_cpu_arch = self.flavor['properties']['cpu_arch'] - disk_size = self.flavor['disk'] - ephemeral_size = self.flavor['OS-FLV-EXT-DATA:ephemeral'] - expected_local_gb = disk_size + ephemeral_size - - self.assertEqual(expected_cpus, - int(node['properties']['cpus'])) - self.assertEqual(expected_memory_mb, - int(node['properties']['memory_mb'])) - self.assertEqual(expected_local_gb, - int(node['properties']['local_gb'])) - self.assertEqual(expected_cpu_arch, - node['properties']['cpu_arch']) - - def verify_introspection_aborted(self, uuid): - status = self.introspection_status(uuid) - - self.assertEqual('Canceled by operator', status['error']) - self.assertTrue(status['finished']) - - self.wait_provisioning_state( - uuid, 'inspect failed', - timeout=CONF.baremetal.active_timeout, - interval=self.wait_provisioning_state_interval) - - @decorators.idempotent_id('03bf7990-bee0-4dd7-bf74-b97ad7b52a4b') - @test.services('compute', 'image', 'network', 'object_storage') - def test_baremetal_introspection(self): - """This smoke test case follows this set of operations: - - * Fetches expected properties from baremetal flavor - * Removes all properties from nodes - * Sets nodes to manageable state - * Imports introspection rule basic_ops_rule.json - * Inspects nodes - * Verifies all properties are inspected - * Verifies introspection data - * Sets node to available state - * Creates a keypair - * Boots an instance using the keypair - * Deletes the instance - - """ - # prepare introspection rule - rule_path = self.get_rule_path("basic_ops_rule.json") - self.rule_import(rule_path) - self.addCleanup(self.rule_purge) - - for node_id in self.node_ids: - self.introspect_node(node_id) - - # settle down introspection - self.wait_for_introspection_finished(self.node_ids) - for node_id in self.node_ids: - self.wait_provisioning_state( - node_id, 'manageable', - timeout=CONF.baremetal_introspection.ironic_sync_timeout, - interval=self.wait_provisioning_state_interval) - - for node_id in self.node_ids: - node = self.node_show(node_id) - self.verify_node_introspection_data(node) - self.verify_node_flavor(node) - - for node_id in self.node_ids: - self.baremetal_client.set_node_provision_state(node_id, 'provide') - - for node_id in self.node_ids: - self.wait_provisioning_state( - node_id, baremetal_manager.BaremetalProvisionStates.AVAILABLE, - timeout=CONF.baremetal.active_timeout, - interval=self.wait_provisioning_state_interval) - - self.wait_for_nova_aware_of_bvms() - self.add_keypair() - ins, _node = self.boot_instance() - self.terminate_instance(ins) - - @decorators.idempotent_id('70ca3070-184b-4b7d-8892-e977d2bc2870') - def test_introspection_abort(self): - """This smoke test case follows this very basic set of 
operations: - - * Start nodes introspection - * Wait until nodes power on - * Abort introspection - * Verifies nodes status and power state - - """ - # start nodes introspection - for node_id in self.node_ids: - self.introspect_node(node_id, remove_props=False) - - # wait for nodes power on - for node_id in self.node_ids: - self.wait_power_state( - node_id, - baremetal_manager.BaremetalPowerStates.POWER_ON) - - # abort introspection - for node_id in self.node_ids: - self.introspection_abort(node_id) - - # wait for nodes power off - for node_id in self.node_ids: - self.wait_power_state( - node_id, - baremetal_manager.BaremetalPowerStates.POWER_OFF) - - # verify nodes status and provision state - for node_id in self.node_ids: - self.verify_introspection_aborted(node_id) - - -class InspectorSmokeTest(manager.InspectorScenarioTest): - - @decorators.idempotent_id('a702d1f1-88e4-42ce-88ef-cba2d9e3312e') - @decorators.attr(type='smoke') - @test.services('object_storage') - def test_baremetal_introspection(self): - """This smoke test case follows this very basic set of operations: - - * Fetches expected properties from baremetal flavor - * Removes all properties from one node - * Sets the node to manageable state - * Inspects the node - * Sets the node to available state - - """ - # NOTE(dtantsur): we can't silently skip this test because it runs in - # grenade with several other tests, and we won't have any indication - # that it was not run. - assert self.node_ids, "No available nodes" - node_id = next(iter(self.node_ids)) - self.introspect_node(node_id) - - # settle down introspection - self.wait_for_introspection_finished([node_id]) - self.wait_provisioning_state( - node_id, 'manageable', - timeout=CONF.baremetal_introspection.ironic_sync_timeout, - interval=self.wait_provisioning_state_interval) diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/test_discovery.py b/ironic_inspector/test/inspector_tempest_plugin/tests/test_discovery.py deleted file mode 100644 index 3880f60..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/tests/test_discovery.py +++ /dev/null @@ -1,149 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
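As a worked example of the local_gb arithmetic in verify_node_flavor above: the expected root disk size is the flavor's disk plus its ephemeral disk (the flavor values here are made up)::

    flavor = {'vcpus': 1, 'ram': 4096, 'disk': 40,
              'OS-FLV-EXT-DATA:ephemeral': 10}  # hypothetical flavor

    expected_local_gb = flavor['disk'] + flavor['OS-FLV-EXT-DATA:ephemeral']
    assert expected_local_gb == 50  # checked against properties/local_gb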
- -import six - -from ironic_tempest_plugin.tests.scenario import baremetal_manager -from tempest import config -from tempest.lib import decorators -from tempest import test  # noqa - -from ironic_inspector.test.inspector_tempest_plugin.tests import manager - -CONF = config.CONF - -ProvisionStates = baremetal_manager.BaremetalProvisionStates - - -class InspectorDiscoveryTest(manager.InspectorScenarioTest): - @classmethod - def skip_checks(cls): - super(InspectorDiscoveryTest, cls).skip_checks() - if not CONF.baremetal_introspection.auto_discovery_feature: - msg = ("Please provide a value for node_not_found_hook in the " - "processing section of inspector.conf to enable the " - "auto-discovery feature.") - raise cls.skipException(msg) - - def setUp(self): - super(InspectorDiscoveryTest, self).setUp() - - discovered_node = self._get_discovery_node() - self.node_info = self._get_node_info(discovered_node) - - rule = self._generate_discovery_rule(self.node_info) - - self.rule_import_from_dict(rule) - self.addCleanup(self.rule_purge) - - def _get_node_info(self, node_uuid): - node = self.node_show(node_uuid) - ports = self.node_port_list(node_uuid) - node['port_macs'] = [port['address'] for port in ports] - return node - - def _get_discovery_node(self): - nodes = self.node_list() - - discovered_node = None - for node in nodes: - if (node['provision_state'] == ProvisionStates.AVAILABLE or - node['provision_state'] == ProvisionStates.ENROLL or - node['provision_state'] is ProvisionStates.NOSTATE): - discovered_node = node['uuid'] - break - - self.assertIsNotNone(discovered_node) - return discovered_node - - def _generate_discovery_rule(self, node): - rule = dict() - rule["description"] = "Node %s discovery rule" % node['name'] - rule["actions"] = [ - {"action": "set-attribute", "path": "/name", - "value": "%s" % node['name']}, - {"action": "set-attribute", "path": "/driver", - "value": "%s" % node['driver']}, - ] - - for key, value in node['driver_info'].items(): - rule["actions"].append( - {"action": "set-attribute", "path": "/driver_info/%s" % key, - "value": "%s" % value}) - rule["conditions"] = [ - {"op": "eq", "field": "data://auto_discovered", "value": True} - ] - return rule - - def verify_node_introspection_data(self, node): - data = self.introspection_data(node['uuid']) - self.assertEqual(data['cpu_arch'], - self.flavor['properties']['cpu_arch']) - self.assertEqual(int(data['memory_mb']), - int(self.flavor['ram'])) - self.assertEqual(int(data['cpus']), int(self.flavor['vcpus'])) - - def verify_node_flavor(self, node): - expected_cpus = self.flavor['vcpus'] - expected_memory_mb = self.flavor['ram'] - expected_cpu_arch = self.flavor['properties']['cpu_arch'] - disk_size = self.flavor['disk'] - ephemeral_size = self.flavor['OS-FLV-EXT-DATA:ephemeral'] - expected_local_gb = disk_size + ephemeral_size - - self.assertEqual(expected_cpus, - int(node['properties']['cpus'])) - self.assertEqual(expected_memory_mb, - int(node['properties']['memory_mb'])) - self.assertEqual(expected_local_gb, - int(node['properties']['local_gb'])) - self.assertEqual(expected_cpu_arch, - node['properties']['cpu_arch']) - - def verify_node_driver_info(self, node_info, inspected_node): - for key in node_info['driver_info']: - self.assertEqual(six.text_type(node_info['driver_info'][key]), - inspected_node['driver_info'].get(key)) - - @decorators.idempotent_id('dd3abe5e-0d23-488d-bb4e-344cdeff7dcb') - def test_baremetal_auto_discovery(self): - """This test case follows this set of operations: - - * Choose an appropriate node,
based on its provision state; - * Get node info; - * Generate a discovery rule; - * Delete the discovered node from ironic; - * Start the baremetal VM via virsh; - * Wait for node introspection; - * Verify the introspected node. - """ - # NOTE(aarefiev): workaround for infra: the 'tempest' user doesn't - # have virsh privileges, so let's power on the node via ironic - # and then delete it. Because the node is blacklisted in inspector, - # we can't just power it on; therefore starting introspection is - # used to whitelist the discovered node first. - self.baremetal_client.set_node_provision_state( - self.node_info['uuid'], 'manage') - self.introspection_start(self.node_info['uuid']) - self.wait_power_state( - self.node_info['uuid'], - baremetal_manager.BaremetalPowerStates.POWER_ON) - self.node_delete(self.node_info['uuid']) - - self.wait_for_node(self.node_info['name']) - - inspected_node = self.node_show(self.node_info['name']) - self.verify_node_flavor(inspected_node) - self.verify_node_introspection_data(inspected_node) - self.verify_node_driver_info(self.node_info, inspected_node) - self.assertEqual(ProvisionStates.ENROLL, - inspected_node['provision_state']) diff --git a/ironic_inspector/test/unit/__init__.py b/ironic_inspector/test/unit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ironic_inspector/test/unit/test_api_tools.py b/ironic_inspector/test/unit/test_api_tools.py deleted file mode 100644 index 979e123..0000000 --- a/ironic_inspector/test/unit/test_api_tools.py +++ /dev/null @@ -1,136 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import flask -import mock -from oslo_config import cfg -from oslo_utils import uuidutils -import six - -from ironic_inspector import api_tools -import ironic_inspector.test.base as test_base -from ironic_inspector import utils - -CONF = cfg.CONF -app = flask.Flask(__name__) -app.testing = True - - -def mock_test_field(return_value=None, side_effect=None): - """Mock flask.request.args.get""" - def outer(func): - @six.wraps(func) - def inner(self, *args, **kwargs): - with app.test_request_context('/'): - get_mock = flask.request.args.get = mock.Mock() - get_mock.return_value = return_value - get_mock.side_effect = side_effect - ret = func(self, get_mock, *args, **kwargs) - return ret - return inner - return outer - - -class RaisesCoercionExceptionTestCase(test_base.BaseTest): - def test_ok(self): - @api_tools.raises_coercion_exceptions - def fn(): - return True - self.assertIs(True, fn()) - - def test_assertion_error(self): - @api_tools.raises_coercion_exceptions - def fn(): - assert False, 'Oops!'
- - six.assertRaisesRegex(self, utils.Error, 'Bad request: Oops!', fn) - - def test_value_error(self): - @api_tools.raises_coercion_exceptions - def fn(): - raise ValueError('Oops!') - - six.assertRaisesRegex(self, utils.Error, 'Bad request: Oops!', fn) - - -class RequestFieldTestCase(test_base.BaseTest): - @mock_test_field(return_value='42') - def test_request_field_ok(self, get_mock): - @api_tools.request_field('foo') - def fn(value): - self.assertEqual(get_mock.return_value, value) - - fn() - get_mock.assert_called_once_with('foo', default=None) - - @mock_test_field(return_value='42') - def test_request_field_with_default(self, get_mock): - @api_tools.request_field('foo') - def fn(value): - self.assertEqual(get_mock.return_value, value) - - fn(default='bar') - get_mock.assert_called_once_with('foo', default='bar') - - @mock_test_field(return_value=42) - def test_request_field_with_default_returns_default(self, get_mock): - @api_tools.request_field('foo') - def fn(value): - self.assertEqual(get_mock.return_value, value) - - fn(default=42) - get_mock.assert_called_once_with('foo', default=42) - - -class MarkerFieldTestCase(test_base.BaseTest): - @mock_test_field(return_value=uuidutils.generate_uuid()) - def test_marker_ok(self, get_mock): - value = api_tools.marker_field() - self.assertEqual(get_mock.return_value, value) - - @mock.patch.object(uuidutils, 'is_uuid_like', autospec=True) - @mock_test_field(return_value='foo') - def test_marker_check_fails(self, get_mock, like_mock): - like_mock.return_value = False - six.assertRaisesRegex(self, utils.Error, '.*(Marker not UUID-like)', - api_tools.marker_field) - like_mock.assert_called_once_with(get_mock.return_value) - - -class LimitFieldTestCase(test_base.BaseTest): - @mock_test_field(return_value=42) - def test_limit_ok(self, get_mock): - value = api_tools.limit_field() - self.assertEqual(get_mock.return_value, value) - - @mock_test_field(return_value=str(CONF.api_max_limit + 1)) - def test_limit_over(self, get_mock): - six.assertRaisesRegex(self, utils.Error, - '.*(Limit over %s)' % CONF.api_max_limit, - api_tools.limit_field) - - @mock_test_field(return_value='0') - def test_limit_zero(self, get_mock): - value = api_tools.limit_field() - self.assertEqual(CONF.api_max_limit, value) - - @mock_test_field(return_value='-1') - def test_limit_negative(self, get_mock): - six.assertRaisesRegex(self, utils.Error, - '.*(Limit cannot be negative)', - api_tools.limit_field) - - @mock_test_field(return_value='foo') - def test_limit_invalid_value(self, get_mock): - six.assertRaisesRegex(self, utils.Error, 'Bad request', - api_tools.limit_field) diff --git a/ironic_inspector/test/unit/test_common_ironic.py b/ironic_inspector/test/unit/test_common_ironic.py deleted file mode 100644 index c9b7ba2..0000000 --- a/ironic_inspector/test/unit/test_common_ironic.py +++ /dev/null @@ -1,131 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
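The limit/marker semantics pinned down by the tests above can be summarized in a short sketch; it assumes api_tools behaves exactly as asserted and uses a real Flask request context instead of mocking args.get::

    import flask
    from oslo_config import cfg

    from ironic_inspector import api_tools

    app = flask.Flask(__name__)

    with app.test_request_context('/?limit=0'):
        # limit=0 falls back to the configured maximum
        assert api_tools.limit_field() == cfg.CONF.api_max_limit

    try:
        with app.test_request_context('/?marker=not-a-uuid'):
            api_tools.marker_field()
    except Exception as exc:
        print(exc)  # utils.Error: 'Marker not UUID-like'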
- -import socket -import unittest - -from ironicclient import client -import mock -from oslo_config import cfg - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector.common import keystone -from ironic_inspector.test import base -from ironic_inspector import utils - - -CONF = cfg.CONF - - -@mock.patch.object(keystone, 'register_auth_opts') -@mock.patch.object(keystone, 'get_session') -@mock.patch.object(client, 'Client') -class TestGetClient(base.BaseTest): - def setUp(self): - super(TestGetClient, self).setUp() - ir_utils.reset_ironic_session() - self.cfg.config(auth_strategy='keystone') - self.cfg.config(os_region='somewhere', group='ironic') - self.addCleanup(ir_utils.reset_ironic_session) - - def test_get_client_with_auth_token(self, mock_client, mock_load, - mock_opts): - fake_token = 'token' - fake_ironic_url = 'http://127.0.0.1:6385' - mock_sess = mock.Mock() - mock_sess.get_endpoint.return_value = fake_ironic_url - mock_load.return_value = mock_sess - ir_utils.get_client(fake_token) - mock_sess.get_endpoint.assert_called_once_with( - endpoint_type=CONF.ironic.os_endpoint_type, - service_type=CONF.ironic.os_service_type, - region_name=CONF.ironic.os_region) - args = {'token': fake_token, - 'endpoint': fake_ironic_url, - 'os_ironic_api_version': ir_utils.DEFAULT_IRONIC_API_VERSION, - 'max_retries': CONF.ironic.max_retries, - 'retry_interval': CONF.ironic.retry_interval} - mock_client.assert_called_once_with(1, **args) - - def test_get_client_without_auth_token(self, mock_client, mock_load, - mock_opts): - mock_sess = mock.Mock() - mock_load.return_value = mock_sess - ir_utils.get_client(None) - args = {'session': mock_sess, - 'region_name': 'somewhere', - 'os_ironic_api_version': ir_utils.DEFAULT_IRONIC_API_VERSION, - 'max_retries': CONF.ironic.max_retries, - 'retry_interval': CONF.ironic.retry_interval} - mock_client.assert_called_once_with(1, **args) - - -class TestGetIpmiAddress(base.BaseTest): - def test_ipv4_in_resolves(self): - node = mock.Mock(spec=['driver_info', 'uuid'], - driver_info={'ipmi_address': '192.168.1.1'}) - ip = ir_utils.get_ipmi_address(node) - self.assertEqual('192.168.1.1', ip) - - @mock.patch('socket.gethostbyname') - def test_good_hostname_resolves(self, mock_socket): - node = mock.Mock(spec=['driver_info', 'uuid'], - driver_info={'ipmi_address': 'www.example.com'}) - mock_socket.return_value = '192.168.1.1' - ip = ir_utils.get_ipmi_address(node) - mock_socket.assert_called_once_with('www.example.com') - self.assertEqual('192.168.1.1', ip) - - @mock.patch('socket.gethostbyname') - def test_bad_hostname_errors(self, mock_socket): - node = mock.Mock(spec=['driver_info', 'uuid'], - driver_info={'ipmi_address': 'meow'}, - uuid='uuid1') - mock_socket.side_effect = socket.gaierror('Boom') - self.assertRaises(utils.Error, ir_utils.get_ipmi_address, node) - - def test_additional_fields(self): - node = mock.Mock(spec=['driver_info', 'uuid'], - driver_info={'foo': '192.168.1.1'}) - self.assertIsNone(ir_utils.get_ipmi_address(node)) - - self.cfg.config(ipmi_address_fields=['foo', 'bar', 'baz']) - ip = ir_utils.get_ipmi_address(node) - self.assertEqual('192.168.1.1', ip) - - def test_ipmi_bridging_enabled(self): - node = mock.Mock(spec=['driver_info', 'uuid'], - driver_info={'ipmi_address': 'www.example.com', - 'ipmi_bridging': 'single'}) - self.assertIsNone(ir_utils.get_ipmi_address(node)) - - def test_loopback_address(self): - node = mock.Mock(spec=['driver_info', 'uuid'], - driver_info={'ipmi_address': '127.0.0.2'}) - ip = 
ir_utils.get_ipmi_address(node) - self.assertIsNone(ip) - - -class TestCapabilities(unittest.TestCase): - - def test_capabilities_to_dict(self): - capabilities = 'cat:meow,dog:wuff' - expected_output = {'cat': 'meow', 'dog': 'wuff'} - output = ir_utils.capabilities_to_dict(capabilities) - self.assertEqual(expected_output, output) - - def test_dict_to_capabilities(self): - capabilities_dict = {'cat': 'meow', 'dog': 'wuff'} - output = ir_utils.dict_to_capabilities(capabilities_dict) - self.assertIn('cat:meow', output) - self.assertIn('dog:wuff', output) diff --git a/ironic_inspector/test/unit/test_db.py b/ironic_inspector/test/unit/test_db.py deleted file mode 100644 index 7b2a445..0000000 --- a/ironic_inspector/test/unit/test_db.py +++ /dev/null @@ -1,77 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock - -from ironic_inspector import db -from ironic_inspector.test import base as test_base - - -class TestDB(test_base.NodeTest): - @mock.patch.object(db, 'get_reader_session', autospec=True) - def test_model_query(self, mock_reader): - mock_session = mock_reader.return_value - fake_query = mock_session.query.return_value - - query = db.model_query('db.Node') - - mock_reader.assert_called_once_with() - mock_session.query.assert_called_once_with('db.Node') - self.assertEqual(fake_query, query) - - @mock.patch.object(db, 'get_writer_session', autospec=True) - def test_ensure_transaction_new_session(self, mock_writer): - mock_session = mock_writer.return_value - - with db.ensure_transaction() as session: - mock_writer.assert_called_once_with() - mock_session.begin.assert_called_once_with(subtransactions=True) - self.assertEqual(mock_session, session) - - @mock.patch.object(db, 'get_writer_session', autospec=True) - def test_ensure_transaction_session(self, mock_writer): - mock_session = mock.MagicMock() - - with db.ensure_transaction(session=mock_session) as session: - self.assertFalse(mock_writer.called) - mock_session.begin.assert_called_once_with(subtransactions=True) - self.assertEqual(mock_session, session) - - @mock.patch.object(db.enginefacade, 'transaction_context', autospec=True) - def test__create_context_manager(self, mock_cnxt): - mock_ctx_mgr = mock_cnxt.return_value - - ctx_mgr = db._create_context_manager() - - mock_ctx_mgr.configure.assert_called_once_with(sqlite_fk=False) - self.assertEqual(mock_ctx_mgr, ctx_mgr) - - @mock.patch.object(db, 'get_context_manager', autospec=True) - def test_get_reader_session(self, mock_cnxt_mgr): - mock_cnxt = mock_cnxt_mgr.return_value - mock_sess_maker = mock_cnxt.reader.get_sessionmaker.return_value - - session = db.get_reader_session() - - mock_sess_maker.assert_called_once_with() - self.assertEqual(mock_sess_maker.return_value, session) - - @mock.patch.object(db, 'get_context_manager', autospec=True) - def test_get_writer_session(self, mock_cnxt_mgr): - mock_cnxt = mock_cnxt_mgr.return_value - mock_sess_maker = mock_cnxt.writer.get_sessionmaker.return_value - - session = db.get_writer_session() - - 
mock_sess_maker.assert_called_once_with() - self.assertEqual(mock_sess_maker.return_value, session) diff --git a/ironic_inspector/test/unit/test_firewall.py b/ironic_inspector/test/unit/test_firewall.py deleted file mode 100644 index 387068c..0000000 --- a/ironic_inspector/test/unit/test_firewall.py +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright 2015 NEC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock -from oslo_config import cfg - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import firewall -from ironic_inspector import introspection_state as istate -from ironic_inspector import node_cache -from ironic_inspector.test import base as test_base - - -CONF = cfg.CONF -IB_DATA = """ -EMAC=02:00:02:97:00:01 IMAC=97:fe:80:00:00:00:00:00:00:7c:fe:90:03:00:29:26:52 -EMAC=02:00:00:61:00:02 IMAC=61:fe:80:00:00:00:00:00:00:7c:fe:90:03:00:29:24:4f -""" - - -@mock.patch.object(firewall, '_iptables') -@mock.patch.object(ir_utils, 'get_client') -@mock.patch.object(firewall.subprocess, 'check_call') -class TestFirewall(test_base.NodeTest): - CLIENT_ID = 'ff:00:00:00:00:00:02:00:00:02:c9:00:7c:fe:90:03:00:29:24:4f' - - def test_update_filters_without_manage_firewall(self, mock_call, - mock_get_client, - mock_iptables): - CONF.set_override('manage_firewall', False, 'firewall') - firewall.update_filters() - self.assertEqual(0, mock_iptables.call_count) - - def test_init_args(self, mock_call, mock_get_client, mock_iptables): - rootwrap_path = '/some/fake/path' - CONF.set_override('rootwrap_config', rootwrap_path) - firewall.init() - init_expected_args = [ - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', - '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-N', CONF.firewall.firewall_chain)] - - call_args_list = mock_iptables.call_args_list - - for (args, call) in zip(init_expected_args, call_args_list): - self.assertEqual(args, call[0]) - - expected = ('sudo', 'ironic-inspector-rootwrap', rootwrap_path, - 'iptables', '-w') - self.assertEqual(expected, firewall.BASE_COMMAND) - - def test_init_args_old_iptables(self, mock_call, mock_get_client, - mock_iptables): - rootwrap_path = '/some/fake/path' - CONF.set_override('rootwrap_config', rootwrap_path) - mock_call.side_effect = firewall.subprocess.CalledProcessError(2, '') - firewall.init() - init_expected_args = [ - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', - '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-N', CONF.firewall.firewall_chain)] - - call_args_list = mock_iptables.call_args_list - - for (args, call) in zip(init_expected_args, call_args_list): - self.assertEqual(args, call[0]) - - expected = ('sudo', 'ironic-inspector-rootwrap', rootwrap_path, - 'iptables',) - self.assertEqual(expected, firewall.BASE_COMMAND) - - def test_init_kwargs(self, mock_call, mock_get_client, mock_iptables): - 
firewall.init() - init_expected_kwargs = [ - {'ignore': True}, - {'ignore': True}, - {'ignore': True}] - - call_args_list = mock_iptables.call_args_list - - for (kwargs, call) in zip(init_expected_kwargs, call_args_list): - self.assertEqual(kwargs, call[1]) - - def test_update_filters_args(self, mock_call, mock_get_client, - mock_iptables): - # Pretend that we have nodes on introspection - node_cache.add_node(self.node.uuid, state=istate.States.waiting, - bmc_address='1.2.3.4') - - firewall.init() - - update_filters_expected_args = [ - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-N', CONF.firewall.firewall_chain), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-F', firewall.NEW_CHAIN), - ('-X', firewall.NEW_CHAIN), - ('-N', firewall.NEW_CHAIN), - ('-A', firewall.NEW_CHAIN, '-j', 'ACCEPT'), - ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-E', firewall.NEW_CHAIN, CONF.firewall.firewall_chain) - ] - - firewall.update_filters() - call_args_list = mock_iptables.call_args_list - - for (args, call) in zip(update_filters_expected_args, - call_args_list): - self.assertEqual(args, call[0]) - - def test_update_filters_kwargs(self, mock_call, mock_get_client, - mock_iptables): - firewall.init() - - update_filters_expected_kwargs = [ - {'ignore': True}, - {'ignore': True}, - {'ignore': True}, - {}, - {'ignore': True}, - {'ignore': True}, - {'ignore': True}, - {}, - {}, - {}, - {'ignore': True}, - {'ignore': True}, - {'ignore': True} - ] - - firewall.update_filters() - call_args_list = mock_iptables.call_args_list - - for (kwargs, call) in zip(update_filters_expected_kwargs, - call_args_list): - self.assertEqual(kwargs, call[1]) - - def test_update_filters_with_blacklist(self, mock_call, mock_get_client, - mock_iptables): - active_macs = ['11:22:33:44:55:66', '66:55:44:33:22:11'] - inactive_mac = ['AA:BB:CC:DD:EE:FF'] - self.macs = active_macs + inactive_mac - self.ports = [mock.Mock(address=m) for m in self.macs] - mock_get_client.port.list.return_value = self.ports - node_cache.add_node(self.node.uuid, mac=active_macs, - state=istate.States.finished, - bmc_address='1.2.3.4', foo=None) - firewall.init() - - update_filters_expected_args = [ - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-N', CONF.firewall.firewall_chain), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-F', firewall.NEW_CHAIN), - ('-X', firewall.NEW_CHAIN), - ('-N', firewall.NEW_CHAIN), - # Blacklist - ('-A', firewall.NEW_CHAIN, '-m', 'mac', '--mac-source', - inactive_mac[0], '-j', 'DROP'), - ('-A', firewall.NEW_CHAIN, '-j', 'ACCEPT'), - ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-E', firewall.NEW_CHAIN, CONF.firewall.firewall_chain) - ] - - 
firewall.update_filters(mock_get_client) - call_args_list = mock_iptables.call_args_list - - for (args, call) in zip(update_filters_expected_args, - call_args_list): - self.assertEqual(args, call[0]) - - # check caching - - mock_iptables.reset_mock() - firewall.update_filters(mock_get_client) - self.assertFalse(mock_iptables.called) - - def test_update_filters_clean_cache_on_error(self, mock_call, - mock_get_client, - mock_iptables): - active_macs = ['11:22:33:44:55:66', '66:55:44:33:22:11'] - inactive_mac = ['AA:BB:CC:DD:EE:FF'] - self.macs = active_macs + inactive_mac - self.ports = [mock.Mock(address=m) for m in self.macs] - mock_get_client.port.list.return_value = self.ports - node_cache.add_node(self.node.uuid, mac=active_macs, - state=istate.States.finished, - bmc_address='1.2.3.4', foo=None) - firewall.init() - - update_filters_expected_args = [ - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-F', firewall.NEW_CHAIN), - ('-X', firewall.NEW_CHAIN), - ('-N', firewall.NEW_CHAIN), - # Blacklist - ('-A', firewall.NEW_CHAIN, '-m', 'mac', '--mac-source', - inactive_mac[0], '-j', 'DROP'), - ('-A', firewall.NEW_CHAIN, '-j', 'ACCEPT'), - ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-E', firewall.NEW_CHAIN, CONF.firewall.firewall_chain) - ] - - mock_iptables.side_effect = [None, None, RuntimeError()] - self.assertRaises(RuntimeError, firewall.update_filters, - mock_get_client) - - # check caching - - mock_iptables.reset_mock() - mock_iptables.side_effect = None - firewall.update_filters(mock_get_client) - call_args_list = mock_iptables.call_args_list - - for (args, call) in zip(update_filters_expected_args, - call_args_list): - self.assertEqual(args, call[0]) - - def test_update_filters_args_node_not_found_hook(self, mock_call, - mock_get_client, - mock_iptables): - # DHCP should be always opened if node_not_found hook is set - CONF.set_override('node_not_found_hook', 'enroll', 'processing') - - firewall.init() - - update_filters_expected_args = [ - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-N', CONF.firewall.firewall_chain), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-F', firewall.NEW_CHAIN), - ('-X', firewall.NEW_CHAIN), - ('-N', firewall.NEW_CHAIN), - ('-A', firewall.NEW_CHAIN, '-j', 'ACCEPT'), - ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-E', firewall.NEW_CHAIN, CONF.firewall.firewall_chain) - ] - - firewall.update_filters() - call_args_list = mock_iptables.call_args_list - - for (args, call) in zip(update_filters_expected_args, - call_args_list): - self.assertEqual(args, call[0]) - - def test_update_filters_args_no_introspection(self, mock_call, - mock_get_client, - mock_iptables): - firewall.init() - firewall.BLACKLIST_CACHE = ['foo'] - mock_get_client.return_value.port.list.return_value = [ - mock.Mock(address='foobar')] - - 
update_filters_expected_args = [ - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-N', CONF.firewall.firewall_chain), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-F', firewall.NEW_CHAIN), - ('-X', firewall.NEW_CHAIN), - ('-N', firewall.NEW_CHAIN), - ('-A', firewall.NEW_CHAIN, '-j', 'REJECT'), - ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-E', firewall.NEW_CHAIN, CONF.firewall.firewall_chain) - ] - - firewall.update_filters() - call_args_list = mock_iptables.call_args_list - - for (args, call) in zip(update_filters_expected_args, - call_args_list): - self.assertEqual(args, call[0]) - - self.assertIsNone(firewall.BLACKLIST_CACHE) - - # Check caching enabled flag - - mock_iptables.reset_mock() - firewall.update_filters() - self.assertFalse(mock_iptables.called) - - # Adding a node changes it back - - node_cache.add_node(self.node.uuid, state=istate.States.starting, - bmc_address='1.2.3.4') - mock_iptables.reset_mock() - firewall.update_filters() - - mock_iptables.assert_any_call('-A', firewall.NEW_CHAIN, '-j', 'ACCEPT') - self.assertEqual({'foobar'}, firewall.BLACKLIST_CACHE) - - def test_update_filters_infiniband( - self, mock_call, mock_get_client, mock_iptables): - - CONF.set_override('ethoib_interfaces', ['eth0'], 'firewall') - active_macs = ['11:22:33:44:55:66', '66:55:44:33:22:11'] - expected_rmac = '02:00:00:61:00:02' - ports = [mock.Mock(address=m) for m in active_macs] - ports.append(mock.Mock(address='7c:fe:90:29:24:4f', - extra={'client-id': self.CLIENT_ID}, - spec=['address', 'extra'])) - mock_get_client.port.list.return_value = ports - node_cache.add_node(self.node.uuid, mac=active_macs, - state=istate.States.finished, - bmc_address='1.2.3.4', foo=None) - firewall.init() - - update_filters_expected_args = [ - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-N', CONF.firewall.firewall_chain), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-F', firewall.NEW_CHAIN), - ('-X', firewall.NEW_CHAIN), - ('-N', firewall.NEW_CHAIN), - # Blacklist - ('-A', firewall.NEW_CHAIN, '-m', 'mac', '--mac-source', - expected_rmac, '-j', 'DROP'), - ('-A', firewall.NEW_CHAIN, '-j', 'ACCEPT'), - ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-E', firewall.NEW_CHAIN, CONF.firewall.firewall_chain) - ] - - fileobj = mock.mock_open(read_data=IB_DATA) - with mock.patch('six.moves.builtins.open', fileobj, create=True): - firewall.update_filters(mock_get_client) - call_args_list = mock_iptables.call_args_list - - for (args, call) in zip(update_filters_expected_args, - call_args_list): - self.assertEqual(args, call[0]) - - def test_update_filters_infiniband_no_such_file( - self, mock_call, mock_get_client, mock_iptables): - - 
CONF.set_override('ethoib_interfaces', ['eth0'], 'firewall') - active_macs = ['11:22:33:44:55:66', '66:55:44:33:22:11'] - ports = [mock.Mock(address=m) for m in active_macs] - ports.append(mock.Mock(address='7c:fe:90:29:24:4f', - extra={'client-id': self.CLIENT_ID}, - spec=['address', 'extra'])) - mock_get_client.port.list.return_value = ports - node_cache.add_node(self.node.uuid, mac=active_macs, - state=istate.States.finished, - bmc_address='1.2.3.4', foo=None) - firewall.init() - - update_filters_expected_args = [ - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-N', CONF.firewall.firewall_chain), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-F', firewall.NEW_CHAIN), - ('-X', firewall.NEW_CHAIN), - ('-N', firewall.NEW_CHAIN), - # Blacklist - ('-A', firewall.NEW_CHAIN, '-m', 'mac', '--mac-source', - '7c:fe:90:29:24:4f', '-j', 'DROP'), - ('-A', firewall.NEW_CHAIN, '-j', 'ACCEPT'), - ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', firewall.NEW_CHAIN), - ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', - '67', '-j', CONF.firewall.firewall_chain), - ('-F', CONF.firewall.firewall_chain), - ('-X', CONF.firewall.firewall_chain), - ('-E', firewall.NEW_CHAIN, CONF.firewall.firewall_chain) - ] - - with mock.patch('six.moves.builtins.open', side_effect=IOError()): - firewall.update_filters(mock_get_client) - call_args_list = mock_iptables.call_args_list - - for (args, call) in zip(update_filters_expected_args, - call_args_list): - self.assertEqual(args, call[0]) diff --git a/ironic_inspector/test/unit/test_introspect.py b/ironic_inspector/test/unit/test_introspect.py deleted file mode 100644 index cf1fe1a..0000000 --- a/ironic_inspector/test/unit/test_introspect.py +++ /dev/null @@ -1,432 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
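# The tests below exercise introspect.introspect() and introspect.abort();
# the Ironic client, node_cache and firewall layers are replaced with mocks
# via the @mock.patch.object decorators on each test class.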
- -import collections -import time - -from ironicclient import exceptions -import mock -from oslo_config import cfg - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import firewall -from ironic_inspector import introspect -from ironic_inspector import node_cache -from ironic_inspector.test import base as test_base -from ironic_inspector import utils - -CONF = cfg.CONF - - -class BaseTest(test_base.NodeTest): - def setUp(self): - super(BaseTest, self).setUp() - introspect._LAST_INTROSPECTION_TIME = 0 - self.node.power_state = 'power off' - self.ports = [mock.Mock(address=m) for m in self.macs] - self.ports_dict = collections.OrderedDict((p.address, p) - for p in self.ports) - self.node_info = mock.Mock(uuid=self.uuid, options={}) - self.node_info.ports.return_value = self.ports_dict - self.node_info.node.return_value = self.node - - def _prepare(self, client_mock): - cli = client_mock.return_value - cli.node.get.return_value = self.node - cli.node.validate.return_value = mock.Mock(power={'result': True}) - return cli - - -@mock.patch.object(firewall, 'update_filters', autospec=True) -@mock.patch.object(node_cache, 'start_introspection', autospec=True) -@mock.patch.object(ir_utils, 'get_client', autospec=True) -class TestIntrospect(BaseTest): - def test_ok(self, client_mock, start_mock, filters_mock): - cli = self._prepare(client_mock) - start_mock.return_value = self.node_info - - introspect.introspect(self.node.uuid) - - cli.node.get.assert_called_once_with(self.uuid) - cli.node.validate.assert_called_once_with(self.uuid) - - start_mock.assert_called_once_with(self.uuid, - bmc_address=self.bmc_address, - ironic=cli) - self.node_info.ports.assert_called_once_with() - self.node_info.add_attribute.assert_called_once_with('mac', - self.macs) - filters_mock.assert_called_with(cli) - cli.node.set_boot_device.assert_called_once_with(self.uuid, - 'pxe', - persistent=False) - cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - self.node_info.acquire_lock.assert_called_once_with() - self.node_info.release_lock.assert_called_once_with() - - def test_loopback_bmc_address(self, client_mock, start_mock, filters_mock): - self.node.driver_info['ipmi_address'] = '127.0.0.1' - cli = self._prepare(client_mock) - start_mock.return_value = self.node_info - - introspect.introspect(self.node.uuid) - - cli.node.get.assert_called_once_with(self.uuid) - cli.node.validate.assert_called_once_with(self.uuid) - - start_mock.assert_called_once_with(self.uuid, - bmc_address=None, - ironic=cli) - self.node_info.ports.assert_called_once_with() - self.node_info.add_attribute.assert_called_once_with('mac', - self.macs) - filters_mock.assert_called_with(cli) - cli.node.set_boot_device.assert_called_once_with(self.uuid, - 'pxe', - persistent=False) - cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - self.node_info.acquire_lock.assert_called_once_with() - self.node_info.release_lock.assert_called_once_with() - - def test_ok_ilo_and_drac(self, client_mock, start_mock, filters_mock): - cli = self._prepare(client_mock) - start_mock.return_value = self.node_info - - for name in ('ilo_address', 'drac_host'): - self.node.driver_info = {name: self.bmc_address} - introspect.introspect(self.node.uuid) - - start_mock.assert_called_with(self.uuid, - bmc_address=self.bmc_address, - ironic=cli) - - def test_power_failure(self, client_mock, start_mock, filters_mock): - cli = self._prepare(client_mock) - cli.node.set_boot_device.side_effect = exceptions.BadRequest() 
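# the power-state call below fails as well; introspect() is expected to
# swallow both client errors and record them via node_info.finished(error=...)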
- cli.node.set_power_state.side_effect = exceptions.BadRequest() - start_mock.return_value = self.node_info - - introspect.introspect(self.node.uuid) - - cli.node.get.assert_called_once_with(self.uuid) - - start_mock.assert_called_once_with(self.uuid, - bmc_address=self.bmc_address, - ironic=cli) - cli.node.set_boot_device.assert_called_once_with(self.uuid, - 'pxe', - persistent=False) - cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - start_mock.return_value.finished.assert_called_once_with( - error=mock.ANY) - self.node_info.acquire_lock.assert_called_once_with() - self.node_info.release_lock.assert_called_once_with() - - def test_unexpected_error(self, client_mock, start_mock, filters_mock): - cli = self._prepare(client_mock) - start_mock.return_value = self.node_info - filters_mock.side_effect = RuntimeError() - - introspect.introspect(self.node.uuid) - - cli.node.get.assert_called_once_with(self.uuid) - - start_mock.assert_called_once_with(self.uuid, - bmc_address=self.bmc_address, - ironic=cli) - self.assertFalse(cli.node.set_boot_device.called) - start_mock.return_value.finished.assert_called_once_with( - error=mock.ANY) - self.node_info.acquire_lock.assert_called_once_with() - self.node_info.release_lock.assert_called_once_with() - - def test_no_macs(self, client_mock, start_mock, filters_mock): - cli = self._prepare(client_mock) - self.node_info.ports.return_value = [] - start_mock.return_value = self.node_info - - introspect.introspect(self.node.uuid) - - self.node_info.ports.assert_called_once_with() - - start_mock.assert_called_once_with(self.uuid, - bmc_address=self.bmc_address, - ironic=cli) - self.assertFalse(self.node_info.add_attribute.called) - self.assertFalse(filters_mock.called) - cli.node.set_boot_device.assert_called_once_with(self.uuid, - 'pxe', - persistent=False) - cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - - def test_no_lookup_attrs(self, client_mock, start_mock, filters_mock): - cli = self._prepare(client_mock) - self.node_info.ports.return_value = [] - start_mock.return_value = self.node_info - self.node_info.attributes = {} - - introspect.introspect(self.uuid) - - self.node_info.ports.assert_called_once_with() - self.node_info.finished.assert_called_once_with(error=mock.ANY) - self.assertEqual(0, filters_mock.call_count) - self.assertEqual(0, cli.node.set_power_state.call_count) - self.node_info.acquire_lock.assert_called_once_with() - self.node_info.release_lock.assert_called_once_with() - - def test_no_lookup_attrs_with_node_not_found_hook(self, client_mock, - start_mock, - filters_mock): - CONF.set_override('node_not_found_hook', 'example', 'processing') - cli = self._prepare(client_mock) - self.node_info.ports.return_value = [] - start_mock.return_value = self.node_info - self.node_info.attributes = {} - - introspect.introspect(self.uuid) - - self.node_info.ports.assert_called_once_with() - self.assertFalse(self.node_info.finished.called) - cli.node.set_boot_device.assert_called_once_with(self.uuid, - 'pxe', - persistent=False) - cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - - def test_failed_to_get_node(self, client_mock, start_mock, filters_mock): - cli = client_mock.return_value - cli.node.get.side_effect = exceptions.NotFound() - self.assertRaisesRegex(utils.Error, - 'Node %s was not found' % self.uuid, - introspect.introspect, self.uuid) - - cli.node.get.side_effect = exceptions.BadRequest() - self.assertRaisesRegex(utils.Error, - '%s: Bad Request' % self.uuid, - 
introspect.introspect, self.uuid) - - self.assertEqual(0, self.node_info.ports.call_count) - self.assertEqual(0, filters_mock.call_count) - self.assertEqual(0, cli.node.set_power_state.call_count) - self.assertFalse(start_mock.called) - self.assertFalse(self.node_info.acquire_lock.called) - - def test_failed_to_validate_node(self, client_mock, start_mock, - filters_mock): - cli = client_mock.return_value - cli.node.get.return_value = self.node - cli.node.validate.return_value = mock.Mock(power={'result': False, - 'reason': 'oops'}) - - self.assertRaisesRegex( - utils.Error, - 'Failed validation of power interface', - introspect.introspect, self.uuid) - - cli.node.validate.assert_called_once_with(self.uuid) - self.assertEqual(0, self.node_info.ports.call_count) - self.assertEqual(0, filters_mock.call_count) - self.assertEqual(0, cli.node.set_power_state.call_count) - self.assertFalse(start_mock.called) - self.assertFalse(self.node_info.acquire_lock.called) - - def test_wrong_provision_state(self, client_mock, start_mock, - filters_mock): - self.node.provision_state = 'active' - cli = client_mock.return_value - cli.node.get.return_value = self.node - - self.assertRaisesRegex( - utils.Error, 'Invalid provision state for introspection: "active"', - introspect.introspect, self.uuid) - - self.assertEqual(0, self.node_info.ports.call_count) - self.assertEqual(0, filters_mock.call_count) - self.assertEqual(0, cli.node.set_power_state.call_count) - self.assertFalse(start_mock.called) - self.assertFalse(self.node_info.acquire_lock.called) - - @mock.patch.object(time, 'time') - def test_introspection_delay(self, time_mock, client_mock, - start_mock, filters_mock): - time_mock.return_value = 42 - introspect._LAST_INTROSPECTION_TIME = 40 - CONF.set_override('introspection_delay', 10) - - cli = self._prepare(client_mock) - start_mock.return_value = self.node_info - - introspect.introspect(self.uuid) - - self.sleep_fixture.mock.assert_called_once_with(8) - cli.node.set_boot_device.assert_called_once_with(self.uuid, - 'pxe', - persistent=False) - cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - # updated to the current time.time() - self.assertEqual(42, introspect._LAST_INTROSPECTION_TIME) - - @mock.patch.object(time, 'time') - def test_introspection_delay_not_needed( - self, time_mock, client_mock, - start_mock, filters_mock): - - time_mock.return_value = 100 - introspect._LAST_INTROSPECTION_TIME = 40 - CONF.set_override('introspection_delay', 10) - - cli = self._prepare(client_mock) - start_mock.return_value = self.node_info - - introspect.introspect(self.uuid) - - self.sleep_fixture.mock.assert_not_called() - cli.node.set_boot_device.assert_called_once_with(self.uuid, - 'pxe', - persistent=False) - cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - # updated to the current time.time() - self.assertEqual(100, introspect._LAST_INTROSPECTION_TIME) - - @mock.patch.object(time, 'time') - def test_introspection_delay_custom_drivers( - self, time_mock, client_mock, start_mock, filters_mock): - self.node.driver = 'foobar' - time_mock.return_value = 42 - introspect._LAST_INTROSPECTION_TIME = 40 - CONF.set_override('introspection_delay', 10) - CONF.set_override('introspection_delay_drivers', 'fo{1,2}b.r') - - cli = self._prepare(client_mock) - start_mock.return_value = self.node_info - - introspect.introspect(self.uuid) - - self.sleep_fixture.mock.assert_called_once_with(8) - cli.node.set_boot_device.assert_called_once_with(self.uuid, - 'pxe', - persistent=False)
- cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - # updated to the current time.time() - self.assertEqual(42, introspect._LAST_INTROSPECTION_TIME) - - -@mock.patch.object(firewall, 'update_filters', autospec=True) -@mock.patch.object(node_cache, 'get_node', autospec=True) -@mock.patch.object(ir_utils, 'get_client', autospec=True) -class TestAbort(BaseTest): - def setUp(self): - super(TestAbort, self).setUp() - self.node_info.started_at = None - self.node_info.finished_at = None - - def test_ok(self, client_mock, get_mock, filters_mock): - cli = self._prepare(client_mock) - get_mock.return_value = self.node_info - self.node_info.acquire_lock.return_value = True - self.node_info.started_at = time.time() - self.node_info.finished_at = None - - introspect.abort(self.node.uuid) - - get_mock.assert_called_once_with(self.uuid, ironic=cli, - locked=False) - self.node_info.acquire_lock.assert_called_once_with(blocking=False) - filters_mock.assert_called_once_with(cli) - cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') - self.node_info.finished.assert_called_once_with(error='Canceled ' - 'by operator') - - def test_node_not_found(self, client_mock, get_mock, filters_mock): - cli = self._prepare(client_mock) - exc = utils.Error('Not found.', code=404) - get_mock.side_effect = exc - - self.assertRaisesRegex(utils.Error, str(exc), - introspect.abort, self.uuid) - - self.assertEqual(0, filters_mock.call_count) - self.assertEqual(0, cli.node.set_power_state.call_count) - self.assertEqual(0, self.node_info.finished.call_count) - - def test_node_locked(self, client_mock, get_mock, filters_mock): - cli = self._prepare(client_mock) - get_mock.return_value = self.node_info - self.node_info.acquire_lock.return_value = False - self.node_info.started_at = time.time() - - self.assertRaisesRegex(utils.Error, 'Node is locked, please, ' - 'retry later', introspect.abort, self.uuid) - - self.assertEqual(0, filters_mock.call_count) - self.assertEqual(0, cli.node.set_power_state.call_count) - self.assertEqual(0, self.node_info.finished.call_count) - - def test_introspection_already_finished(self, client_mock, - get_mock, filters_mock): - cli = self._prepare(client_mock) - get_mock.return_value = self.node_info - self.node_info.acquire_lock.return_value = True - self.node_info.started_at = time.time() - self.node_info.finished_at = time.time() - - introspect.abort(self.uuid) - - self.assertEqual(0, filters_mock.call_count) - self.assertEqual(0, cli.node.set_power_state.call_count) - self.assertEqual(0, self.node_info.finished.call_count) - - def test_firewall_update_exception(self, client_mock, get_mock, - filters_mock): - cli = self._prepare(client_mock) - get_mock.return_value = self.node_info - self.node_info.acquire_lock.return_value = True - self.node_info.started_at = time.time() - self.node_info.finished_at = None - filters_mock.side_effect = Exception('Boom') - - introspect.abort(self.uuid) - - get_mock.assert_called_once_with(self.uuid, ironic=cli, - locked=False) - self.node_info.acquire_lock.assert_called_once_with(blocking=False) - filters_mock.assert_called_once_with(cli) - cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') - self.node_info.finished.assert_called_once_with(error='Canceled ' - 'by operator') - - def test_node_power_off_exception(self, client_mock, get_mock, - filters_mock): - cli = self._prepare(client_mock) - get_mock.return_value = self.node_info - self.node_info.acquire_lock.return_value = True - self.node_info.started_at =
time.time() - self.node_info.finished_at = None - cli.node.set_power_state.side_effect = Exception('BadaBoom') - - introspect.abort(self.uuid) - - get_mock.assert_called_once_with(self.uuid, ironic=cli, - locked=False) - self.node_info.acquire_lock.assert_called_once_with(blocking=False) - filters_mock.assert_called_once_with(cli) - cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') - self.node_info.finished.assert_called_once_with(error='Canceled ' - 'by operator') diff --git a/ironic_inspector/test/unit/test_keystone.py b/ironic_inspector/test/unit/test_keystone.py deleted file mode 100644 index 3d9d4cf..0000000 --- a/ironic_inspector/test/unit/test_keystone.py +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from keystoneauth1 import loading as kaloading -from oslo_config import cfg - -from ironic_inspector.common import keystone -from ironic_inspector.test import base - - -TESTGROUP = 'keystone_test' - - -class KeystoneTest(base.BaseTest): - - def setUp(self): - super(KeystoneTest, self).setUp() - self.cfg.conf.register_group(cfg.OptGroup(TESTGROUP)) - - def test_register_auth_opts(self): - keystone.register_auth_opts(TESTGROUP) - auth_opts = ['auth_type', 'auth_section'] - sess_opts = ['certfile', 'keyfile', 'insecure', 'timeout', 'cafile'] - for o in auth_opts + sess_opts: - self.assertIn(o, self.cfg.conf[TESTGROUP]) - self.assertEqual('password', self.cfg.conf[TESTGROUP]['auth_type']) - - @mock.patch.object(kaloading, 'load_auth_from_conf_options', autospec=True) - def test_get_session(self, auth_mock): - keystone.register_auth_opts(TESTGROUP) - self.cfg.config(group=TESTGROUP, - cafile='/path/to/ca/file') - auth1 = mock.Mock() - auth_mock.return_value = auth1 - sess = keystone.get_session(TESTGROUP) - self.assertEqual('/path/to/ca/file', sess.verify) - self.assertEqual(auth1, sess.auth) - - def test_add_auth_options(self): - group, opts = keystone.add_auth_options([], TESTGROUP)[0] - self.assertEqual(TESTGROUP, group) - # check that there are no duplicates - names = {o.dest for o in opts} - self.assertEqual(len(names), len(opts)) - # NOTE(pas-ha) checking for most standard auth and session ones only - expected = {'timeout', 'insecure', 'cafile', 'certfile', 'keyfile', - 'auth_type', 'auth_url', 'username', 'password', - 'tenant_name', 'project_name', 'trust_id', - 'domain_id', 'user_domain_id', 'project_domain_id'} - self.assertTrue(expected.issubset(names)) diff --git a/ironic_inspector/test/unit/test_main.py b/ironic_inspector/test/unit/test_main.py deleted file mode 100644 index dd82447..0000000 --- a/ironic_inspector/test/unit/test_main.py +++ /dev/null @@ -1,615 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import json -import unittest - -import mock -from oslo_utils import uuidutils - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import conf -from ironic_inspector import introspect -from ironic_inspector import introspection_state as istate -from ironic_inspector import main -from ironic_inspector import node_cache -from ironic_inspector.plugins import base as plugins_base -from ironic_inspector.plugins import example as example_plugin -from ironic_inspector import process -from ironic_inspector import rules -from ironic_inspector.test import base as test_base -from ironic_inspector import utils -from oslo_config import cfg - -CONF = cfg.CONF - - -def _get_error(res): - return json.loads(res.data.decode('utf-8'))['error']['message'] - - -class BaseAPITest(test_base.BaseTest): - def setUp(self): - super(BaseAPITest, self).setUp() - main.app.config['TESTING'] = True - self.app = main.app.test_client() - CONF.set_override('auth_strategy', 'noauth') - self.uuid = uuidutils.generate_uuid() - - -class TestApiIntrospect(BaseAPITest): - @mock.patch.object(introspect, 'introspect', autospec=True) - def test_introspect_no_authentication(self, introspect_mock): - CONF.set_override('auth_strategy', 'noauth') - res = self.app.post('/v1/introspection/%s' % self.uuid) - self.assertEqual(202, res.status_code) - introspect_mock.assert_called_once_with(self.uuid, - token=None) - - @mock.patch.object(introspect, 'introspect', autospec=True) - def test_introspect_failed(self, introspect_mock): - introspect_mock.side_effect = utils.Error("boom") - res = self.app.post('/v1/introspection/%s' % self.uuid) - self.assertEqual(400, res.status_code) - self.assertEqual( - 'boom', - json.loads(res.data.decode('utf-8'))['error']['message']) - introspect_mock.assert_called_once_with( - self.uuid, - token=None) - - @mock.patch.object(utils, 'check_auth', autospec=True) - @mock.patch.object(introspect, 'introspect', autospec=True) - def test_introspect_failed_authentication(self, introspect_mock, - auth_mock): - CONF.set_override('auth_strategy', 'keystone') - auth_mock.side_effect = utils.Error('Boom', code=403) - res = self.app.post('/v1/introspection/%s' % self.uuid, - headers={'X-Auth-Token': 'token'}) - self.assertEqual(403, res.status_code) - self.assertFalse(introspect_mock.called) - - -@mock.patch.object(process, 'process', autospec=True) -class TestApiContinue(BaseAPITest): - def test_continue(self, process_mock): - # should be ignored - CONF.set_override('auth_strategy', 'keystone') - process_mock.return_value = {'result': 42} - res = self.app.post('/v1/continue', data='{"foo": "bar"}') - self.assertEqual(200, res.status_code) - process_mock.assert_called_once_with({"foo": "bar"}) - self.assertEqual({"result": 42}, json.loads(res.data.decode())) - - def test_continue_failed(self, process_mock): - process_mock.side_effect = utils.Error("boom") - res = self.app.post('/v1/continue', data='{"foo": "bar"}') - self.assertEqual(400, res.status_code) - process_mock.assert_called_once_with({"foo": "bar"}) - self.assertEqual('boom', _get_error(res)) - - def
test_continue_wrong_type(self, process_mock): - res = self.app.post('/v1/continue', data='42') - self.assertEqual(400, res.status_code) - self.assertEqual('Invalid data: expected a JSON object, got int', - _get_error(res)) - self.assertFalse(process_mock.called) - - -@mock.patch.object(introspect, 'abort', autospec=True) -class TestApiAbort(BaseAPITest): - def test_ok(self, abort_mock): - abort_mock.return_value = '', 202 - - res = self.app.post('/v1/introspection/%s/abort' % self.uuid, - headers={'X-Auth-Token': 'token'}) - - abort_mock.assert_called_once_with(self.uuid, token='token') - self.assertEqual(202, res.status_code) - self.assertEqual(b'', res.data) - - def test_no_authentication(self, abort_mock): - abort_mock.return_value = b'', 202 - - res = self.app.post('/v1/introspection/%s/abort' % self.uuid) - - abort_mock.assert_called_once_with(self.uuid, token=None) - self.assertEqual(202, res.status_code) - self.assertEqual(b'', res.data) - - def test_node_not_found(self, abort_mock): - exc = utils.Error("Not Found.", code=404) - abort_mock.side_effect = exc - - res = self.app.post('/v1/introspection/%s/abort' % self.uuid) - - abort_mock.assert_called_once_with(self.uuid, token=None) - self.assertEqual(404, res.status_code) - data = json.loads(str(res.data.decode())) - self.assertEqual(str(exc), data['error']['message']) - - def test_abort_failed(self, abort_mock): - exc = utils.Error("Locked.", code=409) - abort_mock.side_effect = exc - - res = self.app.post('/v1/introspection/%s/abort' % self.uuid) - - abort_mock.assert_called_once_with(self.uuid, token=None) - self.assertEqual(409, res.status_code) - data = json.loads(res.data.decode()) - self.assertEqual(str(exc), data['error']['message']) - - -class GetStatusAPIBaseTest(BaseAPITest): - def setUp(self): - super(GetStatusAPIBaseTest, self).setUp() - self.uuid2 = uuidutils.generate_uuid() - self.finished_node = node_cache.NodeInfo( - uuid=self.uuid, - started_at=datetime.datetime(1, 1, 1), - finished_at=datetime.datetime(1, 1, 2), - error='boom', - state=istate.States.error) - self.finished_node.links = [ - {u'href': u'http://localhost/v1/introspection/%s' % - self.finished_node.uuid, - u'rel': u'self'}, - ] - self.finished_node.status = { - 'finished': True, - 'state': self.finished_node._state, - 'started_at': self.finished_node.started_at.isoformat(), - 'finished_at': self.finished_node.finished_at.isoformat(), - 'error': self.finished_node.error, - 'uuid': self.finished_node.uuid, - 'links': self.finished_node.links - } - - self.unfinished_node = node_cache.NodeInfo( - uuid=self.uuid2, - started_at=datetime.datetime(1, 1, 1), - state=istate.States.processing) - self.unfinished_node.links = [ - {u'href': u'http://localhost/v1/introspection/%s' % - self.unfinished_node.uuid, - u'rel': u'self'} - ] - finished_at = (self.unfinished_node.finished_at.isoformat() - if self.unfinished_node.finished_at else None) - self.unfinished_node.status = { - 'finished': False, - 'state': self.unfinished_node._state, - 'started_at': self.unfinished_node.started_at.isoformat(), - 'finished_at': finished_at, - 'error': None, - 'uuid': self.unfinished_node.uuid, - 'links': self.unfinished_node.links - } - - -@mock.patch.object(node_cache, 'get_node', autospec=True) -class TestApiGetStatus(GetStatusAPIBaseTest): - def test_get_introspection_in_progress(self, get_mock): - get_mock.return_value = self.unfinished_node - res = self.app.get('/v1/introspection/%s' % self.uuid) - self.assertEqual(200, res.status_code) - 
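# the JSON body is expected to match the status dict prepared in
# GetStatusAPIBaseTest.setUp() exactly, links included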
self.assertEqual(self.unfinished_node.status, - json.loads(res.data.decode('utf-8'))) - - def test_get_introspection_finished(self, get_mock): - get_mock.return_value = self.finished_node - res = self.app.get('/v1/introspection/%s' % self.uuid) - self.assertEqual(200, res.status_code) - self.assertEqual(self.finished_node.status, - json.loads(res.data.decode('utf-8'))) - - -@mock.patch.object(node_cache, 'get_node_list', autospec=True) -class TestApiListStatus(GetStatusAPIBaseTest): - - def test_list_introspection(self, list_mock): - list_mock.return_value = [self.finished_node, self.unfinished_node] - res = self.app.get('/v1/introspection') - self.assertEqual(200, res.status_code) - statuses = json.loads(res.data.decode('utf-8')).get('introspection') - - self.assertEqual([self.finished_node.status, - self.unfinished_node.status], statuses) - list_mock.assert_called_once_with(marker=None, - limit=CONF.api_max_limit) - - def test_list_introspection_limit(self, list_mock): - res = self.app.get('/v1/introspection?limit=1000') - self.assertEqual(200, res.status_code) - list_mock.assert_called_once_with(marker=None, limit=1000) - - def test_list_introspection_marker(self, list_mock): - res = self.app.get('/v1/introspection?marker=%s' % - self.finished_node.uuid) - self.assertEqual(200, res.status_code) - list_mock.assert_called_once_with(marker=self.finished_node.uuid, - limit=CONF.api_max_limit) - - -class TestApiGetData(BaseAPITest): - @mock.patch.object(main.swift, 'SwiftAPI', autospec=True) - def test_get_introspection_data(self, swift_mock): - CONF.set_override('store_data', 'swift', 'processing') - data = { - 'ipmi_address': '1.2.3.4', - 'cpus': 2, - 'cpu_arch': 'x86_64', - 'memory_mb': 1024, - 'local_gb': 20, - 'interfaces': { - 'em1': {'mac': '11:22:33:44:55:66', 'ip': '1.2.0.1'}, - } - } - swift_conn = swift_mock.return_value - swift_conn.get_object.return_value = json.dumps(data) - res = self.app.get('/v1/introspection/%s/data' % self.uuid) - name = 'inspector_data-%s' % self.uuid - swift_conn.get_object.assert_called_once_with(name) - self.assertEqual(200, res.status_code) - self.assertEqual(data, json.loads(res.data.decode('utf-8'))) - - @mock.patch.object(main.swift, 'SwiftAPI', autospec=True) - def test_introspection_data_not_stored(self, swift_mock): - CONF.set_override('store_data', 'none', 'processing') - swift_conn = swift_mock.return_value - res = self.app.get('/v1/introspection/%s/data' % self.uuid) - self.assertFalse(swift_conn.get_object.called) - self.assertEqual(404, res.status_code) - - @mock.patch.object(ir_utils, 'get_node', autospec=True) - @mock.patch.object(main.swift, 'SwiftAPI', autospec=True) - def test_with_name(self, swift_mock, get_mock): - get_mock.return_value = mock.Mock(uuid=self.uuid) - CONF.set_override('store_data', 'swift', 'processing') - data = { - 'ipmi_address': '1.2.3.4', - 'cpus': 2, - 'cpu_arch': 'x86_64', - 'memory_mb': 1024, - 'local_gb': 20, - 'interfaces': { - 'em1': {'mac': '11:22:33:44:55:66', 'ip': '1.2.0.1'}, - } - } - swift_conn = swift_mock.return_value - swift_conn.get_object.return_value = json.dumps(data) - res = self.app.get('/v1/introspection/name1/data') - name = 'inspector_data-%s' % self.uuid - swift_conn.get_object.assert_called_once_with(name) - self.assertEqual(200, res.status_code) - self.assertEqual(data, json.loads(res.data.decode('utf-8'))) - get_mock.assert_called_once_with('name1', fields=['uuid']) - - -@mock.patch.object(process, 'reapply', autospec=True) -class TestApiReapply(BaseAPITest): - - def setUp(self): -
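# reapply needs the stored unprocessed ramdisk data, so store_data
# defaults to 'swift' for this class; test_swift_disabled flips it
# back to 'none'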
super(TestApiReapply, self).setUp() - CONF.set_override('store_data', 'swift', 'processing') - - def test_ok(self, reapply_mock): - - self.app.post('/v1/introspection/%s/data/unprocessed' % - self.uuid) - reapply_mock.assert_called_once_with(self.uuid) - - def test_user_data(self, reapply_mock): - res = self.app.post('/v1/introspection/%s/data/unprocessed' % - self.uuid, data='some data') - self.assertEqual(400, res.status_code) - message = json.loads(res.data.decode())['error']['message'] - self.assertEqual('User data processing is not supported yet', - message) - self.assertFalse(reapply_mock.called) - - def test_swift_disabled(self, reapply_mock): - CONF.set_override('store_data', 'none', 'processing') - - res = self.app.post('/v1/introspection/%s/data/unprocessed' % - self.uuid) - self.assertEqual(400, res.status_code) - message = json.loads(res.data.decode())['error']['message'] - self.assertEqual('Inspector is not configured to store ' - 'data. Set the [processing] store_data ' - 'configuration option to change this.', - message) - self.assertFalse(reapply_mock.called) - - def test_node_locked(self, reapply_mock): - exc = utils.Error('Locked.', code=409) - reapply_mock.side_effect = exc - - res = self.app.post('/v1/introspection/%s/data/unprocessed' % - self.uuid) - - self.assertEqual(409, res.status_code) - message = json.loads(res.data.decode())['error']['message'] - self.assertEqual(str(exc), message) - reapply_mock.assert_called_once_with(self.uuid) - - def test_node_not_found(self, reapply_mock): - exc = utils.Error('Not found.', code=404) - reapply_mock.side_effect = exc - - res = self.app.post('/v1/introspection/%s/data/unprocessed' % - self.uuid) - - self.assertEqual(404, res.status_code) - message = json.loads(res.data.decode())['error']['message'] - self.assertEqual(str(exc), message) - reapply_mock.assert_called_once_with(self.uuid) - - def test_generic_error(self, reapply_mock): - exc = utils.Error('Oops', code=400) - reapply_mock.side_effect = exc - - res = self.app.post('/v1/introspection/%s/data/unprocessed' % - self.uuid) - - self.assertEqual(400, res.status_code) - message = json.loads(res.data.decode())['error']['message'] - self.assertEqual(str(exc), message) - reapply_mock.assert_called_once_with(self.uuid) - - -class TestApiRules(BaseAPITest): - @mock.patch.object(rules, 'get_all') - def test_get_all(self, get_all_mock): - get_all_mock.return_value = [ - mock.Mock(spec=rules.IntrospectionRule, - **{'as_dict.return_value': {'uuid': 'foo'}}), - mock.Mock(spec=rules.IntrospectionRule, - **{'as_dict.return_value': {'uuid': 'bar'}}), - ] - - res = self.app.get('/v1/rules') - self.assertEqual(200, res.status_code) - self.assertEqual( - { - 'rules': [{'uuid': 'foo', - 'links': [ - {'href': '/v1/rules/foo', 'rel': 'self'} - ]}, - {'uuid': 'bar', - 'links': [ - {'href': '/v1/rules/bar', 'rel': 'self'} - ]}] - }, - json.loads(res.data.decode('utf-8'))) - get_all_mock.assert_called_once_with() - for m in get_all_mock.return_value: - m.as_dict.assert_called_with(short=True) - - @mock.patch.object(rules, 'delete_all') - def test_delete_all(self, delete_all_mock): - res = self.app.delete('/v1/rules') - self.assertEqual(204, res.status_code) - delete_all_mock.assert_called_once_with() - - @mock.patch.object(rules, 'create', autospec=True) - def test_create(self, create_mock): - data = {'uuid': self.uuid, - 'conditions': 'cond', - 'actions': 'act'} - exp = data.copy() - exp['description'] = None - create_mock.return_value = mock.Mock(spec=rules.IntrospectionRule, - 
**{'as_dict.return_value': exp}) - - res = self.app.post('/v1/rules', data=json.dumps(data)) - self.assertEqual(201, res.status_code) - create_mock.assert_called_once_with(conditions_json='cond', - actions_json='act', - uuid=self.uuid, - description=None) - self.assertEqual(exp, json.loads(res.data.decode('utf-8'))) - - @mock.patch.object(rules, 'create', autospec=True) - def test_create_api_less_1_6(self, create_mock): - data = {'uuid': self.uuid, - 'conditions': 'cond', - 'actions': 'act'} - exp = data.copy() - exp['description'] = None - create_mock.return_value = mock.Mock(spec=rules.IntrospectionRule, - **{'as_dict.return_value': exp}) - - headers = {conf.VERSION_HEADER: - main._format_version((1, 5))} - - res = self.app.post('/v1/rules', data=json.dumps(data), - headers=headers) - self.assertEqual(200, res.status_code) - create_mock.assert_called_once_with(conditions_json='cond', - actions_json='act', - uuid=self.uuid, - description=None) - self.assertEqual(exp, json.loads(res.data.decode('utf-8'))) - - @mock.patch.object(rules, 'create', autospec=True) - def test_create_bad_uuid(self, create_mock): - data = {'uuid': 'foo', - 'conditions': 'cond', - 'actions': 'act'} - - res = self.app.post('/v1/rules', data=json.dumps(data)) - self.assertEqual(400, res.status_code) - - @mock.patch.object(rules, 'get') - def test_get_one(self, get_mock): - get_mock.return_value = mock.Mock(spec=rules.IntrospectionRule, - **{'as_dict.return_value': - {'uuid': 'foo'}}) - - res = self.app.get('/v1/rules/' + self.uuid) - self.assertEqual(200, res.status_code) - self.assertEqual({'uuid': 'foo', - 'links': [ - {'href': '/v1/rules/foo', 'rel': 'self'} - ]}, - json.loads(res.data.decode('utf-8'))) - get_mock.assert_called_once_with(self.uuid) - get_mock.return_value.as_dict.assert_called_once_with(short=False) - - @mock.patch.object(rules, 'delete') - def test_delete_one(self, delete_mock): - res = self.app.delete('/v1/rules/' + self.uuid) - self.assertEqual(204, res.status_code) - delete_mock.assert_called_once_with(self.uuid) - - -class TestApiMisc(BaseAPITest): - @mock.patch.object(node_cache, 'get_node', autospec=True) - def test_404_expected(self, get_mock): - get_mock.side_effect = utils.Error('boom', code=404) - res = self.app.get('/v1/introspection/%s' % self.uuid) - self.assertEqual(404, res.status_code) - self.assertEqual('boom', _get_error(res)) - - def test_404_unexpected(self): - res = self.app.get('/v42') - self.assertEqual(404, res.status_code) - self.assertIn('not found', _get_error(res).lower()) - - @mock.patch.object(node_cache, 'get_node', autospec=True) - def test_500_with_debug(self, get_mock): - CONF.set_override('debug', True) - get_mock.side_effect = RuntimeError('boom') - res = self.app.get('/v1/introspection/%s' % self.uuid) - self.assertEqual(500, res.status_code) - self.assertEqual('Internal server error (RuntimeError): boom', - _get_error(res)) - - @mock.patch.object(node_cache, 'get_node', autospec=True) - def test_500_without_debug(self, get_mock): - CONF.set_override('debug', False) - get_mock.side_effect = RuntimeError('boom') - res = self.app.get('/v1/introspection/%s' % self.uuid) - self.assertEqual(500, res.status_code) - self.assertEqual('Internal server error', - _get_error(res)) - - -class TestApiVersions(BaseAPITest): - def _check_version_present(self, res): - self.assertEqual('%d.%d' % main.MINIMUM_API_VERSION, - res.headers.get(conf.MIN_VERSION_HEADER)) - self.assertEqual('%d.%d' % main.CURRENT_API_VERSION, - res.headers.get(conf.MAX_VERSION_HEADER)) - - def 
test_root_endpoint(self): - res = self.app.get("/") - self.assertEqual(200, res.status_code) - self._check_version_present(res) - data = res.data.decode('utf-8') - json_data = json.loads(data) - expected = {"versions": [{ - "status": "CURRENT", "id": '%s.%s' % main.CURRENT_API_VERSION, - "links": [{ - "rel": "self", - "href": ("http://localhost/v%s" % - main.CURRENT_API_VERSION[0]) - }] - }]} - self.assertEqual(expected, json_data) - - @mock.patch.object(main.app.url_map, "iter_rules", autospec=True) - def test_version_endpoint(self, mock_rules): - mock_rules.return_value = ["/v1/endpoint1", "/v1/endpoint2/", - "/v1/endpoint1/", - "/v2/endpoint1", "/v1/endpoint3", - "/v1/endpoint2//subpoint"] - endpoint = "/v1" - res = self.app.get(endpoint) - self.assertEqual(200, res.status_code) - self._check_version_present(res) - json_data = json.loads(res.data.decode('utf-8')) - expected = {u'resources': [ - { - u'name': u'endpoint1', - u'links': [{ - u'rel': u'self', - u'href': u'http://localhost/v1/endpoint1'}] - }, - { - u'name': u'endpoint3', - u'links': [{ - u'rel': u'self', - u'href': u'http://localhost/v1/endpoint3'}] - }, - ]} - self.assertEqual(expected, json_data) - - def test_version_endpoint_invalid(self): - endpoint = "/v-1" - res = self.app.get(endpoint) - self.assertEqual(404, res.status_code) - - def test_404_unexpected(self): - # API version on unknown pages - self._check_version_present(self.app.get('/v1/foobar')) - - @mock.patch.object(node_cache, 'get_node', autospec=True) - def test_usual_requests(self, get_mock): - get_mock.return_value = node_cache.NodeInfo(uuid=self.uuid, - started_at=42.0) - # Successful - self._check_version_present( - self.app.post('/v1/introspection/%s' % self.uuid)) - # With error - self._check_version_present( - self.app.post('/v1/introspection/foobar')) - - def test_request_correct_version(self): - headers = {conf.VERSION_HEADER: - main._format_version(main.CURRENT_API_VERSION)} - self._check_version_present(self.app.get('/', headers=headers)) - - def test_request_unsupported_version(self): - bad_version = (main.CURRENT_API_VERSION[0], - main.CURRENT_API_VERSION[1] + 1) - headers = {conf.VERSION_HEADER: - main._format_version(bad_version)} - res = self.app.get('/', headers=headers) - self._check_version_present(res) - self.assertEqual(406, res.status_code) - error = _get_error(res) - self.assertIn('%d.%d' % bad_version, error) - self.assertIn('%d.%d' % main.MINIMUM_API_VERSION, error) - self.assertIn('%d.%d' % main.CURRENT_API_VERSION, error) - - -class TestPlugins(unittest.TestCase): - @mock.patch.object(example_plugin.ExampleProcessingHook, - 'before_processing', autospec=True) - @mock.patch.object(example_plugin.ExampleProcessingHook, - 'before_update', autospec=True) - def test_hook(self, mock_post, mock_pre): - plugins_base._HOOKS_MGR = None - CONF.set_override('processing_hooks', 'example', 'processing') - mgr = plugins_base.processing_hooks_manager() - mgr.map_method('before_processing', 'introspection_data') - mock_pre.assert_called_once_with(mock.ANY, 'introspection_data') - mgr.map_method('before_update', 'node_info', {}) - mock_post.assert_called_once_with(mock.ANY, 'node_info', {}) - - def test_manager_is_cached(self): - self.assertIs(plugins_base.processing_hooks_manager(), - plugins_base.processing_hooks_manager()) diff --git a/ironic_inspector/test/unit/test_migrations.py b/ironic_inspector/test/unit/test_migrations.py deleted file mode 100644 index dd7fa37..0000000 --- a/ironic_inspector/test/unit/test_migrations.py +++ /dev/null @@
-1,498 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -Tests for database migrations. There are "opportunistic" tests here; the -supported backends are sqlite (used in the test environment by default), -mysql and postgresql, which require a properly configured unit test -environment. - -For the opportunistic testing you need to set up a db named 'openstack_citest' -with user 'openstack_citest' and password 'openstack_citest' on localhost. -The test will then use that db and u/p combo to run the tests. - -""" - - -import contextlib -import datetime - -import alembic -from alembic import script -import mock -from oslo_config import cfg -from oslo_db.sqlalchemy.migration_cli import ext_alembic -from oslo_db.sqlalchemy import orm -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import test_migrations -from oslo_db.sqlalchemy import utils as db_utils -from oslo_log import log as logging -from oslo_utils import uuidutils -import sqlalchemy - -from ironic_inspector import db -from ironic_inspector import dbsync -from ironic_inspector import introspection_state as istate -from ironic_inspector.test import base - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def _get_connect_string(backend, user, passwd, database): - """Get a database connection string. - - Try to get a connection with a very specific set of values; if we get - these, then we'll run the tests, otherwise they are skipped. - """ - if backend == "sqlite": - backend = "sqlite" - elif backend == "postgres": - backend = "postgresql+psycopg2" - elif backend == "mysql": - backend = "mysql+mysqldb" - else: - raise Exception("Unrecognized backend: '%s'" % backend) - - return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" - % {'backend': backend, 'user': user, 'passwd': passwd, - 'database': database}) - - -def _is_backend_avail(backend, user, passwd, database): - try: - connect_uri = _get_connect_string(backend, user, passwd, database) - engine = sqlalchemy.create_engine(connect_uri) - connection = engine.connect() - except Exception: - # intentionally catch all to handle exceptions even if we don't - # have any backend code loaded. - return False - else: - connection.close() - engine.dispose() - return True - - -@contextlib.contextmanager -def patch_with_engine(engine): - with mock.patch.object(db, 'get_writer_session') as patch_w_sess, \ - mock.patch.object(db, 'get_reader_session') as patch_r_sess: - patch_w_sess.return_value = patch_r_sess.return_value = ( - orm.get_maker(engine)()) - yield - - -class WalkVersionsMixin(object): - def _walk_versions(self, engine=None, alembic_cfg=None): - # Determine latest version script from the repo, then - # upgrade from 1 through to the latest, with no data - # in the databases. This just checks that the schema itself - # upgrades successfully.
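# note: ScriptDirectory.walk_revisions() yields revisions newest-first,
# which is why the loop below upgrades over reversed(versions)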
- - with patch_with_engine(engine): - script_directory = script.ScriptDirectory.from_config(alembic_cfg) - - self.assertIsNone(self.migration_ext.version()) - - versions = [ver for ver in script_directory.walk_revisions()] - - for version in reversed(versions): - self._migrate_up(engine, alembic_cfg, - version.revision, with_data=True) - - def _migrate_up(self, engine, config, version, with_data=False): - """migrate up to a new version of the db. - - We allow for data insertion and post checks at every - migration version with special _pre_upgrade_### and - _check_### functions in the main test. - """ - # NOTE(sdague): try block is here because it's impossible to debug - # where a failed data migration happens otherwise - try: - if with_data: - data = None - pre_upgrade = getattr( - self, "_pre_upgrade_%s" % version, None) - if pre_upgrade: - data = pre_upgrade(engine) - - self.migration_ext.upgrade(version) - self.assertEqual(version, self.migration_ext.version()) - if with_data: - check = getattr(self, "_check_%s" % version, None) - if check: - check(engine, data) - except Exception: - LOG.error("Failed to migrate to version %(version)s on engine " - "%(engine)s", - {'version': version, 'engine': engine}) - raise - - -class TestWalkVersions(base.BaseTest, WalkVersionsMixin): - def setUp(self): - super(TestWalkVersions, self).setUp() - self.engine = mock.MagicMock() - self.migration_ext = mock.MagicMock() - self.config = mock.MagicMock() - self.versions = [mock.Mock(revision='2b2'), mock.Mock(revision='1a1')] - - def test_migrate_up(self): - self.migration_ext.version.return_value = 'dsa123' - - self._migrate_up(self.engine, self.config, 'dsa123') - - self.migration_ext.version.assert_called_with() - - def test_migrate_up_with_data(self): - test_value = {"a": 1, "b": 2} - self.migration_ext.version.return_value = '141' - self._pre_upgrade_141 = mock.MagicMock() - self._pre_upgrade_141.return_value = test_value - self._check_141 = mock.MagicMock() - - self._migrate_up(self.engine, self.config, '141', True) - - self._pre_upgrade_141.assert_called_with(self.engine) - self._check_141.assert_called_with(self.engine, test_value) - - @mock.patch.object(script, 'ScriptDirectory') - @mock.patch.object(WalkVersionsMixin, '_migrate_up') - def test_walk_versions_all_default(self, _migrate_up, script_directory): - fc = script_directory.from_config() - fc.walk_revisions.return_value = self.versions - self.migration_ext.version.return_value = None - - self._walk_versions(self.engine, self.config) - - self.migration_ext.version.assert_called_with() - - upgraded = [mock.call(self.engine, self.config, v.revision, - with_data=True) for v in reversed(self.versions)] - self.assertEqual(self._migrate_up.call_args_list, upgraded) - - @mock.patch.object(script, 'ScriptDirectory') - @mock.patch.object(WalkVersionsMixin, '_migrate_up') - def test_walk_versions_all_false(self, _migrate_up, script_directory): - fc = script_directory.from_config() - fc.walk_revisions.return_value = self.versions - self.migration_ext.version.return_value = None - - self._walk_versions(self.engine, self.config) - - upgraded = [mock.call(self.engine, self.config, v.revision, - with_data=True) for v in reversed(self.versions)] - self.assertEqual(upgraded, self._migrate_up.call_args_list) - - -class MigrationCheckersMixin(object): - def setUp(self): - super(MigrationCheckersMixin, self).setUp() - self.config = dbsync._get_alembic_config() - self.config.ironic_inspector_config = CONF - # create AlembicExtension with fake config and replace 
- # with real one. - self.migration_ext = ext_alembic.AlembicExtension( - self.engine, {'alembic_ini_path': ''}) - self.migration_ext.config = self.config - - def test_walk_versions(self): - self._walk_versions(self.engine, self.config) - - def test_connect_fail(self): - """Test that we can trigger a database connection failure - - Test that we can fail gracefully to ensure we don't break people - without a specific database backend - """ - if _is_backend_avail(self.FIXTURE.DRIVER, "openstack_cifail", - self.FIXTURE.USERNAME, self.FIXTURE.DBNAME): - self.fail("Shouldn't have connected") - - def _check_578f84f38d(self, engine, data): - nodes = db_utils.get_table(engine, 'nodes') - col_names = [column.name for column in nodes.c] - self.assertIn('uuid', col_names) - self.assertIsInstance(nodes.c.uuid.type, sqlalchemy.types.String) - self.assertIn('started_at', col_names) - self.assertIsInstance(nodes.c.started_at.type, sqlalchemy.types.Float) - self.assertIn('finished_at', col_names) - self.assertIsInstance(nodes.c.finished_at.type, sqlalchemy.types.Float) - self.assertIn('error', col_names) - self.assertIsInstance(nodes.c.error.type, sqlalchemy.types.Text) - - attributes = db_utils.get_table(engine, 'attributes') - col_names = [column.name for column in attributes.c] - self.assertIn('uuid', col_names) - self.assertIsInstance(attributes.c.uuid.type, sqlalchemy.types.String) - self.assertIn('name', col_names) - self.assertIsInstance(attributes.c.name.type, sqlalchemy.types.String) - self.assertIn('value', col_names) - self.assertIsInstance(attributes.c.value.type, sqlalchemy.types.String) - - options = db_utils.get_table(engine, 'options') - col_names = [column.name for column in options.c] - self.assertIn('uuid', col_names) - self.assertIsInstance(options.c.uuid.type, sqlalchemy.types.String) - self.assertIn('name', col_names) - self.assertIsInstance(options.c.name.type, sqlalchemy.types.String) - self.assertIn('value', col_names) - self.assertIsInstance(options.c.value.type, sqlalchemy.types.Text) - - def _check_d588418040d(self, engine, data): - rules = db_utils.get_table(engine, 'rules') - col_names = [column.name for column in rules.c] - self.assertIn('uuid', col_names) - self.assertIsInstance(rules.c.uuid.type, sqlalchemy.types.String) - self.assertIn('created_at', col_names) - self.assertIsInstance(rules.c.created_at.type, - sqlalchemy.types.DateTime) - self.assertIn('description', col_names) - self.assertIsInstance(rules.c.description.type, sqlalchemy.types.Text) - self.assertIn('disabled', col_names) - # in some backends bool type is integer - self.assertIsInstance(rules.c.disabled.type, - (sqlalchemy.types.Boolean, - sqlalchemy.types.Integer)) - - conditions = db_utils.get_table(engine, 'rule_conditions') - col_names = [column.name for column in conditions.c] - self.assertIn('id', col_names) - self.assertIsInstance(conditions.c.id.type, sqlalchemy.types.Integer) - self.assertIn('rule', col_names) - self.assertIsInstance(conditions.c.rule.type, sqlalchemy.types.String) - self.assertIn('op', col_names) - self.assertIsInstance(conditions.c.op.type, sqlalchemy.types.String) - self.assertIn('multiple', col_names) - self.assertIsInstance(conditions.c.multiple.type, - sqlalchemy.types.String) - self.assertIn('field', col_names) - self.assertIsInstance(conditions.c.field.type, sqlalchemy.types.Text) - self.assertIn('params', col_names) - self.assertIsInstance(conditions.c.params.type, sqlalchemy.types.Text) - - actions = db_utils.get_table(engine, 'rule_actions') - col_names = [column.name for
column in actions.c] - self.assertIn('id', col_names) - self.assertIsInstance(actions.c.id.type, sqlalchemy.types.Integer) - self.assertIn('rule', col_names) - self.assertIsInstance(actions.c.rule.type, sqlalchemy.types.String) - self.assertIn('action', col_names) - self.assertIsInstance(actions.c.action.type, sqlalchemy.types.String) - self.assertIn('params', col_names) - self.assertIsInstance(actions.c.params.type, sqlalchemy.types.Text) - - def _check_e169a4a81d88(self, engine, data): - rule_conditions = db_utils.get_table(engine, 'rule_conditions') - # a condition inserted without 'invert' should default to False - data = {'id': 1, 'op': 'eq', 'multiple': 'all'} - rule_conditions.insert().execute(data) - - conds = rule_conditions.select( - rule_conditions.c.id == 1).execute().first() - self.assertFalse(conds['invert']) - - # a condition inserted with invert=True keeps the explicit value - data = {'id': 2, 'op': 'eq', 'multiple': 'all', 'invert': True} - rule_conditions.insert().execute(data) - - conds = rule_conditions.select( - rule_conditions.c.id == 2).execute().first() - self.assertTrue(conds['invert']) - - def _pre_upgrade_d2e48801c8ef(self, engine): - ok_node_id = uuidutils.generate_uuid() - err_node_id = uuidutils.generate_uuid() - data = [ - { - 'uuid': ok_node_id, - 'error': None, - 'finished_at': 0.0, - 'started_at': 0.0 - }, - { - 'uuid': err_node_id, - 'error': 'Oops!', - 'finished_at': 0.0, - 'started_at': 0.0 - } - ] - nodes = db_utils.get_table(engine, 'nodes') - for node in data: - nodes.insert().execute(node) - return {'err_node_id': err_node_id, 'ok_node_id': ok_node_id} - - def _check_d2e48801c8ef(self, engine, data): - nodes = db_utils.get_table(engine, 'nodes') - col_names = [column.name for column in nodes.c] - self.assertIn('uuid', col_names) - self.assertIsInstance(nodes.c.uuid.type, sqlalchemy.types.String) - self.assertIn('version_id', col_names) - self.assertIsInstance(nodes.c.version_id.type, sqlalchemy.types.String) - self.assertIn('state', col_names) - self.assertIsInstance(nodes.c.state.type, sqlalchemy.types.String) - self.assertIn('started_at', col_names) - self.assertIsInstance(nodes.c.started_at.type, sqlalchemy.types.Float) - self.assertIn('finished_at', col_names) - self.assertIsInstance(nodes.c.finished_at.type, sqlalchemy.types.Float) - self.assertIn('error', col_names) - self.assertIsInstance(nodes.c.error.type, sqlalchemy.types.Text) - - ok_node_id = data['ok_node_id'] - err_node_id = data['err_node_id'] - # assert the ok node is in the (default) finished state - ok_node = nodes.select(nodes.c.uuid == ok_node_id).execute().first() - self.assertEqual(istate.States.finished, ok_node['state']) - # assert err node state is error after the migration - # even though the default state is finished - err_node = nodes.select(nodes.c.uuid == err_node_id).execute().first() - self.assertEqual(istate.States.error, err_node['state']) - - def _pre_upgrade_d00d6e3f38c4(self, engine): - nodes = db_utils.get_table(engine, 'nodes') - data = [] - for finished_at in (None, 1234.0): - node = {'uuid': uuidutils.generate_uuid(), - 'started_at': 1232.0, - 'finished_at': finished_at, - 'error': None} - nodes.insert().values(node).execute() - data.append(node) - return data - - def _check_d00d6e3f38c4(self, engine, data): - nodes = db_utils.get_table(engine, 'nodes') - col_names = [column.name for column in nodes.c] - - self.assertIn('started_at', col_names) - self.assertIn('finished_at', col_names) - self.assertIsInstance(nodes.c.started_at.type, - sqlalchemy.types.DateTime) - self.assertIsInstance(nodes.c.finished_at.type, -
sqlalchemy.types.DateTime) - - for node in data: - finished_at = datetime.datetime.utcfromtimestamp( - node['finished_at']) if node['finished_at'] else None - row = nodes.select(nodes.c.uuid == node['uuid']).execute().first() - self.assertEqual( - datetime.datetime.utcfromtimestamp(node['started_at']), - row['started_at']) - self.assertEqual( - finished_at, - row['finished_at']) - - def _pre_upgrade_882b2d84cb1b(self, engine): - attributes = db_utils.get_table(engine, 'attributes') - nodes = db_utils.get_table(engine, 'nodes') - self.node_uuid = uuidutils.generate_uuid() - node = { - 'uuid': self.node_uuid, - 'started_at': datetime.datetime.utcnow(), - 'finished_at': None, - 'error': None, - 'state': istate.States.starting - } - nodes.insert().values(node).execute() - data = { - 'uuid': self.node_uuid, - 'name': 'foo', - 'value': 'bar' - } - attributes.insert().values(data).execute() - - def _check_882b2d84cb1b(self, engine, data): - attributes = db_utils.get_table(engine, 'attributes') - col_names = [column.name for column in attributes.c] - self.assertIn('uuid', col_names) - self.assertIsInstance(attributes.c.uuid.type, sqlalchemy.types.String) - self.assertIn('node_uuid', col_names) - self.assertIsInstance(attributes.c.node_uuid.type, - sqlalchemy.types.String) - self.assertIn('name', col_names) - self.assertIsInstance(attributes.c.name.type, sqlalchemy.types.String) - self.assertIn('value', col_names) - self.assertIsInstance(attributes.c.value.type, sqlalchemy.types.String) - - row = attributes.select(attributes.c.node_uuid == - self.node_uuid).execute().first() - self.assertEqual(self.node_uuid, row.node_uuid) - self.assertNotEqual(self.node_uuid, row.uuid) - self.assertIsNotNone(row.uuid) - self.assertEqual('foo', row.name) - self.assertEqual('bar', row.value) - - def test_upgrade_and_version(self): - with patch_with_engine(self.engine): - self.migration_ext.upgrade('head') - self.assertIsNotNone(self.migration_ext.version()) - - def test_upgrade_twice(self): - with patch_with_engine(self.engine): - self.migration_ext.upgrade('578f84f38d') - v1 = self.migration_ext.version() - self.migration_ext.upgrade('d588418040d') - v2 = self.migration_ext.version() - self.assertNotEqual(v1, v2) - - -class TestMigrationsMySQL(MigrationCheckersMixin, - WalkVersionsMixin, - test_base.MySQLOpportunisticTestCase): - pass - - -class TestMigrationsPostgreSQL(MigrationCheckersMixin, - WalkVersionsMixin, - test_base.PostgreSQLOpportunisticTestCase): - pass - - -class TestMigrationSqlite(MigrationCheckersMixin, - WalkVersionsMixin, - test_base.DbTestCase): - pass - - -class ModelsMigrationSyncMixin(object): - - def get_metadata(self): - return db.Base.metadata - - def get_engine(self): - return self.engine - - def db_sync(self, engine): - config = dbsync._get_alembic_config() - config.ironic_inspector_config = CONF - with patch_with_engine(engine): - alembic.command.upgrade(config, 'head') - - -class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin, - test_migrations.ModelsMigrationsSync, - test_base.MySQLOpportunisticTestCase): - pass - - -class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin, - test_migrations.ModelsMigrationsSync, - test_base.PostgreSQLOpportunisticTestCase): - pass - - -class ModelsMigrationsSyncSqlite(ModelsMigrationSyncMixin, - test_migrations.ModelsMigrationsSync, - test_base.DbTestCase): - pass diff --git a/ironic_inspector/test/unit/test_node_cache.py b/ironic_inspector/test/unit/test_node_cache.py deleted file mode 100644 index 6ce9bab..0000000 --- 
a/ironic_inspector/test/unit/test_node_cache.py +++ /dev/null @@ -1,1265 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import datetime -import json -import unittest - -import automaton -import mock -from oslo_config import cfg -import oslo_db -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import db -from ironic_inspector import introspection_state as istate -from ironic_inspector import node_cache -from ironic_inspector.test import base as test_base -from ironic_inspector import utils - -CONF = cfg.CONF - - -class TestNodeCache(test_base.NodeTest): - def test_add_node(self): - # Ensure previous node information is cleared - uuid2 = uuidutils.generate_uuid() - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.node.uuid, - state=istate.States.starting).save(session) - db.Node(uuid=uuid2, - state=istate.States.starting).save(session) - db.Attribute(uuid=uuidutils.generate_uuid(), name='mac', - value='11:22:11:22:11:22', - node_uuid=self.uuid).save(session) - - node = node_cache.add_node(self.node.uuid, - istate.States.starting, - mac=self.macs, bmc_address='1.2.3.4', - foo=None) - self.assertEqual(self.uuid, node.uuid) - self.assertTrue( - (datetime.datetime.utcnow() - datetime.timedelta(seconds=60) - < node.started_at < - datetime.datetime.utcnow() + datetime.timedelta(seconds=60))) - self.assertFalse(node._locked) - - res = set(db.model_query(db.Node.uuid, - db.Node.started_at).all()) - - expected = {(node.uuid, node.started_at), (uuid2, None)} - self.assertEqual(expected, res) - - res = (db.model_query(db.Attribute.name, - db.Attribute.value, db.Attribute.node_uuid). 
- order_by(db.Attribute.name, db.Attribute.value).all()) - self.assertEqual([('bmc_address', '1.2.3.4', self.uuid), - ('mac', self.macs[0], self.uuid), - ('mac', self.macs[1], self.uuid), - ('mac', self.macs[2], self.uuid)], - [(row.name, row.value, row.node_uuid) for row in res]) - - def test__delete_node(self): - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.node.uuid, - state=istate.States.finished).save(session) - db.Attribute(uuid=uuidutils.generate_uuid(), name='mac', - value='11:22:11:22:11:22', node_uuid=self.uuid).save( - session) - data = {'s': 'value', 'b': True, 'i': 42} - encoded = json.dumps(data) - db.Option(uuid=self.uuid, name='name', value=encoded).save( - session) - - node_cache._delete_node(self.uuid) - session = db.get_writer_session() - row_node = db.model_query(db.Node).filter_by( - uuid=self.uuid).first() - self.assertIsNone(row_node) - row_attribute = db.model_query(db.Attribute).filter_by( - node_uuid=self.uuid).first() - self.assertIsNone(row_attribute) - row_option = db.model_query(db.Option).filter_by( - uuid=self.uuid).first() - self.assertIsNone(row_option) - - @mock.patch.object(node_cache, '_get_lock_ctx', autospec=True) - @mock.patch.object(node_cache, '_list_node_uuids') - @mock.patch.object(node_cache, '_delete_node') - def test_delete_nodes_not_in_list(self, mock__delete_node, - mock__list_node_uuids, - mock__get_lock_ctx): - uuid2 = uuidutils.generate_uuid() - uuids = {self.uuid} - mock__list_node_uuids.return_value = {self.uuid, uuid2} - session = db.get_writer_session() - with session.begin(): - node_cache.delete_nodes_not_in_list(uuids) - mock__delete_node.assert_called_once_with(uuid2) - mock__get_lock_ctx.assert_called_once_with(uuid2) - mock__get_lock_ctx.return_value.__enter__.assert_called_once_with() - - def test_active_macs(self): - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.node.uuid, - state=istate.States.starting).save(session) - values = [('mac', '11:22:11:22:11:22', self.uuid), - ('mac', '22:11:22:11:22:11', self.uuid)] - for value in values: - db.Attribute(uuid=uuidutils.generate_uuid(), name=value[0], - value=value[1], node_uuid=value[2]).save(session) - self.assertEqual({'11:22:11:22:11:22', '22:11:22:11:22:11'}, - node_cache.active_macs()) - - def test__list_node_uuids(self): - session = db.get_writer_session() - uuid2 = uuidutils.generate_uuid() - with session.begin(): - db.Node(uuid=self.node.uuid, - state=istate.States.starting).save(session) - db.Node(uuid=uuid2, - state=istate.States.starting).save(session) - - node_uuid_list = node_cache._list_node_uuids() - self.assertEqual({self.uuid, uuid2}, node_uuid_list) - - def test_add_attribute(self): - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.node.uuid, - state=istate.States.starting).save(session) - node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42) - node_info.add_attribute('key', 'value') - res = db.model_query(db.Attribute.name, - db.Attribute.value, - db.Attribute.node_uuid, - session=session) - res = res.order_by(db.Attribute.name, db.Attribute.value).all() - self.assertEqual([('key', 'value', self.uuid)], - [tuple(row) for row in res]) - # check that .attributes got invalidated and reloaded - self.assertEqual({'key': ['value']}, node_info.attributes) - - def test_add_attribute_same_name(self): - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.node.uuid, - state=istate.States.starting).save(session) - node_info = 
node_cache.NodeInfo(uuid=self.uuid, started_at=42) - - node_info.add_attribute('key', ['foo', 'bar']) - node_info.add_attribute('key', 'baz') - res = db.model_query(db.Attribute.name, db.Attribute.value, - db.Attribute.node_uuid, session=session) - res = res.order_by(db.Attribute.name, db.Attribute.value).all() - self.assertEqual([('key', 'bar', self.uuid), - ('key', 'baz', self.uuid), - ('key', 'foo', self.uuid)], - [tuple(row) for row in res]) - - def test_add_attribute_same_value(self): - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.node.uuid, - state=istate.States.starting).save(session) - node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42) - - node_info.add_attribute('key', 'value') - node_info.add_attribute('key', 'value') - res = db.model_query(db.Attribute.name, db.Attribute.value, - db.Attribute.node_uuid, session=session) - self.assertEqual([('key', 'value', self.uuid), - ('key', 'value', self.uuid)], - [tuple(row) for row in res]) - - def test_attributes(self): - node_info = node_cache.add_node(self.uuid, - istate.States.starting, - bmc_address='1.2.3.4', - mac=self.macs) - self.assertEqual({'bmc_address': ['1.2.3.4'], - 'mac': self.macs}, - node_info.attributes) - # check invalidation - session = db.get_writer_session() - with session.begin(): - db.Attribute(uuid=uuidutils.generate_uuid(), name='foo', - value='bar', node_uuid=self.uuid).save(session) - # still cached - self.assertEqual({'bmc_address': ['1.2.3.4'], - 'mac': self.macs}, - node_info.attributes) - node_info.invalidate_cache() - self.assertEqual({'bmc_address': ['1.2.3.4'], - 'mac': self.macs, 'foo': ['bar']}, - node_info.attributes) - - -class TestNodeCacheFind(test_base.NodeTest): - def setUp(self): - super(TestNodeCacheFind, self).setUp() - self.macs2 = ['00:00:00:00:00:00'] - node_cache.add_node(self.uuid, - istate.States.starting, - bmc_address='1.2.3.4', - mac=self.macs) - - def test_no_data(self): - self.assertRaises(utils.Error, node_cache.find_node) - self.assertRaises(utils.Error, node_cache.find_node, mac=[]) - - def test_bmc(self): - res = node_cache.find_node(bmc_address='1.2.3.4') - self.assertEqual(self.uuid, res.uuid) - self.assertTrue( - datetime.datetime.utcnow() - datetime.timedelta(seconds=60) - < res.started_at < - datetime.datetime.utcnow() + datetime.timedelta(seconds=1)) - self.assertTrue(res._locked) - - def test_same_bmc_different_macs(self): - uuid2 = uuidutils.generate_uuid() - node_cache.add_node(uuid2, - istate.States.starting, - bmc_address='1.2.3.4', - mac=self.macs2) - res = node_cache.find_node(bmc_address='1.2.3.4', mac=self.macs) - self.assertEqual(self.uuid, res.uuid) - res = node_cache.find_node(bmc_address='1.2.3.4', mac=self.macs2) - self.assertEqual(uuid2, res.uuid) - - def test_same_bmc_raises(self): - uuid2 = uuidutils.generate_uuid() - node_cache.add_node(uuid2, - istate.States.starting, - bmc_address='1.2.3.4') - six.assertRaisesRegex(self, utils.Error, 'Multiple nodes', - node_cache.find_node, bmc_address='1.2.3.4') - - def test_macs(self): - res = node_cache.find_node(mac=['11:22:33:33:33:33', self.macs[1]]) - self.assertEqual(self.uuid, res.uuid) - self.assertTrue( - datetime.datetime.utcnow() - datetime.timedelta(seconds=60) - < res.started_at < - datetime.datetime.utcnow() + datetime.timedelta(seconds=1)) - self.assertTrue(res._locked) - - def test_macs_not_found(self): - self.assertRaises(utils.Error, node_cache.find_node, - mac=['11:22:33:33:33:33', - '66:66:44:33:22:11']) - - def test_macs_multiple_found(self): - 
node_cache.add_node('uuid2', - istate.States.starting, - mac=self.macs2) - self.assertRaises(utils.Error, node_cache.find_node, - mac=[self.macs[0], self.macs2[0]]) - - def test_both(self): - res = node_cache.find_node(bmc_address='1.2.3.4', - mac=self.macs) - self.assertEqual(self.uuid, res.uuid) - self.assertTrue( - datetime.datetime.utcnow() - datetime.timedelta(seconds=60) - < res.started_at < - datetime.datetime.utcnow() + datetime.timedelta(seconds=1)) - self.assertTrue(res._locked) - - def test_inconsistency(self): - session = db.get_writer_session() - with session.begin(): - (db.model_query(db.Node).filter_by(uuid=self.uuid). - delete()) - self.assertRaises(utils.Error, node_cache.find_node, - bmc_address='1.2.3.4') - - def test_already_finished(self): - session = db.get_writer_session() - with session.begin(): - (db.model_query(db.Node).filter_by(uuid=self.uuid). - update({'finished_at': datetime.datetime.utcnow()})) - self.assertRaises(utils.Error, node_cache.find_node, - bmc_address='1.2.3.4') - - -class TestNodeCacheCleanUp(test_base.NodeTest): - def setUp(self): - super(TestNodeCacheCleanUp, self).setUp() - self.started_at = datetime.datetime.utcnow() - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.uuid, - state=istate.States.waiting, - started_at=self.started_at).save( - session) - for v in self.macs: - db.Attribute(uuid=uuidutils.generate_uuid(), name='mac', - value=v, node_uuid=self.uuid).save(session) - db.Option(uuid=self.uuid, name='foo', value='bar').save( - session) - - def test_no_timeout(self): - CONF.set_override('timeout', 0) - - self.assertFalse(node_cache.clean_up()) - - res = [tuple(row) for row in - db.model_query(db.Node.finished_at, - db.Node.error).all()] - self.assertEqual([(None, None)], res) - self.assertEqual(len(self.macs), - db.model_query(db.Attribute).count()) - self.assertEqual(1, db.model_query(db.Option).count()) - - @mock.patch.object(node_cache, '_get_lock', autospec=True) - @mock.patch.object(timeutils, 'utcnow') - def test_ok(self, time_mock, get_lock_mock): - time_mock.return_value = datetime.datetime.utcnow() - - self.assertFalse(node_cache.clean_up()) - - res = [tuple(row) for row in db.model_query( - db.Node.finished_at, db.Node.error).all()] - self.assertEqual([(None, None)], res) - self.assertEqual(len(self.macs), - db.model_query(db.Attribute).count()) - self.assertEqual(1, db.model_query(db.Option).count()) - self.assertFalse(get_lock_mock.called) - - @mock.patch.object(node_cache, '_get_lock', autospec=True) - @mock.patch.object(timeutils, 'utcnow') - def test_timeout(self, time_mock, get_lock_mock): - # Add a finished node to confirm we don't try to timeout it - time_mock.return_value = self.started_at - session = db.get_writer_session() - finished_at = self.started_at + datetime.timedelta(seconds=60) - with session.begin(): - db.Node(uuid=self.uuid + '1', started_at=self.started_at, - state=istate.States.waiting, - finished_at=finished_at).save(session) - CONF.set_override('timeout', 99) - time_mock.return_value = (self.started_at + - datetime.timedelta(seconds=100)) - - self.assertEqual([self.uuid], node_cache.clean_up()) - - res = [(row.state, row.finished_at, row.error) for row in - db.model_query(db.Node).all()] - self.assertEqual( - [(istate.States.error, - self.started_at + datetime.timedelta(seconds=100), - 'Introspection timeout'), - (istate.States.waiting, - self.started_at + datetime.timedelta(seconds=60), None)], - res) - self.assertEqual([], db.model_query(db.Attribute).all()) - 
self.assertEqual([], db.model_query(db.Option).all()) - get_lock_mock.assert_called_once_with(self.uuid) - get_lock_mock.return_value.acquire.assert_called_once_with() - - @mock.patch.object(node_cache, '_get_lock', autospec=True) - @mock.patch.object(timeutils, 'utcnow') - def test_timeout_active_state(self, time_mock, get_lock_mock): - time_mock.return_value = self.started_at - session = db.get_writer_session() - CONF.set_override('timeout', 1) - for state in [istate.States.starting, istate.States.enrolling, - istate.States.processing, istate.States.reapplying]: - db.model_query(db.Node, session=session).filter_by( - uuid=self.uuid).update({'state': state, 'finished_at': None}) - - current_time = self.started_at + datetime.timedelta(seconds=2) - time_mock.return_value = current_time - - self.assertEqual([self.uuid], node_cache.clean_up()) - - res = [(row.state, row.finished_at, row.error) for row in - db.model_query(db.Node).all()] - self.assertEqual( - [(istate.States.error, current_time, 'Introspection timeout')], - res) - - def test_old_status(self): - CONF.set_override('node_status_keep_time', 42) - session = db.get_writer_session() - with session.begin(): - db.model_query(db.Node).update( - {'finished_at': (datetime.datetime.utcnow() - - datetime.timedelta(seconds=100))}) - - self.assertEqual([], node_cache.clean_up()) - - self.assertEqual([], db.model_query(db.Node).all()) - - def test_old_status_disabled(self): - # Status clean up is disabled by default - session = db.get_writer_session() - with session.begin(): - db.model_query(db.Node).update( - {'finished_at': (datetime.datetime.utcnow() - - datetime.timedelta(days=10000))}) - - self.assertEqual([], node_cache.clean_up()) - - self.assertNotEqual([], db.model_query(db.Node).all()) - - -class TestNodeCacheGetNode(test_base.NodeTest): - def test_ok(self): - started_at = (datetime.datetime.utcnow() - - datetime.timedelta(seconds=42)) - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.uuid, - state=istate.States.starting, - started_at=started_at).save(session) - info = node_cache.get_node(self.uuid) - - self.assertEqual(self.uuid, info.uuid) - self.assertEqual(started_at, info.started_at) - self.assertIsNone(info.finished_at) - self.assertIsNone(info.error) - self.assertFalse(info._locked) - - def test_locked(self): - started_at = (datetime.datetime.utcnow() - - datetime.timedelta(seconds=42)) - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.uuid, - state=istate.States.starting, - started_at=started_at).save(session) - info = node_cache.get_node(self.uuid, locked=True) - - self.assertEqual(self.uuid, info.uuid) - self.assertEqual(started_at, info.started_at) - self.assertIsNone(info.finished_at) - self.assertIsNone(info.error) - self.assertTrue(info._locked) - - def test_not_found(self): - self.assertRaises(utils.Error, node_cache.get_node, - uuidutils.generate_uuid()) - - def test_with_name(self): - started_at = (datetime.datetime.utcnow() - - datetime.timedelta(seconds=42)) - session = db.get_writer_session() - with session.begin(): - db.Node(uuid=self.uuid, - state=istate.States.starting, - started_at=started_at).save(session) - ironic = mock.Mock() - ironic.node.get.return_value = self.node - - info = node_cache.get_node('name', ironic=ironic) - - self.assertEqual(self.uuid, info.uuid) - self.assertEqual(started_at, info.started_at) - self.assertIsNone(info.finished_at) - self.assertIsNone(info.error) - self.assertFalse(info._locked) - 
ironic.node.get.assert_called_once_with('name') - - -@mock.patch.object(timeutils, 'utcnow', lambda: datetime.datetime(1, 1, 1)) -class TestNodeInfoFinished(test_base.NodeTest): - def setUp(self): - super(TestNodeInfoFinished, self).setUp() - node_cache.add_node(self.uuid, - istate.States.processing, - bmc_address='1.2.3.4', - mac=self.macs) - self.node_info = node_cache.NodeInfo( - uuid=self.uuid, started_at=datetime.datetime(3, 1, 4)) - session = db.get_writer_session() - with session.begin(): - db.Option(uuid=self.uuid, name='foo', value='bar').save( - session) - - def test_success(self): - self.node_info.finished() - - session = db.get_writer_session() - with session.begin(): - self.assertEqual((datetime.datetime(1, 1, 1), None), - tuple(db.model_query( - db.Node.finished_at, - db.Node.error).first())) - self.assertEqual([], db.model_query(db.Attribute, - session=session).all()) - self.assertEqual([], db.model_query(db.Option, - session=session).all()) - - def test_error(self): - self.node_info.finished(error='boom') - - self.assertEqual((datetime.datetime(1, 1, 1), 'boom'), - tuple(db.model_query(db.Node.finished_at, - db.Node.error).first())) - self.assertEqual([], db.model_query(db.Attribute).all()) - self.assertEqual([], db.model_query(db.Option).all()) - - def test_release_lock(self): - self.node_info.acquire_lock() - self.node_info.finished() - self.assertFalse(self.node_info._locked) - - -class TestNodeInfoOptions(test_base.NodeTest): - def setUp(self): - super(TestNodeInfoOptions, self).setUp() - node_cache.add_node(self.uuid, - istate.States.starting, - bmc_address='1.2.3.4', - mac=self.macs) - self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14) - session = db.get_writer_session() - with session.begin(): - db.Option(uuid=self.uuid, name='foo', value='"bar"').save( - session) - - def test_get(self): - self.assertEqual({'foo': 'bar'}, self.node_info.options) - # should be cached - self.assertEqual(self.node_info.options, self.node_info.options) - # invalidate cache - old_options = self.node_info.options - self.node_info.invalidate_cache() - self.assertIsNot(old_options, self.node_info.options) - self.assertEqual(old_options, self.node_info.options) - - def test_set(self): - data = {'s': 'value', 'b': True, 'i': 42} - self.node_info.set_option('name', data) - self.assertEqual(data, self.node_info.options['name']) - - new = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14) - self.assertEqual(data, new.options['name']) - - -@mock.patch.object(ir_utils, 'get_client', autospec=True) -class TestNodeCacheIronicObjects(unittest.TestCase): - def setUp(self): - super(TestNodeCacheIronicObjects, self).setUp() - self.ports = {'mac1': mock.Mock(address='mac1', spec=['address']), - 'mac2': mock.Mock(address='mac2', spec=['address'])} - self.uuid = uuidutils.generate_uuid() - - def test_node_provided(self, mock_ironic): - node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - node=mock.sentinel.node) - self.assertIs(mock.sentinel.node, node_info.node()) - self.assertFalse(mock_ironic.called) - - def test_node_not_provided(self, mock_ironic): - mock_ironic.return_value.node.get.return_value = mock.sentinel.node - node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0) - - self.assertIs(mock.sentinel.node, node_info.node()) - self.assertIs(node_info.node(), node_info.node()) - - mock_ironic.assert_called_once_with() - mock_ironic.return_value.node.get.assert_called_once_with(self.uuid) - - def test_node_ironic_preset(self, mock_ironic): - mock_ironic2 = 
mock.Mock() - mock_ironic2.node.get.return_value = mock.sentinel.node - node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - ironic=mock_ironic2) - self.assertIs(mock.sentinel.node, node_info.node()) - - self.assertFalse(mock_ironic.called) - mock_ironic2.node.get.assert_called_once_with(self.uuid) - - def test_ports_provided(self, mock_ironic): - node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - ports=self.ports) - self.assertIs(self.ports, node_info.ports()) - self.assertFalse(mock_ironic.called) - - def test_ports_provided_list(self, mock_ironic): - node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - ports=list(self.ports.values())) - self.assertEqual(self.ports, node_info.ports()) - self.assertFalse(mock_ironic.called) - - def test_ports_not_provided(self, mock_ironic): - mock_ironic.return_value.node.list_ports.return_value = list( - self.ports.values()) - node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0) - - self.assertEqual(self.ports, node_info.ports()) - self.assertIs(node_info.ports(), node_info.ports()) - - mock_ironic.assert_called_once_with() - mock_ironic.return_value.node.list_ports.assert_called_once_with( - self.uuid, limit=0, detail=True) - - def test_ports_ironic_preset(self, mock_ironic): - mock_ironic2 = mock.Mock() - mock_ironic2.node.list_ports.return_value = list( - self.ports.values()) - node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - ironic=mock_ironic2) - self.assertEqual(self.ports, node_info.ports()) - - self.assertFalse(mock_ironic.called) - mock_ironic2.node.list_ports.assert_called_once_with( - self.uuid, limit=0, detail=True) - - -class TestUpdate(test_base.NodeTest): - def setUp(self): - super(TestUpdate, self).setUp() - self.ironic = mock.Mock() - self.ports = {'mac%d' % i: mock.Mock(address='mac%d' % i, uuid=str(i)) - for i in range(2)} - self.node_info = node_cache.NodeInfo(uuid=self.uuid, - started_at=0, - node=self.node, - ports=self.ports, - ironic=self.ironic) - - def test_patch(self): - self.ironic.node.update.return_value = mock.sentinel.node - - self.node_info.patch([{'patch': 'patch'}]) - - self.ironic.node.update.assert_called_once_with(self.uuid, - [{'patch': 'patch'}]) - self.assertIs(mock.sentinel.node, self.node_info.node()) - - def test_patch_path_wo_leading_slash(self): - self.ironic.node.update.return_value = mock.sentinel.node - - patch = [{'op': 'add', 'path': 'driver_info/test', 'value': 42}] - expected_patch = copy.deepcopy(patch) - expected_patch[0]['path'] = '/' + 'driver_info/test' - - self.node_info.patch(patch) - - self.ironic.node.update.assert_called_once_with(self.uuid, - expected_patch) - self.assertIs(mock.sentinel.node, self.node_info.node()) - - def test_patch_path_with_leading_slash(self): - self.ironic.node.update.return_value = mock.sentinel.node - - patch = [{'op': 'add', 'path': '/driver_info/test', 'value': 42}] - - self.node_info.patch(patch) - - self.ironic.node.update.assert_called_once_with(self.uuid, patch) - self.assertIs(mock.sentinel.node, self.node_info.node()) - - def test_update_properties(self): - self.ironic.node.update.return_value = mock.sentinel.node - - self.node_info.update_properties(prop=42) - - patch = [{'op': 'add', 'path': '/properties/prop', 'value': 42}] - self.ironic.node.update.assert_called_once_with(self.uuid, patch) - self.assertIs(mock.sentinel.node, self.node_info.node()) - - def test_update_capabilities(self): - self.ironic.node.update.return_value = mock.sentinel.node - self.node.properties['capabilities'] = 
'foo:bar,x:y' - - self.node_info.update_capabilities(x=1, y=2) - - self.ironic.node.update.assert_called_once_with(self.uuid, mock.ANY) - patch = self.ironic.node.update.call_args[0][1] - new_caps = ir_utils.capabilities_to_dict(patch[0]['value']) - self.assertEqual({'foo': 'bar', 'x': '1', 'y': '2'}, new_caps) - - def test_replace_field(self): - self.ironic.node.update.return_value = mock.sentinel.node - self.node.extra['foo'] = 'bar' - - self.node_info.replace_field('/extra/foo', lambda v: v + '1') - - patch = [{'op': 'replace', 'path': '/extra/foo', 'value': 'bar1'}] - self.ironic.node.update.assert_called_once_with(self.uuid, patch) - self.assertIs(mock.sentinel.node, self.node_info.node()) - - def test_replace_field_not_found(self): - self.ironic.node.update.return_value = mock.sentinel.node - - self.assertRaises(KeyError, self.node_info.replace_field, - '/extra/foo', lambda v: v + '1') - - def test_replace_field_with_default(self): - self.ironic.node.update.return_value = mock.sentinel.node - - self.node_info.replace_field('/extra/foo', lambda v: v + [42], - default=[]) - - patch = [{'op': 'add', 'path': '/extra/foo', 'value': [42]}] - self.ironic.node.update.assert_called_once_with(self.uuid, patch) - self.assertIs(mock.sentinel.node, self.node_info.node()) - - def test_replace_field_same_value(self): - self.ironic.node.update.return_value = mock.sentinel.node - self.node.extra['foo'] = 'bar' - - self.node_info.replace_field('/extra/foo', lambda v: v) - self.assertFalse(self.ironic.node.update.called) - - def test_patch_port(self): - self.ironic.port.update.return_value = mock.sentinel.port - - self.node_info.patch_port(self.ports['mac0'], ['patch']) - - self.ironic.port.update.assert_called_once_with('0', ['patch']) - self.assertIs(mock.sentinel.port, - self.node_info.ports()['mac0']) - - def test_patch_port_by_mac(self): - self.ironic.port.update.return_value = mock.sentinel.port - - self.node_info.patch_port('mac0', ['patch']) - - self.ironic.port.update.assert_called_once_with('0', ['patch']) - self.assertIs(mock.sentinel.port, - self.node_info.ports()['mac0']) - - def test_delete_port(self): - self.node_info.delete_port(self.ports['mac0']) - - self.ironic.port.delete.assert_called_once_with('0') - self.assertEqual(['mac1'], list(self.node_info.ports())) - - def test_delete_port_by_mac(self): - self.node_info.delete_port('mac0') - - self.ironic.port.delete.assert_called_once_with('0') - self.assertEqual(['mac1'], list(self.node_info.ports())) - - @mock.patch.object(node_cache.LOG, 'warning', autospec=True) - def test_create_ports(self, mock_warn): - ports = [ - 'mac2', - {'mac': 'mac3', 'client_id': '42', 'pxe': False}, - {'mac': 'mac4', 'pxe': True} - ] - - self.node_info.create_ports(ports) - self.assertEqual({'mac0', 'mac1', 'mac2', 'mac3', 'mac4'}, - set(self.node_info.ports())) - - create_calls = [ - mock.call(node_uuid=self.uuid, address='mac2', extra={}, - pxe_enabled=True), - mock.call(node_uuid=self.uuid, address='mac3', - extra={'client-id': '42'}, pxe_enabled=False), - mock.call(node_uuid=self.uuid, address='mac4', extra={}, - pxe_enabled=True), - ] - self.assertEqual(create_calls, self.ironic.port.create.call_args_list) - # No conflicts - cache was not cleared - no calls to port.list - self.assertFalse(mock_warn.called) - self.assertFalse(self.ironic.port.list.called) - - @mock.patch.object(node_cache.LOG, 'info', autospec=True) - def test__create_port(self, mock_info): - uuid = uuidutils.generate_uuid() - address = 'mac1' - self.ironic.port.create.return_value = 
mock.Mock(uuid=uuid, - address=address) - - self.node_info._create_port(address, client_id='42') - - self.ironic.port.create.assert_called_once_with( - node_uuid=self.uuid, address='mac1', client_id='42') - mock_info.assert_called_once_with( - mock.ANY, {'uuid': uuid, 'mac': address, - 'attrs': {'client_id': '42'}}, - node_info=self.node_info) - - @mock.patch.object(node_cache.LOG, 'warning', autospec=True) - def test_create_ports_with_conflicts(self, mock_warn): - self.ironic.port.create.return_value = mock.Mock( - uuid='fake', address='mac') - - ports = [ - 'mac', - {'mac': 'mac0'}, - 'mac1', - {'mac': 'mac2', 'client_id': '42', 'pxe': False}, - ] - - self.node_info.create_ports(ports) - - create_calls = [ - mock.call(node_uuid=self.uuid, address='mac', extra={}, - pxe_enabled=True), - mock.call(node_uuid=self.uuid, address='mac2', - extra={'client-id': '42'}, pxe_enabled=False), - ] - self.assertEqual(create_calls, self.ironic.port.create.call_args_list) - mock_warn.assert_called_once_with(mock.ANY, ['mac0', 'mac1'], - node_info=self.node_info) - - -class TestNodeCacheGetByPath(test_base.NodeTest): - def setUp(self): - super(TestNodeCacheGetByPath, self).setUp() - self.node = mock.Mock(spec=['uuid', 'properties'], - properties={'answer': 42}, - uuid=self.uuid) - self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - node=self.node) - - def test_get_by_path(self): - self.assertEqual(self.uuid, self.node_info.get_by_path('/uuid')) - self.assertEqual(self.uuid, self.node_info.get_by_path('uuid')) - self.assertEqual(42, self.node_info.get_by_path('/properties/answer')) - self.assertRaises(KeyError, self.node_info.get_by_path, '/foo') - self.assertRaises(KeyError, self.node_info.get_by_path, '/extra/foo') - - -@mock.patch.object(node_cache, '_get_lock', autospec=True) -class TestLock(test_base.NodeTest): - def test_acquire(self, get_lock_mock): - node_info = node_cache.NodeInfo(self.uuid) - self.assertFalse(node_info._locked) - get_lock_mock.assert_called_once_with(self.uuid) - self.assertFalse(get_lock_mock.return_value.acquire.called) - - self.assertTrue(node_info.acquire_lock()) - self.assertTrue(node_info._locked) - self.assertTrue(node_info.acquire_lock()) - self.assertTrue(node_info._locked) - get_lock_mock.return_value.acquire.assert_called_once_with(True) - - def test_release(self, get_lock_mock): - node_info = node_cache.NodeInfo(self.uuid) - node_info.acquire_lock() - self.assertTrue(node_info._locked) - node_info.release_lock() - self.assertFalse(node_info._locked) - node_info.release_lock() - self.assertFalse(node_info._locked) - get_lock_mock.return_value.acquire.assert_called_once_with(True) - get_lock_mock.return_value.release.assert_called_once_with() - - def test_acquire_non_blocking(self, get_lock_mock): - node_info = node_cache.NodeInfo(self.uuid) - self.assertFalse(node_info._locked) - get_lock_mock.return_value.acquire.side_effect = iter([False, True]) - - self.assertFalse(node_info.acquire_lock(blocking=False)) - self.assertFalse(node_info._locked) - self.assertTrue(node_info.acquire_lock(blocking=False)) - self.assertTrue(node_info._locked) - self.assertTrue(node_info.acquire_lock(blocking=False)) - self.assertTrue(node_info._locked) - get_lock_mock.return_value.acquire.assert_called_with(False) - self.assertEqual(2, get_lock_mock.return_value.acquire.call_count) - - -@mock.patch.object(node_cache, 'add_node', autospec=True) -@mock.patch.object(ir_utils, 'get_client', autospec=True) -class TestNodeCreate(test_base.NodeTest): - def setUp(self): - 
super(TestNodeCreate, self).setUp()
-        self.mock_client = mock.Mock()
-
-    def test_default_create(self, mock_get_client, mock_add_node):
-        mock_get_client.return_value = self.mock_client
-        self.mock_client.node.create.return_value = self.node
-
-        node_cache.create_node('fake')
-
-        self.mock_client.node.create.assert_called_once_with(driver='fake')
-        mock_add_node.assert_called_once_with(
-            self.node.uuid,
-            istate.States.enrolling,
-            ironic=self.mock_client)
-
-    def test_create_with_args(self, mock_get_client, mock_add_node):
-        mock_get_client.return_value = self.mock_client
-        self.mock_client.node.create.return_value = self.node
-
-        node_cache.create_node('agent_ipmitool', ironic=self.mock_client)
-
-        self.assertFalse(mock_get_client.called)
-        self.mock_client.node.create.assert_called_once_with(
-            driver='agent_ipmitool')
-        mock_add_node.assert_called_once_with(
-            self.node.uuid,
-            istate.States.enrolling,
-            ironic=self.mock_client)
-
-    def test_create_client_error(self, mock_get_client, mock_add_node):
-        mock_get_client.return_value = self.mock_client
-        self.mock_client.node.create.side_effect = (
-            node_cache.exceptions.InvalidAttribute)
-
-        node_cache.create_node('fake')
-
-        mock_get_client.assert_called_once_with()
-        self.mock_client.node.create.assert_called_once_with(driver='fake')
-        self.assertFalse(mock_add_node.called)
-
-
-class TestNodeCacheListNode(test_base.NodeTest):
-    def setUp(self):
-        super(TestNodeCacheListNode, self).setUp()
-        self.uuid2 = uuidutils.generate_uuid()
-        session = db.get_writer_session()
-        with session.begin():
-            db.Node(uuid=self.uuid,
-                    started_at=datetime.datetime(1, 1, 2)).save(session)
-            db.Node(uuid=self.uuid2, started_at=datetime.datetime(1, 1, 1),
-                    finished_at=datetime.datetime(1, 1, 3)).save(session)
-
-        # note that node(self.uuid).started_at > node(self.uuid2).started_at,
-        # and node_cache.get_node_list orders results strictly, newest first
-
-    def test_list_node(self):
-        nodes = node_cache.get_node_list()
-
-        self.assertEqual([self.uuid, self.uuid2],
-                         [node.uuid for node in nodes])
-
-    def test_list_node_limit(self):
-        nodes = node_cache.get_node_list(limit=1)
-        self.assertEqual([self.uuid], [node.uuid for node in nodes])
-
-    def test_list_node_marker(self):
-        # get nodes that started after node(self.uuid)
-        nodes = node_cache.get_node_list(marker=self.uuid)
-        self.assertEqual([self.uuid2], [node.uuid for node in nodes])
-
-    def test_list_node_wrong_marker(self):
-        self.assertRaises(utils.Error, node_cache.get_node_list,
-                          marker='foo-bar')
-
-
-class TestNodeInfoVersionId(test_base.NodeStateTest):
-    def test_get(self):
-        self.node_info._version_id = None
-        self.assertEqual(self.db_node.version_id, self.node_info.version_id)
-
-    def test_get_missing_uuid(self):
-        self.node_info.uuid = 'foo'
-        self.node_info._version_id = None
-
-        def func():
-            return self.node_info.version_id
-
-        six.assertRaisesRegex(self, utils.NotFoundInCacheError, '.*', func)
-
-    def test_set(self):
-        with db.ensure_transaction() as session:
-            self.node_info._set_version_id(uuidutils.generate_uuid(),
-                                           session)
-        row = db.model_query(db.Node).get(self.node_info.uuid)
-        self.assertEqual(self.node_info.version_id, row.version_id)
-
-    def test_set_race(self):
-        with db.ensure_transaction() as session:
-            row = db.model_query(db.Node, session=session).get(
-                self.node_info.uuid)
-            row.update({'version_id': uuidutils.generate_uuid()})
-            row.save(session)
-
-        six.assertRaisesRegex(self, utils.NodeStateRaceCondition,
-                              'Node state mismatch', self.node_info._set_state,
-
istate.States.finished) - - -class TestNodeInfoState(test_base.NodeStateTest): - def test_get(self): - self.node_info._state = None - self.assertEqual(self.db_node.state, self.node_info.state) - - def test_set(self): - self.node_info._set_state(istate.States.finished) - row = db.model_query(db.Node).get(self.node_info.uuid) - self.assertEqual(self.node_info.state, row.state) - - def test_set_invalid_state(self): - six.assertRaisesRegex(self, oslo_db.exception.DBError, - 'constraint failed', - self.node_info._set_state, 'foo') - - def test_commit(self): - current_time = timeutils.utcnow() - self.node_info.started_at = self.node_info.finished_at = current_time - self.node_info.error = "Boo!" - self.node_info.commit() - - row = db.model_query(db.Node).get(self.node_info.uuid) - self.assertEqual(self.node_info.started_at, row.started_at) - self.assertEqual(self.node_info.finished_at, row.finished_at) - self.assertEqual(self.node_info.error, row.error) - - -class TestNodeInfoStateFsm(test_base.NodeStateTest): - def test__get_fsm(self): - self.node_info._fsm = None - fsm = self.node_info._get_fsm() - self.assertEqual(self.node_info.state, fsm.current_state) - - def test__get_fsm_invalid_state(self): - self.node_info._fsm = None - self.node_info._state = 'foo' - six.assertRaisesRegex(self, automaton.exceptions.NotFound, - '.*undefined state.*', - self.node_info._get_fsm) - - def test__fsm_ctx_set_state(self): - with self.node_info._fsm_ctx() as fsm: - fsm.process_event(istate.Events.wait) - self.assertEqual(self.node_info.state, istate.States.starting) - self.assertEqual(self.node_info.state, istate.States.waiting) - - def test__fsm_ctx_set_same_state(self): - version_id = self.node_info.version_id - with self.node_info._fsm_ctx() as fsm: - fsm.initialize(self.node_info.state) - self.assertEqual(version_id, self.node_info.version_id) - - def test__fsm_ctx_illegal_event(self): - with self.node_info._fsm_ctx() as fsm: - six.assertRaisesRegex(self, automaton.exceptions.NotFound, - 'no defined transition', fsm.process_event, - istate.Events.finish) - self.assertEqual(self.node_info.state, istate.States.starting) - - def test__fsm_ctx_generic_exception(self): - class CustomException(Exception): - pass - - def func(fsm): - fsm.process_event(istate.Events.wait) - raise CustomException('Oops') - - with self.node_info._fsm_ctx() as fsm: - self.assertRaises(CustomException, func, fsm) - self.assertEqual(self.node_info.state, istate.States.waiting) - - def test_fsm_event(self): - self.node_info.fsm_event(istate.Events.wait) - self.assertEqual(self.node_info.state, istate.States.waiting) - - def test_fsm_illegal_event(self): - six.assertRaisesRegex(self, utils.NodeStateInvalidEvent, - 'no defined transition', - self.node_info.fsm_event, istate.Events.finish) - self.assertEqual(self.node_info.state, istate.States.starting) - - def test_fsm_illegal_strict_event(self): - six.assertRaisesRegex(self, utils.NodeStateInvalidEvent, - 'no defined transition', - self.node_info.fsm_event, - istate.Events.finish, strict=True) - self.assertIn('no defined transition', self.node_info.error) - self.assertEqual(self.node_info.state, istate.States.error) - - -class TestFsmEvent(test_base.NodeStateTest): - def test_event_before(self): - @node_cache.fsm_event_before(istate.Events.wait) - def function(node_info): - self.assertEqual(node_info.state, istate.States.waiting) - node_info.fsm_event(istate.Events.process) - - function(self.node_info) - self.assertEqual(self.node_info.state, istate.States.processing) - - def 
test_event_after(self): - @node_cache.fsm_event_after(istate.Events.process) - def function(node_info): - node_info.fsm_event(istate.Events.wait) - self.assertEqual(node_info.state, istate.States.waiting) - - function(self.node_info) - self.assertEqual(self.node_info.state, istate.States.processing) - - @mock.patch.object(node_cache, 'LOG', autospec=True) - def test_triggers_fsm_error_transition_no_errors(self, log_mock): - class CustomException(Exception): - pass - - @node_cache.triggers_fsm_error_transition(no_errors=(CustomException,)) - def function(node_info): - self.assertEqual(node_info.state, istate.States.starting) - raise CustomException('Oops') - - function(self.node_info) - log_msg = ('Not processing error event for the exception: ' - '%(exc)s raised by %(func)s') - log_mock.debug.assert_called_with(log_msg, mock.ANY, - node_info=mock.ANY) - self.assertEqual(self.node_info.state, istate.States.starting) - - def test_triggers_fsm_error_transition_no_errors_empty(self): - class CustomException(Exception): - pass - - @node_cache.triggers_fsm_error_transition(no_errors=()) - def function(node_info): - self.assertEqual(node_info.state, istate.States.starting) - raise CustomException('Oops!') - - # assert an error event was performed - self.assertRaises(CustomException, function, self.node_info) - self.assertEqual(self.node_info.state, istate.States.error) - - def test_triggers_fsm_error_transition_no_errors_with_error(self): - class CustomException(Exception): - pass - - @node_cache.triggers_fsm_error_transition(errors=(CustomException,)) - def function(node_info): - self.assertEqual(node_info.state, istate.States.starting) - raise CustomException('Oops') - - # assert a generic error triggers an error event - self.assertRaises(CustomException, function, self.node_info) - self.assertEqual(self.node_info.state, istate.States.error) - - def test_triggers_fsm_error_transition_erros_masked(self): - class CustomException(Exception): - pass - - @node_cache.triggers_fsm_error_transition(errors=()) - def function(node_info): - self.assertEqual(node_info.state, istate.States.starting) - raise CustomException('Oops') - - # assert no error event was triggered - self.assertRaises(CustomException, function, self.node_info) - self.assertEqual(self.node_info.state, istate.States.starting) - - def test_unlock(self): - @node_cache.release_lock - def func(node_info): - self.assertTrue(node_info._locked) - - self.node_info.acquire_lock(blocking=True) - with mock.patch.object(self.node_info, 'release_lock', - autospec=True) as release_lock_mock: - func(self.node_info) - release_lock_mock.assert_called_once_with() - - def test_unlock_unlocked(self): - @node_cache.release_lock - def func(node_info): - self.assertFalse(node_info._locked) - - self.node_info.release_lock() - with mock.patch.object(self.node_info, 'release_lock', - autospec=True) as release_lock_mock: - func(self.node_info) - self.assertEqual(0, release_lock_mock.call_count) - - @mock.patch.object(node_cache, 'triggers_fsm_error_transition', - autospec=True) - @mock.patch.object(node_cache, 'fsm_event_after', autospec=True) - def test_fsm_transition(self, fsm_event_after_mock, trigger_mock): - @node_cache.fsm_transition(istate.Events.finish) - def func(): - pass - fsm_event_after_mock.assert_called_once_with(istate.Events.finish) - trigger_mock.assert_called_once_with() - - @mock.patch.object(node_cache, 'triggers_fsm_error_transition', - autospec=True) - @mock.patch.object(node_cache, 'fsm_event_before', autospec=True) - def 
test_nonreentrant_fsm_transition(self, fsm_event_before_mock, - trigger_mock): - @node_cache.fsm_transition(istate.Events.abort, reentrant=False) - def func(): - pass - fsm_event_before_mock.assert_called_once_with(istate.Events.abort, - strict=True) - trigger_mock.assert_called_once_with() - - -@mock.patch.object(node_cache, 'add_node', autospec=True) -@mock.patch.object(node_cache, 'NodeInfo', autospec=True) -class TestStartIntrospection(test_base.NodeTest): - def prepare_mocks(fn): - @six.wraps(fn) - def inner(self, NodeMock, *args): - method_mock = mock.Mock() - NodeMock.return_value = self.node_info - self.node_info.fsm_event = method_mock - fn(self, method_mock, *args) - method_mock.assert_called_once_with(istate.Events.start) - return inner - - @prepare_mocks - def test_node_in_db_ok_state(self, fsm_event_mock, add_node_mock): - def side_effect(*args): - self.node_info._state = 'foo' - - fsm_event_mock.side_effect = side_effect - node_cache.start_introspection(self.node.uuid) - add_node_mock.assert_called_once_with(self.node_info.uuid, 'foo') - - @prepare_mocks - def test_node_in_db_invalid_state(self, fsm_event_mock, add_node_mock): - fsm_event_mock.side_effect = utils.NodeStateInvalidEvent('Oops!') - six.assertRaisesRegex(self, utils.NodeStateInvalidEvent, 'Oops!', - node_cache.start_introspection, - self.node_info.uuid) - self.assertFalse(add_node_mock.called) - - @prepare_mocks - def test_node_in_db_race_condition(self, fsm_event_mock, add_node_mock): - fsm_event_mock.side_effect = utils.NodeStateRaceCondition() - six.assertRaisesRegex(self, utils.NodeStateRaceCondition, '.*', - node_cache.start_introspection, - self.node_info.uuid) - self.assertFalse(add_node_mock.called) - - @prepare_mocks - def test_error_fsm_event(self, fsm_event_mock, add_node_mock): - fsm_event_mock.side_effect = utils.Error('Oops!') - six.assertRaisesRegex(self, utils.Error, 'Oops!', - node_cache.start_introspection, - self.node_info.uuid) - self.assertFalse(add_node_mock.called) - - @prepare_mocks - def test_node_not_in_db(self, fsm_event_mock, add_node_mock): - fsm_event_mock.side_effect = utils.NotFoundInCacheError('Oops!') - node_cache.start_introspection(self.node_info.uuid) - add_node_mock.assert_called_once_with(self.node_info.uuid, - istate.States.starting) - - @prepare_mocks - def test_custom_exc_fsm_event(self, fsm_event_mock, add_node_mock): - class CustomError(Exception): - pass - - fsm_event_mock.side_effect = CustomError('Oops!') - six.assertRaisesRegex(self, CustomError, 'Oops!', - node_cache.start_introspection, - self.node_info.uuid) - self.assertFalse(add_node_mock.called) diff --git a/ironic_inspector/test/unit/test_plugins_base.py b/ironic_inspector/test/unit/test_plugins_base.py deleted file mode 100644 index 0eb5a59..0000000 --- a/ironic_inspector/test/unit/test_plugins_base.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-import collections
-
-import mock
-
-from ironic_inspector.plugins import base
-from ironic_inspector.test import base as test_base
-
-
-class WithValidation(base.WithValidation):
-    REQUIRED_PARAMS = {'x'}
-    OPTIONAL_PARAMS = {'y', 'z'}
-
-
-class TestWithValidation(test_base.BaseTest):
-    def setUp(self):
-        super(TestWithValidation, self).setUp()
-        self.test = WithValidation()
-
-    def test_ok(self):
-        for x in (1, 0, '', False, True):
-            self.test.validate({'x': x})
-        self.test.validate({'x': 'x', 'y': 42})
-        self.test.validate({'x': 'x', 'y': 42, 'z': False})
-
-    def test_required_missing(self):
-        err_re = 'missing required parameter\(s\): x'
-        self.assertRaisesRegex(ValueError, err_re, self.test.validate, {})
-        self.assertRaisesRegex(ValueError, err_re, self.test.validate,
-                               {'x': None})
-        self.assertRaisesRegex(ValueError, err_re, self.test.validate,
-                               {'y': 1, 'z': 2})
-
-    def test_unexpected(self):
-        self.assertRaisesRegex(ValueError, 'unexpected parameter\(s\): foo',
-                               self.test.validate, {'foo': 'bar', 'x': 42})
-
-
-fake_ext = collections.namedtuple('Extension', ['name', 'obj'])
-
-
-@mock.patch.object(base, 'processing_hooks_manager', autospec=True)
-class TestValidateProcessingHooks(test_base.BaseTest):
-    def test_ok(self, mock_mgr):
-        mock_mgr.return_value = [
-            fake_ext(name='1', obj=mock.Mock(dependencies=[])),
-            fake_ext(name='2', obj=mock.Mock(dependencies=['1'])),
-            fake_ext(name='3', obj=mock.Mock(dependencies=['2', '1'])),
-        ]
-
-        hooks = base.validate_processing_hooks()
-        self.assertEqual(mock_mgr.return_value, hooks)
-        mock_mgr.assert_called_once_with()
-
-    def test_broken_dependencies(self, mock_mgr):
-        mock_mgr.return_value = [
-            fake_ext(name='2', obj=mock.Mock(dependencies=['1'])),
-            fake_ext(name='3', obj=mock.Mock(dependencies=['2', '1'])),
-        ]
-
-        self.assertRaisesRegex(RuntimeError, "missing: 1",
-                               base.validate_processing_hooks)
-
-    def test_self_dependency(self, mock_mgr):
-        mock_mgr.return_value = [
-            fake_ext(name='1', obj=mock.Mock(dependencies=['1'])),
-        ]
-
-        self.assertRaisesRegex(RuntimeError, "missing: 1",
-                               base.validate_processing_hooks)
-
-    def test_wrong_dependencies_order(self, mock_mgr):
-        mock_mgr.return_value = [
-            fake_ext(name='2', obj=mock.Mock(dependencies=['1'])),
-            fake_ext(name='1', obj=mock.Mock(dependencies=[])),
-            fake_ext(name='3', obj=mock.Mock(dependencies=['2', '1'])),
-        ]
-
-        self.assertRaisesRegex(RuntimeError, "missing: 1",
-                               base.validate_processing_hooks)
diff --git a/ironic_inspector/test/unit/test_plugins_capabilities.py b/ironic_inspector/test/unit/test_plugins_capabilities.py
deleted file mode 100644
index 41eafaf..0000000
--- a/ironic_inspector/test/unit/test_plugins_capabilities.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import mock - -from oslo_config import cfg - -from ironic_inspector import node_cache -from ironic_inspector.plugins import base -from ironic_inspector.plugins import capabilities -from ironic_inspector.test import base as test_base - - -CONF = cfg.CONF - - -@mock.patch.object(node_cache.NodeInfo, 'update_capabilities', autospec=True) -class TestCapabilitiesHook(test_base.NodeTest): - hook = capabilities.CapabilitiesHook() - - def test_loadable_by_name(self, mock_caps): - base.CONF.set_override('processing_hooks', 'capabilities', - 'processing') - ext = base.processing_hooks_manager()['capabilities'] - self.assertIsInstance(ext.obj, capabilities.CapabilitiesHook) - - def test_no_data(self, mock_caps): - self.hook.before_update(self.data, self.node_info) - self.assertFalse(mock_caps.called) - - def test_boot_mode(self, mock_caps): - CONF.set_override('boot_mode', True, 'capabilities') - self.inventory['boot'] = {'current_boot_mode': 'uefi'} - - self.hook.before_update(self.data, self.node_info) - mock_caps.assert_called_once_with(self.node_info, boot_mode='uefi') - - def test_boot_mode_disabled(self, mock_caps): - self.inventory['boot'] = {'current_boot_mode': 'uefi'} - - self.hook.before_update(self.data, self.node_info) - self.assertFalse(mock_caps.called) - - def test_cpu_flags(self, mock_caps): - self.inventory['cpu']['flags'] = ['fpu', 'vmx', 'aes', 'pse', 'smx'] - - self.hook.before_update(self.data, self.node_info) - mock_caps.assert_called_once_with(self.node_info, - cpu_vt='true', - cpu_hugepages='true', - cpu_txt='true', - cpu_aes='true') - - def test_cpu_no_known_flags(self, mock_caps): - self.inventory['cpu']['flags'] = ['fpu'] - - self.hook.before_update(self.data, self.node_info) - self.assertFalse(mock_caps.called) - - def test_cpu_flags_custom(self, mock_caps): - CONF.set_override('cpu_flags', {'fpu': 'new_cap'}, - 'capabilities') - self.inventory['cpu']['flags'] = ['fpu', 'vmx', 'aes', 'pse'] - - self.hook.before_update(self.data, self.node_info) - mock_caps.assert_called_once_with(self.node_info, - new_cap='true') diff --git a/ironic_inspector/test/unit/test_plugins_discovery.py b/ironic_inspector/test/unit/test_plugins_discovery.py deleted file mode 100644 index 656ba07..0000000 --- a/ironic_inspector/test/unit/test_plugins_discovery.py +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import mock - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import node_cache -from ironic_inspector.plugins import discovery -from ironic_inspector.test import base as test_base -from ironic_inspector import utils - - -def copy_call_args(mock_arg): - new_mock = mock.Mock() - - def side_effect(*args, **kwargs): - args = copy.deepcopy(args) - kwargs = copy.deepcopy(kwargs) - new_mock(*args, **kwargs) - return mock.DEFAULT - mock_arg.side_effect = side_effect - return new_mock - - -class TestEnrollNodeNotFoundHook(test_base.NodeTest): - def setUp(self): - super(TestEnrollNodeNotFoundHook, self).setUp() - self.ironic = mock.MagicMock() - - @mock.patch.object(node_cache, 'create_node', autospec=True) - @mock.patch.object(ir_utils, 'get_client', autospec=True) - @mock.patch.object(discovery, '_check_existing_nodes', autospec=True) - def test_enroll_default(self, mock_check_existing, mock_client, - mock_create_node): - mock_client.return_value = self.ironic - introspection_data = {'test': 'test'} - - discovery.enroll_node_not_found_hook(introspection_data) - - mock_create_node.assert_called_once_with('fake', ironic=self.ironic, - driver_info={}) - mock_check_existing.assert_called_once_with( - introspection_data, {}, self.ironic) - - @mock.patch.object(node_cache, 'create_node', autospec=True) - @mock.patch.object(ir_utils, 'get_client', autospec=True) - @mock.patch.object(discovery, '_check_existing_nodes', autospec=True) - def test_enroll_with_ipmi_address(self, mock_check_existing, mock_client, - mock_create_node): - mock_client.return_value = self.ironic - introspection_data = {'ipmi_address': '1.2.3.4'} - expected_data = introspection_data.copy() - mock_check_existing = copy_call_args(mock_check_existing) - - discovery.enroll_node_not_found_hook(introspection_data) - - mock_create_node.assert_called_once_with( - 'fake', ironic=self.ironic, - driver_info={'ipmi_address': '1.2.3.4'}) - mock_check_existing.assert_called_once_with( - expected_data, {'ipmi_address': '1.2.3.4'}, self.ironic) - self.assertEqual({'ipmi_address': '1.2.3.4', 'auto_discovered': True}, - introspection_data) - - @mock.patch.object(node_cache, 'create_node', autospec=True) - @mock.patch.object(ir_utils, 'get_client', autospec=True) - @mock.patch.object(discovery, '_check_existing_nodes', autospec=True) - def test_enroll_with_non_default_driver(self, mock_check_existing, - mock_client, mock_create_node): - mock_client.return_value = self.ironic - discovery.CONF.set_override('enroll_node_driver', 'fake2', - 'discovery') - mock_check_existing = copy_call_args(mock_check_existing) - introspection_data = {} - - discovery.enroll_node_not_found_hook(introspection_data) - - mock_create_node.assert_called_once_with('fake2', ironic=self.ironic, - driver_info={}) - mock_check_existing.assert_called_once_with( - {}, {}, self.ironic) - self.assertEqual({'auto_discovered': True}, introspection_data) - - def test__check_existing_nodes_new_mac(self): - self.ironic.port.list.return_value = [] - introspection_data = {'macs': self.macs} - node_driver_info = {} - - discovery._check_existing_nodes( - introspection_data, node_driver_info, self.ironic) - - def test__check_existing_nodes_existing_mac(self): - self.ironic.port.list.return_value = [mock.MagicMock( - address=self.macs[0], uuid='fake_port')] - introspection_data = { - 'all_interfaces': {'eth%d' % i: {'mac': m} - for i, m in enumerate(self.macs)} - } - node_driver_info = {} - - self.assertRaises(utils.Error, - 
discovery._check_existing_nodes,
-                          introspection_data, node_driver_info, self.ironic)
-
-    def test__check_existing_nodes_new_node(self):
-        self.ironic.node.list.return_value = [mock.MagicMock(
-            driver_info={'ipmi_address': '1.2.4.3'}, uuid='fake_node')]
-        introspection_data = {}
-        node_driver_info = {'ipmi_address': self.bmc_address}
-
-        discovery._check_existing_nodes(introspection_data, node_driver_info,
-                                        self.ironic)
-
-    def test__check_existing_nodes_existing_node(self):
-        self.ironic.node.list.return_value = [mock.MagicMock(
-            driver_info={'ipmi_address': self.bmc_address}, uuid='fake_node')]
-        introspection_data = {}
-        node_driver_info = {'ipmi_address': self.bmc_address}
-
-        self.assertRaises(utils.Error, discovery._check_existing_nodes,
-                          introspection_data, node_driver_info, self.ironic)
diff --git a/ironic_inspector/test/unit/test_plugins_extra_hardware.py b/ironic_inspector/test/unit/test_plugins_extra_hardware.py
deleted file mode 100644
index d5cbe44..0000000
--- a/ironic_inspector/test/unit/test_plugins_extra_hardware.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-
-import mock
-
-from ironic_inspector import node_cache
-from ironic_inspector.plugins import extra_hardware
-from ironic_inspector.test import base as test_base
-
-
-@mock.patch.object(extra_hardware.swift, 'SwiftAPI', autospec=True)
-@mock.patch.object(node_cache.NodeInfo, 'patch')
-class TestExtraHardware(test_base.NodeTest):
-    hook = extra_hardware.ExtraHardwareHook()
-
-    def test_data_received(self, patch_mock, swift_mock):
-        introspection_data = {
-            'data': [['memory', 'total', 'size', '4294967296'],
-                     ['cpu', 'physical', 'number', '1'],
-                     ['cpu', 'logical', 'number', '1']]}
-        data = json.dumps(introspection_data['data'])
-        self.hook.before_processing(introspection_data)
-        self.hook.before_update(introspection_data, self.node_info)
-
-        swift_conn = swift_mock.return_value
-        name = 'extra_hardware-%s' % self.uuid
-        swift_conn.create_object.assert_called_once_with(name, data)
-        patch_mock.assert_called_once_with(
-            [{'op': 'add', 'path': '/extra/hardware_swift_object',
-              'value': name}])
-
-        expected = {
-            'memory': {
-                'total': {
-                    'size': 4294967296
-                }
-            },
-            'cpu': {
-                'physical': {
-                    'number': 1
-                },
-                'logical': {
-                    'number': 1
-                },
-            }
-        }
-
-        self.assertEqual(expected, introspection_data['extra'])
-
-    def test_data_not_in_edeploy_format(self, patch_mock, swift_mock):
-        introspection_data = {
-            'data': [['memory', 'total', 'size', '4294967296'],
-                     ['cpu', 'physical', 'number', '1'],
-                     {'interface': 'eth1'}]}
-        data = json.dumps(introspection_data['data'])
-        self.hook.before_processing(introspection_data)
-        self.hook.before_update(introspection_data, self.node_info)
-
-        swift_conn = swift_mock.return_value
-        name = 'extra_hardware-%s' % self.uuid
-        swift_conn.create_object.assert_called_once_with(name, data)
-        patch_mock.assert_called_once_with(
-            [{'op': 'add', 'path': '/extra/hardware_swift_object',
-              'value': name}])
-
-        self.assertNotIn('data', introspection_data)
-
-    def test_no_data_received(self, patch_mock, swift_mock):
-        introspection_data = {'cats': 'meow'}
-        swift_conn = swift_mock.return_value
-        self.hook.before_processing(introspection_data)
-        self.hook.before_update(introspection_data, self.node_info)
-        self.assertFalse(patch_mock.called)
-        self.assertFalse(swift_conn.create_object.called)
-
-    def test__convert_edeploy_data(self, patch_mock, swift_mock):
-        introspection_data = [['Sheldon', 'J.', 'Plankton', '123'],
-                              ['Larry', 'the', 'Lobster', None],
-                              ['Eugene', 'H.', 'Krabs', 'The cashier']]
-
-        data = self.hook._convert_edeploy_data(introspection_data)
-        expected_data = {'Sheldon': {'J.': {'Plankton': 123}},
-                         'Larry': {'the': {'Lobster': None}},
-                         'Eugene': {'H.': {'Krabs': 'The cashier'}}}
-        self.assertEqual(expected_data, data)
diff --git a/ironic_inspector/test/unit/test_plugins_lldp_basic.py b/ironic_inspector/test/unit/test_plugins_lldp_basic.py
deleted file mode 100644
index ce58932..0000000
--- a/ironic_inspector/test/unit/test_plugins_lldp_basic.py
+++ /dev/null
@@ -1,329 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from ironic_inspector.common import lldp_parsers as nv
-from ironic_inspector.plugins import lldp_basic
-from ironic_inspector.test import base as test_base
-
-
-class TestLLDPBasicProcessingHook(test_base.NodeTest):
-    hook = lldp_basic.LLDPBasicProcessingHook()
-
-    def setUp(self):
-        super(TestLLDPBasicProcessingHook, self).setUp()
-        self.data = {
-            'inventory': {
-                'interfaces': [{
-                    'name': 'em1',
-                }],
-                'cpu': 1,
-                'disks': 1,
-                'memory': 1
-            },
-            'all_interfaces':
-            {
-                'em1': {'mac': self.macs[0], 'ip': self.ips[0]}
-            }
-        }
-
-        self.expected = {"em1": {"ip": self.ips[0], "mac": self.macs[0]}}
-
-    def test_all_valid_data(self):
-
-        self.data['inventory']['interfaces'] = [{
-            'name': 'em1',
-            'lldp': [
-                [1, "04112233aabbcc"],  # ChassisId
-                [2, "07373334"],  # PortId
-                [3, "003c"],  # TTL
-                [4, "686f737430322e6c61622e656e6720706f7274203320"
-                    "28426f6e6429"],  # PortDesc
-                [5, "737730312d646973742d31622d623132"],  # SysName
-                [6, "4e6574776f726b732c20496e632e20353530302c2076657273696f"
-                    "6e203132204275696c6420646174653a20323031342d30332d31332030"
-                    "383a33383a33302055544320"],  # SysDesc
-                [7, "00140014"],  # SysCapabilities
-                [8, "0501c000020f020000000000"],  # MgmtAddress
-                [8, "110220010db885a3000000008a2e03707334020000000000"],
-                [8, "0706aa11bb22cc3302000003e900"],  # MgmtAddress
-                [127, "00120f01036c110010"],  # dot3 MacPhyConfigStatus
-                [127, "00120f030300000002"],  # dot3 LinkAggregation
-                [127, "00120f0405ea"],  # dot3 MTU
-                [127, "0080c2010066"],  # dot1 PortVlan
-                [127, "0080c20206000a"],  # dot1 PortProtocolVlanId
-                [127, "0080c202060014"],  # dot1 PortProtocolVlanId
-                [127, "0080c204080026424203000000"],  # dot1 ProtocolIdentity
-                [127, "0080c203006507766c616e313031"],  # dot1 VlanName
-                [127, "0080c203006607766c616e313032"],  # dot1 VlanName
-                [127, "0080c203006807766c616e313034"],  # dot1 VlanName
-                [127, "0080c2060058"],  # dot1 MgmtVID
-                [0, ""]]
-        }]
-
-        expected = {
-            nv.LLDP_CAP_ENABLED_NM: ['Bridge', 'Router'],
-            nv.LLDP_CAP_SUPPORT_NM: ['Bridge', 'Router'],
-            nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:cc",
-            nv.LLDP_MGMT_ADDRESSES_NM: ['192.0.2.15',
-                                        '2001:db8:85a3::8a2e:370:7334',
-                                        'aa:11:bb:22:cc:33'],
-            nv.LLDP_PORT_LINK_AUTONEG_ENABLED_NM: True,
-            nv.LLDP_PORT_DESC_NM: 'host02.lab.eng port 3 (Bond)',
-            nv.LLDP_PORT_ID_NM: '734',
-            nv.LLDP_PORT_LINK_AGG_ENABLED_NM: True,
-            nv.LLDP_PORT_LINK_AGG_ID_NM: 2,
-            nv.LLDP_PORT_LINK_AGG_SUPPORT_NM: True,
-            nv.LLDP_PORT_MGMT_VLANID_NM: 88,
-            nv.LLDP_PORT_MAU_TYPE_NM: '100BASE-TX full duplex',
-            nv.LLDP_MTU_NM: 1514,
-            nv.LLDP_PORT_CAPABILITIES_NM: ['1000BASE-T fdx',
-                                           '100BASE-TX fdx',
-                                           '100BASE-TX hdx',
-                                           '10BASE-T fdx',
-                                           '10BASE-T hdx',
-                                           'Asym and Sym PAUSE fdx'],
-            nv.LLDP_PORT_PROT_VLAN_ENABLED_NM: True,
-            nv.LLDP_PORT_PROT_VLANIDS_NM: [10, 20],
-            nv.LLDP_PORT_PROT_VLAN_SUPPORT_NM: True,
-            nv.LLDP_PORT_VLANID_NM: 102,
-            nv.LLDP_PORT_VLANS_NM: [{'id': 101, 'name': 'vlan101'},
-                                    {'id': 102, 'name': 'vlan102'},
-                                    {'id': 104, 'name': 'vlan104'}],
-            nv.LLDP_PROTOCOL_IDENTITIES_NM: ['0026424203000000'],
-            nv.LLDP_SYS_DESC_NM: 'Networks, Inc. 5500, version 12'
-                                 ' Build date: 2014-03-13 08:38:30 UTC ',
-            nv.LLDP_SYS_NAME_NM: 'sw01-dist-1b-b12'
-        }
-
-        self.hook.before_update(self.data, self.node_info)
-
-        actual_all_int = self.data['all_interfaces']
-        actual = actual_all_int['em1']['lldp_processed']
-
-        for name, expected_value in expected.items():
-            if name == nv.LLDP_PORT_VLANS_NM:
-                for d1, d2 in zip(expected_value, actual[name]):
-                    for key, value in d1.items():
-                        self.assertEqual(d2[key], value)
-            else:
-                self.assertEqual(expected_value, actual[name])
-
-    def test_multiple_interfaces(self):
-        self.data = {
-            'inventory': {
-                'interfaces': [
-                    {'name': 'em1',
-                     'lldp': [
-                         [1, "04112233aabbcc"],
-                         [2, "07373334"],
-                         [3, "003c"]]},
-                    {'name': 'em2',
-                     'lldp': [
-                         [1, "04112233aabbdd"],
-                         [2, "07373838"],
-                         [3, "003c"]]},
-                    {'name': 'em3',
-                     'lldp': [
-                         [1, "04112233aabbee"],
-                         [2, "07373939"],
-                         [3, "003c"]]}],
-                'cpu': 1,
-                'disks': 1,
-                'memory': 1
-            },
-            'all_interfaces':
-            {
-                'em1': {'mac': self.macs[0], 'ip': self.ips[0]},
-                'em2': {'mac': self.macs[0], 'ip': self.ips[0]},
-                'em3': {'mac': self.macs[0], 'ip': self.ips[0]}
-            }
-        }
-
-        expected = {"em1": {"ip": self.ips[0], "mac": self.macs[0],
-                            "lldp_processed": {
-                                nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:cc",
-                                nv.LLDP_PORT_ID_NM: "734"}},
-                    "em2": {"ip": self.ips[0], "mac": self.macs[0],
-                            "lldp_processed": {
-                                nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:dd",
-                                nv.LLDP_PORT_ID_NM: "788"}},
-                    "em3": {"ip": self.ips[0], "mac": self.macs[0],
-                            "lldp_processed": {
-                                nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:ee",
-                                nv.LLDP_PORT_ID_NM: "799"}}}
-
-        self.hook.before_update(self.data, self.node_info)
-        self.assertEqual(expected, self.data['all_interfaces'])
-
-    def test_chassis_ids(self):
-        # Test IPv4 address
-        self.data['inventory']['interfaces'] = [{
-            'name': 'em1',
-            'lldp': [
-                [1, "0501c000020f"],
-            ]}]
-
-        self.expected['em1']['lldp_processed'] = {
-            nv.LLDP_CHASSIS_ID_NM: "192.0.2.15"
-        }
-        self.hook.before_update(self.data, self.node_info)
-        self.assertEqual(self.expected, self.data['all_interfaces'])
-
-        # Test name
-        self.data['inventory']['interfaces'] = [{
-            'name': 'em1',
-            'lldp': [
-                [1, "0773773031"],
-            ]}]
-
-        self.expected['em1']['lldp_processed'] = {
-            nv.LLDP_CHASSIS_ID_NM: "sw01"
-        }
-        self.hook.before_update(self.data, self.node_info)
-        self.assertEqual(self.expected, self.data['all_interfaces'])
-
-
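# The hex TLV strings in these LLDP tests follow IEEE 802.1AB framing: the
# first octet of a chassis-ID value is the subtype (4 = MAC address,
# 5 = network address, whose own first octet is the IANA address family,
# 1 meaning IPv4; 7 = locally assigned name). A minimal Python 3 sketch of
# that decoding, inferred from the test data above rather than taken from
# the parser under test:

import ipaddress

def decode_chassis_id(hex_value):
    raw = bytes.fromhex(hex_value)
    subtype, payload = raw[0], raw[1:]
    if subtype == 4:  # MAC address
        return ':'.join('%02x' % b for b in payload)
    if subtype == 5 and payload[0] == 1:  # network address, IPv4 family
        return str(ipaddress.IPv4Address(bytes(payload[1:])))
    return payload.decode('ascii')  # e.g. subtype 7, a locally assigned name

assert decode_chassis_id("04112233aabbcc") == "11:22:33:aa:bb:cc"
assert decode_chassis_id("0501c000020f") == "192.0.2.15"
assert decode_chassis_id("0773773031") == "sw01"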
def test_duplicate_tlvs(self): - self.data['inventory']['interfaces'] = [{ - 'name': 'em1', - 'lldp': [ - [1, "04112233aabbcc"], # ChassisId - [1, "04332211ddeeff"], # ChassisId - [1, "04556677aabbcc"], # ChassisId - [2, "07373334"], # PortId - [2, "07373435"], # PortId - [2, "07373536"] # PortId - ]}] - - # Only the first unique TLV is processed - self.expected['em1']['lldp_processed'] = { - nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:cc", - nv.LLDP_PORT_ID_NM: "734" - } - - self.hook.before_update(self.data, self.node_info) - self.assertEqual(self.expected, self.data['all_interfaces']) - - def test_unhandled_tlvs(self): - self.data['inventory']['interfaces'] = [{ - 'name': 'em1', - 'lldp': [ - [10, "04112233aabbcc"], - [12, "07373334"], - [128, "00120f080300010000"]]}] - - # nothing should be written to lldp_processed - self.hook.before_update(self.data, self.node_info) - self.assertEqual(self.expected, self.data['all_interfaces']) - - def test_unhandled_oui(self): - self.data['inventory']['interfaces'] = [{ - 'name': 'em1', - 'lldp': [ - [127, "00906901425030323134323530393236"], - [127, "23ac0074657374"], - [127, "00120e010300010000"]]}] - - # nothing should be written to lldp_processed - self.hook.before_update(self.data, self.node_info) - self.assertEqual(self.expected, self.data['all_interfaces']) - - @mock.patch('ironic_inspector.common.lldp_parsers.LOG') - def test_null_strings(self, mock_log): - self.data['inventory']['interfaces'] = [{ - 'name': 'em1', - 'lldp': [ - [1, "04"], - [4, ""], # PortDesc - [5, ""], # SysName - [6, ""], # SysDesc - [127, "0080c203006507"] # dot1 VlanName - ]}] - - self.expected['em1']['lldp_processed'] = { - nv.LLDP_PORT_DESC_NM: '', - nv.LLDP_SYS_DESC_NM: '', - nv.LLDP_SYS_NAME_NM: '' - } - - self.hook.before_update(self.data, self.node_info) - self.assertEqual(self.expected, self.data['all_interfaces']) - self.assertEqual(2, mock_log.warning.call_count) - - @mock.patch('ironic_inspector.common.lldp_parsers.LOG') - def test_truncated_int(self, mock_log): - self.data['inventory']['interfaces'] = [{ - 'name': 'em1', - 'lldp': [ - [127, "00120f04"], # dot3 MTU - [127, "0080c201"], # dot1 PortVlan - [127, "0080c206"], # dot1 MgmtVID - ]}] - - # nothing should be written to lldp_processed - self.hook.before_update(self.data, self.node_info) - self.assertEqual(self.expected, self.data['all_interfaces']) - self.assertEqual(3, mock_log.warning.call_count) - - @mock.patch('ironic_inspector.common.lldp_parsers.LOG') - def test_invalid_ip(self, mock_log): - self.data['inventory']['interfaces'] = [{ - 'name': 'em1', - 'lldp': [ - [8, "0501"], # truncated - [8, "0507c000020f020000000000"]] # invalid id - }] - self.hook.before_update(self.data, self.node_info) - self.assertEqual(self.expected, self.data['all_interfaces']) - self.assertEqual(2, mock_log.warning.call_count) - - @mock.patch('ironic_inspector.common.lldp_parsers.LOG') - def test_truncated_mac(self, mock_log): - self.data['inventory']['interfaces'] = [{ - 'name': 'em1', - 'lldp': [ - [8, "0506"]] - }] - - self.hook.before_update(self.data, self.node_info) - self.assertEqual(self.expected, self.data['all_interfaces']) - self.assertEqual(1, mock_log.warning.call_count) - - @mock.patch('ironic_inspector.common.lldp_parsers.LOG') - def test_bad_value_macphy(self, mock_log): - self.data['inventory']['interfaces'] = [{ - 'name': 'em1', - 'lldp': [ - [127, "00120f01036c11FFFF"], # invalid mau type - [127, "00120f01036c11"], # truncated - [127, "00120f01036c"] # truncated - ]}] - - self.hook.before_update(self.data, 
self.node_info) - self.assertEqual(self.expected, self.data['all_interfaces']) - self.assertEqual(3, mock_log.warning.call_count) - - @mock.patch('ironic_inspector.common.lldp_parsers.LOG') - def test_bad_value_linkagg(self, mock_log): - self.data['inventory']['interfaces'] = [{ - 'name': 'em1', - 'lldp': [ - [127, "00120f0303"], # dot3 LinkAggregation - [127, "00120f03"] # truncated - ]}] - - self.hook.before_update(self.data, self.node_info) - self.assertEqual(self.expected, self.data['all_interfaces']) - self.assertEqual(2, mock_log.warning.call_count) diff --git a/ironic_inspector/test/unit/test_plugins_local_link_connection.py b/ironic_inspector/test/unit/test_plugins_local_link_connection.py deleted file mode 100644 index 67150fb..0000000 --- a/ironic_inspector/test/unit/test_plugins_local_link_connection.py +++ /dev/null @@ -1,196 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from oslo_config import cfg - -from ironic_inspector import node_cache -from ironic_inspector.plugins import local_link_connection -from ironic_inspector.test import base as test_base -from ironic_inspector import utils - - -class TestGenericLocalLinkConnectionHook(test_base.NodeTest): - hook = local_link_connection.GenericLocalLinkConnectionHook() - - def setUp(self): - super(TestGenericLocalLinkConnectionHook, self).setUp() - self.data = { - 'inventory': { - 'interfaces': [{ - 'name': 'em1', 'mac_address': '11:11:11:11:11:11', - 'ipv4_address': '1.1.1.1', - 'lldp': [ - (0, ''), - (1, '04885a92ec5459'), - (2, '0545746865726e6574312f3138'), - (3, '0078')] - }], - 'cpu': 1, - 'disks': 1, - 'memory': 1 - }, - 'all_interfaces': { - 'em1': {}, - } - } - - llc = { - 'port_id': '56' - } - - ports = [mock.Mock(spec=['address', 'uuid', 'local_link_connection'], - address=a, local_link_connection=llc) - for a in ('11:11:11:11:11:11',)] - self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - node=self.node, ports=ports) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_expected_data(self, mock_patch): - patches = [ - {'path': '/local_link_connection/port_id', - 'value': 'Ethernet1/18', 'op': 'add'}, - {'path': '/local_link_connection/switch_id', - 'value': '88:5a:92:ec:54:59', 'op': 'add'}, - ] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_invalid_chassis_id_subtype(self, mock_patch): - # First byte of TLV value is processed to calculate the subtype for the - # chassis ID, Subtype 5 ('05...') isn't a subtype supported by this - # plugin, so we expect it to skip this TLV. 
- self.data['inventory']['interfaces'][0]['lldp'][1] = ( - 1, '05885a92ec5459') - patches = [ - {'path': '/local_link_connection/port_id', - 'value': 'Ethernet1/18', 'op': 'add'}, - ] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_invalid_port_id_subtype(self, mock_patch): - # First byte of TLV value is processed to calculate the subtype for the - # port ID, Subtype 6 ('06...') isn't a subtype supported by this - # plugin, so we expect it to skip this TLV. - self.data['inventory']['interfaces'][0]['lldp'][2] = ( - 2, '0645746865726e6574312f3138') - patches = [ - {'path': '/local_link_connection/switch_id', - 'value': '88:5a:92:ec:54:59', 'op': 'add'} - ] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_port_id_subtype_mac(self, mock_patch): - self.data['inventory']['interfaces'][0]['lldp'][2] = ( - 2, '03885a92ec5458') - patches = [ - {'path': '/local_link_connection/port_id', - 'value': '88:5a:92:ec:54:58', 'op': 'add'}, - {'path': '/local_link_connection/switch_id', - 'value': '88:5a:92:ec:54:59', 'op': 'add'} - ] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_lldp_none(self, mock_patch): - self.data['inventory']['interfaces'][0]['lldp'] = None - patches = [] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_interface_not_in_all_interfaces(self, mock_patch): - self.data['all_interfaces'] = {} - patches = [] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_interface_not_in_ironic(self, mock_patch): - self.node_info._ports = {} - patches = [] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - def test_no_inventory(self): - del self.data['inventory'] - self.assertRaises(utils.Error, self.hook.before_update, - self.data, self.node_info) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_no_overwrite(self, mock_patch): - cfg.CONF.set_override('overwrite_existing', False, group='processing') - patches = [ - {'path': '/local_link_connection/switch_id', - 'value': '88:5a:92:ec:54:59', 'op': 'add'} - ] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_processed_data_available(self, mock_patch): - self.data['all_interfaces'] = { - 'em1': {"ip": self.ips[0], "mac": self.macs[0], - "lldp_processed": { - "switch_chassis_id": "11:22:33:aa:bb:dd", - "switch_port_id": "Ethernet2/66"} - } - } - - patches = [ - {'path': '/local_link_connection/port_id', - 'value': 'Ethernet2/66', 'op': 'add'}, - {'path': '/local_link_connection/switch_id', - 'value': '11:22:33:aa:bb:dd', 'op': 'add'}, - ] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_processed_data_chassis_only(self, mock_patch): - self.data['all_interfaces'] = { - 'em1': {"ip": self.ips[0], "mac": self.macs[0], - "lldp_processed": { 
- "switch_chassis_id": "11:22:33:aa:bb:dd"} - } - } - - patches = [ - {'path': '/local_link_connection/switch_id', - 'value': '11:22:33:aa:bb:dd', 'op': 'add'} - ] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch_port') - def test_processed_data_port_only(self, mock_patch): - self.data['all_interfaces'] = { - 'em1': {"ip": self.ips[0], "mac": self.macs[0], - "lldp_processed": { - "switch_port_id": "Ethernet2/66"} - } - } - - patches = [ - {'path': '/local_link_connection/port_id', - 'value': 'Ethernet2/66', 'op': 'add'} - ] - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patches, mock_patch) diff --git a/ironic_inspector/test/unit/test_plugins_pci_devices.py b/ironic_inspector/test/unit/test_plugins_pci_devices.py deleted file mode 100644 index eb57369..0000000 --- a/ironic_inspector/test/unit/test_plugins_pci_devices.py +++ /dev/null @@ -1,102 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from ironic_inspector import node_cache -from ironic_inspector.plugins import base -from ironic_inspector.plugins import pci_devices -from ironic_inspector.test import base as test_base - - -class TestPciDevicesHook(test_base.NodeTest): - hook = pci_devices.PciDevicesHook() - - def test_parse_pci_alias_entry(self): - pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",' - ' "name": "baz1"}', - '{"vendor_id": "foo2", "product_id": "bar2",' - ' "name": "baz2"}'] - valid_pci_entry = {("foo1", "bar1"): "baz1", ("foo2", "bar2"): "baz2"} - base.CONF.set_override('alias', pci_alias, 'pci_devices') - parsed_pci_entry = pci_devices._parse_pci_alias_entry() - self.assertEqual(valid_pci_entry, parsed_pci_entry) - - def test_parse_pci_alias_entry_no_entries(self): - pci_alias = [] - base.CONF.set_override('alias', pci_alias, 'pci_devices') - parsed_pci_alias = pci_devices._parse_pci_alias_entry() - self.assertFalse(parsed_pci_alias) - - @mock.patch('ironic_inspector.plugins.pci_devices.LOG') - def test_parse_pci_alias_entry_invalid_json(self, mock_oslo_log): - pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",' - ' "name": "baz1"}', '{"invalid" = "entry"}'] - base.CONF.set_override('alias', pci_alias, 'pci_devices') - valid_pci_alias = {("foo1", "bar1"): "baz1"} - parsed_pci_alias = pci_devices._parse_pci_alias_entry() - self.assertEqual(valid_pci_alias, parsed_pci_alias) - mock_oslo_log.error.assert_called_once() - - @mock.patch('ironic_inspector.plugins.pci_devices.LOG') - def test_parse_pci_alias_entry_invalid_keys(self, mock_oslo_log): - pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",' - ' "name": "baz1"}', '{"invalid": "keys"}'] - base.CONF.set_override('alias', pci_alias, 'pci_devices') - valid_pci_alias = {("foo1", "bar1"): "baz1"} - parsed_pci_alias = pci_devices._parse_pci_alias_entry() - self.assertEqual(valid_pci_alias, parsed_pci_alias) - mock_oslo_log.error.assert_called_once() - - @mock.patch.object(hook, 'aliases', 
{("1234", "5678"): "pci_dev1", - ("9876", "5432"): "pci_dev2"}) - @mock.patch.object(node_cache.NodeInfo, 'update_capabilities', - autospec=True) - def test_before_update(self, mock_update_props): - self.data['pci_devices'] = [ - {"vendor_id": "1234", "product_id": "5678"}, - {"vendor_id": "1234", "product_id": "5678"}, - {"vendor_id": "1234", "product_id": "7890"}, - {"vendor_id": "9876", "product_id": "5432"} - ] - expected_pci_devices_count = {"pci_dev1": 2, "pci_dev2": 1} - self.hook.before_update(self.data, self.node_info) - mock_update_props.assert_called_once_with(self.node_info, - **expected_pci_devices_count) - - @mock.patch('ironic_inspector.plugins.pci_devices.LOG') - @mock.patch.object(node_cache.NodeInfo, 'update_capabilities', - autospec=True) - def test_before_update_no_pci_info_from_ipa(self, mock_update_props, - mock_oslo_log): - pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",' - ' "name": "baz1"}'] - base.CONF.set_override('alias', pci_alias, 'pci_devices') - self.hook.before_update(self.data, self.node_info) - mock_oslo_log.warning.assert_called_once() - self.assertFalse(mock_update_props.called) - - @mock.patch.object(pci_devices, '_parse_pci_alias_entry') - @mock.patch('ironic_inspector.plugins.pci_devices.LOG') - @mock.patch.object(node_cache.NodeInfo, 'update_capabilities', - autospec=True) - def test_before_update_no_match(self, mock_update_props, mock_oslo_log, - mock_parse_pci_alias): - self.data['pci_devices'] = [ - {"vendor_id": "1234", "product_id": "5678"}, - {"vendor_id": "1234", "product_id": "7890"}, - ] - mock_parse_pci_alias.return_value = {("9876", "5432"): "pci_dev"} - self.hook.before_update(self.data, self.node_info) - self.assertFalse(mock_update_props.called) - self.assertFalse(mock_oslo_log.info.called) diff --git a/ironic_inspector/test/unit/test_plugins_raid_device.py b/ironic_inspector/test/unit/test_plugins_raid_device.py deleted file mode 100644 index 61785cc..0000000 --- a/ironic_inspector/test/unit/test_plugins_raid_device.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from ironic_inspector import node_cache -from ironic_inspector.plugins import base -from ironic_inspector.plugins import raid_device -from ironic_inspector.test import base as test_base - - -class TestRaidDeviceDetection(test_base.NodeTest): - hook = raid_device.RaidDeviceDetection() - - def test_loadable_by_name(self): - base.CONF.set_override('processing_hooks', 'raid_device', 'processing') - ext = base.processing_hooks_manager()['raid_device'] - self.assertIsInstance(ext.obj, raid_device.RaidDeviceDetection) - - def test_missing_local_gb(self): - introspection_data = {} - self.hook.before_processing(introspection_data) - - self.assertEqual(1, introspection_data['local_gb']) - - def test_local_gb_not_changed(self): - introspection_data = {'local_gb': 42} - self.hook.before_processing(introspection_data) - - self.assertEqual(42, introspection_data['local_gb']) - - -class TestRaidDeviceDetectionUpdate(test_base.NodeTest): - hook = raid_device.RaidDeviceDetection() - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def _check(self, data, patch, mock_patch): - self.hook.before_processing(data) - self.hook.before_update(data, self.node_info) - self.assertCalledWithPatch(patch, mock_patch) - - def test_no_previous_block_devices(self): - introspection_data = {'inventory': { - 'disks': [ - {'name': '/dev/sda', 'serial': 'foo'}, - {'name': '/dev/sdb', 'serial': 'bar'}, - ] - }} - expected = [{'op': 'add', 'path': '/extra/block_devices', - 'value': {'serials': ['foo', 'bar']}}] - self._check(introspection_data, expected) - - def test_no_previous_block_devices_old_ramdisk(self): - introspection_data = {'block_devices': {'serials': ['foo', 'bar']}} - expected = [{'op': 'add', 'path': '/extra/block_devices', - 'value': introspection_data['block_devices']}] - self._check(introspection_data, expected) - - def test_root_device_found(self): - self.node.extra['block_devices'] = {'serials': ['foo', 'bar']} - introspection_data = {'inventory': { - 'disks': [ - {'name': '/dev/sda', 'serial': 'foo'}, - {'name': '/dev/sdb', 'serial': 'baz'}, - ] - }} - expected = [{'op': 'remove', 'path': '/extra/block_devices'}, - {'op': 'add', 'path': '/properties/root_device', - 'value': {'serial': 'baz'}}] - - self._check(introspection_data, expected) - - def test_root_device_found_old_ramdisk(self): - self.node.extra['block_devices'] = {'serials': ['foo', 'bar']} - introspection_data = {'block_devices': {'serials': ['foo', 'baz']}} - expected = [{'op': 'remove', 'path': '/extra/block_devices'}, - {'op': 'add', 'path': '/properties/root_device', - 'value': {'serial': 'baz'}}] - - self._check(introspection_data, expected) - - def test_root_device_already_exposed(self): - self.node.properties['root_device'] = {'serial': 'foo'} - introspection_data = {'inventory': { - 'disks': [ - {'name': '/dev/sda', 'serial': 'foo'}, - {'name': '/dev/sdb', 'serial': 'baz'}, - ] - }} - - self._check(introspection_data, []) - - def test_multiple_new_devices(self): - self.node.extra['block_devices'] = {'serials': ['foo', 'bar']} - introspection_data = {'inventory': { - 'disks': [ - {'name': '/dev/sda', 'serial': 'foo'}, - {'name': '/dev/sdb', 'serial': 'baz'}, - {'name': '/dev/sdc', 'serial': 'qux'}, - ] - }} - - self._check(introspection_data, []) - - def test_no_new_devices(self): - self.node.extra['block_devices'] = {'serials': ['foo', 'bar']} - introspection_data = {'inventory': { - 'disks': [ - {'name': '/dev/sda', 'serial': 'foo'}, - {'name': '/dev/sdb', 'serial': 'bar'}, - ] - }} - - self._check(introspection_data, 
[]) - - def test_no_block_devices_from_ramdisk(self): - introspection_data = {} - - self._check(introspection_data, []) diff --git a/ironic_inspector/test/unit/test_plugins_rules.py b/ironic_inspector/test/unit/test_plugins_rules.py deleted file mode 100644 index d5017e9..0000000 --- a/ironic_inspector/test/unit/test_plugins_rules.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for introspection rules plugins.""" - -import mock - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import node_cache -from ironic_inspector.plugins import rules as rules_plugins -from ironic_inspector.test import base as test_base -from ironic_inspector import utils - - -TEST_SET = [(42, 42), ('42', 42), ('4.2', 4.2), - (42, 41), ('42', 41), ('4.2', 4.0), - (41, 42), ('41', 42), ('4.0', 4.2)] - - -class TestSimpleConditions(test_base.BaseTest): - def test_validate(self): - cond = rules_plugins.SimpleCondition() - cond.validate({'value': 42}) - self.assertRaises(ValueError, cond.validate, {}) - - def _test(self, cond, expected, value, ref): - self.assertIs(expected, cond.check(None, value, {'value': ref})) - - def test_eq(self): - cond = rules_plugins.EqCondition() - for values, expected in zip(TEST_SET, [True] * 3 + [False] * 6): - self._test(cond, expected, *values) - self._test(cond, True, 'foo', 'foo') - self._test(cond, False, 'foo', 'bar') - - def test_ne(self): - cond = rules_plugins.NeCondition() - for values, expected in zip(TEST_SET, [False] * 3 + [True] * 6): - self._test(cond, expected, *values) - self._test(cond, False, 'foo', 'foo') - self._test(cond, True, 'foo', 'bar') - - def test_gt(self): - cond = rules_plugins.GtCondition() - for values, expected in zip(TEST_SET, [False] * 3 + [True] * 3 - + [False] * 3): - self._test(cond, expected, *values) - - def test_ge(self): - cond = rules_plugins.GeCondition() - for values, expected in zip(TEST_SET, [True] * 6 + [False] * 3): - self._test(cond, expected, *values) - - def test_le(self): - cond = rules_plugins.LeCondition() - for values, expected in zip(TEST_SET, [True] * 3 + [False] * 3 - + [True] * 3): - self._test(cond, expected, *values) - - def test_lt(self): - cond = rules_plugins.LtCondition() - for values, expected in zip(TEST_SET, [False] * 6 + [True] * 3): - self._test(cond, expected, *values) - - -class TestReConditions(test_base.BaseTest): - def test_validate(self): - for cond in (rules_plugins.MatchesCondition(), - rules_plugins.ContainsCondition()): - cond.validate({'value': r'[a-z]?(foo|b.r).+'}) - self.assertRaises(ValueError, cond.validate, - {'value': '**'}) - - def test_matches(self): - cond = rules_plugins.MatchesCondition() - for reg, field, res in [(r'.*', 'foo', True), - (r'fo{1,2}', 'foo', True), - (r'o{1,2}', 'foo', False), - (r'[1-9]*', 42, True), - (r'^(foo|bar)$', 'foo', True), - (r'fo', 'foo', False)]: - self.assertEqual(res, cond.check(None, field, {'value': reg})) - - def test_contains(self): - cond = 
rules_plugins.ContainsCondition() - for reg, field, res in [(r'.*', 'foo', True), - (r'fo{1,2}', 'foo', True), - (r'o{1,2}', 'foo', True), - (r'[1-9]*', 42, True), - (r'bar', 'foo', False)]: - self.assertEqual(res, cond.check(None, field, {'value': reg})) - - -class TestNetCondition(test_base.BaseTest): - cond = rules_plugins.NetCondition() - - def test_validate(self): - self.cond.validate({'value': '192.0.2.1/24'}) - self.assertRaises(ValueError, self.cond.validate, {'value': 'foo'}) - - def test_check(self): - self.assertTrue(self.cond.check(None, '192.0.2.4', - {'value': '192.0.2.1/24'})) - self.assertFalse(self.cond.check(None, '192.1.2.4', - {'value': '192.0.2.1/24'})) - - -class TestEmptyCondition(test_base.BaseTest): - cond = rules_plugins.EmptyCondition() - - def test_check_none(self): - self.assertTrue(self.cond.check(None, None, {})) - self.assertFalse(self.cond.check(None, 0, {})) - - def test_check_empty_string(self): - self.assertTrue(self.cond.check(None, '', {})) - self.assertFalse(self.cond.check(None, '16', {})) - - def test_check_empty_list(self): - self.assertTrue(self.cond.check(None, [], {})) - self.assertFalse(self.cond.check(None, ['16'], {})) - - def test_check_empty_dict(self): - self.assertTrue(self.cond.check(None, {}, {})) - self.assertFalse(self.cond.check(None, {'test': '16'}, {})) - - -class TestFailAction(test_base.BaseTest): - act = rules_plugins.FailAction() - - def test_validate(self): - self.act.validate({'message': 'boom'}) - self.assertRaises(ValueError, self.act.validate, {}) - - def test_apply(self): - self.assertRaisesRegex(utils.Error, 'boom', - self.act.apply, None, {'message': 'boom'}) - - -class TestSetAttributeAction(test_base.NodeTest): - act = rules_plugins.SetAttributeAction() - params = {'path': '/extra/value', 'value': 42} - - def test_validate(self): - self.act.validate(self.params) - self.assertRaises(ValueError, self.act.validate, {'value': 42}) - self.assertRaises(ValueError, self.act.validate, - {'path': '/extra/value'}) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_apply(self, mock_patch): - self.act.apply(self.node_info, self.params) - mock_patch.assert_called_once_with([{'op': 'add', - 'path': '/extra/value', - 'value': 42}]) - - -class TestSetCapabilityAction(test_base.NodeTest): - act = rules_plugins.SetCapabilityAction() - params = {'name': 'cap1', 'value': 'val'} - - def test_validate(self): - self.act.validate(self.params) - self.assertRaises(ValueError, self.act.validate, {'value': 42}) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_apply(self, mock_patch): - self.act.apply(self.node_info, self.params) - mock_patch.assert_called_once_with( - [{'op': 'add', 'path': '/properties/capabilities', - 'value': 'cap1:val'}], mock.ANY) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_apply_with_existing(self, mock_patch): - self.node.properties['capabilities'] = 'x:y,cap1:old_val,answer:42' - self.act.apply(self.node_info, self.params) - - patch = mock_patch.call_args[0][0] - new_caps = ir_utils.capabilities_to_dict(patch[0]['value']) - self.assertEqual({'cap1': 'val', 'x': 'y', 'answer': '42'}, new_caps) - - -class TestExtendAttributeAction(test_base.NodeTest): - act = rules_plugins.ExtendAttributeAction() - params = {'path': '/extra/value', 'value': 42} - - def test_validate(self): - self.act.validate(self.params) - self.assertRaises(ValueError, self.act.validate, {'value': 42}) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_apply(self, mock_patch): - 
self.act.apply(self.node_info, self.params) - mock_patch.assert_called_once_with( - [{'op': 'add', 'path': '/extra/value', 'value': [42]}], mock.ANY) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_apply_non_empty(self, mock_patch): - self.node.extra['value'] = [0] - self.act.apply(self.node_info, self.params) - - mock_patch.assert_called_once_with( - [{'op': 'replace', 'path': '/extra/value', 'value': [0, 42]}], - mock.ANY) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_apply_unique_with_existing(self, mock_patch): - params = dict(unique=True, **self.params) - self.node.extra['value'] = [42] - self.act.apply(self.node_info, params) - self.assertFalse(mock_patch.called) diff --git a/ironic_inspector/test/unit/test_plugins_standard.py b/ironic_inspector/test/unit/test_plugins_standard.py deleted file mode 100644 index 3a579b1..0000000 --- a/ironic_inspector/test/unit/test_plugins_standard.py +++ /dev/null @@ -1,412 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -import six - -from oslo_config import cfg -from oslo_utils import units - -from ironic_inspector import node_cache -from ironic_inspector.plugins import base -from ironic_inspector.plugins import standard as std_plugins -from ironic_inspector import process -from ironic_inspector.test import base as test_base -from ironic_inspector import utils - -CONF = cfg.CONF - - -class TestSchedulerHook(test_base.NodeTest): - def setUp(self): - super(TestSchedulerHook, self).setUp() - self.hook = std_plugins.SchedulerHook() - self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - node=self.node) - - def test_hook_loadable_by_name(self): - CONF.set_override('processing_hooks', 'scheduler', 'processing') - ext = base.processing_hooks_manager()['scheduler'] - self.assertIsInstance(ext.obj, std_plugins.SchedulerHook) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_no_root_disk(self, mock_patch): - del self.inventory['disks'] - del self.data['root_disk'] - - patch = [ - {'path': '/properties/cpus', 'value': '4', 'op': 'add'}, - {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '0', 'op': 'add'} - ] - - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patch, mock_patch) - self.assertEqual(0, self.data['local_gb']) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_ok(self, mock_patch): - patch = [ - {'path': '/properties/cpus', 'value': '4', 'op': 'add'}, - {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '999', 'op': 'add'} - ] - - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patch, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_no_overwrite(self, mock_patch): - CONF.set_override('overwrite_existing', False, 
'processing') - self.node.properties = { - 'memory_mb': '4096', - 'cpu_arch': 'i686' - } - patch = [ - {'path': '/properties/cpus', 'value': '4', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '999', 'op': 'add'} - ] - - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patch, mock_patch) - - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_root_disk_no_spacing(self, mock_patch): - CONF.set_override('disk_partitioning_spacing', False, 'processing') - patch = [ - {'path': '/properties/cpus', 'value': '4', 'op': 'add'}, - {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '1000', 'op': 'add'} - ] - - self.hook.before_update(self.data, self.node_info) - self.assertCalledWithPatch(patch, mock_patch) - - -class TestValidateInterfacesHookLoad(test_base.NodeTest): - def test_hook_loadable_by_name(self): - CONF.set_override('processing_hooks', 'validate_interfaces', - 'processing') - ext = base.processing_hooks_manager()['validate_interfaces'] - self.assertIsInstance(ext.obj, std_plugins.ValidateInterfacesHook) - - -class TestValidateInterfacesHookBeforeProcessing(test_base.NodeTest): - def setUp(self): - super(TestValidateInterfacesHookBeforeProcessing, self).setUp() - self.hook = std_plugins.ValidateInterfacesHook() - - def test_no_interfaces(self): - self.assertRaisesRegex(utils.Error, - 'Hardware inventory is empty or missing', - self.hook.before_processing, {}) - self.assertRaisesRegex(utils.Error, - 'Hardware inventory is empty or missing', - self.hook.before_processing, {'inventory': {}}) - del self.inventory['interfaces'] - self.assertRaisesRegex(utils.Error, - 'interfaces key is missing or empty', - self.hook.before_processing, self.data) - - def test_only_pxe(self): - self.hook.before_processing(self.data) - - self.assertEqual(self.pxe_interfaces, self.data['interfaces']) - self.assertEqual([self.pxe_mac], self.data['macs']) - self.assertEqual(self.all_interfaces, self.data['all_interfaces']) - - def test_only_pxe_mac_format(self): - self.data['boot_interface'] = self.pxe_mac - self.hook.before_processing(self.data) - - self.assertEqual(self.pxe_interfaces, self.data['interfaces']) - self.assertEqual([self.pxe_mac], self.data['macs']) - self.assertEqual(self.all_interfaces, self.data['all_interfaces']) - - def test_only_pxe_not_found(self): - self.data['boot_interface'] = 'aa:bb:cc:dd:ee:ff' - self.assertRaisesRegex(utils.Error, 'No suitable interfaces', - self.hook.before_processing, self.data) - - def test_only_pxe_no_boot_interface(self): - del self.data['boot_interface'] - self.hook.before_processing(self.data) - self.active_interfaces[self.pxe_iface_name]['pxe'] = False - self.all_interfaces[self.pxe_iface_name]['pxe'] = False - - self.assertEqual(self.active_interfaces, self.data['interfaces']) - self.assertEqual(sorted(i['mac'] for i in - self.active_interfaces.values()), - sorted(self.data['macs'])) - self.assertEqual(self.all_interfaces, self.data['all_interfaces']) - - def test_only_active(self): - CONF.set_override('add_ports', 'active', 'processing') - self.hook.before_processing(self.data) - - self.assertEqual(self.active_interfaces, self.data['interfaces']) - self.assertEqual(sorted(i['mac'] for i in - self.active_interfaces.values()), - sorted(self.data['macs'])) - self.assertEqual(self.all_interfaces, self.data['all_interfaces']) - - def test_all(self): - CONF.set_override('add_ports', 'all', 
'processing') - self.hook.before_processing(self.data) - - self.assertEqual(self.all_interfaces, self.data['interfaces']) - self.assertEqual(sorted(i['mac'] for i in - self.all_interfaces.values()), - sorted(self.data['macs'])) - self.assertEqual(self.all_interfaces, self.data['all_interfaces']) - - @mock.patch.object(node_cache.NodeInfo, 'create_ports') - def test_disabled_bad_conf(self, mock_create_port): - CONF.set_override('add_ports', 'disabled', 'processing') - CONF.set_override('keep_ports', 'added', 'processing') - - self.assertRaisesRegex(utils.Error, 'Configuration error:', - self.hook.__init__) - mock_create_port.assert_not_called() - - @mock.patch.object(node_cache.NodeInfo, 'create_ports') - def test_disabled(self, mock_create_port): - CONF.set_override('add_ports', 'disabled', 'processing') - CONF.set_override('keep_ports', 'all', 'processing') - - self.hook.before_processing(self.data) - self.assertEqual(self.active_interfaces, self.data['interfaces']) - mock_create_port.assert_not_called() - - def test_malformed_interfaces(self): - self.inventory['interfaces'] = [ - # no name - {'mac_address': '11:11:11:11:11:11', 'ipv4_address': '1.1.1.1'}, - # empty - {}, - ] - self.assertRaisesRegex(utils.Error, 'No interfaces supplied', - self.hook.before_processing, self.data) - - def test_skipped_interfaces(self): - CONF.set_override('add_ports', 'all', 'processing') - self.inventory['interfaces'] = [ - # local interface (by name) - {'name': 'lo', 'mac_address': '11:11:11:11:11:11', - 'ipv4_address': '1.1.1.1'}, - # local interface (by IP address) - {'name': 'em1', 'mac_address': '22:22:22:22:22:22', - 'ipv4_address': '127.0.0.1'}, - # no MAC provided - {'name': 'em3', 'ipv4_address': '2.2.2.2'}, - # malformed MAC provided - {'name': 'em4', 'mac_address': 'foobar', - 'ipv4_address': '2.2.2.2'}, - ] - self.assertRaisesRegex(utils.Error, 'No suitable interfaces found', - self.hook.before_processing, self.data) - - -@mock.patch.object(node_cache.NodeInfo, 'delete_port', autospec=True) -@mock.patch.object(node_cache.NodeInfo, 'create_ports', autospec=True) -class TestValidateInterfacesHookBeforeUpdateDeletion(test_base.NodeTest): - def setUp(self): - super(TestValidateInterfacesHookBeforeUpdateDeletion, self).setUp() - self.hook = std_plugins.ValidateInterfacesHook() - self.interfaces_to_create = sorted(self.valid_interfaces.values(), - key=lambda i: i['mac']) - self.existing_ports = [mock.Mock(spec=['address', 'uuid'], - address=a) - for a in (self.macs[1], - '44:44:44:44:44:44')] - self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - node=self.node, - ports=self.existing_ports) - - def test_keep_all(self, mock_create_ports, mock_delete_port): - self.hook.before_update(self.data, self.node_info) - - # NOTE(dtantsur): dictionary ordering is not defined - mock_create_ports.assert_called_once_with(self.node_info, mock.ANY) - self.assertEqual(self.interfaces_to_create, - sorted(mock_create_ports.call_args[0][1], - key=lambda i: i['mac'])) - - self.assertFalse(mock_delete_port.called) - - def test_keep_present(self, mock_create_ports, mock_delete_port): - CONF.set_override('keep_ports', 'present', 'processing') - self.data['all_interfaces'] = self.all_interfaces - self.hook.before_update(self.data, self.node_info) - - mock_create_ports.assert_called_once_with(self.node_info, mock.ANY) - self.assertEqual(self.interfaces_to_create, - sorted(mock_create_ports.call_args[0][1], - key=lambda i: i['mac'])) - - mock_delete_port.assert_called_once_with(self.node_info, - 
self.existing_ports[1]) - - def test_keep_added(self, mock_create_ports, mock_delete_port): - CONF.set_override('keep_ports', 'added', 'processing') - self.data['macs'] = [self.pxe_mac] - self.hook.before_update(self.data, self.node_info) - - mock_create_ports.assert_called_once_with(self.node_info, mock.ANY) - self.assertEqual(self.interfaces_to_create, - sorted(mock_create_ports.call_args[0][1], - key=lambda i: i['mac'])) - - mock_delete_port.assert_any_call(self.node_info, - self.existing_ports[0]) - mock_delete_port.assert_any_call(self.node_info, - self.existing_ports[1]) - - -@mock.patch.object(node_cache.NodeInfo, 'patch_port', autospec=True) -@mock.patch.object(node_cache.NodeInfo, 'create_ports', autospec=True) -class TestValidateInterfacesHookBeforeUpdatePXEEnabled(test_base.NodeTest): - def setUp(self): - super(TestValidateInterfacesHookBeforeUpdatePXEEnabled, self).setUp() - self.hook = std_plugins.ValidateInterfacesHook() - # Note(milan) assumes the ordering of self.macs from test_base.NodeTest - # where the first item '11:22:33:44:55:66' is the MAC of the - # self.pxe_iface_name 'eth1', the "real" PXE interface - sorted_interfaces = sorted(self.valid_interfaces.values(), - key=lambda i: i['mac']) - self.existing_ports = [ - mock.Mock(spec=['address', 'uuid', 'pxe_enabled'], - address=iface['mac'], pxe_enabled=True) - for iface in sorted_interfaces - ] - self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, - node=self.node, - ports=self.existing_ports) - - def test_fix_pxe_enabled(self, mock_create_ports, mock_patch_port): - self.hook.before_update(self.data, self.node_info) - # Note(milan) there are just 2 self.valid_interfaces, 'eth1' and 'ib0' - # eth1 is the PXE booting interface and eth1.mac < ib0.mac - mock_patch_port.assert_called_once_with( - self.node_info, self.existing_ports[1], - [{'op': 'replace', 'path': '/pxe_enabled', 'value': False}]) - - def test_no_overwrite(self, mock_create_ports, mock_patch_port): - CONF.set_override('overwrite_existing', False, 'processing') - self.hook.before_update(self.data, self.node_info) - self.assertFalse(mock_patch_port.called) - - -class TestRootDiskSelection(test_base.NodeTest): - def setUp(self): - super(TestRootDiskSelection, self).setUp() - self.hook = std_plugins.RootDiskSelectionHook() - self.inventory['disks'] = [ - {'model': 'Model 1', 'size': 20 * units.Gi, 'name': '/dev/sdb'}, - {'model': 'Model 2', 'size': 5 * units.Gi, 'name': '/dev/sda'}, - {'model': 'Model 3', 'size': 10 * units.Gi, 'name': '/dev/sdc'}, - {'model': 'Model 4', 'size': 4 * units.Gi, 'name': '/dev/sdd'}, - {'model': 'Too Small', 'size': 1 * units.Gi, 'name': '/dev/sde'}, - ] - self.matched = self.inventory['disks'][2].copy() - self.node_info = mock.Mock(spec=node_cache.NodeInfo, - _state='foo', - uuid=self.uuid, - **{'node.return_value': self.node}) - - def test_no_hints(self): - del self.data['root_disk'] - - self.hook.before_update(self.data, self.node_info) - - self.assertNotIn('local_gb', self.data) - self.assertNotIn('root_disk', self.data) - - def test_no_inventory(self): - self.node.properties['root_device'] = {'model': 'foo'} - del self.data['inventory'] - del self.data['root_disk'] - - self.assertRaisesRegex(utils.Error, - 'Hardware inventory is empty or missing', - self.hook.before_update, - self.data, self.node_info) - - self.assertNotIn('local_gb', self.data) - self.assertNotIn('root_disk', self.data) - - def test_no_disks(self): - self.node.properties['root_device'] = {'size': 10} - self.inventory['disks'] = [] - - 
six.assertRaisesRegex(self, utils.Error, - 'No disks satisfied root device hints', - self.hook.before_update, - self.data, self.node_info) - - def test_one_matches(self): - self.node.properties['root_device'] = {'size': 10} - - self.hook.before_update(self.data, self.node_info) - - self.assertEqual(self.matched, self.data['root_disk']) - - def test_all_match(self): - self.node.properties['root_device'] = {'size': 10, - 'model': 'Model 3'} - - self.hook.before_update(self.data, self.node_info) - - self.assertEqual(self.matched, self.data['root_disk']) - - def test_one_fails(self): - self.node.properties['root_device'] = {'size': 10, - 'model': 'Model 42'} - del self.data['root_disk'] - - self.assertRaisesRegex(utils.Error, - 'No disks satisfied root device hints', - self.hook.before_update, - self.data, self.node_info) - - self.assertNotIn('local_gb', self.data) - self.assertNotIn('root_disk', self.data) - - def test_size_string(self): - self.node.properties['root_device'] = {'size': '10'} - self.hook.before_update(self.data, self.node_info) - self.assertEqual(self.matched, self.data['root_disk']) - - def test_size_invalid(self): - for bad_size in ('foo', None, {}): - self.node.properties['root_device'] = {'size': bad_size} - self.assertRaisesRegex(utils.Error, - 'No disks could be found', - self.hook.before_update, - self.data, self.node_info) - - -class TestRamdiskError(test_base.InventoryTest): - def setUp(self): - super(TestRamdiskError, self).setUp() - self.msg = 'BOOM' - self.bmc_address = '1.2.3.4' - self.data['error'] = self.msg - - def test_no_logs(self): - self.assertRaisesRegex(utils.Error, - self.msg, - process.process, self.data) diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py deleted file mode 100644 index af14e1d..0000000 --- a/ironic_inspector/test/unit/test_process.py +++ /dev/null @@ -1,675 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
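# The process tests below manage mocks with the fixtures library instead of
# deep @mock.patch decorator stacks: useFixture() starts a patch, registers
# its cleanup automatically, and exposes the resulting MagicMock as the
# fixture's .mock attribute, so setUp() can wire shared mocks once. A
# minimal sketch of the pattern; `mymod` and its `helper` attribute are
# hypothetical stand-ins, not names from this repository:

import fixtures

import mymod  # hypothetical module exposing a helper() callable


class ExampleTest(fixtures.TestWithFixtures):
    def setUp(self):
        super(ExampleTest, self).setUp()
        # active until the end of each test, then removed automatically
        self.helper_fixture = self.useFixture(
            fixtures.MockPatchObject(mymod, 'helper', autospec=True))
        self.helper_mock = self.helper_fixture.mock

    def test_uses_helper(self):
        self.helper_mock.return_value = 42
        self.assertEqual(42, mymod.helper())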
- -import copy -import functools -import json -import os -import shutil -import tempfile - -import eventlet -import fixtures -from ironicclient import exceptions -import mock -from oslo_config import cfg -from oslo_serialization import base64 -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import db -from ironic_inspector import firewall -from ironic_inspector import introspection_state as istate -from ironic_inspector import node_cache -from ironic_inspector.plugins import base as plugins_base -from ironic_inspector.plugins import example as example_plugin -from ironic_inspector import process -from ironic_inspector.test import base as test_base -from ironic_inspector import utils - -CONF = cfg.CONF - - -class BaseTest(test_base.NodeTest): - def setUp(self): - super(BaseTest, self).setUp() - self.started_at = timeutils.utcnow() - self.all_ports = [mock.Mock(uuid=uuidutils.generate_uuid(), - address=mac) for mac in self.macs] - self.ports = [self.all_ports[1]] - self.fake_result_json = 'node json' - - self.cli_fixture = self.useFixture( - fixtures.MockPatchObject(ir_utils, 'get_client', autospec=True)) - self.cli = self.cli_fixture.mock.return_value - - -class BaseProcessTest(BaseTest): - def setUp(self): - super(BaseProcessTest, self).setUp() - - self.cache_fixture = self.useFixture( - fixtures.MockPatchObject(node_cache, 'find_node', autospec=True)) - self.process_fixture = self.useFixture( - fixtures.MockPatchObject(process, '_process_node', autospec=True)) - - self.find_mock = self.cache_fixture.mock - self.node_info = node_cache.NodeInfo( - uuid=self.node.uuid, - state=istate.States.waiting, - started_at=self.started_at) - self.node_info.finished = mock.Mock() - self.find_mock.return_value = self.node_info - self.cli.node.get.return_value = self.node - self.process_mock = self.process_fixture.mock - self.process_mock.return_value = self.fake_result_json - - -class TestProcess(BaseProcessTest): - def test_ok(self): - res = process.process(self.data) - - self.assertEqual(self.fake_result_json, res) - - self.find_mock.assert_called_once_with(bmc_address=self.bmc_address, - mac=mock.ANY) - actual_macs = self.find_mock.call_args[1]['mac'] - self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) - self.cli.node.get.assert_called_once_with(self.uuid) - self.process_mock.assert_called_once_with( - self.node_info, self.node, self.data) - - def test_no_ipmi(self): - del self.inventory['bmc_address'] - process.process(self.data) - - self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY) - actual_macs = self.find_mock.call_args[1]['mac'] - self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) - self.cli.node.get.assert_called_once_with(self.uuid) - self.process_mock.assert_called_once_with(self.node_info, self.node, - self.data) - - def test_not_found_in_cache(self): - self.find_mock.side_effect = utils.Error('not found') - self.assertRaisesRegex(utils.Error, - 'not found', - process.process, self.data) - self.assertFalse(self.cli.node.get.called) - self.assertFalse(self.process_mock.called) - - def test_not_found_in_ironic(self): - self.cli.node.get.side_effect = exceptions.NotFound() - - self.assertRaisesRegex(utils.Error, - 'Node %s was not found' % self.uuid, - process.process, self.data) - self.cli.node.get.assert_called_once_with(self.uuid) - self.assertFalse(self.process_mock.called) - self.node_info.finished.assert_called_once_with(error=mock.ANY) - - def 
test_already_finished(self): - self.node_info.finished_at = timeutils.utcnow() - self.assertRaisesRegex(utils.Error, 'already finished', - process.process, self.data) - self.assertFalse(self.process_mock.called) - self.assertFalse(self.find_mock.return_value.finished.called) - - def test_expected_exception(self): - self.process_mock.side_effect = utils.Error('boom') - - self.assertRaisesRegex(utils.Error, 'boom', - process.process, self.data) - - self.node_info.finished.assert_called_once_with(error='boom') - - def test_unexpected_exception(self): - self.process_mock.side_effect = RuntimeError('boom') - - with self.assertRaisesRegex(utils.Error, - 'Unexpected exception') as ctx: - process.process(self.data) - - self.assertEqual(500, ctx.exception.http_code) - self.node_info.finished.assert_called_once_with( - error='Unexpected exception RuntimeError during processing: boom') - - def test_hook_unexpected_exceptions(self): - for ext in plugins_base.processing_hooks_manager(): - patcher = mock.patch.object(ext.obj, 'before_processing', - side_effect=RuntimeError('boom')) - patcher.start() - self.addCleanup(lambda p=patcher: p.stop()) - - self.assertRaisesRegex(utils.Error, 'Unexpected exception', - process.process, self.data) - - self.node_info.finished.assert_called_once_with( - error=mock.ANY) - error_message = self.node_info.finished.call_args[1]['error'] - self.assertIn('RuntimeError', error_message) - self.assertIn('boom', error_message) - - def test_hook_unexpected_exceptions_no_node(self): - # Check that error from hooks is raised, not "not found" - self.find_mock.side_effect = utils.Error('not found') - for ext in plugins_base.processing_hooks_manager(): - patcher = mock.patch.object(ext.obj, 'before_processing', - side_effect=RuntimeError('boom')) - patcher.start() - self.addCleanup(lambda p=patcher: p.stop()) - - self.assertRaisesRegex(utils.Error, 'Unexpected exception', - process.process, self.data) - - self.assertFalse(self.node_info.finished.called) - - def test_error_if_node_not_found_hook(self): - plugins_base._NOT_FOUND_HOOK_MGR = None - self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') - self.assertRaisesRegex(utils.Error, - 'Look up error: BOOM', - process.process, self.data) - - -@mock.patch.object(example_plugin, 'example_not_found_hook', - autospec=True) -class TestNodeNotFoundHook(BaseProcessTest): - def test_node_not_found_hook_run_ok(self, hook_mock): - CONF.set_override('node_not_found_hook', 'example', 'processing') - plugins_base._NOT_FOUND_HOOK_MGR = None - self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') - hook_mock.return_value = node_cache.NodeInfo( - uuid=self.node.uuid, - started_at=self.started_at) - res = process.process(self.data) - self.assertEqual(self.fake_result_json, res) - hook_mock.assert_called_once_with(self.data) - - def test_node_not_found_hook_run_none(self, hook_mock): - CONF.set_override('node_not_found_hook', 'example', 'processing') - plugins_base._NOT_FOUND_HOOK_MGR = None - self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') - hook_mock.return_value = None - self.assertRaisesRegex(utils.Error, - 'Node not found hook returned nothing', - process.process, self.data) - hook_mock.assert_called_once_with(self.data) - - def test_node_not_found_hook_exception(self, hook_mock): - CONF.set_override('node_not_found_hook', 'example', 'processing') - plugins_base._NOT_FOUND_HOOK_MGR = None - self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') - hook_mock.side_effect = Exception('Hook Error') - 
self.assertRaisesRegex(utils.Error, - 'Node not found hook failed: Hook Error', - process.process, self.data) - hook_mock.assert_called_once_with(self.data) - - -class TestUnprocessedData(BaseProcessTest): - @mock.patch.object(process, '_store_unprocessed_data', autospec=True) - def test_save_unprocessed_data(self, store_mock): - CONF.set_override('store_data', 'swift', 'processing') - expected = copy.deepcopy(self.data) - - process.process(self.data) - - store_mock.assert_called_once_with(mock.ANY, expected) - - @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) - def test_save_unprocessed_data_failure(self, swift_mock): - CONF.set_override('store_data', 'swift', 'processing') - name = 'inspector_data-%s-%s' % ( - self.uuid, - process._UNPROCESSED_DATA_STORE_SUFFIX - ) - - swift_conn = swift_mock.return_value - swift_conn.create_object.side_effect = utils.Error('Oops') - - res = process.process(self.data) - - # assert store failure doesn't break processing - self.assertEqual(self.fake_result_json, res) - swift_conn.create_object.assert_called_once_with(name, mock.ANY) - - -@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_processing', - autospec=True) -class TestStoreLogs(BaseProcessTest): - def setUp(self): - super(TestStoreLogs, self).setUp() - CONF.set_override('processing_hooks', 'ramdisk_error,example', - 'processing') - - self.tempdir = tempfile.mkdtemp() - self.addCleanup(lambda: shutil.rmtree(self.tempdir)) - CONF.set_override('ramdisk_logs_dir', self.tempdir, 'processing') - - self.logs = b'test logs' - self.data['logs'] = base64.encode_as_bytes(self.logs) - - def _check_contents(self, name=None): - files = os.listdir(self.tempdir) - self.assertEqual(1, len(files)) - filename = files[0] - if name is None: - self.assertTrue(filename.startswith(self.uuid), - '%s does not start with uuid' % filename) - else: - self.assertEqual(name, filename) - with open(os.path.join(self.tempdir, filename), 'rb') as fp: - self.assertEqual(self.logs, fp.read()) - - def test_store_on_preprocess_failure(self, hook_mock): - hook_mock.side_effect = Exception('Hook Error') - self.assertRaises(utils.Error, process.process, self.data) - self._check_contents() - - def test_store_on_process_failure(self, hook_mock): - self.process_mock.side_effect = utils.Error('boom') - self.assertRaises(utils.Error, process.process, self.data) - self._check_contents() - - def test_store_on_unexpected_process_failure(self, hook_mock): - self.process_mock.side_effect = RuntimeError('boom') - self.assertRaises(utils.Error, process.process, self.data) - self._check_contents() - - def test_store_on_ramdisk_error(self, hook_mock): - self.data['error'] = 'boom' - self.assertRaises(utils.Error, process.process, self.data) - self._check_contents() - - def test_store_find_node_error(self, hook_mock): - self.cli.node.get.side_effect = exceptions.NotFound('boom') - self.assertRaises(utils.Error, process.process, self.data) - self._check_contents() - - def test_no_error_no_logs(self, hook_mock): - process.process(self.data) - self.assertEqual([], os.listdir(self.tempdir)) - - def test_logs_disabled(self, hook_mock): - CONF.set_override('ramdisk_logs_dir', None, 'processing') - hook_mock.side_effect = Exception('Hook Error') - self.assertRaises(utils.Error, process.process, self.data) - self.assertEqual([], os.listdir(self.tempdir)) - - def test_always_store_logs(self, hook_mock): - CONF.set_override('always_store_ramdisk_logs', True, 'processing') - process.process(self.data) - self._check_contents() - - 
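The TestStoreLogs cases above and below all pin down one contract: ramdisk logs arrive base64-encoded under data['logs'] and, on failure (or unconditionally, when always_store_ramdisk_logs is set), are written into ramdisk_logs_dir under a file name that starts with the node UUID. A minimal sketch of that contract; the helper name and the timestamp suffix are illustrative, not the project's actual code:

    import base64
    import datetime
    import os

    def store_ramdisk_logs(data, logs_dir, uuid):
        # Decode data['logs'] and write it under logs_dir; returns the path.
        encoded = data.get('logs')
        if not encoded or logs_dir is None:
            return None
        if not os.path.exists(logs_dir):
            os.makedirs(logs_dir)  # test_directory_is_created relies on this
        suffix = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')
        path = os.path.join(logs_dir, '%s_%s' % (uuid, suffix))
        with open(path, 'wb') as fp:
            fp.write(base64.b64decode(encoded))
        return path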
@mock.patch.object(process.LOG, 'exception', autospec=True) - def test_failure_to_write(self, log_mock, hook_mock): - CONF.set_override('always_store_ramdisk_logs', True, 'processing') - CONF.set_override('ramdisk_logs_dir', '/I/cannot/write/here', - 'processing') - process.process(self.data) - self.assertEqual([], os.listdir(self.tempdir)) - self.assertTrue(log_mock.called) - - def test_directory_is_created(self, hook_mock): - shutil.rmtree(self.tempdir) - self.data['error'] = 'boom' - self.assertRaises(utils.Error, process.process, self.data) - self._check_contents() - - def test_store_custom_name(self, hook_mock): - CONF.set_override('ramdisk_logs_filename_format', - '{uuid}-{bmc}-{mac}', - 'processing') - self.process_mock.side_effect = utils.Error('boom') - self.assertRaises(utils.Error, process.process, self.data) - self._check_contents(name='%s-%s-%s' % (self.uuid, - self.bmc_address, - self.pxe_mac.replace(':', ''))) - - -class TestProcessNode(BaseTest): - def setUp(self): - super(TestProcessNode, self).setUp() - CONF.set_override('processing_hooks', - '$processing.default_processing_hooks,example', - 'processing') - self.validate_attempts = 5 - self.data['macs'] = self.macs # validate_interfaces hook - self.valid_interfaces['eth3'] = { - 'mac': self.macs[1], 'ip': self.ips[1], 'extra': {}, 'pxe': False - } - self.data['interfaces'] = self.valid_interfaces - self.ports = self.all_ports - - self.cli.node.get_boot_device.side_effect = ( - [RuntimeError()] * self.validate_attempts + [None]) - self.cli.port.create.side_effect = self.ports - self.cli.node.update.return_value = self.node - self.cli.node.list_ports.return_value = [] - - self.useFixture(fixtures.MockPatchObject( - firewall, 'update_filters', autospec=True)) - - self.useFixture(fixtures.MockPatchObject( - eventlet.greenthread, 'sleep', autospec=True)) - self.node_info._state = istate.States.waiting - db.Node(uuid=self.node_info.uuid, state=self.node_info._state, - started_at=self.node_info.started_at, - finished_at=self.node_info.finished_at, - error=self.node_info.error).save(self.session) - - def test_return_includes_uuid(self): - ret_val = process._process_node(self.node_info, self.node, self.data) - self.assertEqual(self.uuid, ret_val.get('uuid')) - - @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') - def test_wrong_provision_state(self, post_hook_mock): - self.node.provision_state = 'active' - - self.assertRaises(utils.Error, process._process_node, - self.node_info, self.node, self.data) - self.assertFalse(post_hook_mock.called) - - @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') - @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) - def test_ok(self, finished_mock, post_hook_mock): - process._process_node(self.node_info, self.node, self.data) - - self.cli.port.create.assert_any_call(node_uuid=self.uuid, - address=self.macs[0], - extra={}, - pxe_enabled=True) - self.cli.port.create.assert_any_call(node_uuid=self.uuid, - address=self.macs[1], - extra={}, - pxe_enabled=False) - self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') - self.assertFalse(self.cli.node.validate.called) - - post_hook_mock.assert_called_once_with(self.data, self.node_info) - finished_mock.assert_called_once_with(mock.ANY) - - def test_port_failed(self): - self.cli.port.create.side_effect = ( - [exceptions.Conflict()] + self.ports[1:]) - - process._process_node(self.node_info, self.node, self.data) - - self.cli.port.create.assert_any_call(node_uuid=self.uuid, - 
address=self.macs[0], - extra={}, pxe_enabled=True) - self.cli.port.create.assert_any_call(node_uuid=self.uuid, - address=self.macs[1], - extra={}, pxe_enabled=False) - - @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) - def test_power_off_failed(self, finished_mock): - self.cli.node.set_power_state.side_effect = RuntimeError('boom') - - process._process_node(self.node_info, self.node, self.data) - - self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') - finished_mock.assert_called_once_with( - mock.ANY, - error='Failed to power off node %s, check its power ' - 'management configuration: boom' % self.uuid - ) - - @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') - @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) - def test_power_off_enroll_state(self, finished_mock, post_hook_mock): - self.node.provision_state = 'enroll' - self.node_info.node = mock.Mock(return_value=self.node) - - process._process_node(self.node_info, self.node, self.data) - - self.assertTrue(post_hook_mock.called) - self.assertTrue(self.cli.node.set_power_state.called) - finished_mock.assert_called_once_with(self.node_info) - - @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) - def test_no_power_off(self, finished_mock): - CONF.set_override('power_off', False, 'processing') - process._process_node(self.node_info, self.node, self.data) - - self.assertFalse(self.cli.node.set_power_state.called) - finished_mock.assert_called_once_with(self.node_info) - - @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) - def test_store_data(self, swift_mock): - CONF.set_override('store_data', 'swift', 'processing') - swift_conn = swift_mock.return_value - name = 'inspector_data-%s' % self.uuid - expected = self.data - - process._process_node(self.node_info, self.node, self.data) - - swift_conn.create_object.assert_called_once_with(name, mock.ANY) - self.assertEqual(expected, - json.loads(swift_conn.create_object.call_args[0][1])) - - @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) - def test_store_data_no_logs(self, swift_mock): - CONF.set_override('store_data', 'swift', 'processing') - swift_conn = swift_mock.return_value - name = 'inspector_data-%s' % self.uuid - self.data['logs'] = 'something' - - process._process_node(self.node_info, self.node, self.data) - - swift_conn.create_object.assert_called_once_with(name, mock.ANY) - self.assertNotIn('logs', - json.loads(swift_conn.create_object.call_args[0][1])) - - @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) - def test_store_data_location(self, swift_mock): - CONF.set_override('store_data', 'swift', 'processing') - CONF.set_override('store_data_location', 'inspector_data_object', - 'processing') - swift_conn = swift_mock.return_value - name = 'inspector_data-%s' % self.uuid - patch = [{'path': '/extra/inspector_data_object', - 'value': name, 'op': 'add'}] - expected = self.data - - process._process_node(self.node_info, self.node, self.data) - - swift_conn.create_object.assert_called_once_with(name, mock.ANY) - self.assertEqual(expected, - json.loads(swift_conn.create_object.call_args[0][1])) - self.cli.node.update.assert_any_call(self.uuid, patch) - - -@mock.patch.object(process, '_reapply', autospec=True) -@mock.patch.object(node_cache, 'get_node', autospec=True) -class TestReapply(BaseTest): - def prepare_mocks(func): - @functools.wraps(func) - def wrapper(self, pop_mock, *args, **kw): - pop_mock.return_value = node_cache.NodeInfo( - uuid=self.node.uuid, 
- started_at=self.started_at) - pop_mock.return_value.finished = mock.Mock() - pop_mock.return_value.acquire_lock = mock.Mock() - return func(self, pop_mock, *args, **kw) - - return wrapper - - def setUp(self): - super(TestReapply, self).setUp() - CONF.set_override('store_data', 'swift', 'processing') - - @prepare_mocks - def test_ok(self, pop_mock, reapply_mock): - process.reapply(self.uuid) - pop_mock.assert_called_once_with(self.uuid, locked=False) - pop_mock.return_value.acquire_lock.assert_called_once_with( - blocking=False - ) - - reapply_mock.assert_called_once_with(pop_mock.return_value) - - @prepare_mocks - def test_locking_failed(self, pop_mock, reapply_mock): - pop_mock.return_value.acquire_lock.return_value = False - self.assertRaisesRegex(utils.Error, - 'Node locked, please, try again later', - process.reapply, self.uuid) - - pop_mock.assert_called_once_with(self.uuid, locked=False) - pop_mock.return_value.acquire_lock.assert_called_once_with( - blocking=False - ) - - -@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') -@mock.patch.object(process.rules, 'apply', autospec=True) -@mock.patch.object(process.swift, 'SwiftAPI', autospec=True) -@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) -@mock.patch.object(node_cache.NodeInfo, 'release_lock', autospec=True) -class TestReapplyNode(BaseTest): - def setUp(self): - super(TestReapplyNode, self).setUp() - CONF.set_override('processing_hooks', - '$processing.default_processing_hooks,example', - 'processing') - CONF.set_override('store_data', 'swift', 'processing') - self.data['macs'] = self.macs - self.ports = self.all_ports - self.node_info = node_cache.NodeInfo(uuid=self.uuid, - started_at=self.started_at, - node=self.node) - self.node_info.invalidate_cache = mock.Mock() - - self.cli.port.create.side_effect = self.ports - self.cli.node.update.return_value = self.node - self.cli.node.list_ports.return_value = [] - self.node_info._state = istate.States.finished - self.commit_fixture = self.useFixture( - fixtures.MockPatchObject(node_cache.NodeInfo, 'commit', - autospec=True)) - db.Node(uuid=self.node_info.uuid, state=self.node_info._state, - started_at=self.node_info.started_at, - finished_at=self.node_info.finished_at, - error=self.node_info.error).save(self.session) - - def call(self): - process._reapply(self.node_info) - # make sure node_info lock is released after a call - self.node_info.release_lock.assert_called_once_with(self.node_info) - - def prepare_mocks(fn): - @functools.wraps(fn) - def wrapper(self, release_mock, finished_mock, swift_mock, - *args, **kw): - finished_mock.side_effect = lambda *a, **kw: \ - release_mock(self.node_info) - swift_client_mock = swift_mock.return_value - fn(self, finished_mock, swift_client_mock, *args, **kw) - return wrapper - - @prepare_mocks - def test_ok(self, finished_mock, swift_mock, apply_mock, - post_hook_mock): - swift_name = 'inspector_data-%s' % self.uuid - swift_mock.get_object.return_value = json.dumps(self.data) - - self.call() - - self.commit_fixture.mock.assert_called_once_with(self.node_info) - - post_hook_mock.assert_called_once_with(mock.ANY, self.node_info) - swift_mock.create_object.assert_called_once_with(swift_name, - mock.ANY) - swifted_data = json.loads(swift_mock.create_object.call_args[0][1]) - - self.node_info.invalidate_cache.assert_called_once_with() - apply_mock.assert_called_once_with(self.node_info, swifted_data) - - # assert no power operations were performed - self.assertFalse(self.cli.node.set_power_state.called) - 
finished_mock.assert_called_once_with(self.node_info) - - # asserting validate_interfaces was called - self.assertEqual(self.pxe_interfaces, swifted_data['interfaces']) - self.assertEqual([self.pxe_mac], swifted_data['macs']) - - # assert ports were created with whatever there was left - # behind validate_interfaces - self.cli.port.create.assert_called_once_with( - node_uuid=self.uuid, - address=swifted_data['macs'][0], - extra={}, - pxe_enabled=True - ) - - @prepare_mocks - def test_get_incomming_data_exception(self, finished_mock, - swift_mock, apply_mock, - post_hook_mock): - exc = Exception('Oops') - expected_error = ('Unexpected exception Exception while fetching ' - 'unprocessed introspection data from Swift: Oops') - swift_mock.get_object.side_effect = exc - self.call() - - self.commit_fixture.mock.assert_called_once_with(self.node_info) - self.assertFalse(swift_mock.create_object.called) - self.assertFalse(apply_mock.called) - self.assertFalse(post_hook_mock.called) - finished_mock.assert_called_once_with(self.node_info, - expected_error) - - @prepare_mocks - def test_prehook_failure(self, finished_mock, swift_mock, - apply_mock, post_hook_mock): - CONF.set_override('processing_hooks', 'example', - 'processing') - plugins_base._HOOKS_MGR = None - - exc = Exception('Failed.') - swift_mock.get_object.return_value = json.dumps(self.data) - - with mock.patch.object(example_plugin.ExampleProcessingHook, - 'before_processing') as before_processing_mock: - before_processing_mock.side_effect = exc - self.call() - - exc_failure = ('Pre-processing failures detected reapplying ' - 'introspection on stored data:\n' - 'Unexpected exception %(exc_class)s during ' - 'preprocessing in hook example: %(error)s' % - {'exc_class': type(exc).__name__, 'error': - exc}) - finished_mock.assert_called_once_with(self.node_info, - error=exc_failure) - # assert _reapply ended having detected the failure - self.assertFalse(swift_mock.create_object.called) - self.assertFalse(apply_mock.called) - self.assertFalse(post_hook_mock.called) - - @prepare_mocks - def test_generic_exception_creating_ports(self, finished_mock, - swift_mock, apply_mock, - post_hook_mock): - swift_mock.get_object.return_value = json.dumps(self.data) - exc = Exception('Oops') - self.cli.port.create.side_effect = exc - self.call() - - finished_mock.assert_called_once_with(self.node_info, error=str(exc)) - self.assertFalse(swift_mock.create_object.called) - self.assertFalse(apply_mock.called) - self.assertFalse(post_hook_mock.called) diff --git a/ironic_inspector/test/unit/test_pxe_filter.py b/ironic_inspector/test/unit/test_pxe_filter.py deleted file mode 100644 index b954325..0000000 --- a/ironic_inspector/test/unit/test_pxe_filter.py +++ /dev/null @@ -1,272 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
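The TestDriverManager cases below pin down how the PXE filter driver is loaded through stevedore. Stripped to its core, the loading call they assert looks roughly like this; the namespace literal here is a stand-in for the real pxe_filter._STEVEDORE_DRIVER_NAMESPACE:

    from stevedore import driver as stevedore_driver

    def load_pxe_filter_driver(name='noop'):
        # Resolve the named entry point and instantiate it on load.
        manager = stevedore_driver.DriverManager(
            'ironic_inspector.pxe_filter',  # assumed namespace string
            name=name,
            invoke_on_load=True)
        return manager.driver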
- -import fixtures -import mock -import six -import stevedore - -from automaton import exceptions as automaton_errors -from eventlet import semaphore -from futurist import periodics -from oslo_config import cfg - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector.pxe_filter import base as pxe_filter -from ironic_inspector.pxe_filter import interface -from ironic_inspector.test import base as test_base - -CONF = cfg.CONF - - -class TestDriverManager(test_base.BaseTest): - def setUp(self): - super(TestDriverManager, self).setUp() - pxe_filter._DRIVER_MANAGER = None - stevedore_driver_fixture = self.useFixture(fixtures.MockPatchObject( - stevedore.driver, 'DriverManager', autospec=True)) - self.stevedore_driver_mock = stevedore_driver_fixture.mock - - def test_default(self): - driver_manager = pxe_filter._driver_manager() - self.stevedore_driver_mock.assert_called_once_with( - pxe_filter._STEVEDORE_DRIVER_NAMESPACE, - name='noop', - invoke_on_load=True - ) - self.assertIsNotNone(driver_manager) - self.assertIs(pxe_filter._DRIVER_MANAGER, driver_manager) - - def test_pxe_filter_name(self): - CONF.set_override('driver', 'foo', 'pxe_filter') - driver_manager = pxe_filter._driver_manager() - self.stevedore_driver_mock.assert_called_once_with( - pxe_filter._STEVEDORE_DRIVER_NAMESPACE, - 'foo', - invoke_on_load=True - ) - self.assertIsNotNone(driver_manager) - self.assertIs(pxe_filter._DRIVER_MANAGER, driver_manager) - - def test_default_existing_driver_manager(self): - pxe_filter._DRIVER_MANAGER = True - driver_manager = pxe_filter._driver_manager() - self.stevedore_driver_mock.assert_not_called() - self.assertIs(pxe_filter._DRIVER_MANAGER, driver_manager) - - -class TestDriverManagerLoading(test_base.BaseTest): - def setUp(self): - super(TestDriverManagerLoading, self).setUp() - pxe_filter._DRIVER_MANAGER = None - - @mock.patch.object(pxe_filter, 'NoopFilter', autospec=True) - def test_pxe_filter_driver_loads(self, noop_driver_cls): - CONF.set_override('driver', 'noop', 'pxe_filter') - driver_manager = pxe_filter._driver_manager() - noop_driver_cls.assert_called_once_with() - self.assertIs(noop_driver_cls.return_value, driver_manager.driver) - - def test_invalid_filter_driver(self): - CONF.set_override('driver', 'foo', 'pxe_filter') - six.assertRaisesRegex(self, stevedore.exception.NoMatches, 'foo', - pxe_filter._driver_manager) - self.assertIsNone(pxe_filter._DRIVER_MANAGER) - - -class BaseFilterBaseTest(test_base.BaseTest): - def setUp(self): - super(BaseFilterBaseTest, self).setUp() - self.mock_lock = mock.MagicMock(spec=semaphore.BoundedSemaphore) - self.mock_bounded_semaphore = self.useFixture( - fixtures.MockPatchObject(semaphore, 'BoundedSemaphore')).mock - self.mock_bounded_semaphore.return_value = self.mock_lock - self.driver = pxe_filter.NoopFilter() - - def assert_driver_is_locked(self): - """Assert the driver is currently locked and wasn't locked before.""" - self.driver.lock.__enter__.assert_called_once_with() - self.driver.lock.__exit__.assert_not_called() - - def assert_driver_was_locked_once(self): - """Assert the driver was locked exactly once before.""" - self.driver.lock.__enter__.assert_called_once_with() - self.driver.lock.__exit__.assert_called_once_with(None, None, None) - - def assert_driver_was_not_locked(self): - """Assert the driver was not locked""" - self.mock_lock.__enter__.assert_not_called() - self.mock_lock.__exit__.assert_not_called() - - -class TestLockedDriverEvent(BaseFilterBaseTest): - def setUp(self): - 
super(TestLockedDriverEvent, self).setUp() - self.mock_fsm_reset_on_error = self.useFixture( - fixtures.MockPatchObject(self.driver, 'fsm_reset_on_error')).mock - self.expected_args = (None,) - self.expected_kwargs = {'foo': None} - self.mock_fsm = self.useFixture( - fixtures.MockPatchObject(self.driver, 'fsm')).mock - (self.driver.fsm_reset_on_error.return_value. - __enter__.return_value) = self.mock_fsm - - def test_locked_driver_event(self): - event = 'foo' - - @pxe_filter.locked_driver_event(event) - def fun(driver, *args, **kwargs): - self.assertIs(self.driver, driver) - self.assertEqual(self.expected_args, args) - self.assertEqual(self.expected_kwargs, kwargs) - self.assert_driver_is_locked() - - self.assert_driver_was_not_locked() - fun(self.driver, *self.expected_args, **self.expected_kwargs) - - self.mock_fsm_reset_on_error.assert_called_once_with() - self.mock_fsm.process_event.assert_called_once_with(event) - self.assert_driver_was_locked_once() - - -class TestBaseFilterFsmPrecautions(BaseFilterBaseTest): - def setUp(self): - super(TestBaseFilterFsmPrecautions, self).setUp() - self.mock_fsm = self.useFixture( - fixtures.MockPatchObject(pxe_filter.NoopFilter, 'fsm')).mock - # NOTE(milan): overriding driver so that the patch ^ is applied - self.mock_bounded_semaphore.reset_mock() - self.driver = pxe_filter.NoopFilter() - self.mock_reset = self.useFixture( - fixtures.MockPatchObject(self.driver, 'reset')).mock - - def test___init__(self): - self.assertIs(self.mock_lock, self.driver.lock) - self.mock_bounded_semaphore.assert_called_once_with() - self.assertIs(self.mock_fsm, self.driver.fsm) - self.mock_fsm.initialize.assert_called_once_with( - start_state=pxe_filter.States.uninitialized) - - def test_fsm_reset_on_error(self): - with self.driver.fsm_reset_on_error() as fsm: - self.assertIs(self.mock_fsm, fsm) - - self.mock_reset.assert_not_called() - - def test_fsm_automaton_error(self): - - def fun(): - with self.driver.fsm_reset_on_error(): - raise automaton_errors.NotFound('Oops!') - - self.assertRaisesRegex(pxe_filter.InvalidFilterDriverState, - '.*NoopFilter.*Oops!', fun) - self.mock_reset.assert_not_called() - - def test_fsm_reset_on_error_ctx_custom_error(self): - - class MyError(Exception): - pass - - def fun(): - with self.driver.fsm_reset_on_error(): - raise MyError('Oops!') - - self.assertRaisesRegex(MyError, 'Oops!', fun) - self.mock_reset.assert_called_once_with() - - -class TestBaseFilterInterface(BaseFilterBaseTest): - def setUp(self): - super(TestBaseFilterInterface, self).setUp() - self.mock_get_client = self.useFixture( - fixtures.MockPatchObject(ir_utils, 'get_client')).mock - self.mock_ironic = mock.Mock() - self.mock_get_client.return_value = self.mock_ironic - self.mock_periodic = self.useFixture( - fixtures.MockPatchObject(periodics, 'periodic')).mock - self.mock_reset = self.useFixture( - fixtures.MockPatchObject(self.driver, 'reset')).mock - self.mock_log = self.useFixture( - fixtures.MockPatchObject(pxe_filter, 'LOG')).mock - self.driver.fsm_reset_on_error = self.useFixture( - fixtures.MockPatchObject(self.driver, 'fsm_reset_on_error')).mock - - def test_init_filter(self): - self.driver.init_filter() - - self.mock_log.debug.assert_called_once_with( - 'Initializing the PXE filter driver %s', self.driver) - self.mock_reset.assert_not_called() - - def test_sync(self): - self.driver.sync(self.mock_ironic) - - self.mock_log.debug.assert_called_once_with( - 'Syncing the PXE filter driver %s', self.driver) - self.mock_reset.assert_not_called() - - def 
test_tear_down_filter(self): - self.assert_driver_was_not_locked() - self.driver.tear_down_filter() - - self.assert_driver_was_locked_once() - self.mock_reset.assert_called_once_with() - - def test_get_periodic_sync_task(self): - sync_mock = self.useFixture( - fixtures.MockPatchObject(self.driver, 'sync')).mock - self.driver.get_periodic_sync_task() - self.mock_periodic.assert_called_once_with(spacing=15, enabled=True) - self.mock_periodic.return_value.call_args[0][0]() - sync_mock.assert_called_once_with(self.mock_get_client.return_value) - - def test_get_periodic_sync_task_disabled(self): - CONF.set_override('sync_period', 0, 'pxe_filter') - self.driver.get_periodic_sync_task() - self.mock_periodic.assert_called_once_with(spacing=float('inf'), - enabled=False) - - def test_get_periodic_sync_task_custom_spacing(self): - CONF.set_override('sync_period', 4224, 'pxe_filter') - self.driver.get_periodic_sync_task() - self.mock_periodic.assert_called_once_with(spacing=4224, enabled=True) - - -class TestDriverReset(BaseFilterBaseTest): - def setUp(self): - super(TestDriverReset, self).setUp() - self.mock_fsm = self.useFixture( - fixtures.MockPatchObject(self.driver, 'fsm')).mock - - def test_reset(self): - self.driver.reset() - - self.assert_driver_was_not_locked() - self.mock_fsm.process_event.assert_called_once_with( - pxe_filter.Events.reset) - - -class TestDriver(test_base.BaseTest): - def setUp(self): - super(TestDriver, self).setUp() - self.mock_driver = mock.Mock(spec=interface.FilterDriver) - self.mock__driver_manager = self.useFixture( - fixtures.MockPatchObject(pxe_filter, '_driver_manager')).mock - self.mock__driver_manager.return_value.driver = self.mock_driver - - def test_driver(self): - ret = pxe_filter.driver() - - self.assertIs(self.mock_driver, ret) - self.mock__driver_manager.assert_called_once_with() diff --git a/ironic_inspector/test/unit/test_rules.py b/ironic_inspector/test/unit/test_rules.py deleted file mode 100644 index 58efc1f..0000000 --- a/ironic_inspector/test/unit/test_rules.py +++ /dev/null @@ -1,477 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
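The fsm_reset_on_error tests above (TestBaseFilterFsmPrecautions) encode a small contract: an automaton NotFound raised inside the context manager is converted into InvalidFilterDriverState without a reset, while any other exception triggers driver.reset() and propagates. A simplified sketch of a context manager honouring that contract, with the exception types as stand-ins for the automaton ones:

    import contextlib

    class InvalidFilterDriverState(RuntimeError):
        pass

    class SketchedFilter(object):
        def reset(self):
            pass  # the real driver re-initializes its state machine here

        @contextlib.contextmanager
        def fsm_reset_on_error(self):
            try:
                yield self
            except LookupError as exc:
                # stand-in for automaton_errors.NotFound: the driver is in
                # an invalid state; surface that without resetting
                raise InvalidFilterDriverState('%s: %s' % (self, exc))
            except Exception:
                self.reset()  # reset on any unexpected failure, then re-raise
                raise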
- -"""Tests for introspection rules.""" -import mock -from oslo_utils import uuidutils - -from ironic_inspector import db -from ironic_inspector.plugins import base as plugins_base -from ironic_inspector import rules -from ironic_inspector.test import base as test_base -from ironic_inspector import utils - - -class BaseTest(test_base.NodeTest): - def setUp(self): - super(BaseTest, self).setUp() - self.uuid = uuidutils.generate_uuid() - self.conditions_json = [ - {'op': 'eq', 'field': 'memory_mb', 'value': 1024}, - {'op': 'eq', 'field': 'local_gb', 'value': 60}, - ] - self.actions_json = [ - {'action': 'fail', 'message': 'boom!'} - ] - - self.data = { - 'memory_mb': 1024, - 'local_gb': 42, - } - - @staticmethod - def condition_defaults(condition): - condition = condition.copy() - condition.setdefault('multiple', 'any') - condition.setdefault('invert', False) - return condition - - -class TestCreateRule(BaseTest): - def test_only_actions(self): - rule = rules.create([], self.actions_json) - rule_json = rule.as_dict() - - self.assertTrue(rule_json.pop('uuid')) - self.assertEqual({'description': None, - 'conditions': [], - 'actions': self.actions_json}, - rule_json) - - def test_duplicate_uuid(self): - rules.create([], self.actions_json, uuid=self.uuid) - self.assertRaisesRegex(utils.Error, 'already exists', - rules.create, [], self.actions_json, - uuid=self.uuid) - - def test_with_conditions(self): - self.conditions_json.extend([ - # multiple present&default, invert absent - {'op': 'eq', 'field': 'local_gb', 'value': 60, 'multiple': 'any'}, - # multiple absent, invert present&default - {'op': 'eq', 'field': 'local_gb', 'value': 60, 'invert': False}, - # multiple&invert present&non-default - {'op': 'eq', 'field': 'memory_mb', 'value': 1024, - 'multiple': 'all', 'invert': True}, - ]) - rule = rules.create(self.conditions_json, self.actions_json) - rule_json = rule.as_dict() - - self.assertTrue(rule_json.pop('uuid')) - self.assertEqual({'description': None, - 'conditions': [BaseTest.condition_defaults(cond) - for cond in self.conditions_json], - 'actions': self.actions_json}, - rule_json) - - def test_invalid_condition(self): - del self.conditions_json[0]['op'] - - self.assertRaisesRegex(utils.Error, - 'Validation failed for conditions', - rules.create, - self.conditions_json, self.actions_json) - - self.conditions_json[0]['op'] = 'foobar' - - self.assertRaisesRegex(utils.Error, - 'Validation failed for conditions', - rules.create, - self.conditions_json, self.actions_json) - - def test_invalid_condition_field(self): - self.conditions_json[0]['field'] = '!*!' 
- - self.assertRaisesRegex(utils.Error, - 'Unable to parse field JSON path', - rules.create, - self.conditions_json, self.actions_json) - - def test_invalid_condition_parameters(self): - self.conditions_json[0]['foo'] = 'bar' - - self.assertRaisesRegex(utils.Error, - 'Invalid parameters for operator', - rules.create, - self.conditions_json, self.actions_json) - - def test_no_actions(self): - self.assertRaisesRegex(utils.Error, - 'Validation failed for actions', - rules.create, - self.conditions_json, []) - - def test_invalid_action(self): - del self.actions_json[0]['action'] - - self.assertRaisesRegex(utils.Error, - 'Validation failed for actions', - rules.create, - self.conditions_json, self.actions_json) - - self.actions_json[0]['action'] = 'foobar' - - self.assertRaisesRegex(utils.Error, - 'Validation failed for actions', - rules.create, - self.conditions_json, self.actions_json) - - def test_invalid_action_parameters(self): - self.actions_json[0]['foo'] = 'bar' - - self.assertRaisesRegex(utils.Error, - 'Invalid parameters for action', - rules.create, - self.conditions_json, self.actions_json) - - -class TestGetRule(BaseTest): - def setUp(self): - super(TestGetRule, self).setUp() - rules.create(self.conditions_json, self.actions_json, uuid=self.uuid) - - def test_get(self): - rule_json = rules.get(self.uuid).as_dict() - - self.assertTrue(rule_json.pop('uuid')) - self.assertEqual({'description': None, - 'conditions': [BaseTest.condition_defaults(cond) - for cond in self.conditions_json], - 'actions': self.actions_json}, - rule_json) - - def test_not_found(self): - self.assertRaises(utils.Error, rules.get, 'foobar') - - def test_get_all(self): - uuid2 = uuidutils.generate_uuid() - rules.create(self.conditions_json, self.actions_json, uuid=uuid2) - self.assertEqual({self.uuid, uuid2}, - {r.as_dict()['uuid'] for r in rules.get_all()}) - - -class TestDeleteRule(BaseTest): - def setUp(self): - super(TestDeleteRule, self).setUp() - self.uuid2 = uuidutils.generate_uuid() - rules.create(self.conditions_json, self.actions_json, uuid=self.uuid) - rules.create(self.conditions_json, self.actions_json, uuid=self.uuid2) - - def test_delete(self): - rules.delete(self.uuid) - - self.assertEqual([(self.uuid2,)], db.model_query(db.Rule.uuid).all()) - self.assertFalse(db.model_query(db.RuleCondition) - .filter_by(rule=self.uuid).all()) - self.assertFalse(db.model_query(db.RuleAction) - .filter_by(rule=self.uuid).all()) - - def test_delete_non_existing(self): - self.assertRaises(utils.Error, rules.delete, 'foo') - - def test_delete_all(self): - rules.delete_all() - - self.assertFalse(db.model_query(db.Rule).all()) - self.assertFalse(db.model_query(db.RuleCondition).all()) - self.assertFalse(db.model_query(db.RuleAction).all()) - - -@mock.patch.object(plugins_base, 'rule_conditions_manager', autospec=True) -class TestCheckConditions(BaseTest): - def setUp(self): - super(TestCheckConditions, self).setUp() - - self.rule = rules.create(conditions_json=self.conditions_json, - actions_json=self.actions_json) - self.cond_mock = mock.Mock(spec=plugins_base.RuleConditionPlugin) - self.cond_mock.ALLOW_NONE = False - self.ext_mock = mock.Mock(spec=['obj'], obj=self.cond_mock) - - def test_ok(self, mock_ext_mgr): - mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock - self.cond_mock.check.return_value = True - - res = self.rule.check_conditions(self.node_info, self.data) - - self.cond_mock.check.assert_any_call(self.node_info, 1024, - {'value': 1024}) - 
self.cond_mock.check.assert_any_call(self.node_info, 42, - {'value': 60}) - self.assertEqual(len(self.conditions_json), - self.cond_mock.check.call_count) - self.assertTrue(res) - - def test_invert(self, mock_ext_mgr): - self.conditions_json = [ - {'op': 'eq', 'field': 'memory_mb', 'value': 42, - 'invert': True}, - ] - self.rule = rules.create(conditions_json=self.conditions_json, - actions_json=self.actions_json) - - mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock - self.cond_mock.check.return_value = False - - res = self.rule.check_conditions(self.node_info, self.data) - - self.cond_mock.check.assert_called_once_with(self.node_info, 1024, - {'value': 42}) - self.assertTrue(res) - - def test_no_field(self, mock_ext_mgr): - mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock - self.cond_mock.check.return_value = True - del self.data['local_gb'] - - res = self.rule.check_conditions(self.node_info, self.data) - - self.cond_mock.check.assert_called_once_with(self.node_info, 1024, - {'value': 1024}) - self.assertFalse(res) - - def test_no_field_none_allowed(self, mock_ext_mgr): - mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock - self.cond_mock.ALLOW_NONE = True - self.cond_mock.check.return_value = True - del self.data['local_gb'] - - res = self.rule.check_conditions(self.node_info, self.data) - - self.cond_mock.check.assert_any_call(self.node_info, 1024, - {'value': 1024}) - self.cond_mock.check.assert_any_call(self.node_info, None, - {'value': 60}) - self.assertEqual(len(self.conditions_json), - self.cond_mock.check.call_count) - self.assertTrue(res) - - def test_fail(self, mock_ext_mgr): - mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock - self.cond_mock.check.return_value = False - - res = self.rule.check_conditions(self.node_info, self.data) - - self.cond_mock.check.assert_called_once_with(self.node_info, 1024, - {'value': 1024}) - self.assertFalse(res) - - -class TestCheckConditionsMultiple(BaseTest): - def setUp(self): - super(TestCheckConditionsMultiple, self).setUp() - - self.conditions_json = [ - {'op': 'eq', 'field': 'interfaces[*].ip', 'value': '1.2.3.4'} - ] - - def _build_data(self, ips): - return { - 'interfaces': [ - {'ip': ip} for ip in ips - ] - } - - def test_default(self): - rule = rules.create(conditions_json=self.conditions_json, - actions_json=self.actions_json) - data_set = [ - (['1.1.1.1', '1.2.3.4', '1.3.2.2'], True), - (['1.2.3.4'], True), - (['1.1.1.1', '1.3.2.2'], False), - (['1.2.3.4', '1.3.2.2'], True), - ] - for ips, result in data_set: - data = self._build_data(ips) - self.assertIs(result, rule.check_conditions(self.node_info, data), - data) - - def test_any(self): - self.conditions_json[0]['multiple'] = 'any' - rule = rules.create(conditions_json=self.conditions_json, - actions_json=self.actions_json) - data_set = [ - (['1.1.1.1', '1.2.3.4', '1.3.2.2'], True), - (['1.2.3.4'], True), - (['1.1.1.1', '1.3.2.2'], False), - (['1.2.3.4', '1.3.2.2'], True), - ] - for ips, result in data_set: - data = self._build_data(ips) - self.assertIs(result, rule.check_conditions(self.node_info, data), - data) - - def test_all(self): - self.conditions_json[0]['multiple'] = 'all' - rule = rules.create(conditions_json=self.conditions_json, - actions_json=self.actions_json) - data_set = [ - (['1.1.1.1', '1.2.3.4', '1.3.2.2'], False), - (['1.2.3.4'], True), - (['1.1.1.1', '1.3.2.2'], False), - (['1.2.3.4', '1.3.2.2'], False), - ] - for ips, result in data_set: - data = self._build_data(ips) - self.assertIs(result, 
rule.check_conditions(self.node_info, data), - data) - - def test_first(self): - self.conditions_json[0]['multiple'] = 'first' - rule = rules.create(conditions_json=self.conditions_json, - actions_json=self.actions_json) - data_set = [ - (['1.1.1.1', '1.2.3.4', '1.3.2.2'], False), - (['1.2.3.4'], True), - (['1.1.1.1', '1.3.2.2'], False), - (['1.2.3.4', '1.3.2.2'], True), - ] - for ips, result in data_set: - data = self._build_data(ips) - self.assertIs(result, rule.check_conditions(self.node_info, data), - data) - - -class TestCheckConditionsSchemePath(BaseTest): - def test_conditions_data_path(self): - self.data_set = [ - ([{'op': 'eq', 'field': 'data://memory_mb', 'value': 1024}], - True), - ([{'op': 'gt', 'field': 'data://local_gb', 'value': 42}], - False) - ] - - for condition, res in self.data_set: - rule = rules.create(conditions_json=condition, - actions_json=self.actions_json) - self.assertIs(res, - rule.check_conditions(self.node_info, self.data), - self.data) - - def test_conditions_node_path(self): - self.node_set = [ - ([{'op': 'eq', 'field': 'node://driver_info.ipmi_address', - 'value': self.bmc_address}], - True), - ([{'op': 'eq', 'field': 'node://driver', 'value': 'fake'}], - False) - ] - - for condition, res in self.node_set: - rule = rules.create(conditions_json=condition, - actions_json=self.actions_json) - self.assertIs(res, - rule.check_conditions(self.node_info, self.data)) - - -@mock.patch.object(plugins_base, 'rule_actions_manager', autospec=True) -class TestApplyActions(BaseTest): - def setUp(self): - super(TestApplyActions, self).setUp() - self.actions_json.append({'action': 'example'}) - - self.rule = rules.create(conditions_json=self.conditions_json, - actions_json=self.actions_json) - self.act_mock = mock.Mock(spec=plugins_base.RuleActionPlugin) - self.act_mock.FORMATTED_PARAMS = ['value'] - self.ext_mock = mock.Mock(spec=['obj'], obj=self.act_mock) - - def test_apply(self, mock_ext_mgr): - mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock - - self.rule.apply_actions(self.node_info, data=self.data) - - self.act_mock.apply.assert_any_call(self.node_info, - {'message': 'boom!'}) - self.act_mock.apply.assert_any_call(self.node_info, {}) - self.assertEqual(len(self.actions_json), - self.act_mock.apply.call_count) - - def test_apply_data_format_value(self, mock_ext_mgr): - self.rule = rules.create(actions_json=[ - {'action': 'set-attribute', - 'path': '/driver_info/ipmi_address', - 'value': '{data[memory_mb]}'}], - conditions_json=self.conditions_json - ) - mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock - - self.rule.apply_actions(self.node_info, data=self.data) - - self.assertEqual(1, self.act_mock.apply.call_count) - - def test_apply_data_format_value_fail(self, mock_ext_mgr): - self.rule = rules.create( - actions_json=[ - {'action': 'set-attribute', - 'path': '/driver_info/ipmi_address', - 'value': '{data[inventory][bmc_address]}'}], - conditions_json=self.conditions_json - ) - mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock - - self.assertRaises(utils.Error, self.rule.apply_actions, - self.node_info, data=self.data) - - def test_apply_data_non_format_value(self, mock_ext_mgr): - self.rule = rules.create(actions_json=[ - {'action': 'set-attribute', - 'path': '/driver_info/ipmi_address', - 'value': 1}], - conditions_json=self.conditions_json - ) - mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock - - self.rule.apply_actions(self.node_info, data=self.data) - - self.assertEqual(1, 
self.act_mock.apply.call_count) - - -@mock.patch.object(rules, 'get_all', autospec=True) -class TestApply(BaseTest): - def setUp(self): - super(TestApply, self).setUp() - self.rules = [mock.Mock(spec=rules.IntrospectionRule), - mock.Mock(spec=rules.IntrospectionRule)] - - def test_no_rules(self, mock_get_all): - mock_get_all.return_value = [] - - rules.apply(self.node_info, self.data) - - def test_apply(self, mock_get_all): - mock_get_all.return_value = self.rules - for idx, rule in enumerate(self.rules): - rule.check_conditions.return_value = not bool(idx) - - rules.apply(self.node_info, self.data) - - for idx, rule in enumerate(self.rules): - rule.check_conditions.assert_called_once_with(self.node_info, - self.data) - if rule.check_conditions.return_value: - rule.apply_actions.assert_called_once_with( - self.node_info, data=self.data) - else: - self.assertFalse(rule.apply_actions.called) diff --git a/ironic_inspector/test/unit/test_swift.py b/ironic_inspector/test/unit/test_swift.py deleted file mode 100644 index 03bc13e..0000000 --- a/ironic_inspector/test/unit/test_swift.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Mostly copied from ironic/tests/test_swift.py - -try: - from unittest import mock -except ImportError: - import mock -from swiftclient import client as swift_client -from swiftclient import exceptions as swift_exception - -from ironic_inspector.common import keystone -from ironic_inspector.common import swift -from ironic_inspector.test import base as test_base -from ironic_inspector import utils - - -class BaseTest(test_base.NodeTest): - def setUp(self): - super(BaseTest, self).setUp() - self.all_macs = self.macs + ['DE:AD:BE:EF:DE:AD'] - self.pxe_mac = self.macs[1] - self.data = { - 'ipmi_address': self.bmc_address, - 'cpus': 2, - 'cpu_arch': 'x86_64', - 'memory_mb': 1024, - 'local_gb': 20, - 'interfaces': { - 'em1': {'mac': self.macs[0], 'ip': '1.2.0.1'}, - 'em2': {'mac': self.macs[1], 'ip': '1.2.0.2'}, - 'em3': {'mac': self.all_macs[2]}, - }, - 'boot_interface': '01-' + self.pxe_mac.replace(':', '-'), - } - - -@mock.patch.object(keystone, 'register_auth_opts') -@mock.patch.object(keystone, 'get_session') -@mock.patch.object(swift_client, 'Connection', autospec=True) -class SwiftTestCase(BaseTest): - - def setUp(self): - super(SwiftTestCase, self).setUp() - swift.reset_swift_session() - self.swift_exception = swift_exception.ClientException('', '') - self.cfg.config(group='swift', - os_service_type='object-store', - os_endpoint_type='internalURL', - os_region='somewhere', - max_retries=2) - self.addCleanup(swift.reset_swift_session) - - def test___init__(self, connection_mock, load_mock, opts_mock): - swift.SwiftAPI() - connection_mock.assert_called_once_with( - session=load_mock.return_value) - - def test_create_object(self, connection_mock, load_mock, opts_mock): - swiftapi = swift.SwiftAPI() - connection_obj_mock = connection_mock.return_value - - 
connection_obj_mock.put_object.return_value = 'object-uuid' - - object_uuid = swiftapi.create_object('object', 'some-string-data') - - connection_obj_mock.put_container.assert_called_once_with('ironic-' - 'inspector') - connection_obj_mock.put_object.assert_called_once_with( - 'ironic-inspector', 'object', 'some-string-data', headers=None) - self.assertEqual('object-uuid', object_uuid) - - def test_create_object_create_container_fails(self, connection_mock, - load_mock, opts_mock): - swiftapi = swift.SwiftAPI() - connection_obj_mock = connection_mock.return_value - connection_obj_mock.put_container.side_effect = self.swift_exception - self.assertRaises(utils.Error, swiftapi.create_object, 'object', - 'some-string-data') - connection_obj_mock.put_container.assert_called_once_with('ironic-' - 'inspector') - self.assertFalse(connection_obj_mock.put_object.called) - - def test_create_object_put_object_fails(self, connection_mock, load_mock, - opts_mock): - swiftapi = swift.SwiftAPI() - connection_obj_mock = connection_mock.return_value - connection_obj_mock.put_object.side_effect = self.swift_exception - self.assertRaises(utils.Error, swiftapi.create_object, 'object', - 'some-string-data') - connection_obj_mock.put_container.assert_called_once_with('ironic-' - 'inspector') - connection_obj_mock.put_object.assert_called_once_with( - 'ironic-inspector', 'object', 'some-string-data', headers=None) - - def test_get_object(self, connection_mock, load_mock, opts_mock): - swiftapi = swift.SwiftAPI() - connection_obj_mock = connection_mock.return_value - - expected_obj = self.data - connection_obj_mock.get_object.return_value = ('headers', expected_obj) - - swift_obj = swiftapi.get_object('object') - - connection_obj_mock.get_object.assert_called_once_with( - 'ironic-inspector', 'object') - self.assertEqual(expected_obj, swift_obj) - - def test_get_object_fails(self, connection_mock, load_mock, opts_mock): - swiftapi = swift.SwiftAPI() - connection_obj_mock = connection_mock.return_value - connection_obj_mock.get_object.side_effect = self.swift_exception - self.assertRaises(utils.Error, swiftapi.get_object, - 'object') - connection_obj_mock.get_object.assert_called_once_with( - 'ironic-inspector', 'object') diff --git a/ironic_inspector/test/unit/test_utils.py b/ironic_inspector/test/unit/test_utils.py deleted file mode 100644 index 6e8fab3..0000000 --- a/ironic_inspector/test/unit/test_utils.py +++ /dev/null @@ -1,164 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
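The test_create_object* cases here all assert the same wrapper shape: make sure the container exists, perform the operation, and translate swiftclient's ClientException into the service's own Error. A reduced sketch of that shape; the free function and Error class are illustrative stand-ins for SwiftAPI:

    from swiftclient import client as swift_client
    from swiftclient import exceptions as swift_exception

    class Error(RuntimeError):
        pass

    def create_object(connection, name, data, container='ironic-inspector'):
        # connection is a swift_client.Connection bound to a keystone session.
        # PUT of a container is idempotent, so it is safe to do every time.
        try:
            connection.put_container(container)
            return connection.put_object(container, name, data, headers=None)
        except swift_exception.ClientException as exc:
            raise Error('Swift operation failed: %s' % exc)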
-
-from keystonemiddleware import auth_token
-from oslo_config import cfg
-
-from ironic_inspector import node_cache
-from ironic_inspector.test import base
-from ironic_inspector import utils
-from ironicclient.v1 import node
-
-try:
-    from unittest import mock
-except ImportError:
-    import mock
-
-CONF = cfg.CONF
-
-
-class TestCheckAuth(base.BaseTest):
-    def setUp(self):
-        super(TestCheckAuth, self).setUp()
-        CONF.set_override('auth_strategy', 'keystone')
-
-    @mock.patch.object(auth_token, 'AuthProtocol')
-    def test_middleware(self, mock_auth):
-        CONF.set_override('admin_user', 'admin', 'keystone_authtoken')
-        CONF.set_override('admin_tenant_name', 'admin', 'keystone_authtoken')
-        CONF.set_override('admin_password', 'password', 'keystone_authtoken')
-        CONF.set_override('auth_uri', 'http://127.0.0.1:5000',
-                          'keystone_authtoken')
-        CONF.set_override('identity_uri', 'http://127.0.0.1:35357',
-                          'keystone_authtoken')
-
-        app = mock.Mock(wsgi_app=mock.sentinel.app)
-        utils.add_auth_middleware(app)
-
-        call_args = mock_auth.call_args_list[0]
-        args = call_args[0]
-        self.assertEqual(mock.sentinel.app, args[0])
-        args1 = args[1]
-
-        self.assertEqual('admin', args1['admin_user'])
-        self.assertEqual('admin', args1['admin_tenant_name'])
-        self.assertEqual('password', args1['admin_password'])
-        self.assertTrue(args1['delay_auth_decision'])
-        self.assertEqual('http://127.0.0.1:5000', args1['auth_uri'])
-        self.assertEqual('http://127.0.0.1:35357', args1['identity_uri'])
-
-    def test_ok(self):
-        request = mock.Mock(headers={'X-Identity-Status': 'Confirmed',
-                                     'X-Roles': 'admin,member'})
-        utils.check_auth(request)
-
-    def test_invalid(self):
-        request = mock.Mock(headers={'X-Identity-Status': 'Invalid'})
-        self.assertRaises(utils.Error, utils.check_auth, request)
-
-    def test_not_admin(self):
-        request = mock.Mock(headers={'X-Identity-Status': 'Confirmed',
-                                     'X-Roles': 'member'})
-        self.assertRaises(utils.Error, utils.check_auth, request)
-
-    def test_disabled(self):
-        CONF.set_override('auth_strategy', 'noauth')
-        request = mock.Mock(headers={'X-Identity-Status': 'Invalid'})
-        utils.check_auth(request)
-
-
-class TestProcessingLogger(base.BaseTest):
-    def test_prefix_no_info(self):
-        self.assertEqual('[unidentified node]',
-                         utils.processing_logger_prefix())
-
-    def test_prefix_only_uuid(self):
-        node_info = node.Node(mock.Mock(), dict(uuid='NNN'))
-        self.assertEqual('[node: NNN]',
-                         utils.processing_logger_prefix(node_info=node_info))
-
-    def test_prefix_only_bmc(self):
-        data = {'inventory': {'bmc_address': '1.2.3.4'}}
-        self.assertEqual('[node: BMC 1.2.3.4]',
-                         utils.processing_logger_prefix(data=data))
-
-    def test_prefix_only_mac(self):
-        data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff'}
-        self.assertEqual('[node: MAC aa:bb:cc:dd:ee:ff]',
-                         utils.processing_logger_prefix(data=data))
-
-    def test_prefix_everything(self):
-        node_info = node.Node(mock.Mock(), dict(uuid='NNN'))
-        data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff',
-                'inventory': {'bmc_address': '1.2.3.4'}}
-        self.assertEqual('[node: NNN MAC aa:bb:cc:dd:ee:ff BMC 1.2.3.4]',
-                         utils.processing_logger_prefix(node_info=node_info,
-                                                        data=data))
-
-    def test_prefix_uuid_not_str(self):
-        node_info = node.Node(mock.Mock(), dict(uuid=None))
-        self.assertEqual('[node: None]',
-                         utils.processing_logger_prefix(node_info=node_info))
-
-    def test_prefix_NodeInfo_instance(self):
-        node_info = node_cache.NodeInfo('NNN')
-        self.assertEqual('[node: NNN]',
-                         utils.processing_logger_prefix(node_info=node_info))
-
-    def test_prefix_NodeInfo_instance_with_state(self):
-        node_info = node_cache.NodeInfo('NNN', state='foobar')
-        self.assertEqual('[node: NNN state foobar]',
-                         utils.processing_logger_prefix(node_info=node_info))
-
-    def test_adapter_no_bmc(self):
-        CONF.set_override('log_bmc_address', False, 'processing')
-        node_info = node.Node(mock.Mock(), dict(uuid='NNN'))
-        data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff',
-                'inventory': {'bmc_address': '1.2.3.4'}}
-        logger = utils.getProcessingLogger(__name__)
-        msg, _kwargs = logger.process('foo', {'node_info': node_info,
-                                              'data': data})
-        self.assertEqual(
-            '[node: NNN MAC aa:bb:cc:dd:ee:ff] foo',
-            msg)
-
-    def test_adapter_with_bmc(self):
-        node_info = node.Node(mock.Mock(), dict(uuid='NNN'))
-        data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff',
-                'inventory': {'bmc_address': '1.2.3.4'}}
-        logger = utils.getProcessingLogger(__name__)
-        msg, _kwargs = logger.process('foo', {'node_info': node_info,
-                                              'data': data})
-        self.assertEqual(
-            '[node: NNN MAC aa:bb:cc:dd:ee:ff BMC 1.2.3.4] foo',
-            msg)
-
-    def test_adapter_empty_data(self):
-        logger = utils.getProcessingLogger(__name__)
-        msg, _kwargs = logger.process('foo', {'node_info': None,
-                                              'data': None})
-        self.assertEqual('[unidentified node] foo', msg)
-
-    def test_adapter_no_data(self):
-        logger = utils.getProcessingLogger(__name__)
-        msg, _kwargs = logger.process('foo', {})
-        self.assertEqual('foo', msg)
-
-
-class TestIsoTimestamp(base.BaseTest):
-    def test_ok(self):
-        iso_date = '1970-01-01T00:00:00+00:00'
-        self.assertEqual(iso_date, utils.iso_timestamp(0.0))
-
-    def test_none(self):
-        self.assertIsNone(utils.iso_timestamp(None))
diff --git a/ironic_inspector/test/unit/test_wsgi_service.py b/ironic_inspector/test/unit/test_wsgi_service.py
deleted file mode 100644
index 52b3767..0000000
--- a/ironic_inspector/test/unit/test_wsgi_service.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
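TestProcessingLogger above leans on the pxelinux naming convention for boot_interface: an ARP hardware type prefix ('01') followed by the dash-separated MAC. The conversion the assertions expect matches utils.get_pxe_mac, removed further below, and boils down to:

    def pxe_mac_from_boot_interface(boot_interface):
        # '01-AA-BB-CC-DD-EE-FF' -> 'aa:bb:cc:dd:ee:ff'
        if boot_interface and '-' in boot_interface:
            mac = boot_interface.split('-', 1)[1]  # drop the ARP-type byte
            return mac.replace('-', ':').lower()
        return boot_interface

    assert pxe_mac_from_boot_interface('01-aa-bb-cc-dd-ee-ff') == \
        'aa:bb:cc:dd:ee:ff'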
- -import ssl -import sys -import unittest - -import eventlet # noqa -import fixtures -import mock -from oslo_config import cfg - -from ironic_inspector import db -from ironic_inspector import firewall -from ironic_inspector import main -from ironic_inspector.plugins import base as plugins_base -from ironic_inspector.test import base as test_base -from ironic_inspector import utils -from ironic_inspector import wsgi_service - - -CONF = cfg.CONF - - -@mock.patch.object(firewall, 'clean_up', lambda: None) -@mock.patch.object(db, 'init', lambda: None) -@mock.patch.object(wsgi_service.WSGIService, '_init_host', lambda x: None) -@mock.patch.object(utils, 'add_auth_middleware') -class TestWSGIService(test_base.BaseTest): - def setUp(self): - super(TestWSGIService, self).setUp() - self.app = self.useFixture(fixtures.MockPatchObject( - main, 'app', autospec=True)).mock - self.service = wsgi_service.WSGIService() - - def test_init_middleware(self, mock_auth): - CONF.set_override('auth_strategy', 'keystone') - self.service._init_middleware() - - mock_auth.assert_called_once_with(self.app) - - @mock.patch.object(wsgi_service.WSGIService, '_init_middleware') - def test_run_ok(self, mock_init_middlw, mock_auth): - self.service.run() - - mock_init_middlw.assert_called_once_with() - self.app.run.assert_called_once_with(host='0.0.0.0', port=5050) - - @mock.patch.object(wsgi_service.LOG, 'info') - def test_init_with_swift_storage(self, mock_log, mock_auth): - - CONF.set_override('store_data', 'swift', 'processing') - msg = mock.call('Introspection data will be stored in Swift in the ' - 'container %s', CONF.swift.container) - self.service.run() - self.assertIn(msg, mock_log.call_args_list) - - def test_init_without_authenticate(self, mock_auth): - - CONF.set_override('auth_strategy', 'noauth') - self.service.run() - self.assertFalse(mock_auth.called) - - @mock.patch.object(wsgi_service.LOG, 'warning') - def test_init_with_no_data_storage(self, mock_log, mock_auth): - msg = ('Introspection data will not be stored. 
Change ' - '"[processing] store_data" option if this is not the ' - 'desired behavior') - self.service.run() - mock_log.assert_called_once_with(msg) - - -class TestCreateSSLContext(test_base.BaseTest): - def setUp(self): - super(TestCreateSSLContext, self).setUp() - self.app = mock.Mock() - self.service = wsgi_service.WSGIService() - - def test_use_ssl_false(self): - CONF.set_override('use_ssl', False) - con = self.service._create_ssl_context() - self.assertIsNone(con) - - @mock.patch.object(sys, 'version_info') - def test_old_python_returns_none(self, mock_version_info): - mock_version_info.__lt__.return_value = True - CONF.set_override('use_ssl', True) - con = self.service._create_ssl_context() - self.assertIsNone(con) - - @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), - 'This feature is unsupported in this version of python ' - 'so the tests will be skipped') - @mock.patch.object(ssl, 'create_default_context', autospec=True) - def test_use_ssl_true(self, mock_cdc): - CONF.set_override('use_ssl', True) - m_con = mock_cdc() - con = self.service._create_ssl_context() - self.assertEqual(m_con, con) - - @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), - 'This feature is unsupported in this version of python ' - 'so the tests will be skipped') - @mock.patch.object(ssl, 'create_default_context', autospec=True) - def test_only_key_path_provided(self, mock_cdc): - CONF.set_override('use_ssl', True) - CONF.set_override('ssl_key_path', '/some/fake/path') - mock_context = mock_cdc() - con = self.service._create_ssl_context() - self.assertEqual(mock_context, con) - self.assertFalse(mock_context.load_cert_chain.called) - - @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), - 'This feature is unsupported in this version of python ' - 'so the tests will be skipped') - @mock.patch.object(ssl, 'create_default_context', autospec=True) - def test_only_cert_path_provided(self, mock_cdc): - CONF.set_override('use_ssl', True) - CONF.set_override('ssl_cert_path', '/some/fake/path') - mock_context = mock_cdc() - con = self.service._create_ssl_context() - self.assertEqual(mock_context, con) - self.assertFalse(mock_context.load_cert_chain.called) - - @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), - 'This feature is unsupported in this version of python ' - 'so the tests will be skipped') - @mock.patch.object(ssl, 'create_default_context', autospec=True) - def test_both_paths_provided(self, mock_cdc): - key_path = '/some/fake/path/key' - cert_path = '/some/fake/path/cert' - CONF.set_override('use_ssl', True) - CONF.set_override('ssl_key_path', key_path) - CONF.set_override('ssl_cert_path', cert_path) - mock_context = mock_cdc() - con = self.service._create_ssl_context() - self.assertEqual(mock_context, con) - mock_context.load_cert_chain.assert_called_once_with(cert_path, - key_path) - - @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), - 'This feature is unsupported in this version of python ' - 'so the tests will be skipped') - @mock.patch.object(ssl, 'create_default_context', autospec=True) - def test_load_cert_chain_fails(self, mock_cdc): - CONF.set_override('use_ssl', True) - key_path = '/some/fake/path/key' - cert_path = '/some/fake/path/cert' - CONF.set_override('use_ssl', True) - CONF.set_override('ssl_key_path', key_path) - CONF.set_override('ssl_cert_path', cert_path) - mock_context = mock_cdc() - mock_context.load_cert_chain.side_effect = IOError('Boom!') - con = self.service._create_ssl_context() - self.assertEqual(mock_context, con) - 
mock_context.load_cert_chain.assert_called_once_with(cert_path, - key_path) - - -@mock.patch.object(firewall, 'init') -@mock.patch.object(db, 'init') -class TestInit(test_base.BaseTest): - def setUp(self): - super(TestInit, self).setUp() - # Tests default to a synchronous executor which can't be used here - utils._EXECUTOR = None - # Monkey patch for periodic tasks - eventlet.monkey_patch() - self.wsgi = wsgi_service.WSGIService() - - @mock.patch.object(firewall, 'clean_up', lambda: None) - def tearDown(self): - self.wsgi.shutdown() - super(TestInit, self).tearDown() - - def test_ok(self, mock_db, mock_firewall): - self.wsgi._init_host() - - mock_db.assert_called_once_with() - mock_firewall.assert_called_once_with() - - def test_init_without_manage_firewall(self, mock_db, mock_firewall): - - CONF.set_override('manage_firewall', False, 'firewall') - self.wsgi._init_host() - self.assertFalse(mock_firewall.called) - - @mock.patch.object(wsgi_service.LOG, 'critical') - def test_init_failed_processing_hook(self, mock_log, - mock_db, mock_firewall): - - CONF.set_override('processing_hooks', 'foo!', 'processing') - plugins_base._HOOKS_MGR = None - - self.assertRaises(SystemExit, self.wsgi._init_host) - mock_log.assert_called_once_with( - 'The following hook(s) are missing or failed to load: foo!') diff --git a/ironic_inspector/utils.py b/ironic_inspector/utils.py deleted file mode 100644 index d5eeb3e..0000000 --- a/ironic_inspector/utils.py +++ /dev/null @@ -1,226 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import logging as pylog - -import futurist -from ironicclient.v1 import node -from keystonemiddleware import auth_token -from oslo_config import cfg -from oslo_log import log -from oslo_middleware import cors as cors_middleware -import pytz - -from ironic_inspector.common.i18n import _ -from ironic_inspector import conf # noqa - -CONF = cfg.CONF - -_EXECUTOR = None - - -def get_ipmi_address_from_data(introspection_data): - try: - return introspection_data['inventory']['bmc_address'] - except KeyError: - return introspection_data.get('ipmi_address') - - -def get_pxe_mac(introspection_data): - pxe_mac = introspection_data.get('boot_interface') - if pxe_mac and '-' in pxe_mac: - # pxelinux format: 01-aa-bb-cc-dd-ee-ff - pxe_mac = pxe_mac.split('-', 1)[1] - pxe_mac = pxe_mac.replace('-', ':').lower() - return pxe_mac - - -def processing_logger_prefix(data=None, node_info=None): - """Calculate prefix for logging. 
- - Tries to use: - * node UUID, node._state - * node PXE MAC, - * node BMC address - - :param data: introspection data - :param node_info: NodeInfo or ironic node object - :return: logging prefix as a string - """ - # TODO(dtantsur): try to get MAC and BMC address for node_info as well - parts = [] - data = data or {} - - if node_info is not None: - if isinstance(node_info, node.Node): - parts.append(str(node_info.uuid)) - else: - parts.append(str(node_info)) - - pxe_mac = get_pxe_mac(data) - if pxe_mac: - parts.append('MAC %s' % pxe_mac) - - if CONF.processing.log_bmc_address: - bmc_address = get_ipmi_address_from_data(data) if data else None - if bmc_address: - parts.append('BMC %s' % bmc_address) - - if parts: - return _('[node: %s]') % ' '.join(parts) - else: - return _('[unidentified node]') - - -class ProcessingLoggerAdapter(log.KeywordArgumentAdapter): - def process(self, msg, kwargs): - if 'data' not in kwargs and 'node_info' not in kwargs: - return super(ProcessingLoggerAdapter, self).process(msg, kwargs) - - data = kwargs.get('data', {}) - node_info = kwargs.get('node_info') - prefix = processing_logger_prefix(data, node_info) - - msg, kwargs = super(ProcessingLoggerAdapter, self).process(msg, kwargs) - return ('%s %s' % (prefix, msg)), kwargs - - -def getProcessingLogger(name): - # We can't use getLogger from oslo_log, as it's an adapter itself - logger = pylog.getLogger(name) - return ProcessingLoggerAdapter(logger, {}) - - -LOG = getProcessingLogger(__name__) - - -class Error(Exception): - """Inspector exception.""" - - def __init__(self, msg, code=400, log_level='error', **kwargs): - super(Error, self).__init__(msg) - getattr(LOG, log_level)(msg, **kwargs) - self.http_code = code - - -class NotFoundInCacheError(Error): - """Exception when node was not found in cache during processing.""" - - def __init__(self, msg, code=404, **kwargs): - super(NotFoundInCacheError, self).__init__(msg, code, - log_level='info', **kwargs) - - -class NodeStateRaceCondition(Error): - """State mismatch between the DB and a node_info.""" - def __init__(self, *args, **kwargs): - message = _('Node state mismatch detected between the DB and the ' - 'cached node_info object') - kwargs.setdefault('code', 500) - super(NodeStateRaceCondition, self).__init__(message, *args, **kwargs) - - -class NodeStateInvalidEvent(Error): - """Invalid event attempted.""" - - -def executor(): - """Return the current futures executor.""" - global _EXECUTOR - if _EXECUTOR is None: - _EXECUTOR = futurist.GreenThreadPoolExecutor( - max_workers=CONF.max_concurrency) - return _EXECUTOR - - -def add_auth_middleware(app): - """Add authentication middleware to Flask application. - - :param app: application. - """ - auth_conf = dict(CONF.keystone_authtoken) - auth_conf['delay_auth_decision'] = True - app.wsgi_app = auth_token.AuthProtocol(app.wsgi_app, auth_conf) - - -def add_cors_middleware(app): - """Create a CORS wrapper - - Attach ironic-inspector-specific defaults that must be included - in all CORS responses. - - :param app: application - """ - app.wsgi_app = cors_middleware.CORS(app.wsgi_app, CONF) - - -def check_auth(request): - """Check authentication on request. 
- - :param request: Flask request - :raises: utils.Error if access is denied - """ - if CONF.auth_strategy == 'noauth': - return - if request.headers.get('X-Identity-Status').lower() == 'invalid': - raise Error(_('Authentication required'), code=401) - roles = (request.headers.get('X-Roles') or '').split(',') - if 'admin' not in roles: - LOG.error('Role "admin" not in user role list %s', roles) - raise Error(_('Access denied'), code=403) - - -def get_valid_macs(data): - """Get a list of valid MAC's from the introspection data.""" - return [m['mac'] - for m in data.get('all_interfaces', {}).values() - if m.get('mac')] - - -_INVENTORY_MANDATORY_KEYS = ('memory', 'cpu', 'interfaces') - - -def get_inventory(data, node_info=None): - """Get and validate the hardware inventory from introspection data.""" - inventory = data.get('inventory') - # TODO(dtantsur): validate inventory using JSON schema - if not inventory: - raise Error(_('Hardware inventory is empty or missing'), - data=data, node_info=node_info) - - for key in _INVENTORY_MANDATORY_KEYS: - if not inventory.get(key): - raise Error(_('Invalid hardware inventory: %s key is missing ' - 'or empty') % key, data=data, node_info=node_info) - - if not inventory.get('disks'): - LOG.info('No disks were detected in the inventory, assuming this ' - 'is a disk-less node', data=data, node_info=node_info) - # Make sure the code iterating over it does not fail with a TypeError - inventory['disks'] = [] - - return inventory - - -def iso_timestamp(timestamp=None, tz=pytz.timezone('utc')): - """Return an ISO8601-formatted timestamp (tz: UTC) or None. - - :param timestamp: such as time.time() or None - :param tz: timezone - :returns: an ISO8601-formatted timestamp, or None - """ - if timestamp is None: - return None - date = datetime.datetime.fromtimestamp(timestamp, tz=tz) - return date.isoformat() diff --git a/ironic_inspector/version.py b/ironic_inspector/version.py deleted file mode 100644 index 2e57282..0000000 --- a/ironic_inspector/version.py +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - -version_info = pbr.version.VersionInfo('ironic-inspector') diff --git a/ironic_inspector/wsgi_service.py b/ironic_inspector/wsgi_service.py deleted file mode 100644 index 7365c5f..0000000 --- a/ironic_inspector/wsgi_service.py +++ /dev/null @@ -1,196 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ssl -import sys - -from futurist import periodics -from oslo_config import cfg -from oslo_log import log - -from ironic_inspector.common import ironic as ir_utils -from ironic_inspector import db -from ironic_inspector import firewall -from ironic_inspector import main as app -from ironic_inspector import node_cache -from ironic_inspector.plugins import base as plugins_base -from ironic_inspector import utils - - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -class WSGIService(object): - """Provides ability to launch API from wsgi app.""" - - def __init__(self): - self.app = app.app - self._periodics_worker = None - - def _init_middleware(self): - """Initialize WSGI middleware. - - :returns: None - """ - - if CONF.auth_strategy != 'noauth': - utils.add_auth_middleware(self.app) - else: - LOG.warning('Starting unauthenticated, please check' - ' configuration') - - # TODO(aarefiev): move to WorkerService once we split service - if CONF.processing.store_data == 'none': - LOG.warning('Introspection data will not be stored. Change ' - '"[processing] store_data" option if this is not ' - 'the desired behavior') - elif CONF.processing.store_data == 'swift': - LOG.info('Introspection data will be stored in Swift in the ' - 'container %s', CONF.swift.container) - utils.add_cors_middleware(self.app) - - def _create_ssl_context(self): - if not CONF.use_ssl: - return - - MIN_VERSION = (2, 7, 9) - - if sys.version_info < MIN_VERSION: - LOG.warning(('Unable to use SSL in this version of Python: ' - '%(current)s, please ensure your version of Python ' - 'is greater than %(min)s to enable this feature.'), - {'current': '.'.join(map(str, sys.version_info[:3])), - 'min': '.'.join(map(str, MIN_VERSION))}) - return - - context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH) - if CONF.ssl_cert_path and CONF.ssl_key_path: - try: - context.load_cert_chain(CONF.ssl_cert_path, CONF.ssl_key_path) - except IOError as exc: - LOG.warning('Failed to load certificate or key from defined ' - 'locations: %(cert)s and %(key)s, will continue ' - 'to run with the default settings: %(exc)s', - {'cert': CONF.ssl_cert_path, - 'key': CONF.ssl_key_path, - 'exc': exc}) - except ssl.SSLError as exc: - LOG.warning('There was a problem with the loaded certificate ' - 'and key, will continue to run with the default ' - 'settings: %s', exc) - return context - - # TODO(aarefiev): move init code to WorkerService - def _init_host(self): - """Initialize Worker host - - Init db connection, load and validate processing - hooks, runs periodic tasks. - - :returns None - """ - db.init() - - try: - hooks = plugins_base.validate_processing_hooks() - except Exception as exc: - LOG.critical(str(exc)) - sys.exit(1) - - LOG.info('Enabled processing hooks: %s', [h.name for h in hooks]) - - if CONF.firewall.manage_firewall: - firewall.init() - - periodic_update_ = periodics.periodic( - spacing=CONF.firewall.firewall_update_period, - enabled=CONF.firewall.manage_firewall - )(periodic_update) - periodic_clean_up_ = periodics.periodic( - spacing=CONF.clean_up_period - )(periodic_clean_up) - - self._periodics_worker = periodics.PeriodicWorker( - callables=[(periodic_update_, None, None), - (periodic_clean_up_, None, None)], - executor_factory=periodics.ExistingExecutor(utils.executor())) - utils.executor().submit(self._periodics_worker.start) - - def shutdown(self): - """Stop serving API, clean up. 
-
-        :returns: None
-        """
-        # TODO(aarefiev): move shutdown code to WorkerService
-        LOG.debug('Shutting down')
-
-        firewall.clean_up()
-
-        if self._periodics_worker is not None:
-            try:
-                self._periodics_worker.stop()
-                self._periodics_worker.wait()
-            except Exception as e:
-                LOG.exception('Service error occurred when stopping '
-                              'periodic workers. Error: %s', e)
-            self._periodics_worker = None
-
-        if utils.executor().alive:
-            utils.executor().shutdown(wait=True)
-
-        LOG.info('Shut down successfully')
-
-    def run(self):
-        """Start serving this service using loaded application.
-
-        :returns: None
-        """
-        app_kwargs = {'host': CONF.listen_address,
-                      'port': CONF.listen_port}
-
-        context = self._create_ssl_context()
-        if context:
-            app_kwargs['ssl_context'] = context
-
-        self._init_middleware()
-
-        self._init_host()
-
-        try:
-            self.app.run(**app_kwargs)
-        finally:
-            self.shutdown()
-
-
-def periodic_update():  # pragma: no cover
-    try:
-        firewall.update_filters()
-    except Exception:
-        LOG.exception('Periodic update of firewall rules failed')
-
-
-def periodic_clean_up():  # pragma: no cover
-    try:
-        if node_cache.clean_up():
-            firewall.update_filters()
-        sync_with_ironic()
-    except Exception:
-        LOG.exception('Periodic clean up of node cache failed')
-
-
-def sync_with_ironic():
-    ironic = ir_utils.get_client()
-    # TODO(yuikotakada): pagination
-    ironic_nodes = ironic.node.list(limit=0)
-    ironic_node_uuids = {node.uuid for node in ironic_nodes}
-    node_cache.delete_nodes_not_in_list(ironic_node_uuids)
diff --git a/plugin-requirements.txt b/plugin-requirements.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder
deleted file mode 100644
index e69de29..0000000
diff --git a/releasenotes/notes/Inspector_rules_API_does_not_return_all_attributes-98a9765726c405d5.yaml b/releasenotes/notes/Inspector_rules_API_does_not_return_all_attributes-98a9765726c405d5.yaml
deleted file mode 100644
index 1b5a743..0000000
--- a/releasenotes/notes/Inspector_rules_API_does_not_return_all_attributes-98a9765726c405d5.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Querying the **inspector** rules API now also returns the ``invert`` and
-    ``multiple`` attributes of the associated conditions.
diff --git a/releasenotes/notes/Reapply_update_started_at-8af8cf254cdf8cde.yaml b/releasenotes/notes/Reapply_update_started_at-8af8cf254cdf8cde.yaml
deleted file mode 100644
index 45c6869..0000000
--- a/releasenotes/notes/Reapply_update_started_at-8af8cf254cdf8cde.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - The POST /v1/introspection/<UUID>/data/unprocessed API updates the
-    started_at time when ironic inspector begins processing the node.
diff --git a/releasenotes/notes/UUID-started_at-finished_at-in-the-status-API-7860312102923938.yaml b/releasenotes/notes/UUID-started_at-finished_at-in-the-status-API-7860312102923938.yaml
deleted file mode 100644
index 936fe00..0000000
--- a/releasenotes/notes/UUID-started_at-finished_at-in-the-status-API-7860312102923938.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-features:
-  - |
-    Extend the introspection status returned from
-    ``GET@/v1/introspection/<UUID>`` to contain the ``uuid``, ``started_at``
-    and ``finished_at`` fields.
-
-upgrade:
-  - Add a new dependency, ``pytz``.
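The ``pytz`` dependency added by the note above is what the ``iso_timestamp`` helper removed from utils.py earlier in this patch builds on. A minimal sketch of how the extended status fields could be rendered with it (the UUID and epoch values are purely illustrative):

    import datetime

    import pytz

    def iso_timestamp(timestamp=None, tz=pytz.timezone('utc')):
        # Mirrors the removed helper: float epoch -> ISO8601 string, or None.
        if timestamp is None:
            return None
        return datetime.datetime.fromtimestamp(timestamp, tz=tz).isoformat()

    status = {
        'uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123',  # illustrative UUID
        'started_at': iso_timestamp(1505252000.0),
        'finished_at': iso_timestamp(1505252300.0),
    }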
diff --git a/releasenotes/notes/abort-introspection-ae5cb5a9fbacd2ac.yaml b/releasenotes/notes/abort-introspection-ae5cb5a9fbacd2ac.yaml
deleted file mode 100644
index 8800269..0000000
--- a/releasenotes/notes/abort-introspection-ae5cb5a9fbacd2ac.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Introduced API "POST /v1/introspection/<UUID>/abort" for aborting
-    the introspection process.
diff --git a/releasenotes/notes/active_states_timeout-3e3ab110870483ec.yaml b/releasenotes/notes/active_states_timeout-3e3ab110870483ec.yaml
deleted file mode 100644
index da54c8b..0000000
--- a/releasenotes/notes/active_states_timeout-3e3ab110870483ec.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-other:
-  - |
-    Allow a timeout to happen while a node inspection is in any active state.
diff --git a/releasenotes/notes/add-disabled-option-to-add-ports-f8c6c9b3e6797652.yaml b/releasenotes/notes/add-disabled-option-to-add-ports-f8c6c9b3e6797652.yaml
deleted file mode 100644
index 6f64856..0000000
--- a/releasenotes/notes/add-disabled-option-to-add-ports-f8c6c9b3e6797652.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Add ``disabled`` option to add_ports, so discovered nodes can be created
-    without creating ports.
diff --git a/releasenotes/notes/add-lldp-basic-plugin-98aebcf43e60931b.yaml b/releasenotes/notes/add-lldp-basic-plugin-98aebcf43e60931b.yaml
deleted file mode 100644
index 88c416e..0000000
--- a/releasenotes/notes/add-lldp-basic-plugin-98aebcf43e60931b.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Add a plugin to parse raw LLDP Basic Management, 802.1, and
-    802.3 TLVs and store the data in Swift.
diff --git a/releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml b/releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml
deleted file mode 100644
index eecd281..0000000
--- a/releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Added GenericLocalLinkConnectionHook processing plugin to process LLDP data
-    returned during inspection and set port ID and switch ID in an Ironic
-    node's port local link connection information using that data.
diff --git a/releasenotes/notes/add-lldp-plugin-dependency-c323412654f71b3e.yaml b/releasenotes/notes/add-lldp-plugin-dependency-c323412654f71b3e.yaml
deleted file mode 100644
index b96d164..0000000
--- a/releasenotes/notes/add-lldp-plugin-dependency-c323412654f71b3e.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Add a check to the local_link_connection plugin to use data stored by the
-    lldp_basic plugin to avoid having to parse the LLDP packets twice.
-
diff --git a/releasenotes/notes/add-node-state-to-introspection-api-response-85fb7f4e72ae386a.yaml b/releasenotes/notes/add-node-state-to-introspection-api-response-85fb7f4e72ae386a.yaml
deleted file mode 100644
index 92c51f1..0000000
--- a/releasenotes/notes/add-node-state-to-introspection-api-response-85fb7f4e72ae386a.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Adds node state to the GET /v1/introspection/<UUID> and
-    GET /v1/introspection API response data.
diff --git a/releasenotes/notes/add-support-for-listing-all-introspection-statuses-2a3d4379c3854894.yaml b/releasenotes/notes/add-support-for-listing-all-introspection-statuses-2a3d4379c3854894.yaml
deleted file mode 100644
index d4e3f1c..0000000
--- a/releasenotes/notes/add-support-for-listing-all-introspection-statuses-2a3d4379c3854894.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-features:
-  - |
-    Add an API endpoint for listing introspection statuses. Operators can use
-    this to get the status for all running or previously run introspection
-    processing.
-
-  - |
-    Introduce a new configuration option ``api_max_limit`` that defines the
-    maximum number of items per page when API results are paginated.
diff --git a/releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml b/releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml
deleted file mode 100644
index aad718e..0000000
--- a/releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Add configuration option `processing.power_off` defaulting to True,
-    which allows leaving nodes powered on after introspection.
diff --git a/releasenotes/notes/add-transition-starting-error-on-timeout-904aeeeb319ecb2b.yaml b/releasenotes/notes/add-transition-starting-error-on-timeout-904aeeeb319ecb2b.yaml
deleted file mode 100644
index b268f7c..0000000
--- a/releasenotes/notes/add-transition-starting-error-on-timeout-904aeeeb319ecb2b.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    A timeout event in the ``starting`` state led to an undefined transition error.
diff --git a/releasenotes/notes/bmc-logging-deprecation-4ca046a64fac6f11.yaml b/releasenotes/notes/bmc-logging-deprecation-4ca046a64fac6f11.yaml
deleted file mode 100644
index db2967d..0000000
--- a/releasenotes/notes/bmc-logging-deprecation-4ca046a64fac6f11.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-deprecations:
-  - |
-    The configuration option "log_bmc_address" is deprecated.
diff --git a/releasenotes/notes/capabilities-15cc2268d661f0a0.yaml b/releasenotes/notes/capabilities-15cc2268d661f0a0.yaml
deleted file mode 100644
index f2b28d6..0000000
--- a/releasenotes/notes/capabilities-15cc2268d661f0a0.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Added a new "capabilities" processing hook detecting the CPU and boot mode
-    capabilities (the latter disabled by default).
diff --git a/releasenotes/notes/change_started_finished_at_type_to_datetime-c5617e598350970c.yaml b/releasenotes/notes/change_started_finished_at_type_to_datetime-c5617e598350970c.yaml
deleted file mode 100644
index c50b599..0000000
--- a/releasenotes/notes/change_started_finished_at_type_to_datetime-c5617e598350970c.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-fixes:
-  - |
-    Change database columns ``started_at`` and ``finished_at`` to type
-    DateTime from type Float so that timestamps fit into these columns
-    correctly.
-upgrade:
-  - |
-    A database migration is required to change some columns from Float to
-    DateTime type. This may take some time, based on the number of
-    introspection statuses in the DB.
diff --git a/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml b/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml
deleted file mode 100644
index c98628c..0000000
--- a/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - Fix setting a non-string 'value' field for rule actions. As a
-    non-string value is obviously not a formatted value, add a
-    check to avoid an AttributeError exception.
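A rough client-side sketch of paging through the new listing endpoint described above. The base URL assumes a default noauth deployment; the ``marker``/``limit`` parameters follow the usual OpenStack pagination convention, and the ``introspection`` response key is an assumption of this sketch, not a documented guarantee:

    import requests

    BASE_URL = 'http://127.0.0.1:5050'  # illustrative ironic-inspector endpoint

    def iter_statuses(page_size=50):
        # Pages are capped server-side by the api_max_limit option.
        marker = None
        while True:
            params = {'limit': page_size}
            if marker:
                params['marker'] = marker
            resp = requests.get(BASE_URL + '/v1/introspection', params=params)
            resp.raise_for_status()
            page = resp.json().get('introspection', [])
            if not page:
                return
            for status in page:
                yield status
            marker = page[-1]['uuid']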
diff --git a/releasenotes/notes/compact-debug-logging-b15dd9bbdd3ce27a.yaml b/releasenotes/notes/compact-debug-logging-b15dd9bbdd3ce27a.yaml
deleted file mode 100644
index 583306a..0000000
--- a/releasenotes/notes/compact-debug-logging-b15dd9bbdd3ce27a.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-other:
-  - Make debug-level logging more compact by removing newlines from firewall
-    logging and disabling some third-party debug messages by default.
diff --git a/releasenotes/notes/contains-matches-ee28958b08995494.yaml b/releasenotes/notes/contains-matches-ee28958b08995494.yaml
deleted file mode 100644
index 2f721ea..0000000
--- a/releasenotes/notes/contains-matches-ee28958b08995494.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - New condition plugins "contains" and "matches" allow matching a value
-    against regular expressions.
diff --git a/releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml b/releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml
deleted file mode 100644
index d5ba268..0000000
--- a/releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Fixed "/v1/continue" to return HTTP 500 on unexpected exceptions, not
-    HTTP 400.
diff --git a/releasenotes/notes/cors-5f345c65da7f5c99.yaml b/releasenotes/notes/cors-5f345c65da7f5c99.yaml
deleted file mode 100644
index ec66fef..0000000
--- a/releasenotes/notes/cors-5f345c65da7f5c99.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-features:
-  - |
-    Added CORS support middleware to Ironic Inspector, allowing a deployer
-    to optionally configure rules under which a JavaScript client may
-    break the single-origin policy and access the API directly.
-
-    OpenStack CrossProject Spec:
-      http://specs.openstack.org/openstack/openstack-specs/specs/cors-support.html
-    Oslo_Middleware Docs:
-      http://docs.openstack.org/developer/oslo.middleware/cors.html
-    OpenStack Cloud Admin Guide:
-      http://docs.openstack.org/admin-guide-cloud/cross_project_cors.html
diff --git a/releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml b/releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml
deleted file mode 100644
index ff15bad..0000000
--- a/releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - File name for stored ramdisk logs can now be customized via the
-    "ramdisk_logs_filename_format" option.
-upgrade:
-  - The default file name for stored ramdisk logs was changed to contain only
-    the node UUID (if known) and the current date time. A proper ".tar.gz"
-    extension is now appended.
diff --git a/releasenotes/notes/deprecate-rollback-dea95ac515d3189b.yaml b/releasenotes/notes/deprecate-rollback-dea95ac515d3189b.yaml
deleted file mode 100644
index 9cbedf9..0000000
--- a/releasenotes/notes/deprecate-rollback-dea95ac515d3189b.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-deprecations:
-  - The rollback actions for introspection rules are deprecated. No in-tree
-    actions are using them; third parties should stop using them as soon as possible.
diff --git a/releasenotes/notes/deprecate-root-device-hint-909d389b7efed5da.yaml b/releasenotes/notes/deprecate-root-device-hint-909d389b7efed5da.yaml
deleted file mode 100644
index 4441c7d..0000000
--- a/releasenotes/notes/deprecate-root-device-hint-909d389b7efed5da.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-deprecations:
-  - Using the root_device_hint alias for the raid_device plugin is deprecated.
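For illustration, a hypothetical rule using the "matches" condition from the note above might look like the following; the field path, values and rule layout here are made up for this sketch rather than taken from the project documentation:

    rule = {
        'description': 'Tag nodes whose BMC is on the management net (example)',
        'conditions': [
            # 'matches' checks the whole value against a regular expression.
            {'op': 'matches', 'field': 'inventory.bmc_address',
             'value': '192\\.0\\.2\\..*'},
        ],
        'actions': [
            {'action': 'set-attribute', 'path': '/extra/mgmt_net',
             'value': 'true'},
        ],
    }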
diff --git a/releasenotes/notes/deprecate-setting-ipmi-creds-1581ddc63b273811.yaml b/releasenotes/notes/deprecate-setting-ipmi-creds-1581ddc63b273811.yaml
deleted file mode 100644
index ab7e658..0000000
--- a/releasenotes/notes/deprecate-setting-ipmi-creds-1581ddc63b273811.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-deprecations:
-  - |
-    Support for setting IPMI credentials via ironic-inspector is deprecated
-    and will be removed completely in Pike. A new API version 1.9 was
-    introduced with this feature de-activated. For reasoning see
-    https://bugs.launchpad.net/ironic-python-agent/+bug/1654318.
-other:
-  - |
-    The default API version is temporarily pinned to 1.8 (before deprecating
-    setting IPMI credentials). It will be reset to the latest version again
-    when support for setting IPMI credentials is removed.
diff --git a/releasenotes/notes/deprecated-options-removal-ocata-a44dadf3bcf8d6fc.yaml b/releasenotes/notes/deprecated-options-removal-ocata-a44dadf3bcf8d6fc.yaml
deleted file mode 100644
index 208e581..0000000
--- a/releasenotes/notes/deprecated-options-removal-ocata-a44dadf3bcf8d6fc.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-upgrade:
-  - |
-    Removed previously deprecated authentication options from the "ironic",
-    "swift", and "keystone_authtoken" sections.
-  - |
-    Removed long-deprecated support for the "discoverd" section in the
-    configuration file.
diff --git a/releasenotes/notes/disable-dhcp-c86a3a0ee2696ee0.yaml b/releasenotes/notes/disable-dhcp-c86a3a0ee2696ee0.yaml
deleted file mode 100644
index 0f32fda..0000000
--- a/releasenotes/notes/disable-dhcp-c86a3a0ee2696ee0.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - DHCP is now disabled completely when no nodes are on introspection and
-    the "node_not_found_hook" is not set. This reduces the probability of
-    serving DHCP to wrong nodes, if their NIC is not registered in Ironic. See
-    https://bugs.launchpad.net/ironic-inspector/+bug/1557979 and
-    https://bugzilla.redhat.com/show_bug.cgi?id=1317695 for details.
diff --git a/releasenotes/notes/drop-maintenance-a9a87a9a2af051ad.yaml b/releasenotes/notes/drop-maintenance-a9a87a9a2af051ad.yaml
deleted file mode 100644
index 14ee6cf..0000000
--- a/releasenotes/notes/drop-maintenance-a9a87a9a2af051ad.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-upgrade:
-  - Removed support for introspecting nodes in maintenance mode, deprecated in
-    the Liberty cycle. Use the "inspecting", "manageable" or "enroll" states
-    instead.
diff --git a/releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml b/releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml
deleted file mode 100644
index f51af31..0000000
--- a/releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - Fixes a problem which caused an unhandled TypeError exception to
-    bubble up when inspector was attempting to convert some eDeploy data
-    to an integer.
diff --git a/releasenotes/notes/empty-condition-abc707b771be6be3.yaml b/releasenotes/notes/empty-condition-abc707b771be6be3.yaml
deleted file mode 100644
index 5cf7a53..0000000
--- a/releasenotes/notes/empty-condition-abc707b771be6be3.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Added new condition plugin "is-empty", which allows matching an
-    empty string, list, dictionary or None.
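Conceptually, the "is-empty" condition above reduces to a check along these lines (a sketch, not the in-tree implementation):

    def is_empty(value):
        # Matches None, '', [] and {}; 0 and False are real values and do not.
        return value is None or value in ('', [], {})

    assert is_empty('') and is_empty([]) and is_empty(None)
    assert not is_empty(0)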
diff --git a/releasenotes/notes/enroll-hook-d8c32eba70848210.yaml b/releasenotes/notes/enroll-hook-d8c32eba70848210.yaml
deleted file mode 100644
index 8bd28cd..0000000
--- a/releasenotes/notes/enroll-hook-d8c32eba70848210.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-upgrade:
-  - Switch required Ironic API version to '1.11', which supports the 'enroll' state.
-features:
-  - Add a new node_not_found hook - enroll, which allows automatically
-    discovering and enrolling Ironic nodes.
diff --git a/releasenotes/notes/extend-rules-9a9d38701e970611.yaml b/releasenotes/notes/extend-rules-9a9d38701e970611.yaml
deleted file mode 100644
index 34fe636..0000000
--- a/releasenotes/notes/extend-rules-9a9d38701e970611.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Conditions now support comparing fields from node info;
-  - Actions support formatting to fetch values from introspection data.
-    See http://docs.openstack.org/developer/ironic-inspector/usage.html#introspection-rules
\ No newline at end of file
diff --git a/releasenotes/notes/extra-hardware-swift-aeebf299b9605bb0.yaml b/releasenotes/notes/extra-hardware-swift-aeebf299b9605bb0.yaml
deleted file mode 100644
index af2b345..0000000
--- a/releasenotes/notes/extra-hardware-swift-aeebf299b9605bb0.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed extra_hardware plugin connection to Swift.
diff --git a/releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml b/releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml
deleted file mode 100644
index 65068a2..0000000
--- a/releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Fixed a regression in the firewall code, which caused re-running
-    introspection for an already inspected node to fail.
diff --git a/releasenotes/notes/fix-CalledProcessError-on-startup-28d9dbed85a81542.yaml b/releasenotes/notes/fix-CalledProcessError-on-startup-28d9dbed85a81542.yaml
deleted file mode 100644
index 2ed5048..0000000
--- a/releasenotes/notes/fix-CalledProcessError-on-startup-28d9dbed85a81542.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-fixes:
-  - |
-    Exception CalledProcessError was raised when running the `iptables` command on startup.
-    The issue is caused by an eventlet bug, see:
-    https://github.com/eventlet/eventlet/issues/357
-    The issue affects *ironic-inspector* only if it manages the firewall -
-    configured with the ``manage_firewall = True`` configuration option.
-
diff --git a/releasenotes/notes/fix-crash-when-use-postgresql-ac6c708f48f55c83.yaml b/releasenotes/notes/fix-crash-when-use-postgresql-ac6c708f48f55c83.yaml
deleted file mode 100644
index 4e6d968..0000000
--- a/releasenotes/notes/fix-crash-when-use-postgresql-ac6c708f48f55c83.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Use only single quotes for strings inside SQL statements. Fixes a crash
-    when PostgreSQL is used as a database backend.
diff --git a/releasenotes/notes/fix-deadlock-during-cleanup-bcb6b517ef299791.yaml b/releasenotes/notes/fix-deadlock-during-cleanup-bcb6b517ef299791.yaml
deleted file mode 100644
index 40c8692..0000000
--- a/releasenotes/notes/fix-deadlock-during-cleanup-bcb6b517ef299791.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fix a bug where periodic clean up failed with DBDeadlock if introspection
-    timed out.
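The value formatting mentioned in the extend-rules note above is, schematically, plain Python string formatting applied to action values, with the introspection data available for substitution (the data contents here are illustrative):

    data = {'inventory': {'bmc_address': '192.0.2.10'}}

    # A formatted action value pulls a field out of the introspection data.
    value = '{data[inventory][bmc_address]}'.format(data=data)
    assert value == '192.0.2.10'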
diff --git a/releasenotes/notes/fix-mysql-6b79049fe96edae4.yaml b/releasenotes/notes/fix-mysql-6b79049fe96edae4.yaml deleted file mode 100644 index 251f23e..0000000 --- a/releasenotes/notes/fix-mysql-6b79049fe96edae4.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -critical: - - | - Fixed several issues with MySQL database support: - - * https://bugs.launchpad.net/bugs/1501746 - * https://bugs.launchpad.net/bugs/1506160 - * https://bugs.launchpad.net/bugs/1501746 diff --git a/releasenotes/notes/fix-periodic-tasks-configuration-edd167f0146e60b5.yaml b/releasenotes/notes/fix-periodic-tasks-configuration-edd167f0146e60b5.yaml deleted file mode 100644 index 50ce47e..0000000 --- a/releasenotes/notes/fix-periodic-tasks-configuration-edd167f0146e60b5.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Ensure the configuration options ``firewall.firewall_update_period`` and - ``clean_up_period`` are applied to the ``periodic_clean_up`` and - ``periodic_update`` tasks after the config file is read. diff --git a/releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml b/releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml deleted file mode 100644 index 1cf1379..0000000 --- a/releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -upgrade: - - API "POST /v1/rules" returns 201 response code instead of - 200 on creating success. API version was bumped to 1.6. - API less than 1.6 continues to return 200. - - Default API version was changed from minimum to maximum - which Inspector can support. -fixes: - - Fix response return code for rule creating endpoint, it - returns 201 now instead of 200 on success. diff --git a/releasenotes/notes/fix-wrong-provision-state-name-150c91c48d471bf9.yaml b/releasenotes/notes/fix-wrong-provision-state-name-150c91c48d471bf9.yaml deleted file mode 100644 index a05f3cb..0000000 --- a/releasenotes/notes/fix-wrong-provision-state-name-150c91c48d471bf9.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -fixes: - - | - Wrong provision state name 'inspectfail' in *ironic-inspector* valid - states for node inspection. - This issue leads to state inconsistency between *ironic* and - *ironic-inspector*. For example, if *ironic* inspection timeout is - lower than *ironic-inspector*'s, and inspection timeout occurs, *ironic* - will transition node into 'inspect failed' provision state. In such case - when node inspection finishes without errors the node will be in - 'inspect failed' provision state with inspection in 'finished' state. diff --git a/releasenotes/notes/fix_llc_hook_bugs-efeea008c2f792eb.yaml b/releasenotes/notes/fix_llc_hook_bugs-efeea008c2f792eb.yaml deleted file mode 100644 index f9a2002..0000000 --- a/releasenotes/notes/fix_llc_hook_bugs-efeea008c2f792eb.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - LLC hook now formats the chassis ID and port ID MAC addresses into Unix - format as expected by ironic. 
- - LLC hook ensures that correct port information is passed to the patch_port - function diff --git a/releasenotes/notes/fix_llc_port_assume-4ea47d26501bddc3.yaml b/releasenotes/notes/fix_llc_port_assume-4ea47d26501bddc3.yaml deleted file mode 100644 index a01b2a3..0000000 --- a/releasenotes/notes/fix_llc_port_assume-4ea47d26501bddc3.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - LLC hook no longer assumes all inspected ports are added to ironic diff --git a/releasenotes/notes/flask-debug-6d2dcc2b482324dc.yaml b/releasenotes/notes/flask-debug-6d2dcc2b482324dc.yaml deleted file mode 100644 index 888c644..0000000 --- a/releasenotes/notes/flask-debug-6d2dcc2b482324dc.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -security: - - Never enable Flask debug mode as it may allow remote code execution. - See https://bugs.launchpad.net/bugs/1506419 for details. diff --git a/releasenotes/notes/futurist-557fcd18d4eaf1c1.yaml b/releasenotes/notes/futurist-557fcd18d4eaf1c1.yaml deleted file mode 100644 index 01b7340..0000000 --- a/releasenotes/notes/futurist-557fcd18d4eaf1c1.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - Minimum possible value for the "max_concurrency" setting is now 2. -other: - - Switched to Futurist library for asynchronous tasks. diff --git a/releasenotes/notes/googbye-patches-args-071532024b9260bd.yaml b/releasenotes/notes/googbye-patches-args-071532024b9260bd.yaml deleted file mode 100644 index 5e94981..0000000 --- a/releasenotes/notes/googbye-patches-args-071532024b9260bd.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - Removed deprecated support for passing "node_patches" and "ports_patches" - arguments to processing hooks. diff --git a/releasenotes/notes/hook-deps-83a867c7af0300e4.yaml b/releasenotes/notes/hook-deps-83a867c7af0300e4.yaml deleted file mode 100644 index 4c56000..0000000 --- a/releasenotes/notes/hook-deps-83a867c7af0300e4.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Processing hooks can now define dependencies on other processing hooks. - **ironic-inspector** start up fails when required hooks are not enabled - before the hook that requires them. diff --git a/releasenotes/notes/infiniband-support-960d6846e326dec4.yaml b/releasenotes/notes/infiniband-support-960d6846e326dec4.yaml deleted file mode 100644 index 84319cd..0000000 --- a/releasenotes/notes/infiniband-support-960d6846e326dec4.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - InfiniBand interface discovery is now supported through introspection. The - ironic-inspector will add the client-id to the corresponding ironic port - that represents the InfiniBand interface. The ironic-inspector should be - configured with a list of interfaces ``firewall.ethoib_interfaces`` to - indicate which Ethernet Over InfiniBand Interfaces are used for DHCP. diff --git a/releasenotes/notes/introspection-delay-drivers-deprecation-1d0c25b112fbd4da.yaml b/releasenotes/notes/introspection-delay-drivers-deprecation-1d0c25b112fbd4da.yaml deleted file mode 100644 index e0883a7..0000000 --- a/releasenotes/notes/introspection-delay-drivers-deprecation-1d0c25b112fbd4da.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -upgrade: - - | - The default value for the configuration option - "introspection_delay_drivers" was changed to ``.*``, which means that by - default "introspection_delay" is now applied to all drivers. Set - "introspection_delay" to 0 to disable the delay. -deprecations: - - | - The configuration option "introspection_delay_drivers" is deprecated. 
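The Futurist switch mentioned above is what the ``utils.executor`` helper removed earlier in this patch builds on; a minimal usage sketch (2 workers chosen only to satisfy the new minimum for "max_concurrency"):

    import futurist

    executor = futurist.GreenThreadPoolExecutor(max_workers=2)
    future = executor.submit(lambda: 'introspection task done')
    print(future.result())
    executor.shutdown(wait=True)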
diff --git a/releasenotes/notes/introspection-state-03538fac198882b6.yaml b/releasenotes/notes/introspection-state-03538fac198882b6.yaml
deleted file mode 100644
index 554c118..0000000
--- a/releasenotes/notes/introspection-state-03538fac198882b6.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-features:
-  - Node introspection state is now kept in a dedicated database column. The
-    introspection is now using a finite state machine. The state isn't exposed
-    to the user yet.
-issues:
-  - Due to the nature of the NodeInfo.state attribute (being updated
-    independently from the rest of the node_info attributes) if a (DB)
-    connection was lost before the Node.state column was updated,
-    Node.finished_at and Node.error columns may not be in sync with the
-    Node.state column.
-upgrade:
-  - Node.state and Node.version_id database columns are introduced.
-  - The introspection state column defaults to the state ``finished`` unless
-    the introspection error column value on a node row is not null, in which
-    case the node state is set to ``error``.
diff --git a/releasenotes/notes/ipa-inventory-0a1e8d644da850ff.yaml b/releasenotes/notes/ipa-inventory-0a1e8d644da850ff.yaml
deleted file mode 100644
index a2c25db..0000000
--- a/releasenotes/notes/ipa-inventory-0a1e8d644da850ff.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-prelude: >
-    Starting with this release, ironic-python-agent becomes the default
-    introspection ramdisk, with the old bash-based ramdisk being deprecated.
-features:
-  - Inspector no longer requires old-style "local_gb", "memory_mb", "cpus"
-    and "cpu_arch" fields from the introspection ramdisk. They are still
-    supported, though, for compatibility with the old ramdisk.
-upgrade:
-  - The root_disk_selection processing hook will now error out if root device
-    hints are specified on the ironic node, but ironic-python-agent is not
-    used as an introspection ramdisk.
-deprecations:
-  - Using the old bash-based ramdisk is deprecated, please switch to
-    ironic-python-agent as soon as possible.
diff --git a/releasenotes/notes/ipa-support-7eea800306829a49.yaml b/releasenotes/notes/ipa-support-7eea800306829a49.yaml
deleted file mode 100644
index 139120c..0000000
--- a/releasenotes/notes/ipa-support-7eea800306829a49.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-other:
-  - IPA (ironic-python-agent) is now fully supported in the devstack plugin
-    and will become the default ramdisk in the next release.
diff --git a/releasenotes/notes/ipmi-credentials-removal-0021f89424fbf7a3.yaml b/releasenotes/notes/ipmi-credentials-removal-0021f89424fbf7a3.yaml
deleted file mode 100644
index b9a6b77..0000000
--- a/releasenotes/notes/ipmi-credentials-removal-0021f89424fbf7a3.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-upgrade:
-  - |
-    Experimental support for setting IPMI credentials was removed from all
-    versions of the API. The current API version was bumped to 1.12 to mark this change.
-  - |
-    The default API version was synchronized with the current API version again
-    after removal of the IPMI credentials setting.
diff --git a/releasenotes/notes/ironic-lib-hints-20412a1c7fa796e0.yaml b/releasenotes/notes/ironic-lib-hints-20412a1c7fa796e0.yaml
deleted file mode 100644
index c94c941..0000000
--- a/releasenotes/notes/ironic-lib-hints-20412a1c7fa796e0.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - Adds support for using operators with the root device hints mechanism.
-    The supported operators are ``=``, ``==``, ``!=``, ``>=``,
-    ``<=``, ``>``, ``<``, ``s==``, ``s!=``, ``s>=``, ``s>``,
-    ``s<=``, ``s<``, ``<in>``, ``<or>`` and ``<and>``.
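As a toy illustration of the plain comparison operators listed above (the ``s``-prefixed family compares as strings; the helper below is invented for this sketch and is not the ironic-lib implementation):

    import operator

    def match_numeric_hint(hint, actual):
        # Evaluates hints such as '>= 100' against a numeric field.
        ops = {'=': operator.eq, '==': operator.eq, '!=': operator.ne,
               '>=': operator.ge, '<=': operator.le,
               '>': operator.gt, '<': operator.lt}
        op, _, value = hint.partition(' ')
        return ops[op](actual, int(value))

    assert match_numeric_hint('>= 100', 256)
    assert not match_numeric_hint('>= 100', 64)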
diff --git a/releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml b/releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml
deleted file mode 100644
index c048fdc..0000000
--- a/releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed the "is-empty" condition to return True on missing values.
diff --git a/releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml b/releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml
deleted file mode 100644
index f0d0db5..0000000
--- a/releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-features:
-  - Ironic-Inspector is now using keystoneauth and proper auth_plugins
-    instead of keystoneclient for communicating with Ironic and Swift.
-    It allows finely tuning authentication for each service independently.
-    For each service, the keystone session is created and reused, minimizing
-    the number of authentication requests to Keystone.
-upgrade:
-  - Operators are advised to specify a proper keystoneauth plugin
-    and its appropriate settings in the [ironic] and [swift] config sections.
-    Backward compatibility with previous authentication options is included.
-    Using authentication information for Ironic and Swift from the
-    [keystone_authtoken] config section is no longer supported.
-deprecations:
-  - Most of the current authentication options for either Ironic or Swift are
-    deprecated and will be removed in a future release. Please configure
-    keystoneauth auth plugin authentication instead.
diff --git a/releasenotes/notes/less-iptables-calls-759e89d103df504c.yaml b/releasenotes/notes/less-iptables-calls-759e89d103df504c.yaml
deleted file mode 100644
index 3cda8aa..0000000
--- a/releasenotes/notes/less-iptables-calls-759e89d103df504c.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Only issue iptables calls when the list of active MACs changes.
diff --git a/releasenotes/notes/log-info-not-found-cache-error-afbc87e80305ca5c.yaml b/releasenotes/notes/log-info-not-found-cache-error-afbc87e80305ca5c.yaml
deleted file mode 100644
index 70628dc..0000000
--- a/releasenotes/notes/log-info-not-found-cache-error-afbc87e80305ca5c.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-other:
-  - The log level for the error when a node is not found in the Inspector
-    cache was changed from error to info, because the not_found_hook
-    may handle this case and it is no longer necessarily an error.
diff --git a/releasenotes/notes/logs-collector-logging-356e56cd70a04a2b.yaml b/releasenotes/notes/logs-collector-logging-356e56cd70a04a2b.yaml
deleted file mode 100644
index 032f69f..0000000
--- a/releasenotes/notes/logs-collector-logging-356e56cd70a04a2b.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-other:
-  - Improve logging for ramdisk logs collection.
diff --git a/releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml b/releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml
deleted file mode 100644
index eec9db3..0000000
--- a/releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - The lookup procedure now uses all valid MACs, not only the MAC(s) that
-    will be used for creating port(s).
-  - The "enroll" node_not_found_hook now uses all valid MACs to check node
-    existence, not only the MAC(s) that will be used for creating port(s).
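A minimal keystoneauth session of the kind the note above describes, with purely illustrative credentials and endpoint (real deployments would load these from the [ironic] or [swift] config sections):

    from keystoneauth1 import session
    from keystoneauth1.identity import v3

    auth = v3.Password(auth_url='http://192.0.2.1:5000/v3',  # illustrative
                       username='ironic-inspector', password='secret',
                       project_name='service',
                       user_domain_name='Default',
                       project_domain_name='Default')
    # One session per service, created once and reused for every request.
    sess = session.Session(auth=auth)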
diff --git a/releasenotes/notes/loopback-bmc-e60d64fe74bdf142.yaml b/releasenotes/notes/loopback-bmc-e60d64fe74bdf142.yaml
deleted file mode 100644
index 472756c..0000000
--- a/releasenotes/notes/loopback-bmc-e60d64fe74bdf142.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Loopback BMC addresses (useful e.g. with virtualbmc) are no longer used
-    for lookup.
diff --git a/releasenotes/notes/migrations-autogenerate-4303fd496c3c2757.yaml b/releasenotes/notes/migrations-autogenerate-4303fd496c3c2757.yaml
deleted file mode 100644
index 533a758..0000000
--- a/releasenotes/notes/migrations-autogenerate-4303fd496c3c2757.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-other:
-  - Allow autogeneration of database migrations.
diff --git a/releasenotes/notes/missing-pxe-mac-d9329dab85513460.yaml b/releasenotes/notes/missing-pxe-mac-d9329dab85513460.yaml
deleted file mode 100644
index 39475f5..0000000
--- a/releasenotes/notes/missing-pxe-mac-d9329dab85513460.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Log a warning when add_ports is set to pxe, but no PXE MAC is returned from
-    the ramdisk.
diff --git a/releasenotes/notes/multiattribute_node_lookup-17e219ba8d3e5eb0.yaml b/releasenotes/notes/multiattribute_node_lookup-17e219ba8d3e5eb0.yaml
deleted file mode 100644
index 4ce995c..0000000
--- a/releasenotes/notes/multiattribute_node_lookup-17e219ba8d3e5eb0.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-features:
-  - |
-    Looking up nodes during introspection or discovery now supports matching
-    on multiple attributes. For example, two nodes can use the same
-    ``bmc_address`` and still be distinguished by their MAC addresses.
-upgrade:
-  - |
-    Uniqueness of a node ``bmc_address`` isn't enforced any more.
-  - |
-    The primary key of the ``attributes`` table is relaxed from the
-    ``attributes.name, attributes.value`` column pair to a new column
-    ``attributes.uuid``.
-fixes:
-  - |
-    Introspection failed on nodes with the same IPMI address but different
-    IPMI ports.
diff --git a/releasenotes/notes/names-82d9f84153a228ec.yaml b/releasenotes/notes/names-82d9f84153a228ec.yaml
deleted file mode 100644
index ffcf468..0000000
--- a/releasenotes/notes/names-82d9f84153a228ec.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Add support for using Ironic node names in the API instead of UUIDs.
-    Note that using node names in the introspection status API will require
-    a call to Ironic to be made by the service.
diff --git a/releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml b/releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml
deleted file mode 100644
index 0a0db12..0000000
--- a/releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Support for downgrading database migrations was removed. More info about
-    database migration/rollback can be found here:
-    http://docs.openstack.org/openstack-ops/content/ops_upgrades-roll-back.html
diff --git a/releasenotes/notes/no-fail-on-power-off-enroll-node-e40854f6def397b8.yaml b/releasenotes/notes/no-fail-on-power-off-enroll-node-e40854f6def397b8.yaml
deleted file mode 100644
index 6bc33ba..0000000
--- a/releasenotes/notes/no-fail-on-power-off-enroll-node-e40854f6def397b8.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Don't fail the final power off if the node is in the 'enroll' state.
-    Nodes in the 'enroll' state are not expected to have power credentials.
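Schematically, the multi-attribute lookup above can be thought of as intersecting the candidate sets produced by each attribute; a sketch only, not the in-tree algorithm:

    def find_node(candidates_by_attr):
        # candidates_by_attr: attribute name -> set of matching node UUIDs.
        found = None
        for uuids in candidates_by_attr.values():
            found = set(uuids) if found is None else found & set(uuids)
        # The lookup succeeds only if exactly one node survives.
        return found.pop() if found is not None and len(found) == 1 else None

    # Two nodes share a BMC address, but the MAC disambiguates them:
    print(find_node({'bmc_address': {'node-1', 'node-2'},
                     'mac': {'node-1'}}))  # -> node-1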
diff --git a/releasenotes/notes/no-logs-stored-data-6db52934c7f9a91a.yaml b/releasenotes/notes/no-logs-stored-data-6db52934c7f9a91a.yaml deleted file mode 100644 index b7e2652..0000000 --- a/releasenotes/notes/no-logs-stored-data-6db52934c7f9a91a.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - Ramdisk logs are no longer part of data stored to Swift and returned - by the API. diff --git a/releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml b/releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml deleted file mode 100644 index e139c27..0000000 --- a/releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -prelude: > - Starting with this release only ironic-python-agent (IPA) is supported - as an introspection ramdisk. -upgrade: - - Support for the old bash-based ramdisk was removed. Please switch to IPA - before upgrading. diff --git a/releasenotes/notes/no-rollback-e15bc7fee0134545.yaml b/releasenotes/notes/no-rollback-e15bc7fee0134545.yaml deleted file mode 100644 index e85cc1a..0000000 --- a/releasenotes/notes/no-rollback-e15bc7fee0134545.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -upgrade: - - Introspection rules actions 'set-attribute', 'set-capability' and - 'extend-attribute' no longer have the opposite effect on nodes that do not - match a rule. -fixes: - - Dropped rollback actions from 'set-attribute', 'set-capability' and - 'extend-attribute' introspection rules actions, as they were confusing, - completely undocumented and broke some real world use cases - (e.g. setting driver field). diff --git a/releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml b/releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml deleted file mode 100644 index 0f87039..0000000 --- a/releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Removed the deprecated "root_device_hint" alias for the "raid_device" hook. diff --git a/releasenotes/notes/node-locking-4d135ca5b93524b1.yaml b/releasenotes/notes/node-locking-4d135ca5b93524b1.yaml deleted file mode 100644 index 77896d9..0000000 --- a/releasenotes/notes/node-locking-4d135ca5b93524b1.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Acquire a lock on a node UUID when handling it. diff --git a/releasenotes/notes/optional-root-disk-9b972f504b2e6262.yaml b/releasenotes/notes/optional-root-disk-9b972f504b2e6262.yaml deleted file mode 100644 index c7421a6..0000000 --- a/releasenotes/notes/optional-root-disk-9b972f504b2e6262.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Avoid failing introspection on diskless nodes. The node property ``local_gb - == 0`` is set in that case. diff --git a/releasenotes/notes/patch-head-backslash-24bcdd03ba254bf2.yaml b/releasenotes/notes/patch-head-backslash-24bcdd03ba254bf2.yaml deleted file mode 100644 index b32e4d7..0000000 --- a/releasenotes/notes/patch-head-backslash-24bcdd03ba254bf2.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Introspection rules (e.g. set-attribute action) now accept 'path' - field without leading forward slash as Ironic cli does. 
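The 'path' fix above boils down to tolerating a missing leading slash before applying the patch; roughly (an illustrative helper, not the in-tree code):

    def normalize_path(path):
        # Accept both 'extra/vendor' and '/extra/vendor'.
        return path if path.startswith('/') else '/' + path

    assert normalize_path('extra/vendor') == normalize_path('/extra/vendor')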
diff --git a/releasenotes/notes/pci_devices-plugin-5b93196e0e973155.yaml b/releasenotes/notes/pci_devices-plugin-5b93196e0e973155.yaml
deleted file mode 100644
index 5f7433d..0000000
--- a/releasenotes/notes/pci_devices-plugin-5b93196e0e973155.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - Adds a new processing hook pci_devices for setting node
-    capabilities based on PCI devices present on a node
-    and rules in the [pci_devices] aliases configuration
-    option. Requires the "pci-devices" collector to be enabled
-    in IPA.
diff --git a/releasenotes/notes/port-creation-plugin-c0405ec646b1051d.yaml b/releasenotes/notes/port-creation-plugin-c0405ec646b1051d.yaml
deleted file mode 100644
index 03ae9ea..0000000
--- a/releasenotes/notes/port-creation-plugin-c0405ec646b1051d.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-upgrade:
-  - |
-    Port creation logic was moved from the core processing code to the
-    ``validate_interfaces`` processing hook. This may affect deployments
-    that disable this hook or replace it with something else. Also make
-    sure to place this hook before any hooks expecting ports to be created.
diff --git a/releasenotes/notes/preprocessing-error-01e55b4db20fb7fc.yaml b/releasenotes/notes/preprocessing-error-01e55b4db20fb7fc.yaml
deleted file mode 100644
index cdcbfb3..0000000
--- a/releasenotes/notes/preprocessing-error-01e55b4db20fb7fc.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Fixed the confusing error message shown to the user when something bad
-    happens during preprocessing (https://launchpad.net/bugs/1523907).
diff --git a/releasenotes/notes/processing-data-type-check-7c914339d3ab15ba.yaml b/releasenotes/notes/processing-data-type-check-7c914339d3ab15ba.yaml
deleted file mode 100644
index d658000..0000000
--- a/releasenotes/notes/processing-data-type-check-7c914339d3ab15ba.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - The data processing API endpoint now validates that data received from
-    the ramdisk is actually a JSON object, instead of failing with an
-    internal error later (issue https://bugs.launchpad.net/bugs/1525876).
diff --git a/releasenotes/notes/processing-logging-e2d27bbac95a7213.yaml b/releasenotes/notes/processing-logging-e2d27bbac95a7213.yaml
deleted file mode 100644
index c594605..0000000
--- a/releasenotes/notes/processing-logging-e2d27bbac95a7213.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-other:
-  - Logging during processing is now more consistent in terms of how it
-    identifies the node. Now we try to prefix the log message with node UUID,
-    BMC address and PXE MAC address (if available). Logging BMC addresses can
-    be disabled via the new "log_bmc_address" option in the "processing" section.
diff --git a/releasenotes/notes/pxe-enabled-cbc3287ebe3fcd49.yaml b/releasenotes/notes/pxe-enabled-cbc3287ebe3fcd49.yaml
deleted file mode 100644
index aaa164b..0000000
--- a/releasenotes/notes/pxe-enabled-cbc3287ebe3fcd49.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-features:
-  - |
-    Update the ``pxe_enabled`` field on ports. It is set to ``True`` for the
-    PXE-booting port and ``False`` for the remaining ports. Both newly
-    discovered and existing ports are affected.
-upgrade:
-  - |
-    Bare metal API version '1.19' is now required.
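The data type check described above amounts to rejecting non-object payloads at the API boundary; a hedged sketch:

    import json

    def parse_ramdisk_data(raw):
        # Fail fast with a clear error instead of an internal error later.
        data = json.loads(raw)
        if not isinstance(data, dict):
            raise ValueError('expected a JSON object, got %s'
                             % type(data).__name__)
        return data

    parse_ramdisk_data('{"inventory": {}}')  # ok
    # parse_ramdisk_data('[1, 2, 3]')        # raises ValueError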
diff --git a/releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml b/releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml
deleted file mode 100644
index 3e2a461..0000000
--- a/releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-fixes:
-  - The ramdisk logs are now stored on all preprocessing errors, not only
-    ones reported by the ramdisk itself. This required moving the ramdisk
-    logs handling from the "ramdisk_error" plugin to the generic processing
-    code.
-upgrade:
-  - Handling ramdisk logs was moved out of the "ramdisk_error" plugin, so
-    disabling it will no longer disable handling ramdisk logs. As before,
-    you can set the "ramdisk_logs_dir" option to an empty value (the default)
-    to disable storing ramdisk logs.
diff --git a/releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml b/releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml
deleted file mode 100644
index 22c9fb6..0000000
--- a/releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Introduced API "POST /v1/introspection/UUID/data/unprocessed"
-    for reapplying the introspection over stored data.
diff --git a/releasenotes/notes/rollback-formatting-7d61c9af2600d42f.yaml b/releasenotes/notes/rollback-formatting-7d61c9af2600d42f.yaml
deleted file mode 100644
index 2318952..0000000
--- a/releasenotes/notes/rollback-formatting-7d61c9af2600d42f.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - |
-    Do not fail the whole introspection due to a value formatting error during
-    introspection rules rollback. See `bug 1686942
-    <https://bugs.launchpad.net/ironic-inspector/+bug/1686942>`_ for an example
-    and detailed investigation.
diff --git a/releasenotes/notes/rollback-removal-a03a989e2e9f776b.yaml b/releasenotes/notes/rollback-removal-a03a989e2e9f776b.yaml
deleted file mode 100644
index f48386c..0000000
--- a/releasenotes/notes/rollback-removal-a03a989e2e9f776b.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-upgrade:
-  - |
-    Support for rollback actions in introspection rules was removed.
diff --git a/releasenotes/notes/rules-invert-2585173a11db3c31.yaml b/releasenotes/notes/rules-invert-2585173a11db3c31.yaml
deleted file mode 100644
index 5a56a1c..0000000
--- a/releasenotes/notes/rules-invert-2585173a11db3c31.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Introspection rules conditions got a new generic "invert" parameter that
-    inverts the result of the condition.
diff --git a/releasenotes/notes/set-node-to-error-when-swift-failure-3e919ecbf9db6401.yaml b/releasenotes/notes/set-node-to-error-when-swift-failure-3e919ecbf9db6401.yaml
deleted file mode 100644
index e1232a0..0000000
--- a/releasenotes/notes/set-node-to-error-when-swift-failure-3e919ecbf9db6401.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-fixes:
-  - Set the node to the error state when it
-    fails to get data from Swift.
diff --git a/releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml b/releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml
deleted file mode 100644
index e75afa3..0000000
--- a/releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - The "size" root device hint is now always converted to an integer for
-    consistency with IPA.
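A minimal client call for the reapply endpoint mentioned above; the noauth base URL and node UUID are illustrative, and a 2xx response indicates the reapply was accepted:

    import requests

    node_uuid = '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'  # illustrative
    resp = requests.post('http://127.0.0.1:5050/v1/introspection/'
                         '%s/data/unprocessed' % node_uuid)
    resp.raise_for_status()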
diff --git a/releasenotes/notes/sphinx-docs-4d0a5886261e57bf.yaml b/releasenotes/notes/sphinx-docs-4d0a5886261e57bf.yaml
deleted file mode 100644
index 6af5d57..0000000
--- a/releasenotes/notes/sphinx-docs-4d0a5886261e57bf.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-prelude: >
-    This release includes automatic `docs` generation via Sphinx.
-other:
-  - |
-    Introduced new docs generation via `Sphinx `_
-    and `ReST `_.
-
-    * Separate `doc` folder includes `source` and `build`
-    * Integration with `tox `_ as `docs` target
-    * `makefile` for manual building
-    * `OpenStack Theme `_ support
diff --git a/releasenotes/notes/status-removal-fa1d9a98ffad9f60.yaml b/releasenotes/notes/status-removal-fa1d9a98ffad9f60.yaml
deleted file mode 100644
index 226102f..0000000
--- a/releasenotes/notes/status-removal-fa1d9a98ffad9f60.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-upgrade:
-  - |
-    Old status records are no longer removed by default. They are still
-    removed if a node is removed from Ironic.
-deprecations:
-  - |
-    The ``node_status_keep_time`` configuration option is deprecated. Now that
-    we can remove status information about nodes removed from **ironic**, this
-    option does not make much sense and may be confusing (see `bug 1695858
-    `_).
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder
deleted file mode 100644
index e69de29..0000000
diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder
deleted file mode 100644
index e69de29..0000000
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
deleted file mode 100644
index b5f1b57..0000000
--- a/releasenotes/source/conf.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# -*- coding: utf-8 -*-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Ironic Inspector Release Notes documentation build configuration file,
-# created by sphinx-quickstart on Tue Nov  3 17:40:50 2015.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-# sys.path.insert(0, os.path.abspath('.'))
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
-    'reno.sphinxext',
-]
-
-try:
-    import openstackdocstheme
-    extensions.append('openstackdocstheme')
-except ImportError:
-    openstackdocstheme = None
-
-repository_name = 'openstack/ironic-inspector'
-bug_project = 'ironic-inspector'
-bug_tag = ''
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-# source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Ironic Inspector Release Notes'
-copyright = u'2015, Ironic Inspector Developers'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-from ironic_inspector.version import version_info as inspector_version
-# The full version, including alpha/beta/rc tags.
-release = inspector_version.version_string_with_vcs()
-# The short X.Y version.
-version = inspector_version.canonical_version_string()
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-# language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-# today = ''
-# Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-# default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-# add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-# show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
-
-# If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False
-
-
-# -- Options for HTML output ----------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-if openstackdocstheme is not None:
-    html_theme = 'openstackdocs'
-else:
-    html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-# html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-# html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-# html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-# html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-# html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# Add any extra paths that contain custom files (such as robots.txt or
-# .htaccess) here, relative to this directory. These files are copied
-# directly to the root of the documentation.
-# html_extra_path = []
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-# html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-# html_additional_pages = {}
-
-# If false, no module index is generated.
-# html_domain_indices = True
-
-# If false, no index is generated.
-# html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-# html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-# html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-# html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'IronicInspectorReleaseNotesdoc'
-
-
-# -- Options for LaTeX output ---------------------------------------------
-
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    # 'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    # 'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    # 'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-#  author, documentclass [howto, manual, or own class]).
-latex_documents = [
-    ('index', 'IronicInspectorReleaseNotes.tex',
-     u'Ironic Inspector Release Notes Documentation',
-     u'Ironic Inspector Developers', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-# latex_use_parts = False
-
-# If true, show page references after internal links.
-# latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-# latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-# latex_appendices = []
-
-# If false, no module index is generated.
-# latex_domain_indices = True
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [ - ('index', 'ironicinspectorreleasenotes', - u'Ironic Inspector Release Notes Documentation', - [u'Ironic Inspector Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'IronicInspectorReleaseNotes', - u'Ironic Inspector Release Notes Documentation', - u'Ironic Inspector Developers', 'IronicInspectorReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 385c5ec..0000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -============================== -Ironic Inspector Release Notes -============================== - -.. toctree:: - :maxdepth: 1 - - unreleased - ocata - newton - mitaka - liberty diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst deleted file mode 100644 index bd52787..0000000 --- a/releasenotes/source/liberty.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================================ -Liberty Series (2.0.0 - 2.2.7) Release Notes -============================================ - -.. release-notes:: - :branch: origin/stable/liberty diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index 3206bd5..0000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================================== -Mitaka Series (2.3.0 - 3.2.x) Release Notes -=========================================== - -.. release-notes:: - :branch: origin/stable/mitaka diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index a252ac6..0000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================================== -Newton Series (3.3.0 - 4.2.x) Release Notes -=========================================== - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index dcb9e56..0000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================================== -Ocata Series (5.0.0 - 5.0.x) Release Notes -========================================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 875030f..0000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================ -Current Series Release Notes -============================ - -.. 
release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 61f025f..0000000 --- a/requirements.txt +++ /dev/null @@ -1,32 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -automaton>=0.5.0 # Apache-2.0 -alembic>=0.8.10 # MIT -Babel!=2.4.0,>=2.3.4 # BSD -construct>=2.8.10 # MIT -eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT -Flask!=0.11,<1.0,>=0.10 # BSD -futurist!=0.15.0,>=0.11.0 # Apache-2.0 -ironic-lib>=2.5.0 # Apache-2.0 -jsonpath-rw<2.0,>=1.2.0 # Apache-2.0 -jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -keystoneauth1>=3.0.1 # Apache-2.0 -keystonemiddleware>=4.12.0 # Apache-2.0 -netaddr!=0.7.16,>=0.7.13 # BSD -pbr!=2.1.0,>=2.0.0 # Apache-2.0 -python-ironicclient>=1.14.0 # Apache-2.0 -python-swiftclient>=3.2.0 # Apache-2.0 -pytz>=2013.6 # MIT -oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.db>=4.24.0 # Apache-2.0 -oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -oslo.middleware>=3.27.0 # Apache-2.0 -oslo.rootwrap>=5.0.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0 -oslo.utils>=3.20.0 # Apache-2.0 -six>=1.9.0 # MIT -stevedore>=1.20.0 # Apache-2.0 -SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT diff --git a/rootwrap.conf b/rootwrap.conf deleted file mode 100644 index 848dbfd..0000000 --- a/rootwrap.conf +++ /dev/null @@ -1,27 +0,0 @@ -# Configuration for ironic-inspector-rootwrap -# This file should be owned by (and only-writeable by) the root user - -[DEFAULT] -# List of directories to load filter definitions from (separated by ','). -# These directories MUST all be only writeable by root ! -filters_path=/etc/ironic-inspector/rootwrap.d,/usr/share/ironic-inspector/rootwrap - -# List of directories to search executables in, in case filters do not -# explicitly specify a full path (separated by ',') -# If not specified, defaults to system PATH environment variable. -# These directories MUST all be only writeable by root ! -exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin - -# Enable logging to syslog -# Default value is False -use_syslog=False - -# Which syslog facility to use. -# Valid values include auth, authpriv, syslog, user0, user1... -# Default value is 'syslog' -syslog_log_facility=syslog - -# Which messages to log. 
-# INFO means log all usage -# ERROR means only log unsuccessful attempts -syslog_log_level=ERROR diff --git a/rootwrap.d/ironic-inspector-firewall.filters b/rootwrap.d/ironic-inspector-firewall.filters deleted file mode 100644 index 893454f..0000000 --- a/rootwrap.d/ironic-inspector-firewall.filters +++ /dev/null @@ -1,6 +0,0 @@ -# ironic-inspector-rootwrap command filters for firewall manipulation -# This file should be owned by (and only-writeable by) the root user - -[Filters] -# ironic_inspector/firewall.py -iptables: CommandFilter, iptables, root diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index c6d9735..0000000 --- a/setup.cfg +++ /dev/null @@ -1,101 +0,0 @@ -[metadata] -name = ironic-inspector -summary = Hardware introspection for OpenStack Bare Metal -description-file = README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://docs.openstack.org/developer/ironic-inspector/ -license = Apache-2 -classifier = - Environment :: Console - Environment :: OpenStack - Intended Audience :: System Administrators - Intended Audience :: Information Technology - License :: OSI Approved :: Apache Software License - Operating System :: OS Independent - Programming Language :: Python - Programming Language :: Python :: 2 - -[files] -packages = - ironic_inspector - -[entry_points] -console_scripts = - ironic-inspector = ironic_inspector.cmd.all:main - ironic-inspector-dbsync = ironic_inspector.dbsync:main - ironic-inspector-rootwrap = oslo_rootwrap.cmd:main -ironic_inspector.hooks.processing = - scheduler = ironic_inspector.plugins.standard:SchedulerHook - validate_interfaces = ironic_inspector.plugins.standard:ValidateInterfacesHook - ramdisk_error = ironic_inspector.plugins.standard:RamdiskErrorHook - root_disk_selection = ironic_inspector.plugins.standard:RootDiskSelectionHook - example = ironic_inspector.plugins.example:ExampleProcessingHook - extra_hardware = ironic_inspector.plugins.extra_hardware:ExtraHardwareHook - raid_device = ironic_inspector.plugins.raid_device:RaidDeviceDetection - capabilities = ironic_inspector.plugins.capabilities:CapabilitiesHook - local_link_connection = ironic_inspector.plugins.local_link_connection:GenericLocalLinkConnectionHook - lldp_basic = ironic_inspector.plugins.lldp_basic:LLDPBasicProcessingHook - pci_devices = ironic_inspector.plugins.pci_devices:PciDevicesHook -ironic_inspector.hooks.node_not_found = - example = ironic_inspector.plugins.example:example_not_found_hook - enroll = ironic_inspector.plugins.discovery:enroll_node_not_found_hook -ironic_inspector.rules.conditions = - eq = ironic_inspector.plugins.rules:EqCondition - lt = ironic_inspector.plugins.rules:LtCondition - gt = ironic_inspector.plugins.rules:GtCondition - le = ironic_inspector.plugins.rules:LeCondition - ge = ironic_inspector.plugins.rules:GeCondition - ne = ironic_inspector.plugins.rules:NeCondition - in-net = ironic_inspector.plugins.rules:NetCondition - matches = ironic_inspector.plugins.rules:MatchesCondition - contains = ironic_inspector.plugins.rules:ContainsCondition - is-empty = ironic_inspector.plugins.rules:EmptyCondition -ironic_inspector.rules.actions = - example = ironic_inspector.plugins.example:ExampleRuleAction - fail = ironic_inspector.plugins.rules:FailAction - set-attribute = ironic_inspector.plugins.rules:SetAttributeAction - set-capability = ironic_inspector.plugins.rules:SetCapabilityAction - extend-attribute = ironic_inspector.plugins.rules:ExtendAttributeAction -ironic_inspector.pxe_filter = - noop = 
ironic_inspector.pxe_filter.base:NoopFilter
-oslo.config.opts =
-    ironic_inspector = ironic_inspector.conf:list_opts
-    ironic_inspector.common.ironic = ironic_inspector.common.ironic:list_opts
-    ironic_inspector.common.swift = ironic_inspector.common.swift:list_opts
-    ironic_inspector.plugins.discovery = ironic_inspector.plugins.discovery:list_opts
-    ironic_inspector.plugins.capabilities = ironic_inspector.plugins.capabilities:list_opts
-    ironic_inspector.plugins.pci_devices = ironic_inspector.plugins.pci_devices:list_opts
-oslo.config.opts.defaults =
-    ironic_inspector = ironic_inspector.conf:set_config_defaults
-
-tempest.test_plugins =
-    ironic_inspector_tests = ironic_inspector.test.inspector_tempest_plugin.plugin:InspectorTempestPlugin
-
-[compile_catalog]
-directory = ironic_inspector/locale
-domain = ironic_inspector
-
-[update_catalog]
-domain = ironic-inspector
-output_dir = ironic_inspector/locale
-input_file = ironic_inspector/locale/ironic_inspector.pot
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = ironic_inspector/locale/ironic_inspector.pot
-
-[build_sphinx]
-all_files = 1
-build-dir = doc/build
-source-dir = doc/source
-warning-is-error = 1
-
-[pbr]
-autodoc_index_modules = True
-autodoc_exclude_modules =
-    ironic_inspector.migrations.*
-    ironic_inspector.test.*
-    ironic.common.i18n
-api_doc_dir = contributor/api
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 566d844..0000000
--- a/setup.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-# In Python < 2.7.4, lazy loading of the `pbr` package breaks setuptools
-# if other modules have registered functions in `atexit`.
-# Solution from: http://bugs.python.org/issue15881#msg170215
-try:
-    import multiprocessing  # noqa
-except ImportError:
-    pass
-
-setuptools.setup(
-    setup_requires=['pbr>=2.0.0'],
-    pbr=True)
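For context on the ``ironic_inspector.hooks.processing`` entry points deleted
from setup.cfg above: each entry names a hook class that the processing
pipeline loads by name. A hedged sketch of such a hook, assuming the
``ProcessingHook`` base class of this release with its ``before_processing``
and ``before_update`` methods; the hook class, its module location and the
low-memory check are hypothetical, not part of this patch:

    # Sketch only: a custom processing hook. To register it, an entry such as
    #   my_hook = my_package.my_module:MyHook
    # would be added under ironic_inspector.hooks.processing in setup.cfg.
    from ironic_inspector.plugins import base


    class MyHook(base.ProcessingHook):
        """Example hook flagging nodes with less than 8 GiB of RAM."""

        def before_processing(self, introspection_data, **kwargs):
            # Runs before the node is looked up; may amend raw ramdisk data.
            introspection_data.setdefault('extra', {})['seen_by_my_hook'] = True

        def before_update(self, introspection_data, node_info, **kwargs):
            # Runs once the node is known; node_info wraps the Ironic node
            # and can apply JSON patches to it.
            if introspection_data.get('memory_mb', 0) < 8192:
                node_info.patch([{'op': 'add', 'path': '/extra/low_memory',
                                  'value': True}])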
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 3ea1734..0000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-coverage!=4.4,>=4.0 # Apache-2.0
-doc8 # Apache-2.0
-flake8-import-order==0.11 # LGPLv3
-hacking<0.13,>=0.12.0 # Apache-2.0
-mock>=2.0 # BSD
-sphinx>=1.6.2 # BSD
-openstackdocstheme>=1.11.0 # Apache-2.0
-reno!=2.3.1,>=1.8.0 # Apache-2.0
-fixtures>=3.0.0 # Apache-2.0/BSD
-testresources>=0.2.4 # Apache-2.0/BSD
-testscenarios>=0.4 # Apache-2.0/BSD
-oslotest>=1.10.0 # Apache-2.0
-pydot3>=1.0.8 # MIT License
-
diff --git a/tools/states_to_dot.py b/tools/states_to_dot.py
deleted file mode 100755
index bde791c..0000000
--- a/tools/states_to_dot.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import optparse
-
-from automaton.converters import pydot
-
-from ironic_inspector import introspection_state as states
-
-
-def print_header(text):
-    print("*" * len(text))
-    print(text)
-    print("*" * len(text))
-
-
-def main():
-    parser = optparse.OptionParser()
-    parser.add_option("-f", "--file", dest="filename",
-                      help="write output to FILE", metavar="FILE")
-    parser.add_option("-T", "--format", dest="format",
-                      help="output in given format (default: png)",
-                      default='png')
-    parser.add_option("--no-labels", dest="labels",
-                      help="do not include labels",
-                      action='store_false', default=True)
-    (options, args) = parser.parse_args()
-    if options.filename is None:
-        options.filename = 'states.%s' % options.format
-
-    def node_attrs(state):
-        """Attributes used for drawing the nodes (states).
-
-        The user can perform actions on introspection states. We distinguish
-        the error states from the other states by highlighting the node:
-        stable error states are labelled in red.
-
-        This is a callback method used by pydot.convert().
-
-        :param state: name of state
-        :returns: A dictionary with graphic attributes used for displaying
-                  the state.
-        """
-        attrs = {}
-        attrs['fontcolor'] = 'red' if 'error' in state else 'gray'
-        return attrs
-
-    def edge_attrs(start_state, event, end_state):
-        """Attributes used for drawing the edges (transitions).
-
-        This is a callback method used by pydot.convert().
-
-        :param start_state: name of the start state
-        :param event: the event, a string
-        :param end_state: name of the end state
-        :returns: A dictionary with graphic attributes used for displaying
-                  the transition.
-        """
-        if not options.labels:
-            return {}
-
-        attrs = {}
-        attrs['fontsize'] = 10
-        attrs['label'] = event
-        if end_state == 'error':
-            attrs['fontcolor'] = 'red'
-        return attrs
-
-    source = states.FSM
-    graph_name = '"Ironic Inspector states"'
-    graph_attrs = {'size': 0}
-    dot_graph = pydot.convert(
-        source, graph_name, graph_attrs=graph_attrs,
-        node_attrs_cb=node_attrs, edge_attrs_cb=edge_attrs,
-        add_start_state=False)
-
-    dot_graph.write(options.filename, format=options.format)
-
-    print(dot_graph.to_string())
-    print_header("Created %s at '%s'" % (options.format, options.filename))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
deleted file mode 100755
index 07a0785..0000000
--- a/tools/test-setup.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash -xe
-
-# This script will be run by OpenStack CI before unit tests are run;
-# it sets up the test system as needed.
-# Developers should set up their test systems in a similar way.
-
-# This setup needs to be run as a user that can run sudo.
-
-# The root password for the MySQL database; pass it in via
-# MYSQL_ROOT_PW.
-DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave}
-
-# This user and its password are used by the tests; if you change them,
-# your tests might fail.
-DB_USER=openstack_citest
-DB_PW=openstack_citest
-
-sudo -H mysqladmin -u root password $DB_ROOT_PW
-
-# It's best practice to remove anonymous users from the database. If
-# an anonymous user exists, it matches first for connections, and
-# other connections from that host will not work.
-sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e "
-    DELETE FROM mysql.user WHERE User='';
-    FLUSH PRIVILEGES;
-    GRANT ALL PRIVILEGES ON *.*
-        TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;"
-
-# Now create our database.
-mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "
-    SET default_storage_engine=MYISAM;
-    DROP DATABASE IF EXISTS openstack_citest;
-    CREATE DATABASE openstack_citest CHARACTER SET utf8;"
-
-# Same for PostgreSQL
-# The root password for the PostgreSQL database; pass it in via
-# POSTGRES_ROOT_PW.
-DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} - -# Setup user -root_roles=$(sudo -H -u postgres psql -t -c " - SELECT 'HERE' from pg_roles where rolname='$DB_USER'") -if [[ ${root_roles} == *HERE ]];then - sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" -else - sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" -fi - -# Store password for tests -cat << EOF > $HOME/.pgpass -*:*:*:$DB_USER:$DB_PW -EOF -chmod 0600 $HOME/.pgpass - -# Now create our database -psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" -createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 6d79ffb..0000000 --- a/tox.ini +++ /dev/null @@ -1,69 +0,0 @@ -[tox] -envlist = py35,py27,pep8,func - -[testenv] -usedevelop = True -install_command = pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} -deps = - -r{toxinidir}/test-requirements.txt - -r{toxinidir}/plugin-requirements.txt -commands = - coverage run --branch --include "ironic_inspector*" -m unittest discover ironic_inspector.test.unit - coverage report -m --fail-under 90 -setenv = PYTHONDONTWRITEBYTECODE=1 - TZ=UTC -passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY - -[testenv:venv] -commands = {posargs} - -[testenv:releasenotes] -envdir = {toxworkdir}/venv -commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[testenv:cover] -commands = - coverage run --branch --include "ironic_inspector*" -m unittest discover ironic_inspector.test.unit - coverage report -m - -[testenv:pep8] -basepython = python2.7 -commands = - flake8 ironic_inspector - doc8 README.rst CONTRIBUTING.rst doc/source - -[testenv:func] -basepython = python2.7 -commands = - python -m ironic_inspector.test.functional - -[testenv:func3] -basepython = python3 -commands = - python3 -m ironic_inspector.test.functional - -[testenv:genconfig] -envdir = {toxworkdir}/venv -commands = oslo-config-generator --config-file config-generator.conf - -[testenv:genstates] -deps = {[testenv]deps} -commands = {toxinidir}/tools/states_to_dot.py -f {toxinidir}/doc/source/images/states.svg --format svg - -[flake8] -max-complexity=15 -# [H106] Don’t put vim configuration in source files. -# [H203] Use assertIs(Not)None to check for None. -# [H904] Delay string interpolations at logging calls. -enable-extensions=H106,H203,H904 -import-order-style = pep8 - -[hacking] -import_exceptions = ironicclient.exceptions,ironic_inspector.common.i18n - -[testenv:docs] -setenv = PYTHONHASHSEED=0 -sitepackages = False -deps = -r{toxinidir}/test-requirements.txt -commands = - python setup.py build_sphinx
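As a closing illustration of the machinery retired by this patch: the rule
condition and action plugins registered in setup.cfg above (``eq``,
``in-net``, ``set-capability``, and the rest) combine with the ``invert``
flag from the release notes into JSON rules posted to ironic-inspector's
``/v1/rules`` endpoint. A sketch under stated assumptions; the field path,
network, endpoint and token below are illustrative placeholders, not values
taken from this patch:

    # Sketch only: create an introspection rule via the HTTP API.
    import json
    import requests

    rule = {
        'description': 'Tag nodes whose BMC is outside the expected network',
        'conditions': [
            # "in-net" checks membership in a network; "invert" (from the
            # rules-invert release note) negates it. The field path into the
            # introspection data is an illustrative assumption.
            {'op': 'in-net', 'field': 'data://inventory.bmc_address',
             'value': '10.0.0.0/24', 'invert': True},
        ],
        'actions': [
            # "set-capability" writes a capability onto the Ironic node.
            {'action': 'set-capability', 'name': 'bmc_net', 'value': 'other'},
        ],
    }

    resp = requests.post('http://127.0.0.1:5050/v1/rules',  # assumed endpoint
                         headers={'X-Auth-Token': 'KEYSTONE-TOKEN',
                                  'Content-Type': 'application/json'},
                         data=json.dumps(rule))
    resp.raise_for_status()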