From b638c70f82e56dbd5a3a86febd42db04e0cdb6fc Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Fri, 18 Mar 2016 10:03:57 +0100 Subject: [PATCH 01/83] Use all valid MAC's for lookup Currently we are using only the resulting MAC(s) when doing a node lookup. In many cases it is the MAC of the PXE-booting NIC. However, it's not necessary the MAC that people used for enrolling the Ironic node, which will lead to lookup failures on the virtual environment. This change makes the lookup procedure use all of the valid MAC's. Similarly, the enroll node_not_found_hook now checks all MAC's before creating a node. Code in the validate_interfaces hook was reordered to ensure we only keep interfaces with valid MAC's even in the "all_interfaces" list. Change-Id: Ie7df05d9a7855716fb835c90cfb0ac7fc4cd66df --- ironic_inspector/plugins/discovery.py | 2 +- ironic_inspector/plugins/standard.py | 28 +++++++++---------- ironic_inspector/process.py | 2 +- .../test/test_plugins_discovery.py | 5 +++- ironic_inspector/test/test_process.py | 10 +++++-- ironic_inspector/utils.py | 7 +++++ .../lookup-all-macs-eead528c0b764ad7.yaml | 6 ++++ 7 files changed, 40 insertions(+), 20 deletions(-) create mode 100644 releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml diff --git a/ironic_inspector/plugins/discovery.py b/ironic_inspector/plugins/discovery.py index 4de8079..2ea441b 100644 --- a/ironic_inspector/plugins/discovery.py +++ b/ironic_inspector/plugins/discovery.py @@ -52,7 +52,7 @@ def _extract_node_driver_info(introspection_data): def _check_existing_nodes(introspection_data, node_driver_info, ironic): - macs = introspection_data.get('macs') + macs = utils.get_valid_macs(introspection_data) if macs: # verify existing ports for mac in macs: diff --git a/ironic_inspector/plugins/standard.py b/ironic_inspector/plugins/standard.py index 2fd714a..5cf7d66 100644 --- a/ironic_inspector/plugins/standard.py +++ b/ironic_inspector/plugins/standard.py @@ -191,6 +191,20 @@ class 
ValidateInterfacesHook(base.ProcessingHook): iface, data=data) continue + if not mac: + LOG.debug('Skipping interface %s without link information', + name, data=data) + continue + + if not utils.is_valid_mac(mac): + LOG.warning(_LW('MAC %(mac)s for interface %(name)s is ' + 'not valid, skipping'), + {'mac': mac, 'name': name}, + data=data) + continue + + mac = mac.lower() + LOG.debug('Found interface %(name)s with MAC "%(mac)s" and ' 'IP address "%(ip)s"', {'name': name, 'mac': mac, 'ip': ip}, data=data) @@ -223,20 +237,6 @@ class ValidateInterfacesHook(base.ProcessingHook): mac = iface.get('mac') ip = iface.get('ip') - if not mac: - LOG.debug('Skipping interface %s without link information', - name, data=data) - continue - - if not utils.is_valid_mac(mac): - LOG.warning(_LW('MAC %(mac)s for interface %(name)s is not ' - 'valid, skipping'), - {'mac': mac, 'name': name}, - data=data) - continue - - mac = mac.lower() - if name == 'lo' or (ip and netaddr.IPAddress(ip).is_loopback()): LOG.debug('Skipping local interface %s', name, data=data) continue diff --git a/ironic_inspector/process.py b/ironic_inspector/process.py index 05b248b..b3a7199 100644 --- a/ironic_inspector/process.py +++ b/ironic_inspector/process.py @@ -39,7 +39,7 @@ def _find_node_info(introspection_data, failures): try: return node_cache.find_node( bmc_address=introspection_data.get('ipmi_address'), - mac=introspection_data.get('macs')) + mac=utils.get_valid_macs(introspection_data)) except utils.NotFoundInCacheError as exc: not_found_hook = plugins_base.node_not_found_hook_manager() if not_found_hook is None: diff --git a/ironic_inspector/test/test_plugins_discovery.py b/ironic_inspector/test/test_plugins_discovery.py index 0c25aea..7acb38e 100644 --- a/ironic_inspector/test/test_plugins_discovery.py +++ b/ironic_inspector/test/test_plugins_discovery.py @@ -102,7 +102,10 @@ class TestEnrollNodeNotFoundHook(test_base.NodeTest): def test__check_existing_nodes_existing_mac(self): 
self.ironic.port.list.return_value = [mock.MagicMock( address=self.macs[0], uuid='fake_port')] - introspection_data = {'macs': self.macs} + introspection_data = { + 'all_interfaces': {'eth%d' % i: {'mac': m} + for i, m in enumerate(self.macs)} + } node_driver_info = {} self.assertRaises(utils.Error, diff --git a/ironic_inspector/test/test_process.py b/ironic_inspector/test/test_process.py index 8765fda..021a134 100644 --- a/ironic_inspector/test/test_process.py +++ b/ironic_inspector/test/test_process.py @@ -54,6 +54,7 @@ class BaseTest(test_base.NodeTest): self.all_ports = [mock.Mock(uuid=uuidutils.generate_uuid(), address=mac) for mac in self.macs] self.ports = [self.all_ports[1]] + self.all_macs = self.macs + ['DE:AD:BE:EF:DE:AD'] @mock.patch.object(process, '_process_node', autospec=True) @@ -90,7 +91,9 @@ class TestProcess(BaseTest): self.assertEqual([self.pxe_mac], self.data['macs']) pop_mock.assert_called_once_with(bmc_address=self.bmc_address, - mac=self.data['macs']) + mac=mock.ANY) + actual_macs = pop_mock.call_args[1]['mac'] + self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) cli.node.get.assert_called_once_with(self.uuid) process_mock.assert_called_once_with(cli.node.get.return_value, self.data, pop_mock.return_value) @@ -100,8 +103,9 @@ class TestProcess(BaseTest): del self.data['ipmi_address'] process.process(self.data) - pop_mock.assert_called_once_with(bmc_address=None, - mac=self.data['macs']) + pop_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY) + actual_macs = pop_mock.call_args[1]['mac'] + self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) cli.node.get.assert_called_once_with(self.uuid) process_mock.assert_called_once_with(cli.node.get.return_value, self.data, pop_mock.return_value) diff --git a/ironic_inspector/utils.py b/ironic_inspector/utils.py index d46bef9..bf9da0f 100644 --- a/ironic_inspector/utils.py +++ b/ironic_inspector/utils.py @@ -198,3 +198,10 @@ def get_auth_strategy(): if CONF.authenticate is 
not None: return 'keystone' if CONF.authenticate else 'noauth' return CONF.auth_strategy + + +def get_valid_macs(data): + """Get a list of valid MAC's from the introspection data.""" + return [m['mac'] + for m in data.get('all_interfaces', {}).values() + if m.get('mac')] diff --git a/releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml b/releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml new file mode 100644 index 0000000..eec9db3 --- /dev/null +++ b/releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - The lookup procedure now uses all valid MAC's, not only the MAC(s) that + will be used for creating port(s). + - The "enroll" node_not_found_hook now uses all valid MAC's to check node + existence, not only the MAC(s) that will be used for creating port(s). From 7b7fba72de46806ce84d6d4758a2343b52b0c96d Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Fri, 18 Mar 2016 09:14:45 -0400 Subject: [PATCH 02/83] Update reno for stable/mitaka Change-Id: I50bbf9b94407a524ced2c9f2fea53fe36abe457c --- releasenotes/source/mitaka.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst index b66086b..0dc585c 100644 --- a/releasenotes/source/mitaka.rst +++ b/releasenotes/source/mitaka.rst @@ -1,6 +1,6 @@ -============================ -Mitaka Series Release Notes -============================ +============================= + Mitaka Series Release Notes +============================= .. release-notes:: - :branch: origin/master + :branch: origin/stable/mitaka From a12d1af6805de8525258c83fd95e803cba78617a Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Mon, 21 Mar 2016 14:40:35 +0000 Subject: [PATCH 03/83] Better error handling when converting eDeploy data When attempting to convert some eDeploy data to integer, inspector will handle the ValueError exception which works fine for strings that are not interger-like. 
But, when this data is None a TypeError exception will be raised which wasn't handlded before, this patch is fixing it. Closes-Bug: #1560050 Change-Id: I830a1a88c765c6471c457e383c7e859fd7f93ef9 --- ironic_inspector/plugins/extra_hardware.py | 2 +- ironic_inspector/test/test_plugins_extra_hardware.py | 11 +++++++++++ .../notes/edeploy-typeerror-6486e31923d91666.yaml | 5 +++++ 3 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml diff --git a/ironic_inspector/plugins/extra_hardware.py b/ironic_inspector/plugins/extra_hardware.py index e9fa6d9..9a65dfc 100644 --- a/ironic_inspector/plugins/extra_hardware.py +++ b/ironic_inspector/plugins/extra_hardware.py @@ -96,7 +96,7 @@ class ExtraHardwareHook(base.ProcessingHook): try: item[3] = int(item[3]) - except ValueError: + except (ValueError, TypeError): pass converted_1[item[2]] = item[3] diff --git a/ironic_inspector/test/test_plugins_extra_hardware.py b/ironic_inspector/test/test_plugins_extra_hardware.py index 84c4437..54fcf2f 100644 --- a/ironic_inspector/test/test_plugins_extra_hardware.py +++ b/ironic_inspector/test/test_plugins_extra_hardware.py @@ -84,3 +84,14 @@ class TestExtraHardware(test_base.NodeTest): self.hook.before_update(introspection_data, self.node_info) self.assertFalse(patch_mock.called) self.assertFalse(swift_conn.create_object.called) + + def test__convert_edeploy_data(self, patch_mock, swift_mock): + introspection_data = [['Sheldon', 'J.', 'Plankton', '123'], + ['Larry', 'the', 'Lobster', None], + ['Eugene', 'H.', 'Krabs', 'The cashier']] + + data = self.hook._convert_edeploy_data(introspection_data) + expected_data = {'Sheldon': {'J.': {'Plankton': 123}}, + 'Larry': {'the': {'Lobster': None}}, + 'Eugene': {'H.': {'Krabs': 'The cashier'}}} + self.assertEqual(expected_data, data) diff --git a/releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml b/releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml new file mode 
100644 index 0000000..f51af31 --- /dev/null +++ b/releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Fixes a problem which caused an unhandled TypeError exception to + bubble up when inspector was attempting to convert some eDeploy data + to integer. From 35f332539dc6a9b61c60b4314347ff0b2e7177a2 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Fri, 18 Mar 2016 13:32:41 +0200 Subject: [PATCH 04/83] Use keystoneauth for Ironic and Swift clients This patch does not change the options in config file yet to showcase backward compatibility with old config options. Change-Id: I1da93b59b2f4813c42008277bd6479dc6673e7f1 --- example.conf | 258 +++++++++++++++--- ironic_inspector/common/ironic.py | 104 ++++--- ironic_inspector/common/keystone.py | 129 +++++++++ ironic_inspector/common/swift.py | 122 ++++++--- ironic_inspector/main.py | 1 - ironic_inspector/test/test_common_ironic.py | 49 ++-- ironic_inspector/test/test_keystone.py | 115 ++++++++ ironic_inspector/test/test_swift.py | 111 +++----- ...keystoneauth-plugins-aab6cbe1d0e884bf.yaml | 17 ++ requirements.txt | 2 +- 10 files changed, 687 insertions(+), 221 deletions(-) create mode 100644 ironic_inspector/common/keystone.py create mode 100644 ironic_inspector/test/test_keystone.py create mode 100644 releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml diff --git a/example.conf b/example.conf index c1291b9..7905ff4 100644 --- a/example.conf +++ b/example.conf @@ -387,59 +387,149 @@ # From ironic_inspector.common.ironic # -# Keystone authentication endpoint for accessing Ironic API. Use -# [keystone_authtoken]/auth_uri for keystone authentication. (string -# value) -# Deprecated group/name - [discoverd]/os_auth_url -#os_auth_url = +# Authentication URL (unknown value) +#auth_url = -# User name for accessing Ironic API. Use -# [keystone_authtoken]/admin_user for keystone authentication. 
(string -# value) -# Deprecated group/name - [discoverd]/os_username -#os_username = +# Method to use for authentication: noauth or keystone. (string value) +# Allowed values: keystone, noauth +#auth_strategy = keystone -# Password for accessing Ironic API. Use -# [keystone_authtoken]/admin_password for keystone authentication. -# (string value) -# Deprecated group/name - [discoverd]/os_password -#os_password = +# Authentication type to load (unknown value) +# Deprecated group/name - [DEFAULT]/auth_plugin +#auth_type = -# Tenant name for accessing Ironic API. Use -# [keystone_authtoken]/admin_tenant_name for keystone authentication. -# (string value) -# Deprecated group/name - [discoverd]/os_tenant_name -#os_tenant_name = +# PEM encoded Certificate Authority to use when verifying HTTPs +# connections. (string value) +#cafile = -# Keystone admin endpoint. DEPRECATED: use -# [keystone_authtoken]/identity_uri. (string value) +# PEM encoded client certificate cert file (string value) +#certfile = + +# Optional domain ID to use with v3 and v2 parameters. It will be used +# for both the user and project domain in v3 and ignored in v2 +# authentication. (unknown value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will +# be used for both the user and project domain in v3 and ignored in v2 +# authentication. (unknown value) +#default_domain_name = + +# Domain ID to scope to (unknown value) +#domain_id = + +# Domain name to scope to (unknown value) +#domain_name = + +# Keystone admin endpoint. DEPRECATED: Use [keystone_authtoken] +# section for keystone token validation. (string value) # Deprecated group/name - [discoverd]/identity_uri # This option is deprecated for removal. # Its value may be silently ignored in the future. #identity_uri = -# Method to use for authentication: noauth or keystone. (string value) -# Allowed values: keystone, noauth -#auth_strategy = keystone +# Verify HTTPS connections. 
(boolean value) +#insecure = false # Ironic API URL, used to set Ironic API URL when auth_strategy option # is noauth to work with standalone Ironic without keystone. (string # value) #ironic_url = http://localhost:6385/ -# Ironic service type. (string value) -#os_service_type = baremetal +# PEM encoded client certificate key file (string value) +#keyfile = + +# Maximum number of retries in case of conflict error (HTTP 409). +# (integer value) +#max_retries = 30 + +# Keystone authentication endpoint for accessing Ironic API. Use +# [keystone_authtoken] section for keystone token validation. (string +# value) +# Deprecated group/name - [discoverd]/os_auth_url +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Use options presented by configured keystone auth plugin. +#os_auth_url = # Ironic endpoint type. (string value) #os_endpoint_type = internalURL +# Password for accessing Ironic API. Use [keystone_authtoken] section +# for keystone token validation. (string value) +# Deprecated group/name - [discoverd]/os_password +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Use options presented by configured keystone auth plugin. +#os_password = + +# Keystone region used to get Ironic endpoints. (string value) +#os_region = + +# Ironic service type. (string value) +#os_service_type = baremetal + +# Tenant name for accessing Ironic API. Use [keystone_authtoken] +# section for keystone token validation. (string value) +# Deprecated group/name - [discoverd]/os_tenant_name +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Use options presented by configured keystone auth plugin. +#os_tenant_name = + +# User name for accessing Ironic API. Use [keystone_authtoken] section +# for keystone token validation. (string value) +# Deprecated group/name - [discoverd]/os_username +# This option is deprecated for removal. 
+# Its value may be silently ignored in the future. +# Reason: Use options presented by configured keystone auth plugin. +#os_username = + +# User's password (unknown value) +#password = + +# Domain ID containing project (unknown value) +#project_domain_id = + +# Domain name containing project (unknown value) +#project_domain_name = + +# Project ID to scope to (unknown value) +# Deprecated group/name - [DEFAULT]/tenant-id +#project_id = + +# Project name to scope to (unknown value) +# Deprecated group/name - [DEFAULT]/tenant-name +#project_name = + # Interval between retries in case of conflict error (HTTP 409). # (integer value) #retry_interval = 2 -# Maximum number of retries in case of conflict error (HTTP 409). -# (integer value) -#max_retries = 30 +# Tenant ID (unknown value) +#tenant_id = + +# Tenant Name (unknown value) +#tenant_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (unknown value) +#trust_id = + +# User's domain id (unknown value) +#user_domain_id = + +# User's domain name (unknown value) +#user_domain_name = + +# User id (unknown value) +#user_id = + +# Username (unknown value) +# Deprecated group/name - [DEFAULT]/username +#username = [keystone_authtoken] @@ -676,34 +766,112 @@ # From ironic_inspector.common.swift # -# Maximum number of times to retry a Swift request, before failing. -# (integer value) -#max_retries = 2 +# Authentication URL (unknown value) +#auth_url = + +# Authentication type to load (unknown value) +# Deprecated group/name - [DEFAULT]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying HTTPs +# connections. (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# Default Swift container to use when creating objects. (string value) +#container = ironic-inspector + +# Optional domain ID to use with v3 and v2 parameters. 
It will be used +# for both the user and project domain in v3 and ignored in v2 +# authentication. (unknown value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will +# be used for both the user and project domain in v3 and ignored in v2 +# authentication. (unknown value) +#default_domain_name = # Number of seconds that the Swift object will last before being # deleted. (set to 0 to never delete the object). (integer value) #delete_after = 0 -# Default Swift container to use when creating objects. (string value) -#container = ironic-inspector +# Domain ID to scope to (unknown value) +#domain_id = -# User name for accessing Swift API. (string value) -#username = +# Domain name to scope to (unknown value) +#domain_name = -# Password for accessing Swift API. (string value) -#password = +# Verify HTTPS connections. (boolean value) +#insecure = false -# Tenant name for accessing Swift API. (string value) -#tenant_name = +# PEM encoded client certificate key file (string value) +#keyfile = -# Keystone authentication API version (string value) -#os_auth_version = 2 +# Maximum number of times to retry a Swift request, before failing. +# (integer value) +#max_retries = 2 # Keystone authentication URL (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Use options presented by configured keystone auth plugin. #os_auth_url = +# Keystone authentication API version (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Use options presented by configured keystone auth plugin. +#os_auth_version = 2 + +# Swift endpoint type. (string value) +#os_endpoint_type = internalURL + +# Keystone region to get endpoint for. (string value) +#os_region = + # Swift service type. (string value) #os_service_type = object-store -# Swift endpoint type. 
(string value) -#os_endpoint_type = internalURL +# User's password (unknown value) +#password = + +# Domain ID containing project (unknown value) +#project_domain_id = + +# Domain name containing project (unknown value) +#project_domain_name = + +# Project ID to scope to (unknown value) +# Deprecated group/name - [DEFAULT]/tenant-id +#project_id = + +# Project name to scope to (unknown value) +# Deprecated group/name - [DEFAULT]/tenant-name +#project_name = + +# Tenant ID (unknown value) +#tenant_id = + +# Tenant Name (unknown value) +#tenant_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (unknown value) +#trust_id = + +# User's domain id (unknown value) +#user_domain_id = + +# User's domain name (unknown value) +#user_domain_name = + +# User id (unknown value) +#user_id = + +# Username (unknown value) +# Deprecated group/name - [DEFAULT]/username +#username = diff --git a/ironic_inspector/common/ironic.py b/ironic_inspector/common/ironic.py index 4734c40..131be00 100644 --- a/ironic_inspector/common/ironic.py +++ b/ironic_inspector/common/ironic.py @@ -14,10 +14,10 @@ import socket from ironicclient import client -from keystoneclient import client as keystone_client from oslo_config import cfg from ironic_inspector.common.i18n import _ +from ironic_inspector.common import keystone from ironic_inspector import utils CONF = cfg.CONF @@ -32,35 +32,50 @@ DEFAULT_IRONIC_API_VERSION = '1.11' IRONIC_GROUP = 'ironic' IRONIC_OPTS = [ + cfg.StrOpt('os_region', + help='Keystone region used to get Ironic endpoints.'), cfg.StrOpt('os_auth_url', default='', help='Keystone authentication endpoint for accessing Ironic ' - 'API. Use [keystone_authtoken]/auth_uri for keystone ' - 'authentication.', - deprecated_group='discoverd'), + 'API. 
Use [keystone_authtoken] section for keystone ' + 'token validation.', + deprecated_group='discoverd', + deprecated_for_removal=True, + deprecated_reason='Use options presented by configured ' + 'keystone auth plugin.'), cfg.StrOpt('os_username', default='', help='User name for accessing Ironic API. ' - 'Use [keystone_authtoken]/admin_user for keystone ' - 'authentication.', - deprecated_group='discoverd'), + 'Use [keystone_authtoken] section for keystone ' + 'token validation.', + deprecated_group='discoverd', + deprecated_for_removal=True, + deprecated_reason='Use options presented by configured ' + 'keystone auth plugin.'), cfg.StrOpt('os_password', default='', help='Password for accessing Ironic API. ' - 'Use [keystone_authtoken]/admin_password for keystone ' - 'authentication.', + 'Use [keystone_authtoken] section for keystone ' + 'token validation.', secret=True, - deprecated_group='discoverd'), + deprecated_group='discoverd', + deprecated_for_removal=True, + deprecated_reason='Use options presented by configured ' + 'keystone auth plugin.'), cfg.StrOpt('os_tenant_name', default='', help='Tenant name for accessing Ironic API. ' - 'Use [keystone_authtoken]/admin_tenant_name for keystone ' - 'authentication.', - deprecated_group='discoverd'), + 'Use [keystone_authtoken] section for keystone ' + 'token validation.', + deprecated_group='discoverd', + deprecated_for_removal=True, + deprecated_reason='Use options presented by configured ' + 'keystone auth plugin.'), cfg.StrOpt('identity_uri', default='', help='Keystone admin endpoint. 
' - 'DEPRECATED: use [keystone_authtoken]/identity_uri.', + 'DEPRECATED: Use [keystone_authtoken] section for ' + 'keystone token validation.', deprecated_group='discoverd', deprecated_for_removal=True), cfg.StrOpt('auth_strategy', @@ -90,6 +105,24 @@ IRONIC_OPTS = [ CONF.register_opts(IRONIC_OPTS, group=IRONIC_GROUP) +keystone.register_auth_opts(IRONIC_GROUP) + +IRONIC_SESSION = None +LEGACY_MAP = { + 'auth_url': 'os_auth_url', + 'username': 'os_username', + 'password': 'os_password', + 'tenant_name': 'os_tenant_name' +} + + +def reset_ironic_session(): + """Reset the global session variable. + + Mostly useful for unit tests. + """ + global IRONIC_SESSION + IRONIC_SESSION = None def get_ipmi_address(node): @@ -114,33 +147,28 @@ def get_client(token=None, """Get Ironic client instance.""" # NOTE: To support standalone ironic without keystone if CONF.ironic.auth_strategy == 'noauth': - args = {'os_auth_token': 'noauth', - 'ironic_url': CONF.ironic.ironic_url} - elif token is None: - args = {'os_password': CONF.ironic.os_password, - 'os_username': CONF.ironic.os_username, - 'os_auth_url': CONF.ironic.os_auth_url, - 'os_tenant_name': CONF.ironic.os_tenant_name, - 'os_service_type': CONF.ironic.os_service_type, - 'os_endpoint_type': CONF.ironic.os_endpoint_type} + args = {'token': 'noauth', + 'endpoint': CONF.ironic.ironic_url} else: - keystone_creds = {'password': CONF.ironic.os_password, - 'username': CONF.ironic.os_username, - 'auth_url': CONF.ironic.os_auth_url, - 'tenant_name': CONF.ironic.os_tenant_name} - keystone = keystone_client.Client(**keystone_creds) - # FIXME(sambetts): Work around for Bug 1539839 as client.authenticate - # is not called. 
- keystone.authenticate() - ironic_url = keystone.service_catalog.url_for( - service_type=CONF.ironic.os_service_type, - endpoint_type=CONF.ironic.os_endpoint_type) - args = {'os_auth_token': token, - 'ironic_url': ironic_url} + global IRONIC_SESSION + if not IRONIC_SESSION: + IRONIC_SESSION = keystone.get_session( + IRONIC_GROUP, legacy_mapping=LEGACY_MAP) + if token is None: + args = {'session': IRONIC_SESSION, + 'region_name': CONF.ironic.os_region} + else: + ironic_url = IRONIC_SESSION.get_endpoint( + service_type=CONF.ironic.os_service_type, + endpoint_type=CONF.ironic.os_endpoint_type, + region_name=CONF.ironic.os_region + ) + args = {'token': token, + 'endpoint': ironic_url} args['os_ironic_api_version'] = api_version args['max_retries'] = CONF.ironic.max_retries args['retry_interval'] = CONF.ironic.retry_interval - return client.get_client(1, **args) + return client.Client(1, **args) def check_provision_state(node, with_credentials=False): @@ -173,4 +201,4 @@ def dict_to_capabilities(caps_dict): def list_opts(): - return [(IRONIC_GROUP, IRONIC_OPTS)] + return keystone.add_auth_options(IRONIC_OPTS, IRONIC_GROUP) diff --git a/ironic_inspector/common/keystone.py b/ironic_inspector/common/keystone.py new file mode 100644 index 0000000..4965cec --- /dev/null +++ b/ironic_inspector/common/keystone.py @@ -0,0 +1,129 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy + +from keystoneauth1 import exceptions +from keystoneauth1 import loading +from oslo_config import cfg +from oslo_log import log +from six.moves.urllib import parse # for legacy options loading only + +from ironic_inspector.common.i18n import _LW + +CONF = cfg.CONF +LOG = log.getLogger(__name__) + + +def register_auth_opts(group): + loading.register_session_conf_options(CONF, group) + loading.register_auth_conf_options(CONF, group) + CONF.set_default('auth_type', default='password', group=group) + + +def get_session(group, legacy_mapping=None, legacy_auth_opts=None): + auth = _get_auth(group, legacy_mapping, legacy_auth_opts) + session = loading.load_session_from_conf_options( + CONF, group, auth=auth) + return session + + +def _get_auth(group, legacy_mapping=None, legacy_opts=None): + try: + auth = loading.load_auth_from_conf_options(CONF, group) + except exceptions.MissingRequiredOptions: + auth = _get_legacy_auth(group, legacy_mapping, legacy_opts) + else: + if auth is None: + auth = _get_legacy_auth(group, legacy_mapping, legacy_opts) + return auth + + +def _get_legacy_auth(group, legacy_mapping, legacy_opts): + """Load auth plugin from legacy options. + + If legacy_opts is not empty, these options will be registered first. + + legacy_mapping is a dict that maps the following keys to legacy option + names: + auth_url + username + password + tenant_name + """ + LOG.warning(_LW("Group [%s]: Using legacy auth loader is deprecated. 
" + "Consider specifying appropriate keystone auth plugin as " + "'auth_type' and corresponding plugin options."), group) + if legacy_opts: + for opt in legacy_opts: + try: + CONF.register_opt(opt, group=group) + except cfg.DuplicateOptError: + pass + + conf = getattr(CONF, group) + auth_params = {a: getattr(conf, legacy_mapping[a]) + for a in legacy_mapping} + legacy_loader = loading.get_plugin_loader('password') + # NOTE(pas-ha) only Swift had this option, take it into account + try: + auth_version = conf.get('os_auth_version') + except cfg.NoSuchOptError: + auth_version = None + # NOTE(pas-ha) mimic defaults of keystoneclient + if _is_apiv3(auth_params['auth_url'], auth_version): + auth_params.update({ + 'project_domain_id': 'default', + 'user_domain_id': 'default'}) + return legacy_loader.load_from_options(**auth_params) + + +# NOTE(pas-ha): for backward compat with legacy options loading only +def _is_apiv3(auth_url, auth_version): + """Check if V3 version of API is being used or not. + + This method inspects auth_url and auth_version, and checks whether V3 + version of the API is being used or not. + When no auth_version is specified and auth_url is not a versioned + endpoint, v2.0 is assumed. + :param auth_url: a http or https url to be inspected (like + 'http://127.0.0.1:9898/'). + :param auth_version: a string containing the version (like 'v2', 'v3.0') + or None + :returns: True if V3 of the API is being used. 
+ """ + return (auth_version in ('v3.0', '3') or + '/v3' in parse.urlparse(auth_url).path) + + +def add_auth_options(options, group): + + def add_options(opts, opts_to_add): + for new_opt in opts_to_add: + for opt in opts: + if opt.name == new_opt.name: + break + else: + opts.append(new_opt) + + opts = copy.deepcopy(options) + opts.insert(0, loading.get_auth_common_conf_options()[0]) + # NOTE(dims): There are a lot of auth plugins, we just generate + # the config options for a few common ones + plugins = ['password', 'v2password', 'v3password'] + for name in plugins: + plugin = loading.get_plugin_loader(name) + add_options(opts, loading.get_auth_plugin_conf_options(plugin)) + add_options(opts, loading.get_session_conf_options()) + opts.sort(key=lambda x: x.name) + return [(group, opts)] diff --git a/ironic_inspector/common/swift.py b/ironic_inspector/common/swift.py index c89e6cb..152a782 100644 --- a/ironic_inspector/common/swift.py +++ b/ironic_inspector/common/swift.py @@ -17,10 +17,12 @@ import json from oslo_config import cfg from oslo_log import log +import six from swiftclient import client as swift_client from swiftclient import exceptions as swift_exceptions from ironic_inspector.common.i18n import _ +from ironic_inspector.common import keystone from ironic_inspector import utils CONF = cfg.CONF @@ -28,7 +30,7 @@ CONF = cfg.CONF LOG = log.getLogger('ironic_inspector.common.swift') - +SWIFT_GROUP = 'swift' SWIFT_OPTS = [ cfg.IntOpt('max_retries', default=2, @@ -41,6 +43,32 @@ SWIFT_OPTS = [ cfg.StrOpt('container', default='ironic-inspector', help='Default Swift container to use when creating objects.'), + cfg.StrOpt('os_auth_version', + default='2', + help='Keystone authentication API version', + deprecated_for_removal=True, + deprecated_reason='Use options presented by configured ' + 'keystone auth plugin.'), + cfg.StrOpt('os_auth_url', + default='', + help='Keystone authentication URL', + deprecated_for_removal=True, + deprecated_reason='Use options 
presented by configured ' + 'keystone auth plugin.'), + cfg.StrOpt('os_service_type', + default='object-store', + help='Swift service type.'), + cfg.StrOpt('os_endpoint_type', + default='internalURL', + help='Swift endpoint type.'), + cfg.StrOpt('os_region', + help='Keystone region to get endpoint for.'), +] + +# NOTE(pas-ha) these old options conflict with options exported by +# most used keystone auth plugins. Need to register them manually +# for the backward-compat case. +LEGACY_OPTS = [ cfg.StrOpt('username', default='', help='User name for accessing Swift API.'), @@ -51,59 +79,67 @@ SWIFT_OPTS = [ cfg.StrOpt('tenant_name', default='', help='Tenant name for accessing Swift API.'), - cfg.StrOpt('os_auth_version', - default='2', - help='Keystone authentication API version'), - cfg.StrOpt('os_auth_url', - default='', - help='Keystone authentication URL'), - cfg.StrOpt('os_service_type', - default='object-store', - help='Swift service type.'), - cfg.StrOpt('os_endpoint_type', - default='internalURL', - help='Swift endpoint type.'), ] - -def list_opts(): - return [ - ('swift', SWIFT_OPTS) - ] - -CONF.register_opts(SWIFT_OPTS, group='swift') +CONF.register_opts(SWIFT_OPTS, group=SWIFT_GROUP) +keystone.register_auth_opts(SWIFT_GROUP) OBJECT_NAME_PREFIX = 'inspector_data' +SWIFT_SESSION = None +LEGACY_MAP = { + 'auth_url': 'os_auth_url', + 'username': 'username', + 'password': 'password', + 'tenant_name': 'tenant_name', +} + + +def reset_swift_session(): + """Reset the global session variable. + + Mostly useful for unit tests. + """ + global SWIFT_SESSION + SWIFT_SESSION = None class SwiftAPI(object): """API for communicating with Swift.""" - def __init__(self, user=None, tenant_name=None, key=None, - auth_url=None, auth_version=None, - service_type=None, endpoint_type=None): + def __init__(self): """Constructor for creating a SwiftAPI object. 
- :param user: the name of the user for Swift account - :param tenant_name: the name of the tenant for Swift account - :param key: the 'password' or key to authenticate with - :param auth_url: the url for authentication - :param auth_version: the version of api to use for authentication - :param service_type: service type in the service catalog - :param endpoint_type: service endpoint type + Authentication is loaded from the config file. """ - self.connection = swift_client.Connection( - retries=CONF.swift.max_retries, - user=user or CONF.swift.username, - tenant_name=tenant_name or CONF.swift.tenant_name, - key=key or CONF.swift.password, - authurl=auth_url or CONF.swift.os_auth_url, - auth_version=auth_version or CONF.swift.os_auth_version, - os_options={ - 'service_type': service_type or CONF.swift.os_service_type, - 'endpoint_type': endpoint_type or CONF.swift.os_endpoint_type - } + global SWIFT_SESSION + if not SWIFT_SESSION: + SWIFT_SESSION = keystone.get_session( + SWIFT_GROUP, legacy_mapping=LEGACY_MAP, + legacy_auth_opts=LEGACY_OPTS) + # TODO(pas-ha): swiftclient does not support keystone sessions ATM. + # Must be reworked when LP bug #1518938 is fixed. + swift_url = SWIFT_SESSION.get_endpoint( + service_type=CONF.swift.os_service_type, + endpoint_type=CONF.swift.os_endpoint_type, + region_name=CONF.swift.os_region ) + token = SWIFT_SESSION.get_token() + params = dict(retries=CONF.swift.max_retries, + preauthurl=swift_url, + preauthtoken=token) + # NOTE(pas-ha):session.verify is for HTTPS urls and can be + # - False (do not verify) + # - True (verify but try to locate system CA certificates) + # - Path (verify using specific CA certificate) + # This is normally handled inside the Session instance, + # but swiftclient still does not support sessions, + # so we need to reconstruct these options from Session here. 
+ verify = SWIFT_SESSION.verify + params['insecure'] = not verify + if verify and isinstance(verify, six.string_types): + params['cacert'] = verify + + self.connection = swift_client.Connection(**params) def create_object(self, object, data, container=CONF.swift.container, headers=None): @@ -182,3 +218,7 @@ def get_introspection_data(uuid): swift_api = SwiftAPI() swift_object_name = '%s-%s' % (OBJECT_NAME_PREFIX, uuid) return swift_api.get_object(swift_object_name) + + +def list_opts(): + return keystone.add_auth_options(SWIFT_OPTS, SWIFT_GROUP) diff --git a/ironic_inspector/main.py b/ironic_inspector/main.py index 1ac67bb..0d8e1e4 100644 --- a/ironic_inspector/main.py +++ b/ironic_inspector/main.py @@ -351,7 +351,6 @@ class Service(object): log.set_defaults(default_log_levels=[ 'sqlalchemy=WARNING', - 'keystoneclient=INFO', 'iso8601=WARNING', 'requests=WARNING', 'urllib3.connectionpool=WARNING', diff --git a/ironic_inspector/test/test_common_ironic.py b/ironic_inspector/test/test_common_ironic.py index b45e31e..846c783 100644 --- a/ironic_inspector/test/test_common_ironic.py +++ b/ironic_inspector/test/test_common_ironic.py @@ -16,10 +16,10 @@ import socket import unittest from ironicclient import client -from keystoneclient import client as keystone_client from oslo_config import cfg from ironic_inspector.common import ironic as ir_utils +from ironic_inspector.common import keystone from ironic_inspector.test import base from ironic_inspector import utils @@ -27,37 +27,44 @@ from ironic_inspector import utils CONF = cfg.CONF +@mock.patch.object(keystone, 'register_auth_opts') +@mock.patch.object(keystone, 'get_session') +@mock.patch.object(client, 'Client') class TestGetClient(base.BaseTest): def setUp(self): super(TestGetClient, self).setUp() - CONF.set_override('auth_strategy', 'keystone') + ir_utils.reset_ironic_session() + self.cfg.config(auth_strategy='keystone') + self.cfg.config(os_region='somewhere', group='ironic') + 
self.addCleanup(ir_utils.reset_ironic_session) - @mock.patch.object(client, 'get_client') - @mock.patch.object(keystone_client, 'Client') - def test_get_client_with_auth_token(self, mock_keystone_client, - mock_client): + def test_get_client_with_auth_token(self, mock_client, mock_load, + mock_opts): fake_token = 'token' fake_ironic_url = 'http://127.0.0.1:6385' - mock_keystone_client().service_catalog.url_for.return_value = ( - fake_ironic_url) + mock_sess = mock.Mock() + mock_sess.get_endpoint.return_value = fake_ironic_url + mock_load.return_value = mock_sess ir_utils.get_client(fake_token) - args = {'os_auth_token': fake_token, - 'ironic_url': fake_ironic_url, - 'os_ironic_api_version': '1.11', + mock_sess.get_endpoint.assert_called_once_with( + endpoint_type=CONF.ironic.os_endpoint_type, + service_type=CONF.ironic.os_service_type, + region_name=CONF.ironic.os_region) + args = {'token': fake_token, + 'endpoint': fake_ironic_url, + 'os_ironic_api_version': ir_utils.DEFAULT_IRONIC_API_VERSION, 'max_retries': CONF.ironic.max_retries, 'retry_interval': CONF.ironic.retry_interval} mock_client.assert_called_once_with(1, **args) - @mock.patch.object(client, 'get_client') - def test_get_client_without_auth_token(self, mock_client): + def test_get_client_without_auth_token(self, mock_client, mock_load, + mock_opts): + mock_sess = mock.Mock() + mock_load.return_value = mock_sess ir_utils.get_client(None) - args = {'os_password': CONF.ironic.os_password, - 'os_username': CONF.ironic.os_username, - 'os_auth_url': CONF.ironic.os_auth_url, - 'os_tenant_name': CONF.ironic.os_tenant_name, - 'os_endpoint_type': CONF.ironic.os_endpoint_type, - 'os_service_type': CONF.ironic.os_service_type, - 'os_ironic_api_version': '1.11', + args = {'session': mock_sess, + 'region_name': 'somewhere', + 'os_ironic_api_version': ir_utils.DEFAULT_IRONIC_API_VERSION, 'max_retries': CONF.ironic.max_retries, 'retry_interval': CONF.ironic.retry_interval} mock_client.assert_called_once_with(1, **args) 
@@ -92,7 +99,7 @@ class TestGetIpmiAddress(base.BaseTest): driver_info={'foo': '192.168.1.1'}) self.assertIsNone(ir_utils.get_ipmi_address(node)) - CONF.set_override('ipmi_address_fields', ['foo', 'bar', 'baz']) + self.cfg.config(ipmi_address_fields=['foo', 'bar', 'baz']) ip = ir_utils.get_ipmi_address(node) self.assertEqual(ip, '192.168.1.1') diff --git a/ironic_inspector/test/test_keystone.py b/ironic_inspector/test/test_keystone.py new file mode 100644 index 0000000..0145556 --- /dev/null +++ b/ironic_inspector/test/test_keystone.py @@ -0,0 +1,115 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock + +from keystoneauth1 import exceptions as kaexc +from keystoneauth1 import loading as kaloading +from oslo_config import cfg + +from ironic_inspector.common import keystone +from ironic_inspector.test import base + + +CONF = cfg.CONF +TESTGROUP = 'keystone_test' + + +class KeystoneTest(base.BaseTest): + + def setUp(self): + super(KeystoneTest, self).setUp() + self.cfg.conf.register_group(cfg.OptGroup(TESTGROUP)) + + def test_register_auth_opts(self): + keystone.register_auth_opts(TESTGROUP) + auth_opts = ['auth_type', 'auth_section'] + sess_opts = ['certfile', 'keyfile', 'insecure', 'timeout', 'cafile'] + for o in auth_opts + sess_opts: + self.assertIn(o, self.cfg.conf[TESTGROUP]) + self.assertEqual('password', self.cfg.conf[TESTGROUP]['auth_type']) + + @mock.patch.object(keystone, '_get_auth') + def test_get_session(self, auth_mock): + keystone.register_auth_opts(TESTGROUP) + self.cfg.config(group=TESTGROUP, + cafile='/path/to/ca/file') + auth1 = mock.Mock() + auth_mock.return_value = auth1 + sess = keystone.get_session(TESTGROUP) + self.assertEqual('/path/to/ca/file', sess.verify) + self.assertEqual(auth1, sess.auth) + + @mock.patch('keystoneauth1.loading.load_auth_from_conf_options') + @mock.patch.object(keystone, '_get_legacy_auth') + def test__get_auth(self, legacy_mock, load_mock): + auth1 = mock.Mock() + load_mock.side_effect = [ + auth1, + None, + kaexc.MissingRequiredOptions([kaloading.Opt('spam')])] + auth2 = mock.Mock() + legacy_mock.return_value = auth2 + self.assertEqual(auth1, keystone._get_auth(TESTGROUP)) + self.assertEqual(auth2, keystone._get_auth(TESTGROUP)) + self.assertEqual(auth2, keystone._get_auth(TESTGROUP)) + + @mock.patch('keystoneauth1.loading._plugins.identity.generic.Password.' 
+ 'load_from_options') + def test__get_legacy_auth(self, load_mock): + self.cfg.register_opts( + [cfg.StrOpt('identity_url'), + cfg.StrOpt('old_user'), + cfg.StrOpt('old_password')], + group=TESTGROUP) + self.cfg.config(group=TESTGROUP, + identity_url='http://fake:5000/v3', + old_password='ham', + old_user='spam') + options = [cfg.StrOpt('old_tenant_name', default='fake'), + cfg.StrOpt('old_user')] + mapping = {'username': 'old_user', + 'password': 'old_password', + 'auth_url': 'identity_url', + 'tenant_name': 'old_tenant_name'} + + keystone._get_legacy_auth(TESTGROUP, mapping, options) + load_mock.assert_called_once_with(username='spam', + password='ham', + tenant_name='fake', + user_domain_id='default', + project_domain_id='default', + auth_url='http://fake:5000/v3') + + def test__is_api_v3(self): + cases = ((False, 'http://fake:5000', None), + (False, 'http://fake:5000/v2.0', None), + (True, 'http://fake:5000/v3', None), + (True, 'http://fake:5000', '3'), + (True, 'http://fake:5000', 'v3.0')) + for case in cases: + result, url, version = case + self.assertEqual(result, keystone._is_apiv3(url, version)) + + def test_add_auth_options(self): + group, opts = keystone.add_auth_options([], TESTGROUP)[0] + self.assertEqual(TESTGROUP, group) + # check that there is no duplicates + names = {o.dest for o in opts} + self.assertEqual(len(names), len(opts)) + # NOTE(pas-ha) checking for most standard auth and session ones only + expected = {'timeout', 'insecure', 'cafile', 'certfile', 'keyfile', + 'auth_type', 'auth_url', 'username', 'password', + 'tenant_name', 'project_name', 'trust_id', + 'domain_id', 'user_domain_id', 'project_domain_id'} + self.assertTrue(expected.issubset(names)) diff --git a/ironic_inspector/test/test_swift.py b/ironic_inspector/test/test_swift.py index 567a3b9..c8c0668 100644 --- a/ironic_inspector/test/test_swift.py +++ b/ironic_inspector/test/test_swift.py @@ -14,23 +14,18 @@ # Mostly copied from ironic/tests/test_swift.py -import sys - try: from 
unittest import mock except ImportError: import mock -from oslo_config import cfg -from six.moves import reload_module from swiftclient import client as swift_client from swiftclient import exceptions as swift_exception +from ironic_inspector.common import keystone from ironic_inspector.common import swift from ironic_inspector.test import base as test_base from ironic_inspector import utils -CONF = cfg.CONF - class BaseTest(test_base.NodeTest): def setUp(self): @@ -52,61 +47,43 @@ class BaseTest(test_base.NodeTest): } +@mock.patch.object(keystone, 'register_auth_opts') +@mock.patch.object(keystone, 'get_session') @mock.patch.object(swift_client, 'Connection', autospec=True) class SwiftTestCase(BaseTest): def setUp(self): super(SwiftTestCase, self).setUp() + swift.reset_swift_session() self.swift_exception = swift_exception.ClientException('', '') + self.cfg.config(group='swift', + os_service_type='object-store', + os_endpoint_type='internalURL', + os_region='somewhere', + max_retries=2) + self.addCleanup(swift.reset_swift_session) - CONF.set_override('username', 'swift', 'swift') - CONF.set_override('tenant_name', 'tenant', 'swift') - CONF.set_override('password', 'password', 'swift') - CONF.set_override('os_auth_url', 'http://authurl/v2.0', 'swift') - CONF.set_override('os_auth_version', '2', 'swift') - CONF.set_override('max_retries', 2, 'swift') - CONF.set_override('os_service_type', 'object-store', 'swift') - CONF.set_override('os_endpoint_type', 'internalURL', 'swift') - - # The constructor of SwiftAPI accepts arguments whose - # default values are values of some config options above. So reload - # the module to make sure the required values are set. 
- reload_module(sys.modules['ironic_inspector.common.swift']) - - def test___init__(self, connection_mock): - swift.SwiftAPI(user=CONF.swift.username, - tenant_name=CONF.swift.tenant_name, - key=CONF.swift.password, - auth_url=CONF.swift.os_auth_url, - auth_version=CONF.swift.os_auth_version) - params = {'retries': 2, - 'user': 'swift', - 'tenant_name': 'tenant', - 'key': 'password', - 'authurl': 'http://authurl/v2.0', - 'auth_version': '2', - 'os_options': {'service_type': 'object-store', - 'endpoint_type': 'internalURL'}} - connection_mock.assert_called_once_with(**params) - - def test___init__defaults(self, connection_mock): + def test___init__(self, connection_mock, load_mock, opts_mock): + swift_url = 'http://swiftapi' + token = 'secret_token' + mock_sess = mock.Mock() + mock_sess.get_token.return_value = token + mock_sess.get_endpoint.return_value = swift_url + mock_sess.verify = False + load_mock.return_value = mock_sess swift.SwiftAPI() params = {'retries': 2, - 'user': 'swift', - 'tenant_name': 'tenant', - 'key': 'password', - 'authurl': 'http://authurl/v2.0', - 'auth_version': '2', - 'os_options': {'service_type': 'object-store', - 'endpoint_type': 'internalURL'}} + 'preauthurl': swift_url, + 'preauthtoken': token, + 'insecure': True} connection_mock.assert_called_once_with(**params) + mock_sess.get_endpoint.assert_called_once_with( + service_type='object-store', + endpoint_type='internalURL', + region_name='somewhere') - def test_create_object(self, connection_mock): - swiftapi = swift.SwiftAPI(user=CONF.swift.username, - tenant_name=CONF.swift.tenant_name, - key=CONF.swift.password, - auth_url=CONF.swift.os_auth_url, - auth_version=CONF.swift.os_auth_version) + def test_create_object(self, connection_mock, load_mock, opts_mock): + swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value connection_obj_mock.put_object.return_value = 'object-uuid' @@ -119,12 +96,9 @@ class SwiftTestCase(BaseTest): 'ironic-inspector', 'object', 
'some-string-data', headers=None) self.assertEqual('object-uuid', object_uuid) - def test_create_object_create_container_fails(self, connection_mock): - swiftapi = swift.SwiftAPI(user=CONF.swift.username, - tenant_name=CONF.swift.tenant_name, - key=CONF.swift.password, - auth_url=CONF.swift.os_auth_url, - auth_version=CONF.swift.os_auth_version) + def test_create_object_create_container_fails(self, connection_mock, + load_mock, opts_mock): + swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value connection_obj_mock.put_container.side_effect = self.swift_exception self.assertRaises(utils.Error, swiftapi.create_object, 'object', @@ -133,12 +107,9 @@ class SwiftTestCase(BaseTest): 'inspector') self.assertFalse(connection_obj_mock.put_object.called) - def test_create_object_put_object_fails(self, connection_mock): - swiftapi = swift.SwiftAPI(user=CONF.swift.username, - tenant_name=CONF.swift.tenant_name, - key=CONF.swift.password, - auth_url=CONF.swift.os_auth_url, - auth_version=CONF.swift.os_auth_version) + def test_create_object_put_object_fails(self, connection_mock, load_mock, + opts_mock): + swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value connection_obj_mock.put_object.side_effect = self.swift_exception self.assertRaises(utils.Error, swiftapi.create_object, 'object', @@ -148,12 +119,8 @@ class SwiftTestCase(BaseTest): connection_obj_mock.put_object.assert_called_once_with( 'ironic-inspector', 'object', 'some-string-data', headers=None) - def test_get_object(self, connection_mock): - swiftapi = swift.SwiftAPI(user=CONF.swift.username, - tenant_name=CONF.swift.tenant_name, - key=CONF.swift.password, - auth_url=CONF.swift.os_auth_url, - auth_version=CONF.swift.os_auth_version) + def test_get_object(self, connection_mock, load_mock, opts_mock): + swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value expected_obj = self.data @@ -165,12 +132,8 @@ class SwiftTestCase(BaseTest): 
'ironic-inspector', 'object') self.assertEqual(expected_obj, swift_obj) - def test_get_object_fails(self, connection_mock): - swiftapi = swift.SwiftAPI(user=CONF.swift.username, - tenant_name=CONF.swift.tenant_name, - key=CONF.swift.password, - auth_url=CONF.swift.os_auth_url, - auth_version=CONF.swift.os_auth_version) + def test_get_object_fails(self, connection_mock, load_mock, opts_mock): + swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value connection_obj_mock.get_object.side_effect = self.swift_exception self.assertRaises(utils.Error, swiftapi.get_object, diff --git a/releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml b/releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml new file mode 100644 index 0000000..f0d0db5 --- /dev/null +++ b/releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml @@ -0,0 +1,17 @@ +--- +features: + - Ironic-Inspector is now using keystoneauth and proper auth_plugins + instead of keystoneclient for communicating with Ironic and Swift. + It allows fine-tuning authentication for each service independently. + For each service, the keystone session is created and reused, minimizing + the number of authentication requests to Keystone. +upgrade: + - Operators are advised to specify a proper keystoneauth plugin + and its appropriate settings in [ironic] and [swift] config sections. + Backward compatibility with previous authentication options is included. + Using authentication information for Ironic and Swift from + [keystone_authtoken] config section is no longer supported. +deprecations: + - Most of the current authentication options for either Ironic or Swift are + deprecated and will be removed in a future release. Please configure + the keystoneauth auth plugin authentication instead. 
diff --git a/requirements.txt b/requirements.txt index ae36002..0830b8f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,11 +8,11 @@ Flask<1.0,>=0.10 # BSD futurist>=0.11.0 # Apache-2.0 jsonpath-rw<2.0,>=1.2.0 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT +keystoneauth1>=2.1.0 # Apache-2.0 keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD pbr>=1.6 # Apache-2.0 python-ironicclient>=1.1.0 # Apache-2.0 -python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 oslo.config>=3.7.0 # Apache-2.0 From 3aebf1f3b0f8d13f347be251eaeb4487b03e2ce8 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Fri, 18 Mar 2016 13:34:06 +0200 Subject: [PATCH 05/83] Set config options for keystoneauth. Change-Id: I671fbc666dcfc836b162addb9a77174ba279ba48 --- devstack/plugin.sh | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index a450260..39d595e 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -166,6 +166,18 @@ EOF fi } +function inspector_configure_auth_for { + inspector_iniset $1 auth_type password + inspector_iniset $1 auth_url "$KEYSTONE_SERVICE_URI" + inspector_iniset $1 username $IRONIC_INSPECTOR_ADMIN_USER + inspector_iniset $1 password $SERVICE_PASSWORD + inspector_iniset $1 project_name $SERVICE_PROJECT_NAME + inspector_iniset $1 user_domain_id default + inspector_iniset $1 project_domain_id default + inspector_iniset $1 cafile $SSL_BUNDLE_FILE + inspector_iniset $1 os_region $REGION_NAME +} + function configure_inspector { mkdir_chown_stack "$IRONIC_INSPECTOR_CONF_DIR" mkdir_chown_stack "$IRONIC_INSPECTOR_DATA_DIR" @@ -174,11 +186,7 @@ function configure_inspector { cp "$IRONIC_INSPECTOR_DIR/example.conf" "$IRONIC_INSPECTOR_CONF_FILE" inspector_iniset DEFAULT debug $IRONIC_INSPECTOR_DEBUG - inspector_iniset ironic os_auth_url "$KEYSTONE_SERVICE_URI" - 
inspector_iniset ironic os_username $IRONIC_INSPECTOR_ADMIN_USER - inspector_iniset ironic os_password $SERVICE_PASSWORD - inspector_iniset ironic os_tenant_name $SERVICE_PROJECT_NAME - + inspector_configure_auth_for ironic configure_auth_token_middleware $IRONIC_INSPECTOR_CONF_FILE $IRONIC_INSPECTOR_ADMIN_USER $IRONIC_INSPECTOR_AUTH_CACHE_DIR/api inspector_iniset DEFAULT listen_port $IRONIC_INSPECTOR_PORT @@ -227,11 +235,7 @@ function configure_inspector { } function configure_inspector_swift { - inspector_iniset swift os_auth_url "$KEYSTONE_SERVICE_URI/v2.0" - inspector_iniset swift username $IRONIC_INSPECTOR_ADMIN_USER - inspector_iniset swift password $SERVICE_PASSWORD - inspector_iniset swift tenant_name $SERVICE_PROJECT_NAME - + inspector_configure_auth_for swift inspector_iniset processing store_data swift } From a94486d80915ea3bede8ddecc51b9f99aa2faad7 Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Tue, 23 Feb 2016 12:10:46 +0200 Subject: [PATCH 06/83] Remove downgrades from migrations According to cross project spec I622f89fe63327d44f9b229d3bd9e76e15acbaa7a downgrade migrations should be removed. 
Change-Id: Iee38d8ba5e414e67033db35db52724d3f9f84fd7 --- ironic_inspector/dbsync.py | 2 +- ironic_inspector/migrations/script.py.mako | 4 ---- .../migrations/versions/578f84f38d_inital_db_schema.py | 6 ------ .../migrations/versions/d588418040d_add_rules.py | 6 ------ .../e169a4a81d88_add_invert_field_to_rule_condition.py | 4 ---- .../notes/no-downgrade-migrations-514bf872d9f944ed.yaml | 5 +++++ 6 files changed, 6 insertions(+), 21 deletions(-) create mode 100644 releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml diff --git a/ironic_inspector/dbsync.py b/ironic_inspector/dbsync.py index c075fde..3a021ff 100644 --- a/ironic_inspector/dbsync.py +++ b/ironic_inspector/dbsync.py @@ -39,7 +39,7 @@ def add_command_parsers(subparsers): parser = add_alembic_command(subparsers, name) parser.set_defaults(func=do_alembic_command) - for name in ['downgrade', 'stamp', 'show', 'edit']: + for name in ['stamp', 'show', 'edit']: parser = add_alembic_command(subparsers, name) parser.set_defaults(func=with_revision) parser.add_argument('--revision', nargs='?', required=True) diff --git a/ironic_inspector/migrations/script.py.mako b/ironic_inspector/migrations/script.py.mako index d45a32a..1c3fcb4 100644 --- a/ironic_inspector/migrations/script.py.mako +++ b/ironic_inspector/migrations/script.py.mako @@ -30,7 +30,3 @@ ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "pass"} diff --git a/ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py b/ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py index b0fa92e..ee052f7 100644 --- a/ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py +++ b/ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py @@ -61,9 +61,3 @@ def upgrade(): mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) - - -def downgrade(): - op.drop_table('nodes') - op.drop_table('attributes') - op.drop_table('options') 
diff --git a/ironic_inspector/migrations/versions/d588418040d_add_rules.py b/ironic_inspector/migrations/versions/d588418040d_add_rules.py index 00ede8a..7b79704 100644 --- a/ironic_inspector/migrations/versions/d588418040d_add_rules.py +++ b/ironic_inspector/migrations/versions/d588418040d_add_rules.py @@ -62,9 +62,3 @@ def upgrade(): mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) - - -def downgrade(): - op.drop_table('rules') - op.drop_table('rule_conditions') - op.drop_table('rule_actions') diff --git a/ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.py b/ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.py index dbe83ad..001de0a 100644 --- a/ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.py +++ b/ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.py @@ -31,7 +31,3 @@ import sqlalchemy as sa def upgrade(): op.add_column('rule_conditions', sa.Column('invert', sa.Boolean(), nullable=True, default=False)) - - -def downgrade(): - op.drop_column('rule_conditions', 'invert') diff --git a/releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml b/releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml new file mode 100644 index 0000000..0a0db12 --- /dev/null +++ b/releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml @@ -0,0 +1,5 @@ +--- +features: + - Database migrations downgrade was removed. 
More info about + database migration/rollback could be found here + http://docs.openstack.org/openstack-ops/content/ops_upgrades-roll-back.html From f974aa063fa27092b7d8a55fbf85e7360699f008 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Tue, 29 Mar 2016 16:03:26 +0200 Subject: [PATCH 07/83] Update versions on the release notes page Change-Id: Ib60128dc5c243a76c7d31b6b6b67bf2b3f36a4b1 --- releasenotes/source/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst index 94b2082..3d3e78c 100644 --- a/releasenotes/source/index.rst +++ b/releasenotes/source/index.rst @@ -5,8 +5,8 @@ .. toctree:: :maxdepth: 1 - Current (2.3.0 - unreleased) - Mitaka (2.3.0 - unreleased) + Current (3.3.0 - unreleased) + Mitaka (2.3.0 - 3.2.x) Liberty (2.0.0 - 2.2.x) From abe53410657fb8886232031cefa1f77e7a205e5e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 1 Apr 2016 19:29:32 +0000 Subject: [PATCH 08/83] Updated from global requirements Change-Id: Idfca358c36277f1deabf21cae5843318f42a23d3 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0830b8f..0c08469 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ Flask<1.0,>=0.10 # BSD futurist>=0.11.0 # Apache-2.0 jsonpath-rw<2.0,>=1.2.0 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -keystoneauth1>=2.1.0 # Apache-2.0 +keystoneauth1>=2.1.0 # Apache-2.0 keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD pbr>=1.6 # Apache-2.0 From 0ec02d3695841f1557d2ba74c113499e2aa3b3bd Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Mon, 4 Apr 2016 12:02:21 +0200 Subject: [PATCH 09/83] Drop the TestInit node_cache unit test This test essentially tests get_session(), which is covered by anything else touching the database code. More importantly, as it does not use any mocks, it fails on stable/liberty right now. 
Change-Id: I175a38bd3675aabe1c518a0b7d4c2bd56d93cc1a --- ironic_inspector/test/test_node_cache.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/ironic_inspector/test/test_node_cache.py b/ironic_inspector/test/test_node_cache.py index 2591c46..0c10134 100644 --- a/ironic_inspector/test/test_node_cache.py +++ b/ironic_inspector/test/test_node_cache.py @@ -381,16 +381,6 @@ class TestNodeInfoFinished(test_base.NodeTest): self.assertFalse(self.node_info._locked) -class TestInit(unittest.TestCase): - def setUp(self): - super(TestInit, self).setUp() - - def test_ok(self): - db.init() - session = db.get_session() - db.model_query(db.Node, session=session) - - class TestNodeInfoOptions(test_base.NodeTest): def setUp(self): super(TestNodeInfoOptions, self).setUp() From 30ae1e72f1bb21e2c0eb8cc11bec43b4872b55bd Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Tue, 5 Apr 2016 11:04:59 +0300 Subject: [PATCH 10/83] Move unit tests to "unit" directory This refactor is needed for tempest test work, as tempest tests will be placed in the test dir. So move unit tests to a separate directory "unit" under test. 
Change-Id: Ic99df6111ef30947148a9e38b9435a54f3d37064 --- ironic_inspector/test/unit/__init__.py | 0 ironic_inspector/test/{ => unit}/test_common_ironic.py | 0 ironic_inspector/test/{ => unit}/test_firewall.py | 0 ironic_inspector/test/{ => unit}/test_introspect.py | 0 ironic_inspector/test/{ => unit}/test_keystone.py | 0 ironic_inspector/test/{ => unit}/test_main.py | 0 ironic_inspector/test/{ => unit}/test_migrations.py | 0 ironic_inspector/test/{ => unit}/test_node_cache.py | 0 ironic_inspector/test/{ => unit}/test_plugins_base.py | 0 ironic_inspector/test/{ => unit}/test_plugins_discovery.py | 0 .../test/{ => unit}/test_plugins_extra_hardware.py | 0 ironic_inspector/test/{ => unit}/test_plugins_raid_device.py | 0 ironic_inspector/test/{ => unit}/test_plugins_rules.py | 0 ironic_inspector/test/{ => unit}/test_plugins_standard.py | 0 ironic_inspector/test/{ => unit}/test_process.py | 0 ironic_inspector/test/{ => unit}/test_rules.py | 0 ironic_inspector/test/{ => unit}/test_swift.py | 0 ironic_inspector/test/{ => unit}/test_utils.py | 0 tox.ini | 4 ++-- 19 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 ironic_inspector/test/unit/__init__.py rename ironic_inspector/test/{ => unit}/test_common_ironic.py (100%) rename ironic_inspector/test/{ => unit}/test_firewall.py (100%) rename ironic_inspector/test/{ => unit}/test_introspect.py (100%) rename ironic_inspector/test/{ => unit}/test_keystone.py (100%) rename ironic_inspector/test/{ => unit}/test_main.py (100%) rename ironic_inspector/test/{ => unit}/test_migrations.py (100%) rename ironic_inspector/test/{ => unit}/test_node_cache.py (100%) rename ironic_inspector/test/{ => unit}/test_plugins_base.py (100%) rename ironic_inspector/test/{ => unit}/test_plugins_discovery.py (100%) rename ironic_inspector/test/{ => unit}/test_plugins_extra_hardware.py (100%) rename ironic_inspector/test/{ => unit}/test_plugins_raid_device.py (100%) rename ironic_inspector/test/{ => unit}/test_plugins_rules.py (100%) 
rename ironic_inspector/test/{ => unit}/test_plugins_standard.py (100%) rename ironic_inspector/test/{ => unit}/test_process.py (100%) rename ironic_inspector/test/{ => unit}/test_rules.py (100%) rename ironic_inspector/test/{ => unit}/test_swift.py (100%) rename ironic_inspector/test/{ => unit}/test_utils.py (100%) diff --git a/ironic_inspector/test/unit/__init__.py b/ironic_inspector/test/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ironic_inspector/test/test_common_ironic.py b/ironic_inspector/test/unit/test_common_ironic.py similarity index 100% rename from ironic_inspector/test/test_common_ironic.py rename to ironic_inspector/test/unit/test_common_ironic.py diff --git a/ironic_inspector/test/test_firewall.py b/ironic_inspector/test/unit/test_firewall.py similarity index 100% rename from ironic_inspector/test/test_firewall.py rename to ironic_inspector/test/unit/test_firewall.py diff --git a/ironic_inspector/test/test_introspect.py b/ironic_inspector/test/unit/test_introspect.py similarity index 100% rename from ironic_inspector/test/test_introspect.py rename to ironic_inspector/test/unit/test_introspect.py diff --git a/ironic_inspector/test/test_keystone.py b/ironic_inspector/test/unit/test_keystone.py similarity index 100% rename from ironic_inspector/test/test_keystone.py rename to ironic_inspector/test/unit/test_keystone.py diff --git a/ironic_inspector/test/test_main.py b/ironic_inspector/test/unit/test_main.py similarity index 100% rename from ironic_inspector/test/test_main.py rename to ironic_inspector/test/unit/test_main.py diff --git a/ironic_inspector/test/test_migrations.py b/ironic_inspector/test/unit/test_migrations.py similarity index 100% rename from ironic_inspector/test/test_migrations.py rename to ironic_inspector/test/unit/test_migrations.py diff --git a/ironic_inspector/test/test_node_cache.py b/ironic_inspector/test/unit/test_node_cache.py similarity index 100% rename from 
ironic_inspector/test/test_node_cache.py rename to ironic_inspector/test/unit/test_node_cache.py diff --git a/ironic_inspector/test/test_plugins_base.py b/ironic_inspector/test/unit/test_plugins_base.py similarity index 100% rename from ironic_inspector/test/test_plugins_base.py rename to ironic_inspector/test/unit/test_plugins_base.py diff --git a/ironic_inspector/test/test_plugins_discovery.py b/ironic_inspector/test/unit/test_plugins_discovery.py similarity index 100% rename from ironic_inspector/test/test_plugins_discovery.py rename to ironic_inspector/test/unit/test_plugins_discovery.py diff --git a/ironic_inspector/test/test_plugins_extra_hardware.py b/ironic_inspector/test/unit/test_plugins_extra_hardware.py similarity index 100% rename from ironic_inspector/test/test_plugins_extra_hardware.py rename to ironic_inspector/test/unit/test_plugins_extra_hardware.py diff --git a/ironic_inspector/test/test_plugins_raid_device.py b/ironic_inspector/test/unit/test_plugins_raid_device.py similarity index 100% rename from ironic_inspector/test/test_plugins_raid_device.py rename to ironic_inspector/test/unit/test_plugins_raid_device.py diff --git a/ironic_inspector/test/test_plugins_rules.py b/ironic_inspector/test/unit/test_plugins_rules.py similarity index 100% rename from ironic_inspector/test/test_plugins_rules.py rename to ironic_inspector/test/unit/test_plugins_rules.py diff --git a/ironic_inspector/test/test_plugins_standard.py b/ironic_inspector/test/unit/test_plugins_standard.py similarity index 100% rename from ironic_inspector/test/test_plugins_standard.py rename to ironic_inspector/test/unit/test_plugins_standard.py diff --git a/ironic_inspector/test/test_process.py b/ironic_inspector/test/unit/test_process.py similarity index 100% rename from ironic_inspector/test/test_process.py rename to ironic_inspector/test/unit/test_process.py diff --git a/ironic_inspector/test/test_rules.py b/ironic_inspector/test/unit/test_rules.py similarity index 100% rename from 
ironic_inspector/test/test_rules.py rename to ironic_inspector/test/unit/test_rules.py diff --git a/ironic_inspector/test/test_swift.py b/ironic_inspector/test/unit/test_swift.py similarity index 100% rename from ironic_inspector/test/test_swift.py rename to ironic_inspector/test/unit/test_swift.py diff --git a/ironic_inspector/test/test_utils.py b/ironic_inspector/test/unit/test_utils.py similarity index 100% rename from ironic_inspector/test/test_utils.py rename to ironic_inspector/test/unit/test_utils.py diff --git a/tox.ini b/tox.ini index 891c234..6ca9f26 100644 --- a/tox.ini +++ b/tox.ini @@ -7,7 +7,7 @@ deps = -r{toxinidir}/test-requirements.txt -r{toxinidir}/plugin-requirements.txt commands = - coverage run --branch --include "ironic_inspector*" -m unittest discover ironic_inspector.test + coverage run --branch --include "ironic_inspector*" -m unittest discover ironic_inspector.test.unit coverage report -m --fail-under 90 setenv = PYTHONDONTWRITEBYTECODE=1 passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY @@ -21,7 +21,7 @@ commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasen [testenv:cover] commands = - coverage run --branch --include "ironic_inspector*" -m unittest discover ironic_inspector.test + coverage run --branch --include "ironic_inspector*" -m unittest discover ironic_inspector.test.unit coverage report -m [testenv:pep8] From cba6394fc957bc931065c5b54201ba1484849cf5 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 6 Apr 2016 04:25:26 +0000 Subject: [PATCH 11/83] Updated from global requirements Change-Id: I5139466b5937a017536ee43aca0ef9d702755ac8 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0c08469..1e1385e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ # The order of packages is significant, because pip processes them in the order # of appearance. 
Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -alembic>=0.8.0 # MIT +alembic>=0.8.4 # MIT Babel>=1.3 # BSD eventlet!=0.18.3,>=0.18.2 # MIT Flask<1.0,>=0.10 # BSD From f9e9a88d89ff495333cdccd4ca53f43cf9c0bfd0 Mon Sep 17 00:00:00 2001 From: dparalen Date: Wed, 13 Jan 2016 11:12:28 +0100 Subject: [PATCH 12/83] Allow rerunning introspection on stored data As requested in the related bug, this pull request allows to run the introspection again on previously stored data. This should make it simple to correct mistakes in the introspection rules. For this purpose, new API entry point was introduced: /v1/introspection//data/unprocessed that supports an empty POST method to trigger the introspection over stored data. New function `reapply` was introduced that takes care about the entry point and carries out the introspection. The `process` function was modified to allow reusing common parts in the new reapply function. The storage access methods were updated to allow saving the "raw" memdisk data besides the processed introspection data. Following preconditions are checked the reapply function having been triggered: * no data is being sent along with the request * Swift store is configured and enabled and the stored data is present for the node UUID * node_info object is cached for the UUID and it is possible to lock the object Should the preconditions fail, an immediate response is given to the user: * 400 if the request contained data or in case Swift store is not enabled in configuration * 409 if it wasn't possible to acquire lock for the node_info object * 404 in case Ironic didn't keep track of related BM node If the preconditions are met, a background task is executed to carry out the processing and a 202 Accepted response is returned to the endpoint user. 
As requested, these steps are performed in the background task: * preprocessing hooks * post processing hooks, storing result in Swift * introspection rules These steps are avoided, based on the RFE: * not_found_hook is skipped * power operations Limitations: * IPMI credentials are not updated --- ramdisk not running * there's no way to update the raw data atm. * the raw data is never cleaned from the store * check for stored data presence is performed in background; missing data situation still results in a 202 response Change-Id: Ic027c9d15f7f5475fcc3f599d081d1e8d5e244d4 Closes-Bug: #1525237 --- doc/source/http-api.rst | 20 ++ doc/source/usage.rst | 48 ++++ ironic_inspector/common/swift.py | 12 +- ironic_inspector/main.py | 21 +- ironic_inspector/process.py | 195 ++++++++++++--- ironic_inspector/test/functional.py | 66 +++++ ironic_inspector/test/unit/test_main.py | 72 ++++++ ironic_inspector/test/unit/test_process.py | 235 +++++++++++++++++- ...eapply-introspection-5edbbfaf498dbd12.yaml | 4 + 9 files changed, 629 insertions(+), 44 deletions(-) create mode 100644 releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml diff --git a/doc/source/http-api.rst b/doc/source/http-api.rst index 197dedc..88d472b 100644 --- a/doc/source/http-api.rst +++ b/doc/source/http-api.rst @@ -93,6 +93,25 @@ Response body: JSON dictionary with introspection data format and contents of the stored data. Notably, it depends on the ramdisk used and plugins enabled both in the ramdisk and in inspector itself. +Reapply introspection on stored data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``POST /v1/introspection//data/unprocessed`` to trigger +introspection on stored unprocessed data. No data is allowed to be +sent along with the request. + +Requires X-Auth-Token header with Keystone token for authentication. +Requires enabling Swift store in processing section of the +configuration file. 
+ +Response: + +* 202 - accepted +* 400 - bad request or store not configured +* 401, 403 - missing or invalid authentication +* 404 - node not found for UUID +* 409 - inspector locked node for processing + Introspection Rules ~~~~~~~~~~~~~~~~~~~ @@ -323,3 +342,4 @@ Version History * **1.1** adds endpoint to retrieve stored introspection data. * **1.2** endpoints for manipulating introspection rules. * **1.3** endpoint for canceling running introspection +* **1.4** endpoint for reapplying the introspection over stored data. diff --git a/doc/source/usage.rst b/doc/source/usage.rst index bb9a904..b0c946e 100644 --- a/doc/source/usage.rst +++ b/doc/source/usage.rst @@ -278,3 +278,51 @@ nodes in the introspection rules using the rule condition ``eq``:: "conditions": [ {'op': 'eq', 'field': 'data://auto_discovered', 'value': True} ] + +Reapplying introspection on stored data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To allow correcting mistakes in introspection rules the API provides +an entry point that triggers the introspection over stored data. The +data to use for processing is kept in Swift separately from the data +already processed. Reapplying introspection overwrites processed data +in the store. Updating the introspection data through the endpoint +isn't supported yet. 
Following preconditions are checked before +reapplying introspection: + +* no data is being sent along with the request +* Swift store is configured and enabled +* introspection data is stored in Swift for the node UUID +* node record is kept in database for the UUID +* introspection is not ongoing for the node UUID + +Should the preconditions fail an immediate response is given to the +user: + +* ``400`` if the request contained data or in case Swift store is not + enabled in configuration +* ``404`` in case Ironic doesn't keep track of the node UUID +* ``409`` if an introspection is already ongoing for the node + +If the preconditions are met a background task is executed to carry +out the processing and a ``202 Accepted`` response is returned to the +endpoint user. As requested, these steps are performed in the +background task: + +* preprocessing hooks +* post processing hooks, storing result in Swift +* introspection rules + +These steps are avoided, based on the feature requirements: + +* ``node_not_found_hook`` is skipped +* power operations +* roll-back actions done by hooks + +Limitations: + +* IPMI credentials are not updated --- ramdisk not running +* there's no way to update the unprocessed data atm. +* the unprocessed data is never cleaned from the store +* check for stored data presence is performed in background; + missing data situation still results in a ``202`` response diff --git a/ironic_inspector/common/swift.py b/ironic_inspector/common/swift.py index 152a782..12ba20a 100644 --- a/ironic_inspector/common/swift.py +++ b/ironic_inspector/common/swift.py @@ -196,27 +196,35 @@ class SwiftAPI(object): return obj -def store_introspection_data(data, uuid): +def store_introspection_data(data, uuid, suffix=None): """Uploads introspection data to Swift. 
:param data: data to store in Swift :param uuid: UUID of the Ironic node that the data came from + :param suffix: optional suffix to add to the underlying swift + object name :returns: name of the Swift object that the data is stored in """ swift_api = SwiftAPI() swift_object_name = '%s-%s' % (OBJECT_NAME_PREFIX, uuid) + if suffix is not None: + swift_object_name = '%s-%s' % (swift_object_name, suffix) swift_api.create_object(swift_object_name, json.dumps(data)) return swift_object_name -def get_introspection_data(uuid): +def get_introspection_data(uuid, suffix=None): """Downloads introspection data from Swift. :param uuid: UUID of the Ironic node that the data came from + :param suffix: optional suffix to add to the underlying swift + object name :returns: Swift object with the introspection data """ swift_api = SwiftAPI() swift_object_name = '%s-%s' % (OBJECT_NAME_PREFIX, uuid) + if suffix is not None: + swift_object_name = '%s-%s' % (swift_object_name, suffix) return swift_api.get_object(swift_object_name) diff --git a/ironic_inspector/main.py b/ironic_inspector/main.py index 0d8e1e4..fb64f6b 100644 --- a/ironic_inspector/main.py +++ b/ironic_inspector/main.py @@ -47,7 +47,7 @@ app = flask.Flask(__name__) LOG = utils.getProcessingLogger(__name__) MINIMUM_API_VERSION = (1, 0) -CURRENT_API_VERSION = (1, 3) +CURRENT_API_VERSION = (1, 4) _LOGGING_EXCLUDED_KEYS = ('logs',) @@ -234,6 +234,25 @@ def api_introspection_data(uuid): code=404) +@app.route('/v1/introspection//data/unprocessed', methods=['POST']) +@convert_exceptions +def api_introspection_reapply(uuid): + utils.check_auth(flask.request) + + if flask.request.content_length: + return error_response(_('User data processing is not ' + 'supported yet'), code=400) + + if CONF.processing.store_data == 'swift': + process.reapply(uuid) + return '', 202 + else: + return error_response(_('Inspector is not configured to store' + ' data. 
Set the [processing] ' + 'store_data configuration option to ' + 'change this.'), code=400) + + def rule_repr(rule, short): result = rule.as_dict(short=short) result['links'] = [{ diff --git a/ironic_inspector/process.py b/ironic_inspector/process.py index b3a7199..42b9e73 100644 --- a/ironic_inspector/process.py +++ b/ironic_inspector/process.py @@ -13,7 +13,10 @@ """Handling introspection data from the ramdisk.""" +import copy import eventlet +import json + from ironicclient import exceptions from oslo_config import cfg @@ -33,6 +36,7 @@ LOG = utils.getProcessingLogger(__name__) _CREDENTIALS_WAIT_RETRIES = 10 _CREDENTIALS_WAIT_PERIOD = 3 _STORAGE_EXCLUDED_KEYS = {'logs'} +_UNPROCESSED_DATA_STORE_SUFFIX = 'UNPROCESSED' def _find_node_info(introspection_data, failures): @@ -60,13 +64,8 @@ def _find_node_info(introspection_data, failures): failures.append(_('Look up error: %s') % exc) -def process(introspection_data): - """Process data from the ramdisk. - - This function heavily relies on the hooks to do the actual data processing. 
- """ +def _run_pre_hooks(introspection_data, failures): hooks = plugins_base.processing_hooks_manager() - failures = [] for hook_ext in hooks: # NOTE(dtantsur): catch exceptions, so that we have changes to update # node introspection status after look up @@ -90,6 +89,64 @@ def process(introspection_data): 'exc_class': exc.__class__.__name__, 'error': exc}) + +def _filter_data_excluded_keys(data): + return {k: v for k, v in data.items() + if k not in _STORAGE_EXCLUDED_KEYS} + + +def _store_data(node_info, data, suffix=None): + if CONF.processing.store_data != 'swift': + LOG.debug("Swift support is disabled, introspection data " + "won't be stored", node_info=node_info) + return + + swift_object_name = swift.store_introspection_data( + _filter_data_excluded_keys(data), + node_info.uuid, + suffix=suffix + ) + LOG.info(_LI('Introspection data was stored in Swift in object ' + '%s'), swift_object_name, node_info=node_info) + if CONF.processing.store_data_location: + node_info.patch([{'op': 'add', 'path': '/extra/%s' % + CONF.processing.store_data_location, + 'value': swift_object_name}]) + + +def _store_unprocessed_data(node_info, data): + # runs in background + try: + _store_data(node_info, data, + suffix=_UNPROCESSED_DATA_STORE_SUFFIX) + except Exception: + LOG.exception(_LE('Encountered exception saving unprocessed ' + 'introspection data'), node_info=node_info, + data=data) + + +def _get_unprocessed_data(uuid): + if CONF.processing.store_data == 'swift': + LOG.debug('Fetching unprocessed introspection data from ' + 'Swift for %s', uuid) + return json.loads( + swift.get_introspection_data( + uuid, + suffix=_UNPROCESSED_DATA_STORE_SUFFIX + ) + ) + else: + raise utils.Error(_('Swift support is disabled'), code=400) + + +def process(introspection_data): + """Process data from the ramdisk. + + This function heavily relies on the hooks to do the actual data processing. 
+ """ + unprocessed_data = copy.deepcopy(introspection_data) + failures = [] + _run_pre_hooks(introspection_data, failures) node_info = _find_node_info(introspection_data, failures) if node_info: # Locking is already done in find_node() but may be not done in a @@ -112,6 +169,12 @@ def process(introspection_data): 'error: %s') % node_info.error, node_info=node_info, code=400) + # Note(mkovacik): store data now when we're sure that a background + # thread won't race with other process() or introspect.abort() + # call + utils.executor().submit(_store_unprocessed_data, node_info, + unprocessed_data) + try: node = node_info.node() except exceptions.NotFound: @@ -148,23 +211,7 @@ def _process_node(node, introspection_data, node_info): node_info.create_ports(introspection_data.get('macs') or ()) _run_post_hooks(node_info, introspection_data) - - if CONF.processing.store_data == 'swift': - stored_data = {k: v for k, v in introspection_data.items() - if k not in _STORAGE_EXCLUDED_KEYS} - swift_object_name = swift.store_introspection_data(stored_data, - node_info.uuid) - LOG.info(_LI('Introspection data was stored in Swift in object %s'), - swift_object_name, - node_info=node_info, data=introspection_data) - if CONF.processing.store_data_location: - node_info.patch([{'op': 'add', 'path': '/extra/%s' % - CONF.processing.store_data_location, - 'value': swift_object_name}]) - else: - LOG.debug('Swift support is disabled, introspection data ' - 'won\'t be stored', - node_info=node_info, data=introspection_data) + _store_data(node_info, introspection_data) ironic = ir_utils.get_client() firewall.update_filters(ironic) @@ -222,23 +269,93 @@ def _finish_set_ipmi_credentials(ironic, node, node_info, introspection_data, raise utils.Error(msg, node_info=node_info, data=introspection_data) -def _finish(ironic, node_info, introspection_data): - LOG.debug('Forcing power off of node %s', node_info.uuid) - try: - ironic.node.set_power_state(node_info.uuid, 'off') - except Exception as 
exc: - if node_info.node().provision_state == 'enroll': - LOG.info(_LI("Failed to power off the node in 'enroll' state, " - "ignoring; error was %s") % exc, - node_info=node_info, data=introspection_data) - else: - msg = (_('Failed to power off node %(node)s, check it\'s ' - 'power management configuration: %(exc)s') % - {'node': node_info.uuid, 'exc': exc}) - node_info.finished(error=msg) - raise utils.Error(msg, node_info=node_info, - data=introspection_data) +def _finish(ironic, node_info, introspection_data, power_off=True): + if power_off: + LOG.debug('Forcing power off of node %s', node_info.uuid) + try: + ironic.node.set_power_state(node_info.uuid, 'off') + except Exception as exc: + if node_info.node().provision_state == 'enroll': + LOG.info(_LI("Failed to power off the node in" + "'enroll' state, ignoring; error was " + "%s") % exc, node_info=node_info, + data=introspection_data) + else: + msg = (_('Failed to power off node %(node)s, check ' + 'its power management configuration: ' + '%(exc)s') % {'node': node_info.uuid, 'exc': + exc}) + node_info.finished(error=msg) + raise utils.Error(msg, node_info=node_info, + data=introspection_data) + LOG.info(_LI('Node powered-off'), node_info=node_info, + data=introspection_data) node_info.finished() LOG.info(_LI('Introspection finished successfully'), node_info=node_info, data=introspection_data) + + +def reapply(uuid): + """Re-apply introspection steps. + + Re-apply preprocessing, postprocessing and introspection rules on + stored data. + + :param uuid: node uuid to use + :raises: utils.Error + + """ + + LOG.debug('Processing re-apply introspection request for node ' + 'UUID: %s', uuid) + node_info = node_cache.get_node(uuid, locked=False) + if not node_info.acquire_lock(blocking=False): + # Note (mkovacik): it should be sufficient to check data + # presence & locking. 
If either introspection didn't start + # yet, was in waiting state or didn't finish yet, either data + # won't be available or locking would fail + raise utils.Error(_('Node locked, please, try again later'), + node_info=node_info, code=409) + + utils.executor().submit(_reapply, node_info) + + +def _reapply(node_info): + # runs in background + try: + introspection_data = _get_unprocessed_data(node_info.uuid) + except Exception: + LOG.exception(_LE('Encountered exception while fetching ' + 'stored introspection data'), + node_info=node_info) + node_info.release_lock() + return + + failures = [] + _run_pre_hooks(introspection_data, failures) + if failures: + LOG.error(_LE('Pre-processing failures detected reapplying ' + 'introspection on stored data:\n%s'), + '\n'.join(failures), node_info=node_info) + node_info.finished(error='\n'.join(failures)) + return + + try: + ironic = ir_utils.get_client() + node_info.create_ports(introspection_data.get('macs') or ()) + _run_post_hooks(node_info, introspection_data) + _store_data(node_info, introspection_data) + node_info.invalidate_cache() + rules.apply(node_info, introspection_data) + _finish(ironic, node_info, introspection_data, + power_off=False) + except Exception as exc: + LOG.exception(_LE('Encountered exception reapplying ' + 'introspection on stored data'), + node_info=node_info, + data=introspection_data) + node_info.finished(error=str(exc)) + else: + LOG.info(_LI('Successfully reapplied introspection on stored ' + 'data'), node_info=node_info, data=introspection_data) diff --git a/ironic_inspector/test/functional.py b/ironic_inspector/test/functional.py index b9c3b8e..023c9d8 100644 --- a/ironic_inspector/test/functional.py +++ b/ironic_inspector/test/functional.py @@ -15,6 +15,7 @@ import eventlet eventlet.monkey_patch() import contextlib +import copy import json import os import shutil @@ -27,6 +28,7 @@ from oslo_utils import units import requests from ironic_inspector.common import ironic as ir_utils +from 
ironic_inspector.common import swift from ironic_inspector import dbsync from ironic_inspector import main from ironic_inspector import rules @@ -164,6 +166,10 @@ class Base(base.NodeTest): def call_abort_introspect(self, uuid): return self.call('post', '/v1/introspection/%s/abort' % uuid) + def call_reapply(self, uuid): + return self.call('post', '/v1/introspection/%s/data/unprocessed' % + uuid) + def call_continue(self, data): return self.call('post', '/v1/continue', data=data).json() @@ -432,6 +438,66 @@ class Test(Base): # after releasing the node lock self.call('post', '/v1/continue', self.data, expect_error=400) + @mock.patch.object(swift, 'store_introspection_data', autospec=True) + @mock.patch.object(swift, 'get_introspection_data', autospec=True) + def test_stored_data_processing(self, get_mock, store_mock): + cfg.CONF.set_override('store_data', 'swift', 'processing') + + # ramdisk data copy + # please mind the data is changed during processing + ramdisk_data = json.dumps(copy.deepcopy(self.data)) + get_mock.return_value = ramdisk_data + + self.call_introspect(self.uuid) + eventlet.greenthread.sleep(DEFAULT_SLEEP) + self.cli.node.set_power_state.assert_called_once_with(self.uuid, + 'reboot') + + res = self.call_continue(self.data) + self.assertEqual({'uuid': self.uuid}, res) + eventlet.greenthread.sleep(DEFAULT_SLEEP) + + status = self.call_get_status(self.uuid) + self.assertEqual({'finished': True, 'error': None}, status) + + res = self.call_reapply(self.uuid) + self.assertEqual(202, res.status_code) + self.assertEqual(b'', res.text) + eventlet.greenthread.sleep(DEFAULT_SLEEP) + + # reapply request data + get_mock.assert_called_once_with(self.uuid, + suffix='UNPROCESSED') + + # store ramdisk data, store processing result data, store + # reapply processing result data; the ordering isn't + # guaranteed as store ramdisk data runs in a background + # thread; hower, last call has to always be reapply processing + # result data + store_ramdisk_call = 
mock.call(mock.ANY, self.uuid, + suffix='UNPROCESSED') + store_processing_call = mock.call(mock.ANY, self.uuid, + suffix=None) + self.assertEqual(3, len(store_mock.call_args_list)) + self.assertIn(store_ramdisk_call, + store_mock.call_args_list[0:2]) + self.assertIn(store_processing_call, + store_mock.call_args_list[0:2]) + self.assertEqual(store_processing_call, + store_mock.call_args_list[2]) + + # second reapply call + get_mock.return_value = ramdisk_data + res = self.call_reapply(self.uuid) + self.assertEqual(202, res.status_code) + self.assertEqual(b'', res.text) + eventlet.greenthread.sleep(DEFAULT_SLEEP) + + # reapply saves the result + self.assertEqual(4, len(store_mock.call_args_list)) + self.assertEqual(store_processing_call, + store_mock.call_args_list[-1]) + @contextlib.contextmanager def mocked_server(): diff --git a/ironic_inspector/test/unit/test_main.py b/ironic_inspector/test/unit/test_main.py index d400ae9..ec3b825 100644 --- a/ironic_inspector/test/unit/test_main.py +++ b/ironic_inspector/test/unit/test_main.py @@ -234,6 +234,78 @@ class TestApiGetData(BaseAPITest): self.assertEqual(404, res.status_code) +@mock.patch.object(process, 'reapply', autospec=True) +class TestApiReapply(BaseAPITest): + + def setUp(self): + super(TestApiReapply, self).setUp() + CONF.set_override('store_data', 'swift', 'processing') + + def test_ok(self, reapply_mock): + + self.app.post('/v1/introspection/%s/data/unprocessed' % + self.uuid) + reapply_mock.assert_called_once_with(self.uuid) + + def test_user_data(self, reapply_mock): + res = self.app.post('/v1/introspection/%s/data/unprocessed' % + self.uuid, data='some data') + self.assertEqual(400, res.status_code) + message = json.loads(res.data.decode())['error']['message'] + self.assertEqual('User data processing is not supported yet', + message) + self.assertFalse(reapply_mock.called) + + def test_swift_disabled(self, reapply_mock): + CONF.set_override('store_data', 'none', 'processing') + + res = 
self.app.post('/v1/introspection/%s/data/unprocessed' % + self.uuid) + self.assertEqual(400, res.status_code) + message = json.loads(res.data.decode())['error']['message'] + self.assertEqual('Inspector is not configured to store ' + 'data. Set the [processing] store_data ' + 'configuration option to change this.', + message) + self.assertFalse(reapply_mock.called) + + def test_node_locked(self, reapply_mock): + exc = utils.Error('Locked.', code=409) + reapply_mock.side_effect = exc + + res = self.app.post('/v1/introspection/%s/data/unprocessed' % + self.uuid) + + self.assertEqual(409, res.status_code) + message = json.loads(res.data.decode())['error']['message'] + self.assertEqual(str(exc), message) + reapply_mock.assert_called_once_with(self.uuid) + + def test_node_not_found(self, reapply_mock): + exc = utils.Error('Not found.', code=404) + reapply_mock.side_effect = exc + + res = self.app.post('/v1/introspection/%s/data/unprocessed' % + self.uuid) + + self.assertEqual(404, res.status_code) + message = json.loads(res.data.decode())['error']['message'] + self.assertEqual(str(exc), message) + reapply_mock.assert_called_once_with(self.uuid) + + def test_generic_error(self, reapply_mock): + exc = utils.Error('Oops', code=400) + reapply_mock.side_effect = exc + + res = self.app.post('/v1/introspection/%s/data/unprocessed' % + self.uuid) + + self.assertEqual(400, res.status_code) + message = json.loads(res.data.decode())['error']['message'] + self.assertEqual(str(exc), message) + reapply_mock.assert_called_once_with(self.uuid) + + class TestApiRules(BaseAPITest): @mock.patch.object(rules, 'get_all') def test_get_all(self, get_all_mock): diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py index 021a134..01dc4a7 100644 --- a/ironic_inspector/test/unit/test_process.py +++ b/ironic_inspector/test/unit/test_process.py @@ -11,6 +11,7 @@ # See the License for the specific language governing permissions and # limitations under 
the License. +import copy import functools import json import time @@ -98,6 +99,37 @@ class TestProcess(BaseTest): process_mock.assert_called_once_with(cli.node.get.return_value, self.data, pop_mock.return_value) + @prepare_mocks + def test_save_unprocessed_data(self, cli, pop_mock, process_mock): + CONF.set_override('store_data', 'swift', 'processing') + expected = copy.deepcopy(self.data) + + with mock.patch.object(process, '_store_unprocessed_data', + autospec=True) as store_mock: + process.process(self.data) + + store_mock.assert_called_once_with(mock.ANY, expected) + + @prepare_mocks + def test_save_unprocessed_data_failure(self, cli, pop_mock, + process_mock): + CONF.set_override('store_data', 'swift', 'processing') + name = 'inspector_data-%s-%s' % ( + self.uuid, + process._UNPROCESSED_DATA_STORE_SUFFIX + ) + + with mock.patch.object(process.swift, 'SwiftAPI', + autospec=True) as swift_mock: + swift_conn = swift_mock.return_value + swift_conn.create_object.side_effect = iter([utils.Error('Oops')]) + + res = process.process(self.data) + + # assert store failure doesn't break processing + self.assertEqual(self.fake_result_json, res) + swift_conn.create_object.assert_called_once_with(name, mock.ANY) + @prepare_mocks def test_no_ipmi(self, cli, pop_mock, process_mock): del self.data['ipmi_address'] @@ -396,8 +428,9 @@ class TestProcessNode(BaseTest): self.assertCalledWithPatch(self.patch_props, self.cli.node.update) finished_mock.assert_called_once_with( mock.ANY, - error='Failed to power off node %s, check it\'s power management' - ' configuration: boom' % self.uuid) + error='Failed to power off node %s, check its power ' + 'management configuration: boom' % self.uuid + ) @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) def test_power_off_enroll_state(self, finished_mock, filters_mock, @@ -462,3 +495,201 @@ class TestProcessNode(BaseTest): self.assertEqual(expected, json.loads(swift_conn.create_object.call_args[0][1])) 
self.assertCalledWithPatch(self.patch_props, self.cli.node.update) + + +@mock.patch.object(process, '_reapply', autospec=True) +@mock.patch.object(node_cache, 'get_node', autospec=True) +class TestReapply(BaseTest): + def prepare_mocks(func): + @functools.wraps(func) + def wrapper(self, pop_mock, *args, **kw): + pop_mock.return_value = node_cache.NodeInfo( + uuid=self.node.uuid, + started_at=self.started_at) + pop_mock.return_value.finished = mock.Mock() + pop_mock.return_value.acquire_lock = mock.Mock() + return func(self, pop_mock, *args, **kw) + + return wrapper + + def setUp(self): + super(TestReapply, self).setUp() + CONF.set_override('store_data', 'swift', 'processing') + + @prepare_mocks + def test_ok(self, pop_mock, reapply_mock): + process.reapply(self.uuid) + pop_mock.assert_called_once_with(self.uuid, locked=False) + pop_mock.return_value.acquire_lock.assert_called_once_with( + blocking=False + ) + + reapply_mock.assert_called_once_with(pop_mock.return_value) + + @prepare_mocks + def test_locking_failed(self, pop_mock, reapply_mock): + pop_mock.return_value.acquire_lock.return_value = False + exc = utils.Error('Node locked, please, try again later') + + with self.assertRaises(type(exc)) as cm: + process.reapply(self.uuid) + + self.assertEqual(str(exc), str(cm.exception)) + + pop_mock.assert_called_once_with(self.uuid, locked=False) + pop_mock.return_value.acquire_lock.assert_called_once_with( + blocking=False + ) + + +@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') +@mock.patch.object(process.rules, 'apply', autospec=True) +@mock.patch.object(process.swift, 'SwiftAPI', autospec=True) +@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) +@mock.patch.object(node_cache.NodeInfo, 'release_lock', autospec=True) +class TestReapplyNode(BaseTest): + def setUp(self): + super(TestReapplyNode, self).setUp() + CONF.set_override('processing_hooks', + '$processing.default_processing_hooks,example', + 'processing') + 
CONF.set_override('store_data', 'swift', 'processing') + self.data['macs'] = self.macs + self.data['all_interfaces'] = self.data['interfaces'] + self.ports = self.all_ports + self.node_info = node_cache.NodeInfo(uuid=self.uuid, + started_at=self.started_at, + node=self.node) + self.node_info.invalidate_cache = mock.Mock() + self.new_creds = ('user', 'password') + self.cli = mock.Mock() + self.cli.port.create.side_effect = self.ports + self.cli.node.update.return_value = self.node + self.cli.node.list_ports.return_value = [] + + @mock.patch.object(ir_utils, 'get_client', autospec=True) + def call(self, cli_mock): + cli_mock.return_value = self.cli + process._reapply(self.node_info) + # make sure node_info lock is released after a call + self.node_info.release_lock.assert_called_once_with(self.node_info) + + def prepare_mocks(fn): + @functools.wraps(fn) + def wrapper(self, release_mock, finished_mock, swift_mock, + *args, **kw): + finished_mock.side_effect = lambda *a, **kw: \ + release_mock(self.node_info) + swift_client_mock = swift_mock.return_value + fn(self, finished_mock, swift_client_mock, *args, **kw) + return wrapper + + @prepare_mocks + def test_ok(self, finished_mock, swift_mock, apply_mock, + post_hook_mock): + swift_name = 'inspector_data-%s' % self.uuid + swift_mock.get_object.return_value = json.dumps(self.data) + + with mock.patch.object(process.LOG, 'error', + autospec=True) as log_mock: + self.call() + + # no failures logged + self.assertFalse(log_mock.called) + + post_hook_mock.assert_called_once_with(mock.ANY, self.node_info) + swift_mock.create_object.assert_called_once_with(swift_name, + mock.ANY) + swifted_data = json.loads(swift_mock.create_object.call_args[0][1]) + + self.node_info.invalidate_cache.assert_called_once_with() + apply_mock.assert_called_once_with(self.node_info, swifted_data) + + # assert no power operations were performed + self.assertFalse(self.cli.node.set_power_state.called) + 
finished_mock.assert_called_once_with(self.node_info) + + # asserting validate_interfaces was called + self.assertEqual({'em2': self.data['interfaces']['em2']}, + swifted_data['interfaces']) + self.assertEqual([self.pxe_mac], swifted_data['macs']) + + # assert ports were created with whatever there was left + # behind validate_interfaces + self.cli.port.create.assert_called_once_with( + node_uuid=self.uuid, + address=swifted_data['macs'][0] + ) + + @prepare_mocks + def test_get_incomming_data_exception(self, finished_mock, + swift_mock, apply_mock, + post_hook_mock, ): + exc = Exception('Oops') + swift_mock.get_object.side_effect = exc + with mock.patch.object(process.LOG, 'exception', + autospec=True) as log_mock: + self.call() + + log_mock.assert_called_once_with('Encountered exception ' + 'while fetching stored ' + 'introspection data', + node_info=self.node_info) + + self.assertFalse(swift_mock.create_object.called) + self.assertFalse(apply_mock.called) + self.assertFalse(post_hook_mock.called) + self.assertFalse(finished_mock.called) + + @prepare_mocks + def test_prehook_failure(self, finished_mock, swift_mock, + apply_mock, post_hook_mock, ): + CONF.set_override('processing_hooks', 'example', + 'processing') + plugins_base._HOOKS_MGR = None + + exc = Exception('Failed.') + swift_mock.get_object.return_value = json.dumps(self.data) + + with mock.patch.object(example_plugin.ExampleProcessingHook, + 'before_processing') as before_processing_mock: + before_processing_mock.side_effect = exc + with mock.patch.object(process.LOG, 'error', + autospec=True) as log_mock: + self.call() + + exc_failure = ('Unexpected exception %(exc_class)s during ' + 'preprocessing in hook example: %(error)s' % + {'exc_class': type(exc).__name__, 'error': + exc}) + log_mock.assert_called_once_with('Pre-processing failures ' + 'detected reapplying ' + 'introspection on stored ' + 'data:\n%s', exc_failure, + node_info=self.node_info) + finished_mock.assert_called_once_with(self.node_info, 
+ error=exc_failure) + # assert _reapply ended having detected the failure + self.assertFalse(swift_mock.create_object.called) + self.assertFalse(apply_mock.called) + self.assertFalse(post_hook_mock.called) + + @prepare_mocks + def test_generic_exception_creating_ports(self, finished_mock, + swift_mock, apply_mock, + post_hook_mock): + swift_mock.get_object.return_value = json.dumps(self.data) + exc = Exception('Oops') + self.cli.port.create.side_effect = exc + + with mock.patch.object(process.LOG, 'exception') as log_mock: + self.call() + + log_mock.assert_called_once_with('Encountered exception reapplying' + ' introspection on stored data', + node_info=self.node_info, + data=mock.ANY) + finished_mock.assert_called_once_with(self.node_info, error=str(exc)) + self.assertFalse(swift_mock.create_object.called) + self.assertFalse(apply_mock.called) + self.assertFalse(post_hook_mock.called) diff --git a/releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml b/releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml new file mode 100644 index 0000000..22c9fb6 --- /dev/null +++ b/releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml @@ -0,0 +1,4 @@ +--- +features: + - Introduced API "POST /v1/introspection/UUID/data/unprocessed" + for reapplying the introspection over stored data. 
From 2a12cff6411af60b224e914ed6042287ade6e63f Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 8 Apr 2016 00:27:32 +0000 Subject: [PATCH 13/83] Updated from global requirements Change-Id: If1d1f8058a180cb8d0661e9d4c174ebd1ef06fd7 --- requirements.txt | 2 +- test-requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 1e1385e..a04bc81 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ pbr>=1.6 # Apache-2.0 python-ironicclient>=1.1.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 -oslo.config>=3.7.0 # Apache-2.0 +oslo.config>=3.9.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 diff --git a/test-requirements.txt b/test-requirements.txt index c77cbb7..166089b 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,8 +7,8 @@ hacking<0.11,>=0.10.0 mock>=1.2 # BSD sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 -reno>=0.1.1 # Apache2 -fixtures>=1.3.1 # Apache-2.0/BSD +reno>=1.6.2 # Apache2 +fixtures<2.0,>=1.3.1 # Apache-2.0/BSD testresources>=0.2.4 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD oslotest>=1.10.0 # Apache-2.0 From 16594518f3bbf8637d4dad6c42cc8712024e3b2d Mon Sep 17 00:00:00 2001 From: Marcellin Fom Tchassem Date: Tue, 5 Apr 2016 09:19:56 -0500 Subject: [PATCH 14/83] Make tox respect upper-constraints.txt This will force pip install to use the upper-constraints.txt specified version of pip modules. When you don't do this, you are out on the bleeding edge and become unstable everytime some python library in the wide world changes in a way that you don't expect. Post jobs are not yet compatible with the upper-constraints file, so override the install_command there to skip upper-constraints. 
Credit to the equivalent Ironic patch https://review.openstack.org/#/c/300146/ Change-Id: Ica668afedf622a0be8e8566ac88e1d0020ed9bc7 Closes-Bug: #1563038 --- CONTRIBUTING.rst | 4 ++++ tox.ini | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index d44f1d5..f7f7980 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -58,6 +58,10 @@ interpreter of one of supported versions (currently 2.7 and 3.4), use a db named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. +.. note:: + Users of Fedora <= 23 will need to run "sudo dnf --releasever=24 update + python-virtualenv" to run unit tests + To run the functional tests, use:: tox -e func diff --git a/tox.ini b/tox.ini index 891c234..ed546c0 100644 --- a/tox.ini +++ b/tox.ini @@ -3,6 +3,7 @@ envlist = py34,py27,pep8,func [testenv] usedevelop = True +install_command = pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} deps = -r{toxinidir}/test-requirements.txt -r{toxinidir}/plugin-requirements.txt @@ -13,13 +14,31 @@ setenv = PYTHONDONTWRITEBYTECODE=1 passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY [testenv:venv] +# NOTE(amrith) The setting of the install_command in this location +# is only required because currently infra does not actually +# support constraints files for the environment job, and while +# the environment variable UPPER_CONSTRAINTS_FILE is set, there's +# no file there. It can be removed when infra changes this. +install_command = pip install -U {opts} {packages} commands = {posargs} [testenv:releasenotes] +# NOTE(amrith) The setting of the install_command in this location +# is only required because currently infra does not actually +# support constraints files for the release notes job, and while +# the environment variable UPPER_CONSTRAINTS_FILE is set, there's +# no file there. 
It can be removed when infra changes this. +install_command = pip install -U {opts} {packages} envdir = {toxworkdir}/venv commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:cover] +# NOTE(amrith) The setting of the install_command in this location +# is only required because currently infra does not actually +# support constraints files for the cover job, and while +# the environment variable UPPER_CONSTRAINTS_FILE is set, there's +# no file there. It can be removed when infra changes this. +install_command = pip install -U {opts} {packages} commands = coverage run --branch --include "ironic_inspector*" -m unittest discover ironic_inspector.test coverage report -m From 50783c14f1ea732c35420a1714c8a5ee849cd009 Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Thu, 17 Mar 2016 09:38:17 +0200 Subject: [PATCH 15/83] Tempest plugin initial commit This patch contains tempest plugin interface which are generated by tempest-plugin-cookiecutter[1]. Also empty test manager and basic test was added. 
[1] https://github.com/openstack/tempest-plugin-cookiecutter Change-Id: I333462987bb6cdd1933fbb3550f527364c74ba07 --- .../test/inspector_tempest_plugin/README.rst | 18 ++++++++++ .../test/inspector_tempest_plugin/__init__.py | 0 .../test/inspector_tempest_plugin/config.py | 13 +++++++ .../test/inspector_tempest_plugin/plugin.py | 34 +++++++++++++++++++ .../services/__init__.py | 0 .../tests/__init__.py | 0 .../tests/api/__init__.py | 0 .../tests/scenario/__init__.py | 0 .../tests/scenario/manager.py | 26 ++++++++++++++ .../tests/scenario/test_basic.py | 27 +++++++++++++++ setup.cfg | 3 ++ 11 files changed, 121 insertions(+) create mode 100644 ironic_inspector/test/inspector_tempest_plugin/README.rst create mode 100644 ironic_inspector/test/inspector_tempest_plugin/__init__.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/config.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/plugin.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/services/__init__.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/__init__.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/api/__init__.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/scenario/__init__.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/scenario/manager.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/scenario/test_basic.py diff --git a/ironic_inspector/test/inspector_tempest_plugin/README.rst b/ironic_inspector/test/inspector_tempest_plugin/README.rst new file mode 100644 index 0000000..5ccb57d --- /dev/null +++ b/ironic_inspector/test/inspector_tempest_plugin/README.rst @@ -0,0 +1,18 @@ +======================================= +Tempest Integration of ironic-inspector +======================================= + +This directory contains Tempest tests to cover the ironic-inspector project. 
+ +It uses tempest plugin to automatically load these tests into tempest. More +information about tempest plugin could be found here: +`Plugin `_ + +The legacy method of running Tempest is to just treat the Tempest source code +as a python unittest: +`Run tests `_ + +There is also tox configuration for tempest, use following regex for running +introspection tests:: + + $ tox -e all-plugin -- inspector_tempest_plugin diff --git a/ironic_inspector/test/inspector_tempest_plugin/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ironic_inspector/test/inspector_tempest_plugin/config.py b/ironic_inspector/test/inspector_tempest_plugin/config.py new file mode 100644 index 0000000..27c2fb3 --- /dev/null +++ b/ironic_inspector/test/inspector_tempest_plugin/config.py @@ -0,0 +1,13 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest import config # noqa diff --git a/ironic_inspector/test/inspector_tempest_plugin/plugin.py b/ironic_inspector/test/inspector_tempest_plugin/plugin.py new file mode 100644 index 0000000..5d9a093 --- /dev/null +++ b/ironic_inspector/test/inspector_tempest_plugin/plugin.py @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import os + +from tempest import config # noqa +from tempest.test_discover import plugins + +from ironic_inspector.test.inspector_tempest_plugin import config # noqa + + +class InspectorTempestPlugin(plugins.TempestPlugin): + def load_tests(self): + base_path = os.path.split(os.path.dirname( + os.path.abspath(__file__)))[0] + test_dir = "inspector_tempest_plugin/tests" + full_test_dir = os.path.join(base_path, test_dir) + return full_test_dir, base_path + + def register_opts(self, conf): + pass + + def get_opt_lists(self): + pass diff --git a/ironic_inspector/test/inspector_tempest_plugin/services/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/services/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/api/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/tests/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/manager.py b/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/manager.py new file mode 100644 index 0000000..ad47d25 --- /dev/null +++ 
b/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/manager.py @@ -0,0 +1,26 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest.scenario import manager + + +class InspectorScenarioTest(manager.BaremetalScenarioTest): + """Provide harness to do Inspector scenario tests.""" + + credentials = ['primary', 'admin'] + + @classmethod + def setup_clients(cls): + super(InspectorScenarioTest, cls).setup_clients() + + def setUp(self): + super(InspectorScenarioTest, self).setUp() diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/test_basic.py b/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/test_basic.py new file mode 100644 index 0000000..9b34d33 --- /dev/null +++ b/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/test_basic.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from tempest import test # noqa + +from ironic_inspector.test.inspector_tempest_plugin.tests.scenario \ + import manager + + +class InspectorBasicTest(manager.InspectorScenarioTest): + @test.idempotent_id('03bf7990-bee0-4dd7-bf74-b97ad7b52a4b') + @test.services('baremetal', 'compute', 'image', + 'network', 'object_storage') + def test_berametal_introspection_ops(self): + """This smoke test case follows this basic set of operations: + + """ + pass diff --git a/setup.cfg b/setup.cfg index 72b60d0..8e0338f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -61,6 +61,9 @@ oslo.config.opts = oslo.config.opts.defaults = ironic_inspector = ironic_inspector.conf:set_config_defaults +tempest.test_plugins = + ironic_inspector_tests = ironic_inspector.test.inspector_tempest_plugin.plugin:InspectorTempestPlugin + [compile_catalog] directory = ironic_inspector/locale domain = ironic_inspector From d02be94e4dc99061a0f6a99d25eaa385413aa9c9 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 13 Apr 2016 12:43:25 +0000 Subject: [PATCH 16/83] Updated from global requirements Change-Id: I01b21e72df7e635ff910b5b2e71d00efbf075fc0 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a04bc81..f2bf27d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. alembic>=0.8.4 # MIT -Babel>=1.3 # BSD +Babel!=2.3.0,!=2.3.1,!=2.3.2,!=2.3.3,>=1.3 # BSD eventlet!=0.18.3,>=0.18.2 # MIT Flask<1.0,>=0.10 # BSD futurist>=0.11.0 # Apache-2.0 From 41580add20b20cb28dcacb5012c85a89cb46d830 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Thu, 14 Apr 2016 18:02:27 +0200 Subject: [PATCH 17/83] Make sure to clean the blacklist cache when disabling the firewall Otherwise on reintrospection the firewall code may not update iptables rules, assuming they are already correct. 
Change-Id: Icc05174854bd9ab51bfed9d1360873bf5db9ed54 Closes-Bug: #1570447 --- ironic_inspector/firewall.py | 3 ++- ironic_inspector/test/unit/test_firewall.py | 6 ++++++ releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml | 4 ++++ 3 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml diff --git a/ironic_inspector/firewall.py b/ironic_inspector/firewall.py index be8cb4b..c8e4036 100644 --- a/ironic_inspector/firewall.py +++ b/ironic_inspector/firewall.py @@ -135,7 +135,7 @@ def _temporary_chain(chain, main_chain): def _disable_dhcp(): """Disable DHCP completely.""" - global ENABLED + global ENABLED, BLACKLIST_CACHE if not ENABLED: LOG.debug('DHCP is already disabled, not updating') @@ -143,6 +143,7 @@ def _disable_dhcp(): LOG.debug('No nodes on introspection and node_not_found_hook is ' 'not set - disabling DHCP') + BLACKLIST_CACHE = None with _temporary_chain(NEW_CHAIN, CHAIN): # Blacklist everything _iptables('-A', NEW_CHAIN, '-j', 'REJECT') diff --git a/ironic_inspector/test/unit/test_firewall.py b/ironic_inspector/test/unit/test_firewall.py index 27f27d1..d0d0a5c 100644 --- a/ironic_inspector/test/unit/test_firewall.py +++ b/ironic_inspector/test/unit/test_firewall.py @@ -288,6 +288,9 @@ class TestFirewall(test_base.NodeTest): mock_get_client, mock_iptables): firewall.init() + firewall.BLACKLIST_CACHE = ['foo'] + mock_get_client.return_value.port.list.return_value = [ + mock.Mock(address='foobar')] update_filters_expected_args = [ ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', @@ -317,6 +320,8 @@ class TestFirewall(test_base.NodeTest): call_args_list): self.assertEqual(args, call[0]) + self.assertIsNone(firewall.BLACKLIST_CACHE) + # Check caching enabled flag mock_iptables.reset_mock() @@ -330,3 +335,4 @@ class TestFirewall(test_base.NodeTest): firewall.update_filters() mock_iptables.assert_any_call('-A', firewall.NEW_CHAIN, '-j', 'ACCEPT') + self.assertEqual({'foobar'}, 
firewall.BLACKLIST_CACHE) diff --git a/releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml b/releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml new file mode 100644 index 0000000..65068a2 --- /dev/null +++ b/releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Fixed a regression in the firewall code, which causes re-running + introspection for an already inspected node to fail. From 73c7582b8cd8a9bea59e86c85703b64ce885e4a5 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 19 Apr 2016 12:24:02 +0000 Subject: [PATCH 18/83] Updated from global requirements Change-Id: I9eec28b3ac03322c0f54b3463845611cf247e087 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f2bf27d..4f3698f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,5 +23,5 @@ oslo.middleware>=3.0.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 six>=1.9.0 # MIT -stevedore>=1.5.0 # Apache-2.0 +stevedore>=1.9.0 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT From 154a1bd18e13411669a31a524d454dee2fce96c5 Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Tue, 19 Apr 2016 15:58:55 +0800 Subject: [PATCH 19/83] use openstack cli instead of keystone cli keystone cli is no longer supported, let's use openstack cli instead Change-Id: Ia3f764fb9c829098fdb89f73259a9f3b1c3119a7 --- devstack/exercise.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/devstack/exercise.sh b/devstack/exercise.sh index dfe7b8f..f11fa83 100755 --- a/devstack/exercise.sh +++ b/devstack/exercise.sh @@ -44,9 +44,7 @@ disk_size=$(openstack flavor show baremetal -f value -c disk) ephemeral_size=$(openstack flavor show baremetal -f value -c "OS-FLV-EXT-DATA:ephemeral") expected_local_gb=$(($disk_size + $ephemeral_size)) -# FIXME(dtantsur): switch to OSC as soon as `openstack endpoint list` actually -# works on devstack -ironic_url=$(keystone endpoint-get 
--service baremetal | tail -n +4 | head -n -1 | tr '|' ' ' | awk '{ print $2; }') +ironic_url=$(openstack endpoint show baremetal -f value -c publicurl) if [ -z "$ironic_url" ]; then echo "Cannot find Ironic URL" exit 1 From 5074e3248a53f65249967763fc11a517d357937a Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Mon, 11 Apr 2016 17:09:39 +0200 Subject: [PATCH 20/83] Refactor test_process * Use fixtures instead of a self-invented decorator * Give proper names to mocks * Swift from inline mocks to @decorators * Split TestProcess into several test cases * Remove the old style of setting side_effect (via iter) Change-Id: If74221642723b0a6bea439dbcbdd360a43e7172f --- ironic_inspector/test/unit/test_process.py | 444 ++++++++++----------- 1 file changed, 208 insertions(+), 236 deletions(-) diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py index 01dc4a7..5f79ede 100644 --- a/ironic_inspector/test/unit/test_process.py +++ b/ironic_inspector/test/unit/test_process.py @@ -17,6 +17,7 @@ import json import time import eventlet +import fixtures from ironicclient import exceptions import mock from oslo_config import cfg @@ -56,33 +57,35 @@ class BaseTest(test_base.NodeTest): address=mac) for mac in self.macs] self.ports = [self.all_ports[1]] self.all_macs = self.macs + ['DE:AD:BE:EF:DE:AD'] - - -@mock.patch.object(process, '_process_node', autospec=True) -@mock.patch.object(node_cache, 'find_node', autospec=True) -@mock.patch.object(ir_utils, 'get_client', autospec=True) -class TestProcess(BaseTest): - def setUp(self): - super(TestProcess, self).setUp() self.fake_result_json = 'node json' - def prepare_mocks(func): - @functools.wraps(func) - def wrapper(self, client_mock, pop_mock, process_mock, *args, **kw): - cli = client_mock.return_value - pop_mock.return_value = node_cache.NodeInfo( - uuid=self.node.uuid, - started_at=self.started_at) - pop_mock.return_value.finished = mock.Mock() - cli.node.get.return_value = self.node 
- process_mock.return_value = self.fake_result_json + self.cli_fixture = self.useFixture( + fixtures.MockPatchObject(ir_utils, 'get_client', autospec=True)) + self.cli = self.cli_fixture.mock.return_value - return func(self, cli, pop_mock, process_mock, *args, **kw) - return wrapper +class BaseProcessTest(BaseTest): + def setUp(self): + super(BaseProcessTest, self).setUp() - @prepare_mocks - def test_ok(self, cli, pop_mock, process_mock): + self.cache_fixture = self.useFixture( + fixtures.MockPatchObject(node_cache, 'find_node', autospec=True)) + self.process_fixture = self.useFixture( + fixtures.MockPatchObject(process, '_process_node', autospec=True)) + + self.find_mock = self.cache_fixture.mock + self.node_info = node_cache.NodeInfo( + uuid=self.node.uuid, + started_at=self.started_at) + self.node_info.finished = mock.Mock() + self.find_mock.return_value = self.node_info + self.cli.node.get.return_value = self.node + self.process_mock = self.process_fixture.mock + self.process_mock.return_value = self.fake_result_json + + +class TestProcess(BaseProcessTest): + def test_ok(self): res = process.process(self.data) self.assertEqual(self.fake_result_json, res) @@ -91,194 +94,169 @@ class TestProcess(BaseTest): self.assertEqual(['em2'], sorted(self.data['interfaces'])) self.assertEqual([self.pxe_mac], self.data['macs']) - pop_mock.assert_called_once_with(bmc_address=self.bmc_address, - mac=mock.ANY) - actual_macs = pop_mock.call_args[1]['mac'] + self.find_mock.assert_called_once_with(bmc_address=self.bmc_address, + mac=mock.ANY) + actual_macs = self.find_mock.call_args[1]['mac'] self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) - cli.node.get.assert_called_once_with(self.uuid) - process_mock.assert_called_once_with(cli.node.get.return_value, - self.data, pop_mock.return_value) + self.cli.node.get.assert_called_once_with(self.uuid) + self.process_mock.assert_called_once_with( + self.node, self.data, self.node_info) - @prepare_mocks - def 
test_save_unprocessed_data(self, cli, pop_mock, process_mock): + def test_no_ipmi(self): + del self.data['ipmi_address'] + process.process(self.data) + + self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY) + actual_macs = self.find_mock.call_args[1]['mac'] + self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) + self.cli.node.get.assert_called_once_with(self.uuid) + self.process_mock.assert_called_once_with(self.node, self.data, + self.node_info) + + def test_not_found_in_cache(self): + self.find_mock.side_effect = utils.Error('not found') + + self.assertRaisesRegexp(utils.Error, + 'not found', + process.process, self.data) + self.assertFalse(self.cli.node.get.called) + self.assertFalse(self.process_mock.called) + + def test_not_found_in_ironic(self): + self.cli.node.get.side_effect = exceptions.NotFound() + + self.assertRaisesRegexp(utils.Error, + 'not found', + process.process, self.data) + self.cli.node.get.assert_called_once_with(self.uuid) + self.assertFalse(self.process_mock.called) + self.node_info.finished.assert_called_once_with(error=mock.ANY) + + def test_already_finished(self): + self.node_info.finished_at = time.time() + self.assertRaisesRegexp(utils.Error, 'already finished', + process.process, self.data) + self.assertFalse(self.process_mock.called) + self.assertFalse(self.find_mock.return_value.finished.called) + + def test_expected_exception(self): + self.process_mock.side_effect = utils.Error('boom') + + self.assertRaisesRegexp(utils.Error, 'boom', + process.process, self.data) + + self.node_info.finished.assert_called_once_with(error='boom') + + def test_unexpected_exception(self): + self.process_mock.side_effect = RuntimeError('boom') + + self.assertRaisesRegexp(utils.Error, 'Unexpected exception', + process.process, self.data) + + self.node_info.finished.assert_called_once_with( + error='Unexpected exception RuntimeError during processing: boom') + + def test_hook_unexpected_exceptions(self): + for ext in 
plugins_base.processing_hooks_manager(): + patcher = mock.patch.object(ext.obj, 'before_processing', + side_effect=RuntimeError('boom')) + patcher.start() + self.addCleanup(lambda p=patcher: p.stop()) + + self.assertRaisesRegexp(utils.Error, 'Unexpected exception', + process.process, self.data) + + self.node_info.finished.assert_called_once_with( + error=mock.ANY) + error_message = self.node_info.finished.call_args[1]['error'] + self.assertIn('RuntimeError', error_message) + self.assertIn('boom', error_message) + + def test_hook_unexpected_exceptions_no_node(self): + # Check that error from hooks is raised, not "not found" + self.find_mock.side_effect = utils.Error('not found') + for ext in plugins_base.processing_hooks_manager(): + patcher = mock.patch.object(ext.obj, 'before_processing', + side_effect=RuntimeError('boom')) + patcher.start() + self.addCleanup(lambda p=patcher: p.stop()) + + self.assertRaisesRegexp(utils.Error, 'Unexpected exception', + process.process, self.data) + + self.assertFalse(self.node_info.finished.called) + + def test_error_if_node_not_found_hook(self): + plugins_base._NOT_FOUND_HOOK_MGR = None + self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') + self.assertRaisesRegexp(utils.Error, + 'Look up error: BOOM', + process.process, self.data) + + +@mock.patch.object(example_plugin, 'example_not_found_hook', + autospec=True) +class TestNodeNotFoundHook(BaseProcessTest): + def test_node_not_found_hook_run_ok(self, hook_mock): + CONF.set_override('node_not_found_hook', 'example', 'processing') + plugins_base._NOT_FOUND_HOOK_MGR = None + self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') + hook_mock.return_value = node_cache.NodeInfo( + uuid=self.node.uuid, + started_at=self.started_at) + res = process.process(self.data) + self.assertEqual(self.fake_result_json, res) + hook_mock.assert_called_once_with(self.data) + + def test_node_not_found_hook_run_none(self, hook_mock): + CONF.set_override('node_not_found_hook', 
'example', 'processing') + plugins_base._NOT_FOUND_HOOK_MGR = None + self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') + hook_mock.return_value = None + self.assertRaisesRegexp(utils.Error, + 'Node not found hook returned nothing', + process.process, self.data) + hook_mock.assert_called_once_with(self.data) + + def test_node_not_found_hook_exception(self, hook_mock): + CONF.set_override('node_not_found_hook', 'example', 'processing') + plugins_base._NOT_FOUND_HOOK_MGR = None + self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') + hook_mock.side_effect = Exception('Hook Error') + self.assertRaisesRegexp(utils.Error, + 'Node not found hook failed: Hook Error', + process.process, self.data) + hook_mock.assert_called_once_with(self.data) + + +class TestUnprocessedData(BaseProcessTest): + @mock.patch.object(process, '_store_unprocessed_data', autospec=True) + def test_save_unprocessed_data(self, store_mock): CONF.set_override('store_data', 'swift', 'processing') expected = copy.deepcopy(self.data) - with mock.patch.object(process, '_store_unprocessed_data', - autospec=True) as store_mock: - process.process(self.data) + process.process(self.data) store_mock.assert_called_once_with(mock.ANY, expected) - @prepare_mocks - def test_save_unprocessed_data_failure(self, cli, pop_mock, - process_mock): + @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) + def test_save_unprocessed_data_failure(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') name = 'inspector_data-%s-%s' % ( self.uuid, process._UNPROCESSED_DATA_STORE_SUFFIX ) - with mock.patch.object(process.swift, 'SwiftAPI', - autospec=True) as swift_mock: - swift_conn = swift_mock.return_value - swift_conn.create_object.side_effect = iter([utils.Error('Oops')]) + swift_conn = swift_mock.return_value + swift_conn.create_object.side_effect = utils.Error('Oops') - res = process.process(self.data) + res = process.process(self.data) # assert store failure doesn't break 
processing self.assertEqual(self.fake_result_json, res) swift_conn.create_object.assert_called_once_with(name, mock.ANY) - @prepare_mocks - def test_no_ipmi(self, cli, pop_mock, process_mock): - del self.data['ipmi_address'] - process.process(self.data) - pop_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY) - actual_macs = pop_mock.call_args[1]['mac'] - self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) - cli.node.get.assert_called_once_with(self.uuid) - process_mock.assert_called_once_with(cli.node.get.return_value, - self.data, pop_mock.return_value) - - @prepare_mocks - def test_not_found_in_cache(self, cli, pop_mock, process_mock): - pop_mock.side_effect = iter([utils.Error('not found')]) - - self.assertRaisesRegexp(utils.Error, - 'not found', - process.process, self.data) - self.assertFalse(cli.node.get.called) - self.assertFalse(process_mock.called) - - @prepare_mocks - def test_not_found_in_ironic(self, cli, pop_mock, process_mock): - cli.node.get.side_effect = exceptions.NotFound() - - self.assertRaisesRegexp(utils.Error, - 'not found', - process.process, self.data) - cli.node.get.assert_called_once_with(self.uuid) - self.assertFalse(process_mock.called) - pop_mock.return_value.finished.assert_called_once_with(error=mock.ANY) - - @prepare_mocks - def test_already_finished(self, cli, pop_mock, process_mock): - old_finished_at = pop_mock.return_value.finished_at - pop_mock.return_value.finished_at = time.time() - try: - self.assertRaisesRegexp(utils.Error, 'already finished', - process.process, self.data) - self.assertFalse(process_mock.called) - self.assertFalse(pop_mock.return_value.finished.called) - finally: - pop_mock.return_value.finished_at = old_finished_at - - @prepare_mocks - def test_expected_exception(self, cli, pop_mock, process_mock): - process_mock.side_effect = iter([utils.Error('boom')]) - - self.assertRaisesRegexp(utils.Error, 'boom', - process.process, self.data) - - 
pop_mock.return_value.finished.assert_called_once_with(error='boom') - - @prepare_mocks - def test_unexpected_exception(self, cli, pop_mock, process_mock): - process_mock.side_effect = iter([RuntimeError('boom')]) - - self.assertRaisesRegexp(utils.Error, 'Unexpected exception', - process.process, self.data) - - pop_mock.return_value.finished.assert_called_once_with( - error='Unexpected exception RuntimeError during processing: boom') - - @prepare_mocks - def test_hook_unexpected_exceptions(self, cli, pop_mock, process_mock): - for ext in plugins_base.processing_hooks_manager(): - patcher = mock.patch.object(ext.obj, 'before_processing', - side_effect=RuntimeError('boom')) - patcher.start() - self.addCleanup(lambda p=patcher: p.stop()) - - self.assertRaisesRegexp(utils.Error, 'Unexpected exception', - process.process, self.data) - - pop_mock.return_value.finished.assert_called_once_with( - error=mock.ANY) - error_message = pop_mock.return_value.finished.call_args[1]['error'] - self.assertIn('RuntimeError', error_message) - self.assertIn('boom', error_message) - - @prepare_mocks - def test_hook_unexpected_exceptions_no_node(self, cli, pop_mock, - process_mock): - # Check that error from hooks is raised, not "not found" - pop_mock.side_effect = iter([utils.Error('not found')]) - for ext in plugins_base.processing_hooks_manager(): - patcher = mock.patch.object(ext.obj, 'before_processing', - side_effect=RuntimeError('boom')) - patcher.start() - self.addCleanup(lambda p=patcher: p.stop()) - - self.assertRaisesRegexp(utils.Error, 'Unexpected exception', - process.process, self.data) - - self.assertFalse(pop_mock.return_value.finished.called) - - @prepare_mocks - def test_error_if_node_not_found_hook(self, cli, pop_mock, process_mock): - plugins_base._NOT_FOUND_HOOK_MGR = None - pop_mock.side_effect = iter([utils.NotFoundInCacheError('BOOM')]) - self.assertRaisesRegexp(utils.Error, - 'Look up error: BOOM', - process.process, self.data) - - @prepare_mocks - def 
test_node_not_found_hook_run_ok(self, cli, pop_mock, process_mock): - CONF.set_override('node_not_found_hook', 'example', 'processing') - plugins_base._NOT_FOUND_HOOK_MGR = None - pop_mock.side_effect = iter([utils.NotFoundInCacheError('BOOM')]) - with mock.patch.object(example_plugin, - 'example_not_found_hook') as hook_mock: - hook_mock.return_value = node_cache.NodeInfo( - uuid=self.node.uuid, - started_at=self.started_at) - res = process.process(self.data) - self.assertEqual(self.fake_result_json, res) - hook_mock.assert_called_once_with(self.data) - - @prepare_mocks - def test_node_not_found_hook_run_none(self, cli, pop_mock, process_mock): - CONF.set_override('node_not_found_hook', 'example', 'processing') - plugins_base._NOT_FOUND_HOOK_MGR = None - pop_mock.side_effect = iter([utils.NotFoundInCacheError('BOOM')]) - with mock.patch.object(example_plugin, - 'example_not_found_hook') as hook_mock: - hook_mock.return_value = None - self.assertRaisesRegexp(utils.Error, - 'Node not found hook returned nothing', - process.process, self.data) - hook_mock.assert_called_once_with(self.data) - - @prepare_mocks - def test_node_not_found_hook_exception(self, cli, pop_mock, process_mock): - CONF.set_override('node_not_found_hook', 'example', 'processing') - plugins_base._NOT_FOUND_HOOK_MGR = None - pop_mock.side_effect = iter([utils.NotFoundInCacheError('BOOM')]) - with mock.patch.object(example_plugin, - 'example_not_found_hook') as hook_mock: - hook_mock.side_effect = Exception('Hook Error') - self.assertRaisesRegexp(utils.Error, - 'Node not found hook failed: Hook Error', - process.process, self.data) - hook_mock.assert_called_once_with(self.data) - - -@mock.patch.object(eventlet.greenthread, 'sleep', lambda _: None) -@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') -@mock.patch.object(firewall, 'update_filters', autospec=True) class TestProcessNode(BaseTest): def setUp(self): super(TestProcessNode, self).setUp() @@ -289,9 +267,7 @@ class 
TestProcessNode(BaseTest): self.data['macs'] = self.macs # validate_interfaces hook self.data['all_interfaces'] = self.data['interfaces'] self.ports = self.all_ports - self.node_info = node_cache.NodeInfo(uuid=self.uuid, - started_at=self.started_at, - node=self.node) + self.patch_props = [ {'path': '/properties/cpus', 'value': '2', 'op': 'add'}, {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, @@ -306,37 +282,40 @@ class TestProcessNode(BaseTest): 'value': self.new_creds[1]}, ] - self.cli = mock.Mock() self.cli.node.get_boot_device.side_effect = ( [RuntimeError()] * self.validate_attempts + [None]) self.cli.port.create.side_effect = self.ports self.cli.node.update.return_value = self.node self.cli.node.list_ports.return_value = [] - @mock.patch.object(ir_utils, 'get_client') - def call(self, mock_cli): - mock_cli.return_value = self.cli - return process._process_node(self.node, self.data, self.node_info) + self.useFixture(fixtures.MockPatchObject( + firewall, 'update_filters', autospec=True)) - def test_return_includes_uuid(self, filters_mock, post_hook_mock): - ret_val = self.call() + self.useFixture(fixtures.MockPatchObject( + eventlet.greenthread, 'sleep', autospec=True)) + + def test_return_includes_uuid(self): + ret_val = process._process_node(self.node, self.data, self.node_info) self.assertEqual(self.uuid, ret_val.get('uuid')) - def test_return_includes_uuid_with_ipmi_creds(self, filters_mock, - post_hook_mock): + def test_return_includes_uuid_with_ipmi_creds(self): self.node_info.set_option('new_ipmi_credentials', self.new_creds) - ret_val = self.call() + ret_val = process._process_node(self.node, self.data, self.node_info) self.assertEqual(self.uuid, ret_val.get('uuid')) self.assertTrue(ret_val.get('ipmi_setup_credentials')) - def test_wrong_provision_state(self, filters_mock, post_hook_mock): + @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') + def test_wrong_provision_state(self, post_hook_mock): 
self.node.provision_state = 'active' - self.assertRaises(utils.Error, self.call) + + self.assertRaises(utils.Error, process._process_node, + self.node, self.data, self.node_info) self.assertFalse(post_hook_mock.called) + @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) - def test_ok(self, finished_mock, filters_mock, post_hook_mock): - self.call() + def test_ok(self, finished_mock, post_hook_mock): + process._process_node(self.node, self.data, self.node_info) self.cli.port.create.assert_any_call(node_uuid=self.uuid, address=self.macs[0]) @@ -349,22 +328,22 @@ class TestProcessNode(BaseTest): post_hook_mock.assert_called_once_with(self.data, self.node_info) finished_mock.assert_called_once_with(mock.ANY) - def test_overwrite_disabled(self, filters_mock, post_hook_mock): + def test_overwrite_disabled(self): CONF.set_override('overwrite_existing', False, 'processing') patch = [ {'op': 'add', 'path': '/properties/cpus', 'value': '2'}, {'op': 'add', 'path': '/properties/memory_mb', 'value': '1024'}, ] - self.call() + process._process_node(self.node, self.data, self.node_info) self.assertCalledWithPatch(patch, self.cli.node.update) - def test_port_failed(self, filters_mock, post_hook_mock): + def test_port_failed(self): self.cli.port.create.side_effect = ( [exceptions.Conflict()] + self.ports[1:]) - self.call() + process._process_node(self.node, self.data, self.node_info) self.cli.port.create.assert_any_call(node_uuid=self.uuid, address=self.macs[0]) @@ -372,10 +351,10 @@ class TestProcessNode(BaseTest): address=self.macs[1]) self.assertCalledWithPatch(self.patch_props, self.cli.node.update) - def test_set_ipmi_credentials(self, filters_mock, post_hook_mock): + def test_set_ipmi_credentials(self): self.node_info.set_option('new_ipmi_credentials', self.new_creds) - self.call() + process._process_node(self.node, self.data, self.node_info) 
self.cli.node.update.assert_any_call(self.uuid, self.patch_credentials) self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') @@ -383,15 +362,14 @@ class TestProcessNode(BaseTest): self.assertEqual(self.validate_attempts + 1, self.cli.node.get_boot_device.call_count) - def test_set_ipmi_credentials_no_address(self, filters_mock, - post_hook_mock): + def test_set_ipmi_credentials_no_address(self): self.node_info.set_option('new_ipmi_credentials', self.new_creds) del self.node.driver_info['ipmi_address'] self.patch_credentials.append({'op': 'add', 'path': '/driver_info/ipmi_address', 'value': self.bmc_address}) - self.call() + process._process_node(self.node, self.data, self.node_info) self.cli.node.update.assert_any_call(self.uuid, self.patch_credentials) self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') @@ -400,12 +378,11 @@ class TestProcessNode(BaseTest): self.cli.node.get_boot_device.call_count) @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) - def test_set_ipmi_credentials_timeout(self, finished_mock, - filters_mock, post_hook_mock): + def test_set_ipmi_credentials_timeout(self, finished_mock): self.node_info.set_option('new_ipmi_credentials', self.new_creds) self.cli.node.get_boot_device.side_effect = RuntimeError('boom') - self.call() + process._process_node(self.node, self.data, self.node_info) self.cli.node.update.assert_any_call(self.uuid, self.patch_credentials) self.assertEqual(2, self.cli.node.update.call_count) @@ -418,11 +395,10 @@ class TestProcessNode(BaseTest): 'node might require maintenance' % self.uuid) @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) - def test_power_off_failed(self, finished_mock, filters_mock, - post_hook_mock): + def test_power_off_failed(self, finished_mock): self.cli.node.set_power_state.side_effect = RuntimeError('boom') - self.call() + process._process_node(self.node, self.data, self.node_info) 
self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') self.assertCalledWithPatch(self.patch_props, self.cli.node.update) @@ -432,26 +408,26 @@ class TestProcessNode(BaseTest): 'management configuration: boom' % self.uuid ) + @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) - def test_power_off_enroll_state(self, finished_mock, filters_mock, - post_hook_mock): + def test_power_off_enroll_state(self, finished_mock, post_hook_mock): self.node.provision_state = 'enroll' self.node_info.node = mock.Mock(return_value=self.node) - self.call() + process._process_node(self.node, self.data, self.node_info) self.assertTrue(post_hook_mock.called) self.assertTrue(self.cli.node.set_power_state.called) finished_mock.assert_called_once_with(self.node_info) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) - def test_store_data(self, swift_mock, filters_mock, post_hook_mock): + def test_store_data(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') swift_conn = swift_mock.return_value name = 'inspector_data-%s' % self.uuid expected = self.data - self.call() + process._process_node(self.node, self.data, self.node_info) swift_conn.create_object.assert_called_once_with(name, mock.ANY) self.assertEqual(expected, @@ -459,15 +435,14 @@ class TestProcessNode(BaseTest): self.assertCalledWithPatch(self.patch_props, self.cli.node.update) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) - def test_store_data_no_logs(self, swift_mock, filters_mock, - post_hook_mock): + def test_store_data_no_logs(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') swift_conn = swift_mock.return_value name = 'inspector_data-%s' % self.uuid expected = self.data.copy() self.data['logs'] = 'something' - self.call() + process._process_node(self.node, self.data, self.node_info) swift_conn.create_object.assert_called_once_with(name, mock.ANY) 
self.assertEqual(expected, @@ -475,8 +450,7 @@ class TestProcessNode(BaseTest): self.assertCalledWithPatch(self.patch_props, self.cli.node.update) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) - def test_store_data_location(self, swift_mock, filters_mock, - post_hook_mock): + def test_store_data_location(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') CONF.set_override('store_data_location', 'inspector_data_object', 'processing') @@ -489,7 +463,7 @@ class TestProcessNode(BaseTest): ) expected = self.data - self.call() + process._process_node(self.node, self.data, self.node_info) swift_conn.create_object.assert_called_once_with(name, mock.ANY) self.assertEqual(expected, @@ -562,14 +536,12 @@ class TestReapplyNode(BaseTest): node=self.node) self.node_info.invalidate_cache = mock.Mock() self.new_creds = ('user', 'password') - self.cli = mock.Mock() + self.cli.port.create.side_effect = self.ports self.cli.node.update.return_value = self.node self.cli.node.list_ports.return_value = [] - @mock.patch.object(ir_utils, 'get_client', autospec=True) - def call(self, cli_mock): - cli_mock.return_value = self.cli + def call(self): process._reapply(self.node_info) # make sure node_info lock is released after a call self.node_info.release_lock.assert_called_once_with(self.node_info) From 3505a8e9328fd5b74262a5fe94be1d4440727b03 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Wed, 4 May 2016 17:36:09 +0800 Subject: [PATCH 21/83] Use PortOpt type for port options Change-Id: Icdc27af7a10ff249235e927431a41ec0874bd154 --- example.conf | 8 +++++--- ironic_inspector/conf.py | 8 ++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/example.conf b/example.conf index 7905ff4..0e05ac5 100644 --- a/example.conf +++ b/example.conf @@ -8,7 +8,9 @@ # Deprecated group/name - [discoverd]/listen_address #listen_address = 0.0.0.0 -# Port to listen on. (integer value) +# Port to listen on. 
(port value) +# Minimum value: 0 +# Maximum value: 65535 # Deprecated group/name - [discoverd]/listen_port #listen_port = 5050 @@ -528,7 +530,7 @@ #user_id = # Username (unknown value) -# Deprecated group/name - [DEFAULT]/username +# Deprecated group/name - [DEFAULT]/user-name #username = @@ -873,5 +875,5 @@ #user_id = # Username (unknown value) -# Deprecated group/name - [DEFAULT]/username +# Deprecated group/name - [DEFAULT]/user-name #username = diff --git a/ironic_inspector/conf.py b/ironic_inspector/conf.py index 3860e5f..5fd437d 100644 --- a/ironic_inspector/conf.py +++ b/ironic_inspector/conf.py @@ -146,10 +146,10 @@ SERVICE_OPTS = [ default='0.0.0.0', help='IP to listen on.', deprecated_group='discoverd'), - cfg.IntOpt('listen_port', - default=5050, - help='Port to listen on.', - deprecated_group='discoverd'), + cfg.PortOpt('listen_port', + default=5050, + help='Port to listen on.', + deprecated_group='discoverd'), cfg.StrOpt('auth_strategy', default='keystone', choices=('keystone', 'noauth'), From b65ab065ea41ab1ae3c0e7b4ccb16d2627bf9e22 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Thu, 31 Mar 2016 16:46:36 +0200 Subject: [PATCH 22/83] Store ramdisk logs on all processing failures, not only reported by the ramdisk Previously the ramdisk logs were only stored if the ramdisk reported an error. However, we are moving from ramdisk-side validation to server-side, so we need ramdisk logs to be available if processing fails too. This change moves storing ramdisk logs from a ramdisk_error plugin to core processing code. As before, it can be disabled by setting ramdisk_logs_dir to an empty value. 
Change-Id: Ib3742ee1c1d4f2f96d29466626e1121694610dc3 Closes-Bug: #1564448 --- ironic_inspector/plugins/standard.py | 35 -------- ironic_inspector/process.py | 53 +++++++++++- .../test/unit/test_plugins_standard.py | 75 ---------------- ironic_inspector/test/unit/test_process.py | 85 +++++++++++++++++++ ...logs-on-all-failures-24da41edf3a98400.yaml | 11 +++ 5 files changed, 146 insertions(+), 113 deletions(-) create mode 100644 releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml diff --git a/ironic_inspector/plugins/standard.py b/ironic_inspector/plugins/standard.py index 5cf7d66..8c9befe 100644 --- a/ironic_inspector/plugins/standard.py +++ b/ironic_inspector/plugins/standard.py @@ -13,9 +13,6 @@ """Standard set of plugins.""" -import base64 -import datetime -import os import sys import netaddr @@ -311,40 +308,8 @@ class ValidateInterfacesHook(base.ProcessingHook): class RamdiskErrorHook(base.ProcessingHook): """Hook to process error send from the ramdisk.""" - DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f' - def before_processing(self, introspection_data, **kwargs): error = introspection_data.get('error') - logs = introspection_data.get('logs') - - if error or CONF.processing.always_store_ramdisk_logs: - if logs: - self._store_logs(logs, introspection_data) - else: - LOG.debug('No logs received from the ramdisk', - data=introspection_data) - if error: raise utils.Error(_('Ramdisk reported error: %s') % error, data=introspection_data) - - def _store_logs(self, logs, introspection_data): - if not CONF.processing.ramdisk_logs_dir: - LOG.warning( - _LW('Failed to store logs received from the ramdisk ' - 'because ramdisk_logs_dir configuration option ' - 'is not set'), - data=introspection_data) - return - - if not os.path.exists(CONF.processing.ramdisk_logs_dir): - os.makedirs(CONF.processing.ramdisk_logs_dir) - - time_fmt = datetime.datetime.utcnow().strftime(self.DATETIME_FORMAT) - bmc_address = introspection_data.get('ipmi_address', 'unknown') - 
file_name = 'bmc_%s_%s' % (bmc_address, time_fmt) - with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name), - 'wb') as fp: - fp.write(base64.b64decode(logs)) - LOG.info(_LI('Ramdisk logs stored in file %s'), file_name, - data=introspection_data) diff --git a/ironic_inspector/process.py b/ironic_inspector/process.py index 42b9e73..e4ae86a 100644 --- a/ironic_inspector/process.py +++ b/ironic_inspector/process.py @@ -13,14 +13,19 @@ """Handling introspection data from the ramdisk.""" +import base64 import copy +import datetime +import os + import eventlet import json from ironicclient import exceptions from oslo_config import cfg +from oslo_utils import excutils -from ironic_inspector.common.i18n import _, _LE, _LI +from ironic_inspector.common.i18n import _, _LE, _LI, _LW from ironic_inspector.common import ironic as ir_utils from ironic_inspector.common import swift from ironic_inspector import firewall @@ -37,6 +42,40 @@ _CREDENTIALS_WAIT_RETRIES = 10 _CREDENTIALS_WAIT_PERIOD = 3 _STORAGE_EXCLUDED_KEYS = {'logs'} _UNPROCESSED_DATA_STORE_SUFFIX = 'UNPROCESSED' +_DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f' + + +def _store_logs(introspection_data, node_info): + logs = introspection_data.get('logs') + if not logs: + LOG.warning(_LW('No logs were passed by the ramdisk'), + data=introspection_data, node_info=node_info) + return + + if not CONF.processing.ramdisk_logs_dir: + LOG.warning(_LW('Failed to store logs received from the ramdisk ' + 'because ramdisk_logs_dir configuration option ' + 'is not set'), + data=introspection_data, node_info=node_info) + return + + time_fmt = datetime.datetime.utcnow().strftime(_DATETIME_FORMAT) + bmc_address = (utils.get_ipmi_address_from_data(introspection_data) + or 'unknown') + file_name = 'bmc_%s_%s' % (bmc_address, time_fmt) + + try: + if not os.path.exists(CONF.processing.ramdisk_logs_dir): + os.makedirs(CONF.processing.ramdisk_logs_dir) + with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name), + 'wb') as 
fp: + fp.write(base64.b64decode(logs)) + except EnvironmentError: + LOG.exception(_LE('Could not store the ramdisk logs'), + data=introspection_data, node_info=node_info) + else: + LOG.info(_LI('Ramdisk logs were stored in file %s'), file_name, + data=introspection_data, node_info=node_info) def _find_node_info(introspection_data, failures): @@ -158,6 +197,7 @@ def process(introspection_data): 'pre-processing hooks:\n%s') % '\n'.join(failures) if node_info is not None: node_info.finished(error='\n'.join(failures)) + _store_logs(introspection_data, node_info) raise utils.Error(msg, node_info=node_info, data=introspection_data) LOG.info(_LI('Matching node is %s'), node_info.uuid, @@ -180,22 +220,29 @@ def process(introspection_data): except exceptions.NotFound: msg = _('Node was found in cache, but is not found in Ironic') node_info.finished(error=msg) + _store_logs(introspection_data, node_info) raise utils.Error(msg, code=404, node_info=node_info, data=introspection_data) try: - return _process_node(node, introspection_data, node_info) + result = _process_node(node, introspection_data, node_info) except utils.Error as exc: node_info.finished(error=str(exc)) - raise + with excutils.save_and_reraise_exception(): + _store_logs(introspection_data, node_info) except Exception as exc: LOG.exception(_LE('Unexpected exception during processing')) msg = _('Unexpected exception %(exc_class)s during processing: ' '%(error)s') % {'exc_class': exc.__class__.__name__, 'error': exc} node_info.finished(error=msg) + _store_logs(introspection_data, node_info) raise utils.Error(msg, node_info=node_info, data=introspection_data) + if CONF.processing.always_store_ramdisk_logs: + _store_logs(introspection_data, node_info) + return result + def _run_post_hooks(node_info, introspection_data): hooks = plugins_base.processing_hooks_manager() diff --git a/ironic_inspector/test/unit/test_plugins_standard.py b/ironic_inspector/test/unit/test_plugins_standard.py index 2434db8..2892a88 100644 
--- a/ironic_inspector/test/unit/test_plugins_standard.py +++ b/ironic_inspector/test/unit/test_plugins_standard.py @@ -11,11 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import base64 -import os -import shutil -import tempfile - import mock from oslo_config import cfg from oslo_utils import units @@ -404,77 +399,7 @@ class TestRamdiskError(test_base.BaseTest): 'ipmi_address': self.bmc_address, } - self.tempdir = tempfile.mkdtemp() - self.addCleanup(lambda: shutil.rmtree(self.tempdir)) - CONF.set_override('ramdisk_logs_dir', self.tempdir, 'processing') - def test_no_logs(self): self.assertRaisesRegexp(utils.Error, self.msg, process.process, self.data) - self.assertEqual([], os.listdir(self.tempdir)) - - def test_logs_disabled(self): - self.data['logs'] = 'some log' - CONF.set_override('ramdisk_logs_dir', None, 'processing') - - self.assertRaisesRegexp(utils.Error, - self.msg, - process.process, self.data) - self.assertEqual([], os.listdir(self.tempdir)) - - def test_logs(self): - log = b'log contents' - self.data['logs'] = base64.b64encode(log) - - self.assertRaisesRegexp(utils.Error, - self.msg, - process.process, self.data) - - files = os.listdir(self.tempdir) - self.assertEqual(1, len(files)) - filename = files[0] - self.assertTrue(filename.startswith('bmc_%s_' % self.bmc_address), - '%s does not start with bmc_%s' - % (filename, self.bmc_address)) - with open(os.path.join(self.tempdir, filename), 'rb') as fp: - self.assertEqual(log, fp.read()) - - def test_logs_create_dir(self): - shutil.rmtree(self.tempdir) - self.data['logs'] = base64.b64encode(b'log') - - self.assertRaisesRegexp(utils.Error, - self.msg, - process.process, self.data) - - files = os.listdir(self.tempdir) - self.assertEqual(1, len(files)) - - def test_logs_without_error(self): - log = b'log contents' - del self.data['error'] - self.data['logs'] = base64.b64encode(log) - - 
std_plugins.RamdiskErrorHook().before_processing(self.data) - - files = os.listdir(self.tempdir) - self.assertFalse(files) - - def test_always_store_logs(self): - CONF.set_override('always_store_ramdisk_logs', True, 'processing') - - log = b'log contents' - del self.data['error'] - self.data['logs'] = base64.b64encode(log) - - std_plugins.RamdiskErrorHook().before_processing(self.data) - - files = os.listdir(self.tempdir) - self.assertEqual(1, len(files)) - filename = files[0] - self.assertTrue(filename.startswith('bmc_%s_' % self.bmc_address), - '%s does not start with bmc_%s' - % (filename, self.bmc_address)) - with open(os.path.join(self.tempdir, filename), 'rb') as fp: - self.assertEqual(log, fp.read()) diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py index 5f79ede..79ef8c6 100644 --- a/ironic_inspector/test/unit/test_process.py +++ b/ironic_inspector/test/unit/test_process.py @@ -11,9 +11,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import base64 import copy import functools import json +import os +import shutil +import tempfile import time import eventlet @@ -257,6 +261,87 @@ class TestUnprocessedData(BaseProcessTest): swift_conn.create_object.assert_called_once_with(name, mock.ANY) +@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_processing', + autospec=True) +class TestStoreLogs(BaseProcessTest): + def setUp(self): + super(TestStoreLogs, self).setUp() + CONF.set_override('processing_hooks', 'ramdisk_error,example', + 'processing') + + self.tempdir = tempfile.mkdtemp() + self.addCleanup(lambda: shutil.rmtree(self.tempdir)) + CONF.set_override('ramdisk_logs_dir', self.tempdir, 'processing') + + self.logs = b'test logs' + self.data['logs'] = base64.b64encode(self.logs) + + def _check_contents(self): + files = os.listdir(self.tempdir) + self.assertEqual(1, len(files)) + filename = files[0] + self.assertTrue(filename.startswith('bmc_%s_' % self.bmc_address), + '%s does not start with bmc_%s' + % (filename, self.bmc_address)) + with open(os.path.join(self.tempdir, filename), 'rb') as fp: + self.assertEqual(self.logs, fp.read()) + + def test_store_on_preprocess_failure(self, hook_mock): + hook_mock.side_effect = Exception('Hook Error') + self.assertRaises(utils.Error, process.process, self.data) + self._check_contents() + + def test_store_on_process_failure(self, hook_mock): + self.process_mock.side_effect = utils.Error('boom') + self.assertRaises(utils.Error, process.process, self.data) + self._check_contents() + + def test_store_on_unexpected_process_failure(self, hook_mock): + self.process_mock.side_effect = RuntimeError('boom') + self.assertRaises(utils.Error, process.process, self.data) + self._check_contents() + + def test_store_on_ramdisk_error(self, hook_mock): + self.data['error'] = 'boom' + self.assertRaises(utils.Error, process.process, self.data) + self._check_contents() + + def test_store_find_node_error(self, hook_mock): + self.cli.node.get.side_effect = 
exceptions.NotFound('boom') + self.assertRaises(utils.Error, process.process, self.data) + self._check_contents() + + def test_no_error_no_logs(self, hook_mock): + process.process(self.data) + self.assertEqual([], os.listdir(self.tempdir)) + + def test_logs_disabled(self, hook_mock): + CONF.set_override('ramdisk_logs_dir', None, 'processing') + hook_mock.side_effect = Exception('Hook Error') + self.assertRaises(utils.Error, process.process, self.data) + self.assertEqual([], os.listdir(self.tempdir)) + + def test_always_store_logs(self, hook_mock): + CONF.set_override('always_store_ramdisk_logs', True, 'processing') + process.process(self.data) + self._check_contents() + + @mock.patch.object(process.LOG, 'exception', autospec=True) + def test_failure_to_write(self, log_mock, hook_mock): + CONF.set_override('always_store_ramdisk_logs', True, 'processing') + CONF.set_override('ramdisk_logs_dir', '/I/cannot/write/here', + 'processing') + process.process(self.data) + self.assertEqual([], os.listdir(self.tempdir)) + self.assertTrue(log_mock.called) + + def test_directory_is_created(self, hook_mock): + shutil.rmtree(self.tempdir) + self.data['error'] = 'boom' + self.assertRaises(utils.Error, process.process, self.data) + self._check_contents() + + class TestProcessNode(BaseTest): def setUp(self): super(TestProcessNode, self).setUp() diff --git a/releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml b/releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml new file mode 100644 index 0000000..3e2a461 --- /dev/null +++ b/releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml @@ -0,0 +1,11 @@ +--- +fixes: + - The ramdisk logs are now stored on all preprocessing errors, not only + ones reported by the ramdisk itself. This required moving the ramdisk + logs handling from the "ramdisk_error" plugin to the generic processing + code. 
+upgrade: + - Handling ramdisk logs was moved out of the "ramdisk_error" plugin, so + disabling it will no longer disable handling ramdisk logs. As before, + you can set "ramdisk_logs_dir" option to an empty value (the default) + to disable storing ramdisk logs. From 50b989474d874c6004dd2b4f0c1ec598007a2fcb Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Mon, 25 Apr 2016 19:37:03 +0200 Subject: [PATCH 23/83] is-empty conditions should accept missing values Returning False from is-empty condition on missing values seems extremely confusing and rules out some potential use cases. Closes-Bug: #1578184 Change-Id: I8f976516f89367512e2ffae2815085be1776b6f6 --- ironic_inspector/plugins/rules.py | 1 + ironic_inspector/test/functional.py | 1 + releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml | 3 +++ 3 files changed, 5 insertions(+) create mode 100644 releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml diff --git a/ironic_inspector/plugins/rules.py b/ironic_inspector/plugins/rules.py index 6d96fcb..436965f 100644 --- a/ironic_inspector/plugins/rules.py +++ b/ironic_inspector/plugins/rules.py @@ -69,6 +69,7 @@ class NeCondition(SimpleCondition): class EmptyCondition(base.RuleConditionPlugin): REQUIRED_PARAMS = set() + ALLOW_NONE = True def check(self, node_info, field, params, **kwargs): return field in ('', None, [], {}) diff --git a/ironic_inspector/test/functional.py b/ironic_inspector/test/functional.py index 023c9d8..88241e0 100644 --- a/ironic_inspector/test/functional.py +++ b/ironic_inspector/test/functional.py @@ -320,6 +320,7 @@ class Test(Base): {'field': 'inventory.interfaces[*].ipv4_address', 'op': 'contains', 'value': r'127\.0\.0\.1', 'invert': True, 'multiple': 'all'}, + {'field': 'i.do.not.exist', 'op': 'is-empty'}, ], 'actions': [ {'action': 'set-attribute', 'path': '/extra/foo', diff --git a/releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml b/releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml new file mode 100644 index 
0000000..c048fdc --- /dev/null +++ b/releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - Fixed the "is-empty" condition to return True on missing values. From cdf6c7435e5e421e9349d79df4a09a1872a6cbac Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 6 May 2016 22:17:36 +0000 Subject: [PATCH 24/83] Updated from global requirements Change-Id: I16b44f34881cd0a15011cb8ebbdbf2a300e1dc57 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4f3698f..7bfe968 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. alembic>=0.8.4 # MIT -Babel!=2.3.0,!=2.3.1,!=2.3.2,!=2.3.3,>=1.3 # BSD +Babel>=2.3.4 # BSD eventlet!=0.18.3,>=0.18.2 # MIT Flask<1.0,>=0.10 # BSD futurist>=0.11.0 # Apache-2.0 From af6fbf0717de9e9b92007615ba0d9760c7964d89 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Thu, 4 Feb 2016 17:21:53 +0100 Subject: [PATCH 25/83] Support Ironic node names in our API This change drops check on UUID validness from our API. It also has a subtle effect of doing Ironic node fetching in the introspection status and data fetch calls, which might make them slightly longer (but only when name is used). A new helper common.ironic.get_node is created to unify how we fetch nodes from Ironic. It also provides nicer exceptions. 
Change-Id: I20cf65e57910568b70a62c3f9269a962e78a07e2 Closes-Bug: #1523902 --- doc/source/http-api.rst | 1 + ironic_inspector/common/ironic.py | 28 +++++++++++++ ironic_inspector/introspect.py | 22 ++++------- ironic_inspector/main.py | 39 +++++++++---------- ironic_inspector/node_cache.py | 22 +++++++---- ironic_inspector/process.py | 19 ++++----- ironic_inspector/test/unit/test_introspect.py | 4 +- ironic_inspector/test/unit/test_main.py | 30 +++++++++++--- ironic_inspector/test/unit/test_node_cache.py | 20 +++++++++- ironic_inspector/test/unit/test_process.py | 2 +- .../notes/names-82d9f84153a228ec.yaml | 5 +++ 11 files changed, 128 insertions(+), 64 deletions(-) create mode 100644 releasenotes/notes/names-82d9f84153a228ec.yaml diff --git a/doc/source/http-api.rst b/doc/source/http-api.rst index 88d472b..103247a 100644 --- a/doc/source/http-api.rst +++ b/doc/source/http-api.rst @@ -343,3 +343,4 @@ Version History * **1.2** endpoints for manipulating introspection rules. * **1.3** endpoint for canceling running introspection * **1.4** endpoint for reapplying the introspection over stored data. +* **1.5** support for Ironic node names. diff --git a/ironic_inspector/common/ironic.py b/ironic_inspector/common/ironic.py index 131be00..d220a36 100644 --- a/ironic_inspector/common/ironic.py +++ b/ironic_inspector/common/ironic.py @@ -14,6 +14,7 @@ import socket from ironicclient import client +from ironicclient import exceptions as ironic_exc from oslo_config import cfg from ironic_inspector.common.i18n import _ @@ -116,6 +117,14 @@ LEGACY_MAP = { } +class NotFound(utils.Error): + """Node not found in Ironic.""" + + def __init__(self, node_ident, code=404, *args, **kwargs): + msg = _('Node %s was not found in Ironic') % node_ident + super(NotFound, self).__init__(msg, code, *args, **kwargs) + + def reset_ironic_session(): """Reset the global session variable. 
@@ -200,5 +209,24 @@ def dict_to_capabilities(caps_dict): if value is not None]) +def get_node(node_id, ironic=None, **kwargs): + """Get a node from Ironic. + + :param node_id: node UUID or name. + :param ironic: ironic client instance. + :param kwargs: arguments to pass to Ironic client. + :raises: Error on failure + """ + ironic = ironic if ironic is not None else get_client() + + try: + return ironic.node.get(node_id, **kwargs) + except ironic_exc.NotFound: + raise NotFound(node_id) + except ironic_exc.HttpError as exc: + raise utils.Error(_("Cannot get node %(node)s: %(exc)s") % + {'node': node_id, 'exc': exc}) + + def list_opts(): return keystone.add_auth_options(IRONIC_OPTS, IRONIC_GROUP) diff --git a/ironic_inspector/introspect.py b/ironic_inspector/introspect.py index 95632c9..f819c8f 100644 --- a/ironic_inspector/introspect.py +++ b/ironic_inspector/introspect.py @@ -18,7 +18,6 @@ import string import time from eventlet import semaphore -from ironicclient import exceptions from oslo_config import cfg from ironic_inspector.common.i18n import _, _LI, _LW @@ -64,23 +63,16 @@ def _validate_ipmi_credentials(node, new_ipmi_credentials): return new_username, new_password -def introspect(uuid, new_ipmi_credentials=None, token=None): +def introspect(node_id, new_ipmi_credentials=None, token=None): """Initiate hardware properties introspection for a given node. 
- :param uuid: node uuid + :param node_id: node UUID or name :param new_ipmi_credentials: tuple (new username, new password) or None :param token: authentication token :raises: Error """ ironic = ir_utils.get_client(token) - - try: - node = ironic.node.get(uuid) - except exceptions.NotFound: - raise utils.Error(_("Cannot find node %s") % uuid, code=404) - except exceptions.HttpError as exc: - raise utils.Error(_("Cannot get node %(node)s: %(exc)s") % - {'node': uuid, 'exc': exc}) + node = ir_utils.get_node(node_id, ironic=ironic) ir_utils.check_provision_state(node, with_credentials=new_ipmi_credentials) @@ -179,16 +171,16 @@ def _background_introspect_locked(ironic, node_info): node_info=node_info) -def abort(uuid, token=None): +def abort(node_id, token=None): """Abort running introspection. - :param uuid: node uuid + :param node_id: node UUID or name :param token: authentication token :raises: Error """ - LOG.debug('Aborting introspection for node %s', uuid) + LOG.debug('Aborting introspection for node %s', node_id) ironic = ir_utils.get_client(token) - node_info = node_cache.get_node(uuid, ironic=ironic, locked=False) + node_info = node_cache.get_node(node_id, ironic=ironic, locked=False) # check pending operations locked = node_info.acquire_lock(blocking=False) diff --git a/ironic_inspector/main.py b/ironic_inspector/main.py index fb64f6b..de913ff 100644 --- a/ironic_inspector/main.py +++ b/ironic_inspector/main.py @@ -47,7 +47,7 @@ app = flask.Flask(__name__) LOG = utils.getProcessingLogger(__name__) MINIMUM_API_VERSION = (1, 0) -CURRENT_API_VERSION = (1, 4) +CURRENT_API_VERSION = (1, 5) _LOGGING_EXCLUDED_KEYS = ('logs',) @@ -178,14 +178,11 @@ def api_continue(): # TODO(sambetts) Add API discovery for this endpoint -@app.route('/v1/introspection/', methods=['GET', 'POST']) +@app.route('/v1/introspection/', methods=['GET', 'POST']) @convert_exceptions -def api_introspection(uuid): +def api_introspection(node_id): utils.check_auth(flask.request) - if not 
uuidutils.is_uuid_like(uuid): - raise utils.Error(_('Invalid UUID value'), code=400) - if flask.request.method == 'POST': new_ipmi_password = flask.request.args.get('new_ipmi_password', type=str, @@ -198,34 +195,34 @@ def api_introspection(uuid): else: new_ipmi_credentials = None - introspect.introspect(uuid, + introspect.introspect(node_id, new_ipmi_credentials=new_ipmi_credentials, token=flask.request.headers.get('X-Auth-Token')) return '', 202 else: - node_info = node_cache.get_node(uuid) + node_info = node_cache.get_node(node_id) return flask.json.jsonify(finished=bool(node_info.finished_at), error=node_info.error or None) -@app.route('/v1/introspection//abort', methods=['POST']) +@app.route('/v1/introspection//abort', methods=['POST']) @convert_exceptions -def api_introspection_abort(uuid): +def api_introspection_abort(node_id): utils.check_auth(flask.request) - - if not uuidutils.is_uuid_like(uuid): - raise utils.Error(_('Invalid UUID value'), code=400) - - introspect.abort(uuid, token=flask.request.headers.get('X-Auth-Token')) + introspect.abort(node_id, token=flask.request.headers.get('X-Auth-Token')) return '', 202 -@app.route('/v1/introspection//data', methods=['GET']) +@app.route('/v1/introspection//data', methods=['GET']) @convert_exceptions -def api_introspection_data(uuid): +def api_introspection_data(node_id): utils.check_auth(flask.request) + if CONF.processing.store_data == 'swift': - res = swift.get_introspection_data(uuid) + if not uuidutils.is_uuid_like(node_id): + node = ir_utils.get_node(node_id, fields=['uuid']) + node_id = node.uuid + res = swift.get_introspection_data(node_id) return res, 200, {'Content-Type': 'application/json'} else: return error_response(_('Inspector is not configured to store data. 
' @@ -234,9 +231,9 @@ def api_introspection_data(uuid): code=404) -@app.route('/v1/introspection//data/unprocessed', methods=['POST']) +@app.route('/v1/introspection//data/unprocessed', methods=['POST']) @convert_exceptions -def api_introspection_reapply(uuid): +def api_introspection_reapply(node_id): utils.check_auth(flask.request) if flask.request.content_length: @@ -244,7 +241,7 @@ def api_introspection_reapply(uuid): 'supported yet'), code=400) if CONF.processing.store_data == 'swift': - process.reapply(uuid) + process.reapply(node_id) return '', 202 else: return error_response(_('Inspector is not configured to store' diff --git a/ironic_inspector/node_cache.py b/ironic_inspector/node_cache.py index 67d02dc..e06a084 100644 --- a/ironic_inspector/node_cache.py +++ b/ironic_inspector/node_cache.py @@ -22,6 +22,7 @@ from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import excutils +from oslo_utils import uuidutils from sqlalchemy import text from ironic_inspector import db @@ -201,11 +202,11 @@ class NodeInfo(object): self._attributes = None @classmethod - def from_row(cls, row, ironic=None, lock=None): + def from_row(cls, row, ironic=None, lock=None, node=None): """Construct NodeInfo from a database row.""" fields = {key: row[key] for key in ('uuid', 'started_at', 'finished_at', 'error')} - return cls(ironic=ironic, lock=lock, **fields) + return cls(ironic=ironic, lock=lock, node=node, **fields) def invalidate_cache(self): """Clear all cached info, so that it's reloaded next time.""" @@ -218,7 +219,7 @@ class NodeInfo(object): def node(self): """Get Ironic node object associated with the cached node record.""" if self._node is None: - self._node = self.ironic.node.get(self.uuid) + self._node = ir_utils.get_node(self.uuid, ironic=self.ironic) return self._node def create_ports(self, macs): @@ -438,14 +439,21 @@ def _list_node_uuids(): return {x.uuid for x in db.model_query(db.Node.uuid)} -def 
get_node(uuid, ironic=None, locked=False): - """Get node from cache by it's UUID. +def get_node(node_id, ironic=None, locked=False): + """Get node from cache. - :param uuid: node UUID. + :param node_id: node UUID or name. :param ironic: optional ironic client instance :param locked: if True, get a lock on node before fetching its data :returns: structure NodeInfo. """ + if uuidutils.is_uuid_like(node_id): + node = None + uuid = node_id + else: + node = ir_utils.get_node(node_id, ironic=ironic) + uuid = node.uuid + if locked: lock = _get_lock(uuid) lock.acquire() @@ -457,7 +465,7 @@ def get_node(uuid, ironic=None, locked=False): if row is None: raise utils.Error(_('Could not find node %s in cache') % uuid, code=404) - return NodeInfo.from_row(row, ironic=ironic, lock=lock) + return NodeInfo.from_row(row, ironic=ironic, lock=lock, node=node) except Exception: with excutils.save_and_reraise_exception(): if lock is not None: diff --git a/ironic_inspector/process.py b/ironic_inspector/process.py index e4ae86a..0cf267b 100644 --- a/ironic_inspector/process.py +++ b/ironic_inspector/process.py @@ -21,7 +21,6 @@ import os import eventlet import json -from ironicclient import exceptions from oslo_config import cfg from oslo_utils import excutils @@ -217,12 +216,10 @@ def process(introspection_data): try: node = node_info.node() - except exceptions.NotFound: - msg = _('Node was found in cache, but is not found in Ironic') - node_info.finished(error=msg) - _store_logs(introspection_data, node_info) - raise utils.Error(msg, code=404, node_info=node_info, - data=introspection_data) + except ir_utils.NotFound as exc: + with excutils.save_and_reraise_exception(): + node_info.finished(error=str(exc)) + _store_logs(introspection_data, node_info) try: result = _process_node(node, introspection_data, node_info) @@ -343,20 +340,20 @@ def _finish(ironic, node_info, introspection_data, power_off=True): node_info=node_info, data=introspection_data) -def reapply(uuid): +def 
reapply(node_ident): """Re-apply introspection steps. Re-apply preprocessing, postprocessing and introspection rules on stored data. - :param uuid: node uuid to use + :param node_ident: node UUID or name :raises: utils.Error """ LOG.debug('Processing re-apply introspection request for node ' - 'UUID: %s', uuid) - node_info = node_cache.get_node(uuid, locked=False) + 'UUID: %s', node_ident) + node_info = node_cache.get_node(node_ident, locked=False) if not node_info.acquire_lock(blocking=False): # Note (mkovacik): it should be sufficient to check data # presence & locking. If either introspection didn't start diff --git a/ironic_inspector/test/unit/test_introspect.py b/ironic_inspector/test/unit/test_introspect.py index f7a0c25..3a093fa 100644 --- a/ironic_inspector/test/unit/test_introspect.py +++ b/ironic_inspector/test/unit/test_introspect.py @@ -189,12 +189,12 @@ class TestIntrospect(BaseTest): cli = client_mock.return_value cli.node.get.side_effect = exceptions.NotFound() self.assertRaisesRegexp(utils.Error, - 'Cannot find node', + 'Node %s was not found' % self.uuid, introspect.introspect, self.uuid) cli.node.get.side_effect = exceptions.BadRequest() self.assertRaisesRegexp(utils.Error, - 'Cannot get node', + '%s: Bad Request' % self.uuid, introspect.introspect, self.uuid) self.assertEqual(0, self.node_info.ports.call_count) diff --git a/ironic_inspector/test/unit/test_main.py b/ironic_inspector/test/unit/test_main.py index ec3b825..d7b727d 100644 --- a/ironic_inspector/test/unit/test_main.py +++ b/ironic_inspector/test/unit/test_main.py @@ -104,12 +104,6 @@ class TestApiIntrospect(BaseAPITest): self.assertEqual(403, res.status_code) self.assertFalse(introspect_mock.called) - @mock.patch.object(introspect, 'introspect', autospec=True) - def test_introspect_invalid_uuid(self, introspect_mock): - uuid_dummy = 'invalid-uuid' - res = self.app.post('/v1/introspection/%s' % uuid_dummy) - self.assertEqual(400, res.status_code) - @mock.patch.object(process, 'process', 
autospec=True) class TestApiContinue(BaseAPITest): @@ -233,6 +227,30 @@ class TestApiGetData(BaseAPITest): self.assertFalse(swift_conn.get_object.called) self.assertEqual(404, res.status_code) + @mock.patch.object(ir_utils, 'get_node', autospec=True) + @mock.patch.object(main.swift, 'SwiftAPI', autospec=True) + def test_with_name(self, swift_mock, get_mock): + get_mock.return_value = mock.Mock(uuid=self.uuid) + CONF.set_override('store_data', 'swift', 'processing') + data = { + 'ipmi_address': '1.2.3.4', + 'cpus': 2, + 'cpu_arch': 'x86_64', + 'memory_mb': 1024, + 'local_gb': 20, + 'interfaces': { + 'em1': {'mac': '11:22:33:44:55:66', 'ip': '1.2.0.1'}, + } + } + swift_conn = swift_mock.return_value + swift_conn.get_object.return_value = json.dumps(data) + res = self.app.get('/v1/introspection/name1/data') + name = 'inspector_data-%s' % self.uuid + swift_conn.get_object.assert_called_once_with(name) + self.assertEqual(200, res.status_code) + self.assertEqual(data, json.loads(res.data.decode('utf-8'))) + get_mock.assert_called_once_with('name1', fields=['uuid']) + @mock.patch.object(process, 'reapply', autospec=True) class TestApiReapply(BaseAPITest): diff --git a/ironic_inspector/test/unit/test_node_cache.py b/ironic_inspector/test/unit/test_node_cache.py index 0c10134..bf48183 100644 --- a/ironic_inspector/test/unit/test_node_cache.py +++ b/ironic_inspector/test/unit/test_node_cache.py @@ -336,7 +336,25 @@ class TestNodeCacheGetNode(test_base.NodeTest): self.assertTrue(info._locked) def test_not_found(self): - self.assertRaises(utils.Error, node_cache.get_node, 'foo') + self.assertRaises(utils.Error, node_cache.get_node, + uuidutils.generate_uuid()) + + def test_with_name(self): + started_at = time.time() - 42 + session = db.get_session() + with session.begin(): + db.Node(uuid=self.uuid, started_at=started_at).save(session) + ironic = mock.Mock() + ironic.node.get.return_value = self.node + + info = node_cache.get_node('name', ironic=ironic) + + 
self.assertEqual(self.uuid, info.uuid) + self.assertEqual(started_at, info.started_at) + self.assertIsNone(info.finished_at) + self.assertIsNone(info.error) + self.assertFalse(info._locked) + ironic.node.get.assert_called_once_with('name') @mock.patch.object(time, 'time', lambda: 42.0) diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py index 79ef8c6..0e948ae 100644 --- a/ironic_inspector/test/unit/test_process.py +++ b/ironic_inspector/test/unit/test_process.py @@ -130,7 +130,7 @@ class TestProcess(BaseProcessTest): self.cli.node.get.side_effect = exceptions.NotFound() self.assertRaisesRegexp(utils.Error, - 'not found', + 'Node %s was not found' % self.uuid, process.process, self.data) self.cli.node.get.assert_called_once_with(self.uuid) self.assertFalse(self.process_mock.called) diff --git a/releasenotes/notes/names-82d9f84153a228ec.yaml b/releasenotes/notes/names-82d9f84153a228ec.yaml new file mode 100644 index 0000000..ffcf468 --- /dev/null +++ b/releasenotes/notes/names-82d9f84153a228ec.yaml @@ -0,0 +1,5 @@ +--- +features: + - Add support for using Ironic node names in API instead of UUIDs. + Note that using node names in the introspection status API will require + a call to Ironic to be made by the service. 
From a48fc0c00763300e316ac4109319f020fcc03849 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 10 May 2016 00:44:37 +0000 Subject: [PATCH 26/83] Updated from global requirements Change-Id: I899b99e437fdbe30f1b8899931577ff8750a8756 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7bfe968..fe6f250 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,5 +23,5 @@ oslo.middleware>=3.0.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 six>=1.9.0 # MIT -stevedore>=1.9.0 # Apache-2.0 +stevedore>=1.10.0 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT From ef8033679639854b1e86f0dbcda3088f96333f80 Mon Sep 17 00:00:00 2001 From: Sam Betts Date: Wed, 11 May 2016 11:25:50 +0100 Subject: [PATCH 27/83] Ensure rules documentation examples are valid JSON Current documentation uses python syntax to explain the structure of the introspection rules, this is misleading as we're actually expecting JSON input on the rules API. This patch converts all rule examples to use JSON syntax to prevent confusion. Change-Id: If442fe5db8484900a5bd688e02d77d5bed69b326 Closes-Bug: #1564238 --- doc/source/usage.rst | 74 +++++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 35 deletions(-) diff --git a/doc/source/usage.rst b/doc/source/usage.rst index b0c946e..23f49d6 100644 --- a/doc/source/usage.rst +++ b/doc/source/usage.rst @@ -79,8 +79,8 @@ Starting with the Mitaka release, you can also apply conditions to ironic node field. Prefix field with schema (``data://`` or ``node://``) to distinguish between values from introspection data and node. 
Both schemes use JSON path:: - {'field': 'node://property.path', 'op': 'eq', 'value': 'val'} - {'field': 'data://introspection.path', 'op': 'eq', 'value': 'val'} + {"field": "node://property.path", "op": "eq", "value": "val"} + {"field": "data://introspection.path", "op": "eq", "value": "val"} if scheme (node or data) is missing, condition compares data with introspection data. @@ -127,8 +127,8 @@ Starting from Mitaka release, ``value`` field in actions supports fetching data from introspection, it's using `python string formatting notation `_ :: - {'action': 'set-attribute', 'path': '/driver_info/ipmi_address', - 'value': '{data[inventory][bmc_address]}'} + {"action": "set-attribute", "path": "/driver_info/ipmi_address", + "value": "{data[inventory][bmc_address]}"} .. _setting-ipmi-creds: @@ -241,43 +241,47 @@ see :ref:`rules`. A rule to set a node's Ironic driver to the ``agent_ipmitool`` driver and populate the required driver_info for that driver would look like:: - "description": "Set IPMI driver_info if no credentials", - "actions": [ - {'action': 'set-attribute', 'path': 'driver', 'value': 'agent_ipmitool'}, - {'action': 'set-attribute', 'path': 'driver_info/ipmi_username', - 'value': 'username'}, - {'action': 'set-attribute', 'path': 'driver_info/ipmi_password', - 'value': 'password'} - ] - "conditions": [ - {'op': 'is-empty', 'field': 'node://driver_info.ipmi_password'}, - {'op': 'is-empty', 'field': 'node://driver_info.ipmi_username'} - ] - - "description": "Set deploy info if not already set on node", - "actions": [ - {'action': 'set-attribute', 'path': 'driver_info/deploy_kernel', - 'value': ''}, - {'action': 'set-attribute', 'path': 'driver_info/deploy_ramdisk', - 'value': ''}, - ] - "conditions": [ - {'op': 'is-empty', 'field': 'node://driver_info.deploy_ramdisk'}, - {'op': 'is-empty', 'field': 'node://driver_info.deploy_kernel'} - ] + [{ + "description": "Set IPMI driver_info if no credentials", + "actions": [ + {"action": "set-attribute", "path": 
"driver", "value": "agent_ipmitool"}, + {"action": "set-attribute", "path": "driver_info/ipmi_username", + "value": "username"}, + {"action": "set-attribute", "path": "driver_info/ipmi_password", + "value": "password"} + ], + "conditions": [ + {"op": "is-empty", "field": "node://driver_info.ipmi_password"}, + {"op": "is-empty", "field": "node://driver_info.ipmi_username"} + ] + },{ + "description": "Set deploy info if not already set on node", + "actions": [ + {"action": "set-attribute", "path": "driver_info/deploy_kernel", + "value": ""}, + {"action": "set-attribute", "path": "driver_info/deploy_ramdisk", + "value": ""} + ], + "conditions": [ + {"op": "is-empty", "field": "node://driver_info.deploy_ramdisk"}, + {"op": "is-empty", "field": "node://driver_info.deploy_kernel"} + ] + }] All nodes discovered and enrolled via the ``enroll`` hook, will contain an ``auto_discovered`` flag in the introspection data, this flag makes it possible to distinguish between manually enrolled nodes and auto-discovered nodes in the introspection rules using the rule condition ``eq``:: - "description": "Enroll auto-discovered nodes with fake driver", - "actions": [ - {'action': 'set-attribute', 'path': 'driver', 'value': 'fake'} - ] - "conditions": [ - {'op': 'eq', 'field': 'data://auto_discovered', 'value': True} - ] + { + "description": "Enroll auto-discovered nodes with fake driver", + "actions": [ + {"action": "set-attribute", "path": "driver", "value": "fake"} + ], + "conditions": [ + {"op": "eq", "field": "data://auto_discovered", "value": true} + ] + } Reapplying introspection on stored data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From e3a357732bf79519fb1f667eaa86831e72d8aa6e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 19 May 2016 18:42:56 +0000 Subject: [PATCH 28/83] Updated from global requirements Change-Id: I8b85e3a7a76c439f3129b072f0d894b769455c67 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt 
b/requirements.txt index fe6f250..60dc95a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ netaddr!=0.7.16,>=0.7.12 # BSD pbr>=1.6 # Apache-2.0 python-ironicclient>=1.1.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 -oslo.concurrency>=3.5.0 # Apache-2.0 +oslo.concurrency>=3.8.0 # Apache-2.0 oslo.config>=3.9.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 From 80730ac80b70a316b63a3bc9861acda271f56f90 Mon Sep 17 00:00:00 2001 From: Yosef Hoffman Date: Fri, 20 May 2016 11:56:56 -0400 Subject: [PATCH 29/83] Update Introspection API Docs from UUID to Node ID Nodes now accept names in addition to UUIDs, so this updates the docs accordingly. Change-Id: Ifdd3cea73f7c19a8cacdd662e44898300b43f5cf Closes-Bug: #1584116 --- doc/source/http-api.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/http-api.rst b/doc/source/http-api.rst index 103247a..1cfa4a6 100644 --- a/doc/source/http-api.rst +++ b/doc/source/http-api.rst @@ -9,9 +9,9 @@ can be changed in configuration. Protocol is JSON over HTTP. Start Introspection ~~~~~~~~~~~~~~~~~~~ -``POST /v1/introspection/`` initiate hardware introspection for node -````. All power management configuration for this node needs to be done -prior to calling the endpoint (except when :ref:`setting-ipmi-creds`). +``POST /v1/introspection/`` initiate hardware introspection for node +````. All power management configuration for this node needs to be +done prior to calling the endpoint (except when :ref:`setting-ipmi-creds`). Requires X-Auth-Token header with Keystone token for authentication. @@ -36,7 +36,7 @@ Response: Get Introspection Status ~~~~~~~~~~~~~~~~~~~~~~~~ -``GET /v1/introspection/`` get hardware introspection status. +``GET /v1/introspection/`` get hardware introspection status. Requires X-Auth-Token header with Keystone token for authentication. 
@@ -58,7 +58,7 @@ Response body: JSON dictionary with keys: Abort Running Introspection ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``POST /v1/introspection//abort`` abort running introspection. +``POST /v1/introspection//abort`` abort running introspection. Requires X-Auth-Token header with Keystone token for authentication. @@ -74,7 +74,7 @@ Response: Get Introspection Data ~~~~~~~~~~~~~~~~~~~~~~ -``GET /v1/introspection//data`` get stored data from successful +``GET /v1/introspection//data`` get stored data from successful introspection. Requires X-Auth-Token header with Keystone token for authentication. @@ -96,7 +96,7 @@ Response body: JSON dictionary with introspection data Reapply introspection on stored data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``POST /v1/introspection//data/unprocessed`` to trigger +``POST /v1/introspection//data/unprocessed`` to trigger introspection on stored unprocessed data. No data is allowed to be sent along with the request. @@ -109,7 +109,7 @@ Response: * 202 - accepted * 400 - bad request or store not configured * 401, 403 - missing or invalid authentication -* 404 - node not found for UUID +* 404 - node not found for Node ID * 409 - inspector locked node for processing Introspection Rules From 8fe0344cfd195c6405820ab6e234b0783054e7e0 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Wed, 11 May 2016 17:51:09 +0200 Subject: [PATCH 30/83] Always convert the size root device hints to an integer This matches the IPA behaviour and is more user-friendly. 
Change-Id: Ic7134c338e703361fedbf9917a3484c0bcc9d493 Closes-Bug: #1580664 --- ironic_inspector/plugins/standard.py | 9 +++++++++ ironic_inspector/test/unit/test_plugins_standard.py | 13 +++++++++++++ releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml | 4 ++++ 3 files changed, 26 insertions(+) create mode 100644 releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml diff --git a/ironic_inspector/plugins/standard.py b/ironic_inspector/plugins/standard.py index 8c9befe..83601d7 100644 --- a/ironic_inspector/plugins/standard.py +++ b/ironic_inspector/plugins/standard.py @@ -53,6 +53,15 @@ class RootDiskSelectionHook(base.ProcessingHook): 'as an inspection ramdisk'), node_info=node_info, data=introspection_data) + if 'size' in hints: + # Special case to match IPA behaviour + try: + hints['size'] = int(hints['size']) + except (TypeError, ValueError): + raise utils.Error(_('Invalid root device size hint, expected ' + 'an integer, got %s') % hints['size'], + node_info=node_info, data=introspection_data) + disks = inventory.get('disks', []) if not disks: raise utils.Error(_('No disks found'), diff --git a/ironic_inspector/test/unit/test_plugins_standard.py b/ironic_inspector/test/unit/test_plugins_standard.py index 2892a88..b5f76b3 100644 --- a/ironic_inspector/test/unit/test_plugins_standard.py +++ b/ironic_inspector/test/unit/test_plugins_standard.py @@ -388,6 +388,19 @@ class TestRootDiskSelection(test_base.NodeTest): self.assertNotIn('local_gb', self.data) self.assertNotIn('root_disk', self.data) + def test_size_string(self): + self.node.properties['root_device'] = {'size': '10'} + self.hook.before_update(self.data, self.node_info) + self.assertEqual(self.matched, self.data['root_disk']) + + def test_size_invalid(self): + for bad_size in ('foo', None, {}): + self.node.properties['root_device'] = {'size': bad_size} + self.assertRaisesRegexp(utils.Error, + 'Invalid root device size hint', + self.hook.before_update, + self.data, self.node_info) + class 
TestRamdiskError(test_base.BaseTest): def setUp(self): diff --git a/releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml b/releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml new file mode 100644 index 0000000..e75afa3 --- /dev/null +++ b/releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - The "size" root device hint is now always converted to an integer for + consistency with IPA. From ace6a5bb6c0bd69c1f364fa29098c3bc2d73d58a Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 23 May 2016 11:59:17 +0000 Subject: [PATCH 31/83] Updated from global requirements Change-Id: I8c12481dbb8be26a9d88064451df9437c5f489f2 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 60dc95a..d4e93ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ futurist>=0.11.0 # Apache-2.0 jsonpath-rw<2.0,>=1.2.0 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT keystoneauth1>=2.1.0 # Apache-2.0 -keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 +keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD pbr>=1.6 # Apache-2.0 python-ironicclient>=1.1.0 # Apache-2.0 From e33d60e8467a1078dd57260b2cf10061e36f42b7 Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Thu, 26 May 2016 15:33:48 +0300 Subject: [PATCH 32/83] Fix py3 issue in functional tests Change expected result bytes -> string, requests returns result as string, which is unicode in py3. 
Change-Id: I4fe6792303d9f126173045da038dcaf518a5bcc3 --- ironic_inspector/test/functional.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ironic_inspector/test/functional.py b/ironic_inspector/test/functional.py index 88241e0..74118e5 100644 --- a/ironic_inspector/test/functional.py +++ b/ironic_inspector/test/functional.py @@ -463,7 +463,7 @@ class Test(Base): res = self.call_reapply(self.uuid) self.assertEqual(202, res.status_code) - self.assertEqual(b'', res.text) + self.assertEqual('', res.text) eventlet.greenthread.sleep(DEFAULT_SLEEP) # reapply request data @@ -491,7 +491,7 @@ class Test(Base): get_mock.return_value = ramdisk_data res = self.call_reapply(self.uuid) self.assertEqual(202, res.status_code) - self.assertEqual(b'', res.text) + self.assertEqual('', res.text) eventlet.greenthread.sleep(DEFAULT_SLEEP) # reapply saves the result From 1d8cfe919a3cbf3e83f8349ea069231d09673add Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 26 May 2016 16:58:50 +0000 Subject: [PATCH 33/83] Updated from global requirements Change-Id: I5434e11d9a9bd9b4caa5008603481a67135b6d1e --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 166089b..47bca46 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,7 +4,7 @@ coverage>=3.6 # Apache-2.0 doc8 # Apache-2.0 hacking<0.11,>=0.10.0 -mock>=1.2 # BSD +mock>=2.0 # BSD sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=1.6.2 # Apache2 From f66592b4f03dcb0440692afd3ade01b86c74e8e3 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 30 May 2016 00:34:05 +0000 Subject: [PATCH 34/83] Updated from global requirements Change-Id: I743db75923339185f3a7d2152dcbccf02c0e8071 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 47bca46..9878825 100644 --- 
a/test-requirements.txt +++ b/test-requirements.txt @@ -8,7 +8,7 @@ mock>=2.0 # BSD sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=1.6.2 # Apache2 -fixtures<2.0,>=1.3.1 # Apache-2.0/BSD +fixtures>=3.0.0 # Apache-2.0/BSD testresources>=0.2.4 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD oslotest>=1.10.0 # Apache-2.0 From c2569c3a60167fd6ac24de32f0693150d3fa8db6 Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Thu, 17 Mar 2016 12:11:30 +0200 Subject: [PATCH 35/83] Tempest: add basic test Change-Id: I7155e797fecf18b867eeb7c63ebbcb35d3cbb9c3 Co-Authored-By: dparalen Depends-On: Ibf0c73aa6795aaa52e945fd6baa821de20a599e7 Depends-On: I067504e49f68929298c91e61819aa9a61169fe52 --- .../test/inspector_tempest_plugin/config.py | 39 +++++ .../scenario/manager.py => exceptions.py} | 17 +- .../test/inspector_tempest_plugin/plugin.py | 11 +- .../rules/basic_ops_rule.json | 25 +++ .../services/introspection_client.py | 70 ++++++++ .../tests/api/__init__.py | 0 .../inspector_tempest_plugin/tests/manager.py | 140 ++++++++++++++++ .../tests/scenario/__init__.py | 0 .../tests/scenario/test_basic.py | 27 ---- .../tests/test_basic.py | 149 ++++++++++++++++++ 10 files changed, 438 insertions(+), 40 deletions(-) rename ironic_inspector/test/inspector_tempest_plugin/{tests/scenario/manager.py => exceptions.py} (61%) create mode 100644 ironic_inspector/test/inspector_tempest_plugin/rules/basic_ops_rule.json create mode 100644 ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/api/__init__.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/manager.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/scenario/__init__.py delete mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/scenario/test_basic.py create mode 100644 ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py 
diff --git a/ironic_inspector/test/inspector_tempest_plugin/config.py b/ironic_inspector/test/inspector_tempest_plugin/config.py index 27c2fb3..d41e01e 100644 --- a/ironic_inspector/test/inspector_tempest_plugin/config.py +++ b/ironic_inspector/test/inspector_tempest_plugin/config.py @@ -10,4 +10,43 @@ # License for the specific language governing permissions and limitations # under the License. +from oslo_config import cfg + from tempest import config # noqa + + +baremetal_introspection_group = cfg.OptGroup( + name="baremetal_introspection", + title="Baremetal introspection service options", + help="When enabling baremetal introspection tests," + "Ironic must be configured.") + +BaremetalIntrospectionGroup = [ + cfg.StrOpt('catalog_type', + default='baremetal-introspection', + help="Catalog type of the baremetal provisioning service"), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the baremetal introspection" + " service"), + cfg.IntOpt('introspection_sleep', + default=30, + help="Introspection sleep before check status"), + cfg.IntOpt('introspection_timeout', + default=600, + help="Introspection time out"), + cfg.IntOpt('hypervisor_update_sleep', + default=60, + help="Time to wait until nova becomes aware of " + "bare metal instances"), + cfg.IntOpt('hypervisor_update_timeout', + default=300, + help="Time out for wait until nova becomes aware of " + "bare metal instances"), + cfg.IntOpt('ironic_sync_timeout', + default=60, + help="Time it might take for Ironic--Inspector " + "sync to happen"), +] diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/manager.py b/ironic_inspector/test/inspector_tempest_plugin/exceptions.py similarity index 61% rename from ironic_inspector/test/inspector_tempest_plugin/tests/scenario/manager.py rename to ironic_inspector/test/inspector_tempest_plugin/exceptions.py index 
ad47d25..7791c40 100644 --- a/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/manager.py +++ b/ironic_inspector/test/inspector_tempest_plugin/exceptions.py @@ -10,17 +10,16 @@ # License for the specific language governing permissions and limitations # under the License. -from tempest.scenario import manager +from tempest import exceptions -class InspectorScenarioTest(manager.BaremetalScenarioTest): - """Provide harness to do Inspector scenario tests.""" +class IntrospectionFailed(exceptions.TempestException): + message = "Introspection failed" - credentials = ['primary', 'admin'] - @classmethod - def setup_clients(cls): - super(InspectorScenarioTest, cls).setup_clients() +class IntrospectionTimeout(exceptions.TempestException): + message = "Introspection time out" - def setUp(self): - super(InspectorScenarioTest, self).setUp() + +class HypervisorUpdateTimeout(exceptions.TempestException): + message = "Hypervisor stats update time out" diff --git a/ironic_inspector/test/inspector_tempest_plugin/plugin.py b/ironic_inspector/test/inspector_tempest_plugin/plugin.py index 5d9a093..06a167f 100644 --- a/ironic_inspector/test/inspector_tempest_plugin/plugin.py +++ b/ironic_inspector/test/inspector_tempest_plugin/plugin.py @@ -13,10 +13,10 @@ import os -from tempest import config # noqa +from tempest import config as tempest_config from tempest.test_discover import plugins -from ironic_inspector.test.inspector_tempest_plugin import config # noqa +from ironic_inspector.test.inspector_tempest_plugin import config class InspectorTempestPlugin(plugins.TempestPlugin): @@ -28,7 +28,10 @@ class InspectorTempestPlugin(plugins.TempestPlugin): return full_test_dir, base_path def register_opts(self, conf): - pass + tempest_config.register_opt_group( + conf, config.baremetal_introspection_group, + config.BaremetalIntrospectionGroup) def get_opt_lists(self): - pass + return [(config.baremetal_introspection_group.name, + config.BaremetalIntrospectionGroup)] diff --git 
a/ironic_inspector/test/inspector_tempest_plugin/rules/basic_ops_rule.json b/ironic_inspector/test/inspector_tempest_plugin/rules/basic_ops_rule.json new file mode 100644 index 0000000..f1cfb0b --- /dev/null +++ b/ironic_inspector/test/inspector_tempest_plugin/rules/basic_ops_rule.json @@ -0,0 +1,25 @@ +[ + { + "description": "Successful Rule", + "conditions": [ + {"op": "ge", "field": "memory_mb", "value": 256}, + {"op": "ge", "field": "local_gb", "value": 1} + ], + "actions": [ + {"action": "set-attribute", "path": "/extra/rule_success", + "value": "yes"} + ] + }, + { + "description": "Failing Rule", + "conditions": [ + {"op": "lt", "field": "memory_mb", "value": 42}, + {"op": "eq", "field": "local_gb", "value": 0} + ], + "actions": [ + {"action": "set-attribute", "path": "/extra/rule_success", + "value": "no"}, + {"action": "fail", "message": "This rule should not have run"} + ] + } +] diff --git a/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py b/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py new file mode 100644 index 0000000..346e06c --- /dev/null +++ b/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py @@ -0,0 +1,70 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json + +from tempest import clients +from tempest.common import credentials_factory as common_creds +from tempest import config +from tempest.services.baremetal import base + + +CONF = config.CONF +ADMIN_CREDS = common_creds.get_configured_admin_credentials() + + +class Manager(clients.Manager): + def __init__(self, + credentials=ADMIN_CREDS, + service=None, + api_microversions=None): + super(Manager, self).__init__(credentials, service) + self.introspection_client = BaremetalIntrospectionClient( + self.auth_provider, + CONF.baremetal_introspection.catalog_type, + CONF.identity.region, + endpoint_type=CONF.baremetal_introspection.endpoint_type, + **self.default_params_with_timeout_values) + + +class BaremetalIntrospectionClient(base.BaremetalClient): + """Base Tempest REST client for Ironic Inspector API v1.""" + version = '1' + uri_prefix = 'v1' + + @base.handle_errors + def purge_rules(self): + """Purge all existing rules.""" + return self._delete_request('rules', uuid=None) + + @base.handle_errors + def import_rule(self, rule_path): + """Import introspection rules from a json file.""" + with open(rule_path, 'r') as fp: + rules = json.load(fp) + if not isinstance(rules, list): + rules = [rules] + + for rule in rules: + self._create_request('rules', rule) + + @base.handle_errors + def get_status(self, uuid): + """Get introspection status for a node.""" + return self._show_request('introspection', uuid=uuid) + + @base.handle_errors + def get_data(self, uuid): + """Get introspection data for a node.""" + return self._show_request('introspection', uuid=uuid, + uri='/%s/introspection/%s/data' % + (self.uri_prefix, uuid)) diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/api/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/tests/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py b/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py 
new file mode 100644 index 0000000..dd2a18e --- /dev/null +++ b/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py @@ -0,0 +1,140 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import os +import time + +from tempest import config + +from ironic_inspector.test.inspector_tempest_plugin import exceptions +from ironic_inspector.test.inspector_tempest_plugin.services import \ + introspection_client +from ironic_tempest_plugin.tests.scenario.baremetal_manager import \ + BaremetalScenarioTest + + +CONF = config.CONF + + +class InspectorScenarioTest(BaremetalScenarioTest): + """Provide harness to do Inspector scenario tests.""" + + credentials = ['primary', 'admin'] + + @classmethod + def setup_clients(cls): + super(InspectorScenarioTest, cls).setup_clients() + inspector_manager = introspection_client.Manager() + cls.introspection_client = inspector_manager.introspection_client + + def setUp(self): + super(InspectorScenarioTest, self).setUp() + self.flavor = self.baremetal_flavor() + + def item_filter(self, list_method, show_method, + filter=lambda item: True, items=None): + if items is None: + items = [show_method(item['uuid']) for item in + list_method()] + return [item for item in items if filter(item)] + + def node_list(self): + return self.baremetal_client.list_nodes()[1]['nodes'] + + def node_update(self, uuid, patch): + return self.baremetal_client.update_node(uuid, **patch) + + def node_show(self, uuid): + return 
self.baremetal_client.show_node(uuid)[1] + + def node_filter(self, filter=lambda node: True, nodes=None): + return self.item_filter(self.node_list, self.node_show, + filter=filter, items=nodes) + + def hypervisor_stats(self): + return (self.admin_manager.hypervisor_client. + show_hypervisor_statistics()) + + def server_show(self, uuid): + self.servers_client.show_server(uuid) + + def rule_purge(self): + self.introspection_client.purge_rules() + + def rule_import(self, rule_path): + self.introspection_client.import_rule(rule_path) + + def introspection_status(self, uuid): + return self.introspection_client.get_status(uuid)[1] + + def introspection_data(self, uuid): + return self.introspection_client.get_data(uuid)[1] + + def baremetal_flavor(self): + flavor_id = CONF.compute.flavor_ref + flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] + flavor['properties'] = self.flavors_client.list_flavor_extra_specs( + flavor_id)['extra_specs'] + return flavor + + def get_rule_path(self, rule_file): + base_path = os.path.split( + os.path.dirname(os.path.abspath(__file__)))[0] + base_path = os.path.split(base_path)[0] + return os.path.join(base_path, "inspector_tempest_plugin", + "rules", rule_file) + + # TODO(aarefiev): switch to call_until_true + def wait_for_introspection_finished(self, node_ids): + """Waits for introspection of baremetal nodes to finish. + + """ + start = int(time.time()) + not_introspected = {node_id for node_id in node_ids} + + while not_introspected: + time.sleep(CONF.baremetal_introspection.introspection_sleep) + for node_id in node_ids: + status = self.introspection_status(node_id) + if status['finished']: + if status['error']: + message = ('Node %(node_id)s introspection failed ' + 'with %(error)s.' 
% + {'node_id': node_id, + 'error': status['error']}) + raise exceptions.IntrospectionFailed(message) + not_introspected = not_introspected - {node_id} + + if (int(time.time()) - start >= + CONF.baremetal_introspection.introspection_timeout): + message = ('Introspection timed out for nodes: %s' % + not_introspected) + raise exceptions.IntrospectionTimeout(message) + + def wait_for_nova_aware_of_bvms(self): + start = int(time.time()) + while True: + time.sleep(CONF.baremetal_introspection.hypervisor_update_sleep) + stats = self.hypervisor_stats() + expected_cpus = self.baremetal_flavor()['vcpus'] + if int(stats['hypervisor_statistics']['vcpus']) >= expected_cpus: + break + + timeout = CONF.baremetal_introspection.hypervisor_update_timeout + if (int(time.time()) - start >= timeout): + message = ( + 'Timeout while waiting for nova hypervisor-stats: ' + '%(stats)s required time (%(timeout)s s).' % + {'stats': stats, + 'timeout': timeout}) + raise exceptions.HypervisorUpdateTimeout(message) diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/__init__.py b/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/test_basic.py b/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/test_basic.py deleted file mode 100644 index 9b34d33..0000000 --- a/ironic_inspector/test/inspector_tempest_plugin/tests/scenario/test_basic.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest import test # noqa - -from ironic_inspector.test.inspector_tempest_plugin.tests.scenario \ - import manager - - -class InspectorBasicTest(manager.InspectorScenarioTest): - @test.idempotent_id('03bf7990-bee0-4dd7-bf74-b97ad7b52a4b') - @test.services('baremetal', 'compute', 'image', - 'network', 'object_storage') - def test_berametal_introspection_ops(self): - """This smoke test case follows this basic set of operations: - - """ - pass diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py b/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py new file mode 100644 index 0000000..6830b78 --- /dev/null +++ b/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py @@ -0,0 +1,149 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import tempest + +from tempest.config import CONF +from tempest import test # noqa + +from ironic_inspector.test.inspector_tempest_plugin.tests import manager +from ironic_tempest_plugin.tests.api.admin.api_microversion_fixture import \ + APIMicroversionFixture as IronicMicroversionFixture +from ironic_tempest_plugin.tests.scenario.baremetal_manager import \ + BaremetalProvisionStates +from tempest.lib.common.api_version_utils import LATEST_MICROVERSION + + +class InspectorBasicTest(manager.InspectorScenarioTest): + wait_provisioning_state_interval = 15 + + def node_cleanup(self, node_id): + if (self.node_show(node_id)['provision_state'] == + BaremetalProvisionStates.AVAILABLE): + return + try: + self.baremetal_client.set_node_provision_state(node_id, 'provide') + except tempest.lib.exceptions.RestClientException: + # maybe node already cleaning or available + pass + + self.wait_provisioning_state( + node_id, [BaremetalProvisionStates.AVAILABLE, + BaremetalProvisionStates.NOSTATE], + timeout=CONF.baremetal.unprovision_timeout, + interval=self.wait_provisioning_state_interval) + + def introspect_node(self, node_id): + # in case there are properties remove those + patch = {('properties/%s' % key): None for key in + self.node_show(node_id)['properties']} + # reset any previous rule result + patch['extra/rule_success'] = None + self.node_update(node_id, patch) + + self.baremetal_client.set_node_provision_state(node_id, 'manage') + self.baremetal_client.set_node_provision_state(node_id, 'inspect') + self.addCleanup(self.node_cleanup, node_id) + + def setUp(self): + super(InspectorBasicTest, self).setUp() + # we rely on the 'available' provision_state; using latest + # microversion + self.useFixture(IronicMicroversionFixture(LATEST_MICROVERSION)) + # avoid testing nodes that aren't available + self.node_ids = {node['uuid'] for node in + self.node_filter(filter=lambda node: + node['provision_state'] == + BaremetalProvisionStates.AVAILABLE)} + if not self.node_ids: + 
self.skipTest('no available nodes detected') + self.rule_purge() + + def verify_node_introspection_data(self, node): + self.assertEqual('yes', node['extra']['rule_success']) + data = self.introspection_data(node['uuid']) + self.assertEqual(data['cpu_arch'], + self.flavor['properties']['cpu_arch']) + self.assertEqual(int(data['memory_mb']), + int(self.flavor['ram'])) + self.assertEqual(int(data['cpus']), int(self.flavor['vcpus'])) + + def verify_node_flavor(self, node): + expected_cpus = self.flavor['vcpus'] + expected_memory_mb = self.flavor['ram'] + expected_cpu_arch = self.flavor['properties']['cpu_arch'] + disk_size = self.flavor['disk'] + ephemeral_size = self.flavor['OS-FLV-EXT-DATA:ephemeral'] + expected_local_gb = disk_size + ephemeral_size + + self.assertEqual(expected_cpus, + int(node['properties']['cpus'])) + self.assertEqual(expected_memory_mb, + int(node['properties']['memory_mb'])) + self.assertEqual(expected_local_gb, + int(node['properties']['local_gb'])) + self.assertEqual(expected_cpu_arch, + node['properties']['cpu_arch']) + + @test.idempotent_id('03bf7990-bee0-4dd7-bf74-b97ad7b52a4b') + @test.services('baremetal', 'compute', 'image', + 'network', 'object_storage') + def test_baremetal_introspection(self): + """This smoke test case follows this basic set of operations: + + * Fetches expected properties from baremetal flavor + * Removes all properties from nodes + * Sets nodes to manageable state + * Imports introspection rule basic_ops_rule.json + * Inspects nodes + * Verifies all properties are inspected + * Verifies introspection data + * Sets node to available state + * Creates a keypair + * Boots an instance using the keypair + * Deletes the instance + + """ + # prepare introspection rule + rule_path = self.get_rule_path("basic_ops_rule.json") + self.rule_import(rule_path) + self.addCleanup(self.rule_purge) + + for node_id in self.node_ids: + self.introspect_node(node_id) + + # settle down introspection + 
self.wait_for_introspection_finished(self.node_ids) + for node_id in self.node_ids: + self.wait_provisioning_state( + node_id, 'manageable', + timeout=CONF.baremetal_introspection.ironic_sync_timeout, + interval=self.wait_provisioning_state_interval) + + for node_id in self.node_ids: + node = self.node_show(node_id) + self.verify_node_introspection_data(node) + self.verify_node_flavor(node) + + for node_id in self.node_ids: + self.baremetal_client.set_node_provision_state(node_id, 'provide') + + for node_id in self.node_ids: + self.wait_provisioning_state( + node_id, BaremetalProvisionStates.AVAILABLE, + timeout=CONF.baremetal.active_timeout, + interval=self.wait_provisioning_state_interval) + + self.wait_for_nova_aware_of_bvms() + self.add_keypair() + self.boot_instance() + self.terminate_instance() From 1369235bf9f5064fc4d787a62b419802679fac17 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 31 May 2016 03:01:00 +0000 Subject: [PATCH 36/83] Updated from global requirements Change-Id: I0dd3331fd38d2fbd1a865e3899130fbd9dd578b7 --- requirements.txt | 2 +- test-requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index d4e93ac..8d257e7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 -oslo.utils>=3.5.0 # Apache-2.0 +oslo.utils>=3.9.0 # Apache-2.0 six>=1.9.0 # MIT stevedore>=1.10.0 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT diff --git a/test-requirements.txt b/test-requirements.txt index 9878825..47bca46 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,7 +8,7 @@ mock>=2.0 # BSD sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=1.6.2 # Apache2 -fixtures>=3.0.0 # Apache-2.0/BSD +fixtures<2.0,>=1.3.1 # Apache-2.0/BSD testresources>=0.2.4 # Apache-2.0/BSD testscenarios>=0.4 # 
Apache-2.0/BSD oslotest>=1.10.0 # Apache-2.0 From c13cbd3643becb49ce9b964b2164a5fd3c3c3cb5 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 1 Jun 2016 19:01:10 +0000 Subject: [PATCH 37/83] Updated from global requirements Change-Id: Id3494379f96fb9b67ae47f35e6bcccaae02e8c1b --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 8d257e7..68887ad 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ alembic>=0.8.4 # MIT Babel>=2.3.4 # BSD eventlet!=0.18.3,>=0.18.2 # MIT -Flask<1.0,>=0.10 # BSD +Flask!=0.11,<1.0,>=0.10 # BSD futurist>=0.11.0 # Apache-2.0 jsonpath-rw<2.0,>=1.2.0 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT @@ -21,7 +21,7 @@ oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 -oslo.utils>=3.9.0 # Apache-2.0 +oslo.utils>=3.11.0 # Apache-2.0 six>=1.9.0 # MIT stevedore>=1.10.0 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT From 52991a1b4331767bd179765d7c67a1e9110b27b3 Mon Sep 17 00:00:00 2001 From: JinLi Date: Wed, 27 Apr 2016 22:34:02 +0000 Subject: [PATCH 38/83] Remove iterated form of side effects to simplify code remove iterated form of side effects from some test cases that are simulating single exception/error Change-Id: I5e9cb760587a48d8bbe059191f3605f6ed547a44 Closes-Bug: #1564392 --- ironic_inspector/test/unit/test_introspect.py | 6 +++--- ironic_inspector/test/unit/test_main.py | 16 ++++++++-------- ironic_inspector/test/unit/test_process.py | 1 - 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/ironic_inspector/test/unit/test_introspect.py b/ironic_inspector/test/unit/test_introspect.py index 3a093fa..bf752c7 100644 --- a/ironic_inspector/test/unit/test_introspect.py +++ b/ironic_inspector/test/unit/test_introspect.py @@ -444,7 +444,7 @@ class TestAbort(BaseTest): def test_node_not_found(self, client_mock, get_mock, filters_mock): cli = 
self._prepare(client_mock) exc = utils.Error('Not found.', code=404) - get_mock.side_effect = iter([exc]) + get_mock.side_effect = exc self.assertRaisesRegexp(utils.Error, str(exc), introspect.abort, self.uuid) @@ -487,7 +487,7 @@ class TestAbort(BaseTest): self.node_info.acquire_lock.return_value = True self.node_info.started_at = time.time() self.node_info.finished_at = None - filters_mock.side_effect = iter([Exception('Boom')]) + filters_mock.side_effect = Exception('Boom') introspect.abort(self.uuid) @@ -506,7 +506,7 @@ class TestAbort(BaseTest): self.node_info.acquire_lock.return_value = True self.node_info.started_at = time.time() self.node_info.finished_at = None - cli.node.set_power_state.side_effect = iter([Exception('BadaBoom')]) + cli.node.set_power_state.side_effect = Exception('BadaBoom') introspect.abort(self.uuid) diff --git a/ironic_inspector/test/unit/test_main.py b/ironic_inspector/test/unit/test_main.py index d7b727d..daa77d7 100644 --- a/ironic_inspector/test/unit/test_main.py +++ b/ironic_inspector/test/unit/test_main.py @@ -82,7 +82,7 @@ class TestApiIntrospect(BaseAPITest): @mock.patch.object(introspect, 'introspect', autospec=True) def test_intospect_failed(self, introspect_mock): - introspect_mock.side_effect = iter([utils.Error("boom")]) + introspect_mock.side_effect = utils.Error("boom") res = self.app.post('/v1/introspection/%s' % self.uuid) self.assertEqual(400, res.status_code) self.assertEqual( @@ -98,7 +98,7 @@ class TestApiIntrospect(BaseAPITest): def test_introspect_failed_authentication(self, introspect_mock, auth_mock): CONF.set_override('auth_strategy', 'keystone') - auth_mock.side_effect = iter([utils.Error('Boom', code=403)]) + auth_mock.side_effect = utils.Error('Boom', code=403) res = self.app.post('/v1/introspection/%s' % self.uuid, headers={'X-Auth-Token': 'token'}) self.assertEqual(403, res.status_code) @@ -117,7 +117,7 @@ class TestApiContinue(BaseAPITest): self.assertEqual({"result": 42}, json.loads(res.data.decode())) 
def test_continue_failed(self, process_mock): - process_mock.side_effect = iter([utils.Error("boom")]) + process_mock.side_effect = utils.Error("boom") res = self.app.post('/v1/continue', data='{"foo": "bar"}') self.assertEqual(400, res.status_code) process_mock.assert_called_once_with({"foo": "bar"}) @@ -154,7 +154,7 @@ class TestApiAbort(BaseAPITest): def test_node_not_found(self, abort_mock): exc = utils.Error("Not Found.", code=404) - abort_mock.side_effect = iter([exc]) + abort_mock.side_effect = exc res = self.app.post('/v1/introspection/%s/abort' % self.uuid) @@ -165,7 +165,7 @@ class TestApiAbort(BaseAPITest): def test_abort_failed(self, abort_mock): exc = utils.Error("Locked.", code=409) - abort_mock.side_effect = iter([exc]) + abort_mock.side_effect = exc res = self.app.post('/v1/introspection/%s/abort' % self.uuid) @@ -411,7 +411,7 @@ class TestApiRules(BaseAPITest): class TestApiMisc(BaseAPITest): @mock.patch.object(node_cache, 'get_node', autospec=True) def test_404_expected(self, get_mock): - get_mock.side_effect = iter([utils.Error('boom', code=404)]) + get_mock.side_effect = utils.Error('boom', code=404) res = self.app.get('/v1/introspection/%s' % self.uuid) self.assertEqual(404, res.status_code) self.assertEqual('boom', _get_error(res)) @@ -424,7 +424,7 @@ class TestApiMisc(BaseAPITest): @mock.patch.object(node_cache, 'get_node', autospec=True) def test_500_with_debug(self, get_mock): CONF.set_override('debug', True) - get_mock.side_effect = iter([RuntimeError('boom')]) + get_mock.side_effect = RuntimeError('boom') res = self.app.get('/v1/introspection/%s' % self.uuid) self.assertEqual(500, res.status_code) self.assertEqual('Internal server error (RuntimeError): boom', @@ -433,7 +433,7 @@ class TestApiMisc(BaseAPITest): @mock.patch.object(node_cache, 'get_node', autospec=True) def test_500_without_debug(self, get_mock): CONF.set_override('debug', False) - get_mock.side_effect = iter([RuntimeError('boom')]) + get_mock.side_effect = 
RuntimeError('boom') res = self.app.get('/v1/introspection/%s' % self.uuid) self.assertEqual(500, res.status_code) self.assertEqual('Internal server error', diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py index 0e948ae..c9ff752 100644 --- a/ironic_inspector/test/unit/test_process.py +++ b/ironic_inspector/test/unit/test_process.py @@ -119,7 +119,6 @@ class TestProcess(BaseProcessTest): def test_not_found_in_cache(self): self.find_mock.side_effect = utils.Error('not found') - self.assertRaisesRegexp(utils.Error, 'not found', process.process, self.data) From 6e39b637e8e2e1565baaf968a2ede2b513c5adc1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 3 Jun 2016 18:13:18 +0000 Subject: [PATCH 39/83] Updated from global requirements Change-Id: I74be32fcb5fb3ca7c70149b68dd6f6c02a08518a --- requirements.txt | 2 +- test-requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 68887ad..ef9ebe7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ pbr>=1.6 # Apache-2.0 python-ironicclient>=1.1.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config>=3.9.0 # Apache-2.0 +oslo.config>=3.10.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 diff --git a/test-requirements.txt b/test-requirements.txt index 47bca46..9878825 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,7 +8,7 @@ mock>=2.0 # BSD sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=1.6.2 # Apache2 -fixtures<2.0,>=1.3.1 # Apache-2.0/BSD +fixtures>=3.0.0 # Apache-2.0/BSD testresources>=0.2.4 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD oslotest>=1.10.0 # Apache-2.0 From 0b58e31e3e5466cbed7a4fbae5827cd227b0f47f Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Fri, 20 May 2016 14:35:19 +0200 Subject: 
[PATCH 40/83] Remove support for the old bash ramdisk From now on only rely on the IPA inventory and 2 additional fields: boot_interface and root_device. Also updated unit tests to use one inventory example. Also removed duplicating unit tests and checks in test_process. Also removed devstack support for the old ramdisk. Change-Id: Ib382328295fc2c1b9143171b1047304febadcaca --- CONTRIBUTING.rst | 50 ---- devstack/plugin.sh | 46 ++-- doc/source/http-api.rst | 16 -- doc/source/install.rst | 37 +-- ironic_inspector/plugins/standard.py | 118 ++++----- ironic_inspector/process.py | 6 +- ironic_inspector/test/base.py | 61 ++++- ironic_inspector/test/functional.py | 72 ------ .../test/unit/test_plugins_standard.py | 236 ++++++------------ ironic_inspector/test/unit/test_process.py | 60 +---- ironic_inspector/utils.py | 19 ++ .../no-old-ramdisk-095b05e1245131d8.yaml | 7 + 12 files changed, 226 insertions(+), 502 deletions(-) create mode 100644 releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index f7f7980..9255e3d 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -86,9 +86,6 @@ components. There is a plugin for installing **ironic-inspector** on DevStack. Example local.conf ------------------ -Using IPA -~~~~~~~~~ - :: [[local|localrc]] @@ -108,7 +105,6 @@ Using IPA IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True IRONIC_BUILD_DEPLOY_RAMDISK=False - IRONIC_INSPECTOR_RAMDISK_ELEMENT=ironic-agent IRONIC_INSPECTOR_BUILD_RAMDISK=False VIRT_DRIVER=ironic @@ -136,52 +132,6 @@ Notes * This configuration disables Heat and Cinder, adjust it if you need these services. -Using simple ramdisk -~~~~~~~~~~~~~~~~~~~~ - -.. note:: - This ramdisk is deprecated and should not be used. 
- -:: - - [[local|localrc]] - enable_service ironic ir-api ir-cond - disable_service n-net n-novnc - enable_service neutron q-svc q-agt q-dhcp q-l3 q-meta - enable_service s-proxy s-object s-container s-account - disable_service heat h-api h-api-cfn h-api-cw h-eng - disable_service cinder c-sch c-api c-vol - - enable_plugin ironic https://github.com/openstack/ironic - enable_plugin ironic-inspector https://github.com/openstack/ironic-inspector - - IRONIC_BAREMETAL_BASIC_OPS=True - IRONIC_VM_COUNT=2 - IRONIC_VM_SPECS_RAM=1024 - IRONIC_DEPLOY_FLAVOR="fedora deploy-ironic" - - IRONIC_INSPECTOR_RAMDISK_FLAVOR="fedora ironic-discoverd-ramdisk" - - VIRT_DRIVER=ironic - - LOGDAYS=1 - LOGFILE=~/logs/stack.sh.log - SCREEN_LOGDIR=~/logs/screen - - DEFAULT_INSTANCE_TYPE=baremetal - TEMPEST_ALLOW_TENANT_ISOLATION=False - -Notes ------ - -* Replace "fedora" with whatever you have - -* You need at least 1G of RAM for VMs, default value of 512 MB won't work - -* Before restarting stack.sh:: - - rm -rf /opt/stack/ironic-inspector - Test ---- diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 39d595e..4eaeaa4 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -18,8 +18,6 @@ IRONIC_INSPECTOR_URI="http://$IRONIC_INSPECTOR_HOST:$IRONIC_INSPECTOR_PORT" IRONIC_INSPECTOR_BUILD_RAMDISK=$(trueorfalse False IRONIC_INSPECTOR_BUILD_RAMDISK) IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe.vmlinuz} IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz} -IRONIC_INSPECTOR_RAMDISK_ELEMENT=${IRONIC_INSPECTOR_RAMDISK_ELEMENT:-ironic-discoverd-ramdisk} -IRONIC_INSPECTOR_RAMDISK_FLAVOR=${IRONIC_INSPECTOR_RAMDISK_FLAVOR:-fedora $IRONIC_INSPECTOR_RAMDISK_ELEMENT} IRONIC_INSPECTOR_COLLECTORS=${IRONIC_INSPECTOR_COLLECTORS:-default,logs} 
IRONIC_INSPECTOR_RAMDISK_LOGDIR=${IRONIC_INSPECTOR_RAMDISK_LOGDIR:-$IRONIC_INSPECTOR_DATA_DIR/ramdisk-logs} IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS=${IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS:-True} @@ -91,11 +89,6 @@ function stop_inspector_dhcp { screen -S $SCREEN_NAME -p ironic-inspector-dhcp -X kill } -function inspector_uses_ipa { - [[ $IRONIC_INSPECTOR_RAMDISK_ELEMENT = "ironic-agent" ]] || [[ $IRONIC_INSPECTOR_RAMDISK_FLAVOR =~ (ironic-agent$|^ironic-agent) ]] && return 0 - return 1 -} - ### Configuration function prepare_tftp { @@ -104,35 +97,24 @@ function prepare_tftp { IRONIC_INSPECTOR_INITRAMFS_PATH="$IRONIC_INSPECTOR_IMAGE_PATH.initramfs" IRONIC_INSPECTOR_CALLBACK_URI="$IRONIC_INSPECTOR_INTERNAL_URI/v1/continue" - if inspector_uses_ipa; then - IRONIC_INSPECTOR_KERNEL_CMDLINE="ipa-inspection-callback-url=$IRONIC_INSPECTOR_CALLBACK_URI systemd.journald.forward_to_console=yes" - IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE vga=normal console=tty0 console=ttyS0" - IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE ipa-inspection-collectors=$IRONIC_INSPECTOR_COLLECTORS" - IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE ipa-debug=1" - if [[ "$IRONIC_INSPECTOR_BUILD_RAMDISK" == "True" ]]; then - if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! -e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then - build_ipa_coreos_ramdisk "$IRONIC_INSPECTOR_KERNEL_PATH" "$IRONIC_INSPECTOR_INITRAMFS_PATH" - fi - else - # download the agent image tarball - if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! 
-e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then - if [ -e "$IRONIC_DEPLOY_KERNEL_PATH" -a -e "$IRONIC_DEPLOY_RAMDISK_PATH" ]; then - cp $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_INSPECTOR_KERNEL_PATH - cp $IRONIC_DEPLOY_RAMDISK_PATH $IRONIC_INSPECTOR_INITRAMFS_PATH - else - wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_INSPECTOR_KERNEL_PATH - wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_INSPECTOR_INITRAMFS_PATH - fi - fi + IRONIC_INSPECTOR_KERNEL_CMDLINE="ipa-inspection-callback-url=$IRONIC_INSPECTOR_CALLBACK_URI systemd.journald.forward_to_console=yes" + IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE vga=normal console=tty0 console=ttyS0" + IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE ipa-inspection-collectors=$IRONIC_INSPECTOR_COLLECTORS" + IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE ipa-debug=1" + if [[ "$IRONIC_INSPECTOR_BUILD_RAMDISK" == "True" ]]; then + if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! -e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then + build_ipa_coreos_ramdisk "$IRONIC_INSPECTOR_KERNEL_PATH" "$IRONIC_INSPECTOR_INITRAMFS_PATH" fi else - IRONIC_INSPECTOR_KERNEL_CMDLINE="discoverd_callback_url=$IRONIC_INSPECTOR_CALLBACK_URI inspector_callback_url=$IRONIC_INSPECTOR_CALLBACK_URI" + # download the agent image tarball if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! 
-e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then - if [[ $(type -P ramdisk-image-create) == "" ]]; then - pip_install diskimage_builder + if [ -e "$IRONIC_DEPLOY_KERNEL_PATH" -a -e "$IRONIC_DEPLOY_RAMDISK_PATH" ]; then + cp $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_INSPECTOR_KERNEL_PATH + cp $IRONIC_DEPLOY_RAMDISK_PATH $IRONIC_INSPECTOR_INITRAMFS_PATH + else + wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_INSPECTOR_KERNEL_PATH + wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_INSPECTOR_INITRAMFS_PATH fi - ramdisk-image-create $IRONIC_INSPECTOR_RAMDISK_FLAVOR \ - -o $IRONIC_INSPECTOR_IMAGE_PATH fi fi diff --git a/doc/source/http-api.rst b/doc/source/http-api.rst index 1cfa4a6..aeff7ae 100644 --- a/doc/source/http-api.rst +++ b/doc/source/http-api.rst @@ -217,22 +217,6 @@ Optionally the following keys might be provided: * ``logs`` base64-encoded logs from the ramdisk. -The following keys are supported for backward compatibility with the old -bash-based ramdisk, when ``inventory`` is not provided: - -* ``cpus`` number of CPU - -* ``cpu_arch`` architecture of the CPU - -* ``memory_mb`` RAM in MiB - -* ``local_gb`` hard drive size in GiB - -* ``ipmi_address`` IP address of BMC, may be missing on VM - -* ``block_devices`` block devices information for the ``raid_device`` plugin, - dictionary with one key: ``serials`` list of serial numbers of block devices. - .. note:: This list highly depends on enabled plugins, provided above are expected keys for the default set of plugins. See :ref:`plugins` diff --git a/doc/source/install.rst b/doc/source/install.rst index 53dc691..0ea3a03 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -132,9 +132,8 @@ As for PXE boot environment, you'll need: simultaneously cause conflicts - the same IP address is suggested to several nodes. -* You have to install and configure one of 2 available ramdisks: simple - bash-based (see `Using simple ramdisk`_) or more complex based on - ironic-python-agent_ (See `Using IPA`_). 
+* You have to install and configure the ramdisk to be run on target machines - + see `Configuring IPA`_. Here is *inspector.conf* you may end up with:: @@ -152,8 +151,8 @@ Here is *inspector.conf* you may end up with:: .. note:: Set ``debug = true`` if you want to see complete logs. -Using IPA -^^^^^^^^^ +Configuring IPA +^^^^^^^^^^^^^^^ ironic-python-agent_ is a ramdisk developed for Ironic. During the Liberty cycle support for **ironic-inspector** was added. This is the default ramdisk @@ -215,34 +214,6 @@ This ramdisk is pluggable: you can insert introspection plugins called .. _diskimage-builder: https://github.com/openstack/diskimage-builder .. _ironic-python-agent: https://github.com/openstack/ironic-python-agent -Using simple ramdisk -^^^^^^^^^^^^^^^^^^^^ - -This ramdisk is deprecated, its use is not recommended. - -* Build and put into your TFTP the kernel and ramdisk created using the - diskimage-builder_ `ironic-discoverd-ramdisk element`_:: - - ramdisk-image-create -o discovery fedora ironic-discoverd-ramdisk - - You need diskimage-builder_ 0.1.38 or newer to do it (using the latest one - is always advised). - -* Configure your ``$TFTPROOT/pxelinux.cfg/default`` with something like:: - - default introspect - - label introspect - kernel discovery.kernel - append initrd=discovery.initramfs discoverd_callback_url=http://{IP}:5050/v1/continue - - ipappend 3 - - Replace ``{IP}`` with IP of the machine (do not use loopback interface, it - will be accessed by ramdisk on a booting machine). - -.. 
_ironic-discoverd-ramdisk element: https://github.com/openstack/diskimage-builder/tree/master/elements/ironic-discoverd-ramdisk - Managing the **ironic-inspector** database ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/ironic_inspector/plugins/standard.py b/ironic_inspector/plugins/standard.py index 83601d7..8cea00f 100644 --- a/ironic_inspector/plugins/standard.py +++ b/ironic_inspector/plugins/standard.py @@ -46,13 +46,6 @@ class RootDiskSelectionHook(base.ProcessingHook): node_info=node_info, data=introspection_data) return - inventory = introspection_data.get('inventory') - if not inventory: - raise utils.Error( - _('Root device selection requires ironic-python-agent ' - 'as an inspection ramdisk'), - node_info=node_info, data=introspection_data) - if 'size' in hints: # Special case to match IPA behaviour try: @@ -62,12 +55,9 @@ class RootDiskSelectionHook(base.ProcessingHook): 'an integer, got %s') % hints['size'], node_info=node_info, data=introspection_data) - disks = inventory.get('disks', []) - if not disks: - raise utils.Error(_('No disks found'), - node_info=node_info, data=introspection_data) - - for disk in disks: + inventory = utils.get_inventory(introspection_data, + node_info=node_info) + for disk in inventory['disks']: properties = disk.copy() # Root device hints are in GiB, data from IPA is in bytes properties['size'] //= units.Gi @@ -100,7 +90,8 @@ class SchedulerHook(base.ProcessingHook): def before_update(self, introspection_data, node_info, **kwargs): """Update node with scheduler properties.""" - inventory = introspection_data.get('inventory') + inventory = utils.get_inventory(introspection_data, + node_info=node_info) errors = [] root_disk = introspection_data.get('root_disk') @@ -108,40 +99,25 @@ class SchedulerHook(base.ProcessingHook): introspection_data['local_gb'] = root_disk['size'] // units.Gi if CONF.processing.disk_partitioning_spacing: introspection_data['local_gb'] -= 1 - elif inventory: + else: errors.append(_('root 
disk is not supplied by the ramdisk and ' 'root_disk_selection hook is not enabled')) - if inventory: - try: - introspection_data['cpus'] = int(inventory['cpu']['count']) - introspection_data['cpu_arch'] = six.text_type( - inventory['cpu']['architecture']) - except (KeyError, ValueError, TypeError): - errors.append(_('malformed or missing CPU information: %s') % - inventory.get('cpu')) + try: + introspection_data['cpus'] = int(inventory['cpu']['count']) + introspection_data['cpu_arch'] = six.text_type( + inventory['cpu']['architecture']) + except (KeyError, ValueError, TypeError): + errors.append(_('malformed or missing CPU information: %s') % + inventory.get('cpu')) - try: - introspection_data['memory_mb'] = int( - inventory['memory']['physical_mb']) - except (KeyError, ValueError, TypeError): - errors.append(_('malformed or missing memory information: %s; ' - 'introspection requires physical memory size ' - 'from dmidecode') % - inventory.get('memory')) - else: - LOG.warning(_LW('No inventory provided: using old bash ramdisk ' - 'is deprecated, please switch to ' - 'ironic-python-agent'), - node_info=node_info, data=introspection_data) - - missing = [key for key in self.KEYS - if not introspection_data.get(key)] - if missing: - raise utils.Error( - _('The following required parameters are missing: %s') % - missing, - node_info=node_info, data=introspection_data) + try: + introspection_data['memory_mb'] = int( + inventory['memory']['physical_mb']) + except (KeyError, ValueError, TypeError): + errors.append(_('malformed or missing memory information: %s; ' + 'introspection requires physical memory size ' + 'from dmidecode') % inventory.get('memory')) if errors: raise utils.Error(_('The following problems encountered: %s') % @@ -184,42 +160,36 @@ class ValidateInterfacesHook(base.ProcessingHook): :return: dict interface name -> dict with keys 'mac' and 'ip' """ result = {} - inventory = data.get('inventory', {}) + inventory = utils.get_inventory(data) - if 
inventory: - for iface in inventory.get('interfaces', ()): - name = iface.get('name') - mac = iface.get('mac_address') - ip = iface.get('ipv4_address') + for iface in inventory['interfaces']: + name = iface.get('name') + mac = iface.get('mac_address') + ip = iface.get('ipv4_address') - if not name: - LOG.error(_LE('Malformed interface record: %s'), - iface, data=data) - continue + if not name: + LOG.error(_LE('Malformed interface record: %s'), + iface, data=data) + continue - if not mac: - LOG.debug('Skipping interface %s without link information', - name, data=data) - continue + if not mac: + LOG.debug('Skipping interface %s without link information', + name, data=data) + continue - if not utils.is_valid_mac(mac): - LOG.warning(_LW('MAC %(mac)s for interface %(name)s is ' - 'not valid, skipping'), - {'mac': mac, 'name': name}, - data=data) - continue + if not utils.is_valid_mac(mac): + LOG.warning(_LW('MAC %(mac)s for interface %(name)s is ' + 'not valid, skipping'), + {'mac': mac, 'name': name}, + data=data) + continue - mac = mac.lower() + mac = mac.lower() - LOG.debug('Found interface %(name)s with MAC "%(mac)s" and ' - 'IP address "%(ip)s"', - {'name': name, 'mac': mac, 'ip': ip}, data=data) - result[name] = {'ip': ip, 'mac': mac} - else: - LOG.warning(_LW('No inventory provided: using old bash ramdisk ' - 'is deprecated, please switch to ' - 'ironic-python-agent'), data=data) - result = data.get('interfaces') + LOG.debug('Found interface %(name)s with MAC "%(mac)s" and ' + 'IP address "%(ip)s"', + {'name': name, 'mac': mac, 'ip': ip}, data=data) + result[name] = {'ip': ip, 'mac': mac} return result diff --git a/ironic_inspector/process.py b/ironic_inspector/process.py index 0cf267b..325ed82 100644 --- a/ironic_inspector/process.py +++ b/ironic_inspector/process.py @@ -286,10 +286,10 @@ def _finish_set_ipmi_credentials(ironic, node, node_info, introspection_data, 'value': new_username}, {'op': 'add', 'path': '/driver_info/ipmi_password', 'value': 
new_password}] - if (not ir_utils.get_ipmi_address(node) and - introspection_data.get('ipmi_address')): + new_ipmi_address = utils.get_ipmi_address_from_data(introspection_data) + if not ir_utils.get_ipmi_address(node) and new_ipmi_address: patch.append({'op': 'add', 'path': '/driver_info/ipmi_address', - 'value': introspection_data['ipmi_address']}) + 'value': new_ipmi_address}) node_info.patch(patch) for attempt in range(_CREDENTIALS_WAIT_RETRIES): diff --git a/ironic_inspector/test/base.py b/ironic_inspector/test/base.py index 04dc031..9a6bf7a 100644 --- a/ironic_inspector/test/base.py +++ b/ironic_inspector/test/base.py @@ -19,6 +19,7 @@ from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_log import log +from oslo_utils import units from oslo_utils import uuidutils from ironic_inspector.common import i18n @@ -78,12 +79,66 @@ class BaseTest(fixtures.TestWithFixtures): self.assertPatchEqual(actual, expected) -class NodeTest(BaseTest): +class InventoryTest(BaseTest): + def setUp(self): + super(InventoryTest, self).setUp() + # Prepare some realistic inventory + # https://github.com/openstack/ironic-inspector/blob/master/HTTP-API.rst # noqa + self.bmc_address = '1.2.3.4' + self.macs = ['11:22:33:44:55:66', '66:55:44:33:22:11'] + self.ips = ['1.2.1.2', '1.2.1.1'] + self.inactive_mac = '12:12:21:12:21:12' + self.pxe_mac = self.macs[0] + self.all_macs = self.macs + [self.inactive_mac] + self.pxe_iface_name = 'eth1' + self.data = { + 'boot_interface': '01-' + self.pxe_mac.replace(':', '-'), + 'inventory': { + 'interfaces': [ + {'name': 'eth1', 'mac_address': self.macs[0], + 'ipv4_address': self.ips[0]}, + {'name': 'eth2', 'mac_address': self.inactive_mac}, + {'name': 'eth3', 'mac_address': self.macs[1], + 'ipv4_address': self.ips[1]}, + ], + 'disks': [ + {'name': '/dev/sda', 'model': 'Big Data Disk', + 'size': 1000 * units.Gi}, + {'name': '/dev/sdb', 'model': 'Small OS Disk', + 'size': 20 * 
units.Gi}, + ], + 'cpu': { + 'count': 4, + 'architecture': 'x86_64' + }, + 'memory': { + 'physical_mb': 12288 + }, + 'bmc_address': self.bmc_address + }, + 'root_disk': {'name': '/dev/sda', 'model': 'Big Data Disk', + 'size': 1000 * units.Gi, + 'wwn': None}, + } + self.inventory = self.data['inventory'] + self.all_interfaces = { + 'eth1': {'mac': self.macs[0], 'ip': self.ips[0]}, + 'eth2': {'mac': self.inactive_mac, 'ip': None}, + 'eth3': {'mac': self.macs[1], 'ip': self.ips[1]} + } + self.active_interfaces = { + 'eth1': {'mac': self.macs[0], 'ip': self.ips[0]}, + 'eth3': {'mac': self.macs[1], 'ip': self.ips[1]} + } + self.pxe_interfaces = { + self.pxe_iface_name: self.all_interfaces[self.pxe_iface_name] + } + + +class NodeTest(InventoryTest): def setUp(self): super(NodeTest, self).setUp() self.uuid = uuidutils.generate_uuid() - self.bmc_address = '1.2.3.4' - self.macs = ['11:22:33:44:55:66', '66:55:44:33:22:11'] fake_node = { 'driver': 'pxe_ipmitool', 'driver_info': {'ipmi_address': self.bmc_address}, diff --git a/ironic_inspector/test/functional.py b/ironic_inspector/test/functional.py index 88241e0..404799e 100644 --- a/ironic_inspector/test/functional.py +++ b/ironic_inspector/test/functional.py @@ -24,7 +24,6 @@ import unittest import mock from oslo_config import cfg -from oslo_utils import units import requests from ironic_inspector.common import ironic as ir_utils @@ -70,62 +69,12 @@ class Base(base.NodeTest): self.cli.node.update.return_value = self.node self.cli.node.list.return_value = [self.node] - # https://github.com/openstack/ironic-inspector/blob/master/HTTP-API.rst # noqa - self.data = { - 'boot_interface': '01-' + self.macs[0].replace(':', '-'), - 'inventory': { - 'interfaces': [ - {'name': 'eth1', 'mac_address': self.macs[0], - 'ipv4_address': '1.2.1.2'}, - {'name': 'eth2', 'mac_address': '12:12:21:12:21:12'}, - {'name': 'eth3', 'mac_address': self.macs[1], - 'ipv4_address': '1.2.1.1'}, - ], - 'disks': [ - {'name': '/dev/sda', 'model': 'Big Data 
Disk', - 'size': 1000 * units.Gi}, - {'name': '/dev/sdb', 'model': 'Small OS Disk', - 'size': 20 * units.Gi}, - ], - 'cpu': { - 'count': 4, - 'architecture': 'x86_64' - }, - 'memory': { - 'physical_mb': 12288 - }, - 'bmc_address': self.bmc_address - }, - 'root_disk': {'name': '/dev/sda', 'model': 'Big Data Disk', - 'size': 1000 * units.Gi, - 'wwn': None}, - } - self.data_old_ramdisk = { - 'cpus': 4, - 'cpu_arch': 'x86_64', - 'memory_mb': 12288, - 'local_gb': 464, - 'interfaces': { - 'eth1': {'mac': self.macs[0], 'ip': '1.2.1.2'}, - 'eth2': {'mac': '12:12:21:12:21:12'}, - 'eth3': {'mac': self.macs[1], 'ip': '1.2.1.1'}, - }, - 'boot_interface': '01-' + self.macs[0].replace(':', '-'), - 'ipmi_address': self.bmc_address, - } - self.patch = [ {'op': 'add', 'path': '/properties/cpus', 'value': '4'}, {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'}, {'path': '/properties/local_gb', 'value': '999', 'op': 'add'} ] - self.patch_old_ramdisk = [ - {'op': 'add', 'path': '/properties/cpus', 'value': '4'}, - {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'}, - {'path': '/properties/local_gb', 'value': '464', 'op': 'add'} - ] self.patch_root_hints = [ {'op': 'add', 'path': '/properties/cpus', 'value': '4'}, {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, @@ -211,27 +160,6 @@ class Test(Base): status = self.call_get_status(self.uuid) self.assertEqual({'finished': True, 'error': None}, status) - def test_old_ramdisk(self): - self.call_introspect(self.uuid) - eventlet.greenthread.sleep(DEFAULT_SLEEP) - self.cli.node.set_power_state.assert_called_once_with(self.uuid, - 'reboot') - - status = self.call_get_status(self.uuid) - self.assertEqual({'finished': False, 'error': None}, status) - - res = self.call_continue(self.data_old_ramdisk) - self.assertEqual({'uuid': self.uuid}, res) - 
eventlet.greenthread.sleep(DEFAULT_SLEEP) - - self.assertCalledWithPatch(self.patch_old_ramdisk, - self.cli.node.update) - self.cli.port.create.assert_called_once_with( - node_uuid=self.uuid, address='11:22:33:44:55:66') - - status = self.call_get_status(self.uuid) - self.assertEqual({'finished': True, 'error': None}, status) - def test_setup_ipmi(self): patch_credentials = [ {'op': 'add', 'path': '/driver_info/ipmi_username', diff --git a/ironic_inspector/test/unit/test_plugins_standard.py b/ironic_inspector/test/unit/test_plugins_standard.py index b5f76b3..5cc26cc 100644 --- a/ironic_inspector/test/unit/test_plugins_standard.py +++ b/ironic_inspector/test/unit/test_plugins_standard.py @@ -29,22 +29,6 @@ class TestSchedulerHook(test_base.NodeTest): def setUp(self): super(TestSchedulerHook, self).setUp() self.hook = std_plugins.SchedulerHook() - self.data = { - 'inventory': { - 'cpu': {'count': 2, 'architecture': 'x86_64'}, - 'memory': {'physical_mb': 1024}, - }, - 'root_disk': { - 'name': '/dev/sda', - 'size': 21 * units.Gi - } - } - self.old_data = { - 'local_gb': 20, - 'memory_mb': 1024, - 'cpus': 2, - 'cpu_arch': 'x86_64' - } self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, node=self.node) @@ -53,43 +37,24 @@ class TestSchedulerHook(test_base.NodeTest): ext = base.processing_hooks_manager()['scheduler'] self.assertIsInstance(ext.obj, std_plugins.SchedulerHook) - def test_compat_missing(self): - for key in self.old_data: - new_data = self.old_data.copy() - del new_data[key] - self.assertRaisesRegexp(utils.Error, key, - self.hook.before_update, new_data, - self.node_info) - def test_no_root_disk(self): - self.assertRaisesRegexp(utils.Error, 'root disk is not supplied', - self.hook.before_update, - {'inventory': {'disks': []}}, self.node_info) + del self.inventory['disks'] + self.assertRaisesRegexp(utils.Error, 'disks key is missing or empty', + self.hook.before_update, self.data, + self.node_info) @mock.patch.object(node_cache.NodeInfo, 'patch') 
def test_ok(self, mock_patch): patch = [ - {'path': '/properties/cpus', 'value': '2', 'op': 'add'}, + {'path': '/properties/cpus', 'value': '4', 'op': 'add'}, {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '20', 'op': 'add'} + {'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'}, + {'path': '/properties/local_gb', 'value': '999', 'op': 'add'} ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patch, mock_patch) - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_compat_ok(self, mock_patch): - patch = [ - {'path': '/properties/cpus', 'value': '2', 'op': 'add'}, - {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '20', 'op': 'add'} - ] - - self.hook.before_update(self.old_data, self.node_info) - self.assertCalledWithPatch(patch, mock_patch) - @mock.patch.object(node_cache.NodeInfo, 'patch') def test_no_overwrite(self, mock_patch): CONF.set_override('overwrite_existing', False, 'processing') @@ -98,36 +63,21 @@ class TestSchedulerHook(test_base.NodeTest): 'cpu_arch': 'i686' } patch = [ - {'path': '/properties/cpus', 'value': '2', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '20', 'op': 'add'} + {'path': '/properties/cpus', 'value': '4', 'op': 'add'}, + {'path': '/properties/local_gb', 'value': '999', 'op': 'add'} ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patch, mock_patch) - @mock.patch.object(node_cache.NodeInfo, 'patch') - def test_compat_root_disk(self, mock_patch): - self.old_data['root_disk'] = {'name': '/dev/sda', - 'size': 42 * units.Gi} - patch = [ - {'path': '/properties/cpus', 'value': '2', 'op': 'add'}, - {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'path': '/properties/memory_mb', 
'value': '1024', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '41', 'op': 'add'} - ] - - self.hook.before_update(self.old_data, self.node_info) - self.assertCalledWithPatch(patch, mock_patch) - @mock.patch.object(node_cache.NodeInfo, 'patch') def test_root_disk_no_spacing(self, mock_patch): CONF.set_override('disk_partitioning_spacing', False, 'processing') - self.data['root_disk'] = {'name': '/dev/sda', 'size': 42 * units.Gi} patch = [ - {'path': '/properties/cpus', 'value': '2', 'op': 'add'}, + {'path': '/properties/cpus', 'value': '4', 'op': 'add'}, {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '42', 'op': 'add'} + {'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'}, + {'path': '/properties/local_gb', 'value': '1000', 'op': 'add'} ] self.hook.before_update(self.data, self.node_info) @@ -138,38 +88,9 @@ class TestValidateInterfacesHook(test_base.NodeTest): def setUp(self): super(TestValidateInterfacesHook, self).setUp() self.hook = std_plugins.ValidateInterfacesHook() - self.data = { - 'inventory': { - 'interfaces': [ - {'name': 'em1', 'mac_address': '11:11:11:11:11:11', - 'ipv4_address': '1.1.1.1'}, - {'name': 'em2', 'mac_address': '22:22:22:22:22:22', - 'ipv4_address': '2.2.2.2'}, - {'name': 'em3', 'mac_address': '33:33:33:33:33:33', - 'ipv4_address': None}, - ], - }, - 'boot_interface': '01-22-22-22-22-22-22' - } - self.old_data = { - 'interfaces': { - 'em1': {'mac': '11:11:11:11:11:11', 'ip': '1.1.1.1'}, - 'em2': {'mac': '22:22:22:22:22:22', 'ip': '2.2.2.2'}, - 'em3': {'mac': '33:33:33:33:33:33'} - }, - 'boot_interface': '01-22-22-22-22-22-22', - } - self.orig_interfaces = self.old_data['interfaces'].copy() - self.orig_interfaces['em3']['ip'] = None - self.pxe_interface = self.old_data['interfaces']['em2'] - self.active_interfaces = { - 'em1': {'mac': '11:11:11:11:11:11', 'ip': '1.1.1.1'}, - 'em2': 
{'mac': '22:22:22:22:22:22', 'ip': '2.2.2.2'}, - } - self.existing_ports = [mock.Mock(spec=['address', 'uuid'], address=a) - for a in ('11:11:11:11:11:11', + for a in (self.macs[1], '44:44:44:44:44:44')] self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, node=self.node, @@ -190,29 +111,31 @@ class TestValidateInterfacesHook(test_base.NodeTest): self.assertRaises(SystemExit, std_plugins.ValidateInterfacesHook) def test_no_interfaces(self): - self.assertRaisesRegexp(utils.Error, 'No interfaces', + self.assertRaisesRegexp(utils.Error, + 'Hardware inventory is empty or missing', self.hook.before_processing, {}) - self.assertRaisesRegexp(utils.Error, 'No interfaces', + self.assertRaisesRegexp(utils.Error, + 'Hardware inventory is empty or missing', self.hook.before_processing, {'inventory': {}}) - self.assertRaisesRegexp(utils.Error, 'No interfaces', - self.hook.before_processing, {'inventory': { - 'interfaces': [] - }}) + del self.inventory['interfaces'] + self.assertRaisesRegexp(utils.Error, + 'interfaces key is missing or empty', + self.hook.before_processing, self.data) def test_only_pxe(self): self.hook.before_processing(self.data) - self.assertEqual({'em2': self.pxe_interface}, self.data['interfaces']) - self.assertEqual([self.pxe_interface['mac']], self.data['macs']) - self.assertEqual(self.orig_interfaces, self.data['all_interfaces']) + self.assertEqual(self.pxe_interfaces, self.data['interfaces']) + self.assertEqual([self.pxe_mac], self.data['macs']) + self.assertEqual(self.all_interfaces, self.data['all_interfaces']) def test_only_pxe_mac_format(self): - self.data['boot_interface'] = '22:22:22:22:22:22' + self.data['boot_interface'] = self.pxe_mac self.hook.before_processing(self.data) - self.assertEqual({'em2': self.pxe_interface}, self.data['interfaces']) - self.assertEqual([self.pxe_interface['mac']], self.data['macs']) - self.assertEqual(self.orig_interfaces, self.data['all_interfaces']) + self.assertEqual(self.pxe_interfaces, 
self.data['interfaces']) + self.assertEqual([self.pxe_mac], self.data['macs']) + self.assertEqual(self.all_interfaces, self.data['all_interfaces']) def test_only_pxe_not_found(self): self.data['boot_interface'] = 'aa:bb:cc:dd:ee:ff' @@ -227,7 +150,7 @@ class TestValidateInterfacesHook(test_base.NodeTest): self.assertEqual(sorted(i['mac'] for i in self.active_interfaces.values()), sorted(self.data['macs'])) - self.assertEqual(self.orig_interfaces, self.data['all_interfaces']) + self.assertEqual(self.all_interfaces, self.data['all_interfaces']) def test_only_active(self): CONF.set_override('add_ports', 'active', 'processing') @@ -237,52 +160,43 @@ class TestValidateInterfacesHook(test_base.NodeTest): self.assertEqual(sorted(i['mac'] for i in self.active_interfaces.values()), sorted(self.data['macs'])) - self.assertEqual(self.orig_interfaces, self.data['all_interfaces']) + self.assertEqual(self.all_interfaces, self.data['all_interfaces']) def test_all(self): CONF.set_override('add_ports', 'all', 'processing') self.hook.before_processing(self.data) - self.assertEqual(self.orig_interfaces, self.data['interfaces']) + self.assertEqual(self.all_interfaces, self.data['interfaces']) self.assertEqual(sorted(i['mac'] for i in - self.orig_interfaces.values()), + self.all_interfaces.values()), sorted(self.data['macs'])) - self.assertEqual(self.orig_interfaces, self.data['all_interfaces']) + self.assertEqual(self.all_interfaces, self.data['all_interfaces']) def test_malformed_interfaces(self): - self.data = { - 'inventory': { - 'interfaces': [ - # no name - {'mac_address': '11:11:11:11:11:11', - 'ipv4_address': '1.1.1.1'}, - # empty - {}, - ], - }, - } + self.inventory['interfaces'] = [ + # no name + {'mac_address': '11:11:11:11:11:11', 'ipv4_address': '1.1.1.1'}, + # empty + {}, + ] self.assertRaisesRegexp(utils.Error, 'No interfaces supplied', self.hook.before_processing, self.data) def test_skipped_interfaces(self): CONF.set_override('add_ports', 'all', 'processing') - 
self.data = { - 'inventory': { - 'interfaces': [ - # local interface (by name) - {'name': 'lo', 'mac_address': '11:11:11:11:11:11', - 'ipv4_address': '1.1.1.1'}, - # local interface (by IP address) - {'name': 'em1', 'mac_address': '22:22:22:22:22:22', - 'ipv4_address': '127.0.0.1'}, - # no MAC provided - {'name': 'em3', 'ipv4_address': '2.2.2.2'}, - # malformed MAC provided - {'name': 'em4', 'mac_address': 'foobar', - 'ipv4_address': '2.2.2.2'}, - ], - }, - } + self.inventory['interfaces'] = [ + # local interface (by name) + {'name': 'lo', 'mac_address': '11:11:11:11:11:11', + 'ipv4_address': '1.1.1.1'}, + # local interface (by IP address) + {'name': 'em1', 'mac_address': '22:22:22:22:22:22', + 'ipv4_address': '127.0.0.1'}, + # no MAC provided + {'name': 'em3', 'ipv4_address': '2.2.2.2'}, + # malformed MAC provided + {'name': 'em4', 'mac_address': 'foobar', + 'ipv4_address': '2.2.2.2'}, + ] self.assertRaisesRegexp(utils.Error, 'No suitable interfaces found', self.hook.before_processing, self.data) @@ -294,7 +208,7 @@ class TestValidateInterfacesHook(test_base.NodeTest): @mock.patch.object(node_cache.NodeInfo, 'delete_port') def test_keep_present(self, mock_delete_port): CONF.set_override('keep_ports', 'present', 'processing') - self.data['all_interfaces'] = self.orig_interfaces + self.data['all_interfaces'] = self.all_interfaces self.hook.before_update(self.data, self.node_info) mock_delete_port.assert_called_once_with(self.existing_ports[1]) @@ -302,7 +216,7 @@ class TestValidateInterfacesHook(test_base.NodeTest): @mock.patch.object(node_cache.NodeInfo, 'delete_port') def test_keep_added(self, mock_delete_port): CONF.set_override('keep_ports', 'added', 'processing') - self.data['macs'] = [self.pxe_interface['mac']] + self.data['macs'] = [self.pxe_mac] self.hook.before_update(self.data, self.node_info) mock_delete_port.assert_any_call(self.existing_ports[0]) @@ -313,28 +227,21 @@ class TestRootDiskSelection(test_base.NodeTest): def setUp(self): 
super(TestRootDiskSelection, self).setUp() self.hook = std_plugins.RootDiskSelectionHook() - self.data = { - 'inventory': { - 'disks': [ - {'model': 'Model 1', 'size': 20 * units.Gi, - 'name': '/dev/sdb'}, - {'model': 'Model 2', 'size': 5 * units.Gi, - 'name': '/dev/sda'}, - {'model': 'Model 3', 'size': 10 * units.Gi, - 'name': '/dev/sdc'}, - {'model': 'Model 4', 'size': 4 * units.Gi, - 'name': '/dev/sdd'}, - {'model': 'Too Small', 'size': 1 * units.Gi, - 'name': '/dev/sde'}, - ] - } - } - self.matched = self.data['inventory']['disks'][2].copy() + self.inventory['disks'] = [ + {'model': 'Model 1', 'size': 20 * units.Gi, 'name': '/dev/sdb'}, + {'model': 'Model 2', 'size': 5 * units.Gi, 'name': '/dev/sda'}, + {'model': 'Model 3', 'size': 10 * units.Gi, 'name': '/dev/sdc'}, + {'model': 'Model 4', 'size': 4 * units.Gi, 'name': '/dev/sdd'}, + {'model': 'Too Small', 'size': 1 * units.Gi, 'name': '/dev/sde'}, + ] + self.matched = self.inventory['disks'][2].copy() self.node_info = mock.Mock(spec=node_cache.NodeInfo, uuid=self.uuid, **{'node.return_value': self.node}) def test_no_hints(self): + del self.data['root_disk'] + self.hook.before_update(self.data, self.node_info) self.assertNotIn('local_gb', self.data) @@ -343,9 +250,10 @@ class TestRootDiskSelection(test_base.NodeTest): def test_no_inventory(self): self.node.properties['root_device'] = {'model': 'foo'} del self.data['inventory'] + del self.data['root_disk'] self.assertRaisesRegexp(utils.Error, - 'requires ironic-python-agent', + 'Hardware inventory is empty or missing', self.hook.before_update, self.data, self.node_info) @@ -354,10 +262,10 @@ class TestRootDiskSelection(test_base.NodeTest): def test_no_disks(self): self.node.properties['root_device'] = {'size': 10} - self.data['inventory']['disks'] = [] + self.inventory['disks'] = [] self.assertRaisesRegexp(utils.Error, - 'No disks found', + 'disks key is missing or empty', self.hook.before_update, self.data, self.node_info) @@ -379,6 +287,7 @@ class 
TestRootDiskSelection(test_base.NodeTest): def test_one_fails(self): self.node.properties['root_device'] = {'size': 10, 'model': 'Model 42'} + del self.data['root_disk'] self.assertRaisesRegexp(utils.Error, 'No disks satisfied root device hints', @@ -402,15 +311,12 @@ class TestRootDiskSelection(test_base.NodeTest): self.data, self.node_info) -class TestRamdiskError(test_base.BaseTest): +class TestRamdiskError(test_base.InventoryTest): def setUp(self): super(TestRamdiskError, self).setUp() self.msg = 'BOOM' self.bmc_address = '1.2.3.4' - self.data = { - 'error': self.msg, - 'ipmi_address': self.bmc_address, - } + self.data['error'] = self.msg def test_no_logs(self): self.assertRaisesRegexp(utils.Error, diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py index 0e948ae..4653ca7 100644 --- a/ironic_inspector/test/unit/test_process.py +++ b/ironic_inspector/test/unit/test_process.py @@ -43,24 +43,9 @@ class BaseTest(test_base.NodeTest): def setUp(self): super(BaseTest, self).setUp() self.started_at = time.time() - self.pxe_mac = self.macs[1] - self.data = { - 'ipmi_address': self.bmc_address, - 'cpus': 2, - 'cpu_arch': 'x86_64', - 'memory_mb': 1024, - 'local_gb': 20, - 'interfaces': { - 'em1': {'mac': self.macs[0], 'ip': '1.2.0.1'}, - 'em2': {'mac': self.macs[1], 'ip': '1.2.0.2'}, - 'em3': {'mac': 'DE:AD:BE:EF:DE:AD'}, - }, - 'boot_interface': '01-' + self.pxe_mac.replace(':', '-'), - } self.all_ports = [mock.Mock(uuid=uuidutils.generate_uuid(), address=mac) for mac in self.macs] self.ports = [self.all_ports[1]] - self.all_macs = self.macs + ['DE:AD:BE:EF:DE:AD'] self.fake_result_json = 'node json' self.cli_fixture = self.useFixture( @@ -94,10 +79,6 @@ class TestProcess(BaseProcessTest): self.assertEqual(self.fake_result_json, res) - # Only boot interface is added by default - self.assertEqual(['em2'], sorted(self.data['interfaces'])) - self.assertEqual([self.pxe_mac], self.data['macs']) - 
self.find_mock.assert_called_once_with(bmc_address=self.bmc_address, mac=mock.ANY) actual_macs = self.find_mock.call_args[1]['mac'] @@ -107,7 +88,7 @@ class TestProcess(BaseProcessTest): self.node, self.data, self.node_info) def test_no_ipmi(self): - del self.data['ipmi_address'] + del self.inventory['bmc_address'] process.process(self.data) self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY) @@ -350,15 +331,8 @@ class TestProcessNode(BaseTest): 'processing') self.validate_attempts = 5 self.data['macs'] = self.macs # validate_interfaces hook - self.data['all_interfaces'] = self.data['interfaces'] self.ports = self.all_ports - self.patch_props = [ - {'path': '/properties/cpus', 'value': '2', 'op': 'add'}, - {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, - {'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'}, - {'path': '/properties/local_gb', 'value': '20', 'op': 'add'} - ] # scheduler hook self.new_creds = ('user', 'password') self.patch_credentials = [ {'op': 'add', 'path': '/driver_info/ipmi_username', @@ -406,24 +380,12 @@ class TestProcessNode(BaseTest): address=self.macs[0]) self.cli.port.create.assert_any_call(node_uuid=self.uuid, address=self.macs[1]) - self.assertCalledWithPatch(self.patch_props, self.cli.node.update) self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') self.assertFalse(self.cli.node.validate.called) post_hook_mock.assert_called_once_with(self.data, self.node_info) finished_mock.assert_called_once_with(mock.ANY) - def test_overwrite_disabled(self): - CONF.set_override('overwrite_existing', False, 'processing') - patch = [ - {'op': 'add', 'path': '/properties/cpus', 'value': '2'}, - {'op': 'add', 'path': '/properties/memory_mb', 'value': '1024'}, - ] - - process._process_node(self.node, self.data, self.node_info) - - self.assertCalledWithPatch(patch, self.cli.node.update) - def test_port_failed(self): self.cli.port.create.side_effect = ( [exceptions.Conflict()] + 
self.ports[1:]) @@ -434,7 +396,6 @@ class TestProcessNode(BaseTest): address=self.macs[0]) self.cli.port.create.assert_any_call(node_uuid=self.uuid, address=self.macs[1]) - self.assertCalledWithPatch(self.patch_props, self.cli.node.update) def test_set_ipmi_credentials(self): self.node_info.set_option('new_ipmi_credentials', self.new_creds) @@ -486,7 +447,6 @@ class TestProcessNode(BaseTest): process._process_node(self.node, self.data, self.node_info) self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') - self.assertCalledWithPatch(self.patch_props, self.cli.node.update) finished_mock.assert_called_once_with( mock.ANY, error='Failed to power off node %s, check its power ' @@ -517,22 +477,19 @@ class TestProcessNode(BaseTest): swift_conn.create_object.assert_called_once_with(name, mock.ANY) self.assertEqual(expected, json.loads(swift_conn.create_object.call_args[0][1])) - self.assertCalledWithPatch(self.patch_props, self.cli.node.update) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) def test_store_data_no_logs(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') swift_conn = swift_mock.return_value name = 'inspector_data-%s' % self.uuid - expected = self.data.copy() self.data['logs'] = 'something' process._process_node(self.node, self.data, self.node_info) swift_conn.create_object.assert_called_once_with(name, mock.ANY) - self.assertEqual(expected, + self.assertNotIn('logs', json.loads(swift_conn.create_object.call_args[0][1])) - self.assertCalledWithPatch(self.patch_props, self.cli.node.update) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) def test_store_data_location(self, swift_mock): @@ -541,11 +498,8 @@ class TestProcessNode(BaseTest): 'processing') swift_conn = swift_mock.return_value name = 'inspector_data-%s' % self.uuid - self.patch_props.append( - {'path': '/extra/inspector_data_object', - 'value': name, - 'op': 'add'} - ) + patch = [{'path': '/extra/inspector_data_object', + 'value': 
name, 'op': 'add'}] expected = self.data process._process_node(self.node, self.data, self.node_info) @@ -553,7 +507,7 @@ class TestProcessNode(BaseTest): swift_conn.create_object.assert_called_once_with(name, mock.ANY) self.assertEqual(expected, json.loads(swift_conn.create_object.call_args[0][1])) - self.assertCalledWithPatch(self.patch_props, self.cli.node.update) + self.cli.node.update.assert_any_call(self.uuid, patch) @mock.patch.object(process, '_reapply', autospec=True) @@ -614,7 +568,6 @@ class TestReapplyNode(BaseTest): 'processing') CONF.set_override('store_data', 'swift', 'processing') self.data['macs'] = self.macs - self.data['all_interfaces'] = self.data['interfaces'] self.ports = self.all_ports self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=self.started_at, @@ -667,8 +620,7 @@ class TestReapplyNode(BaseTest): finished_mock.assert_called_once_with(self.node_info) # asserting validate_interfaces was called - self.assertEqual({'em2': self.data['interfaces']['em2']}, - swifted_data['interfaces']) + self.assertEqual(self.pxe_interfaces, swifted_data['interfaces']) self.assertEqual([self.pxe_mac], swifted_data['macs']) # assert ports were created with whatever there was left diff --git a/ironic_inspector/utils.py b/ironic_inspector/utils.py index bf9da0f..ebdba76 100644 --- a/ironic_inspector/utils.py +++ b/ironic_inspector/utils.py @@ -205,3 +205,22 @@ def get_valid_macs(data): return [m['mac'] for m in data.get('all_interfaces', {}).values() if m.get('mac')] + + +_INVENTORY_MANDATORY_KEYS = ('disks', 'memory', 'cpu', 'interfaces') + + +def get_inventory(data, node_info=None): + """Get and validate the hardware inventory from introspection data.""" + inventory = data.get('inventory') + # TODO(dtantsur): validate inventory using JSON schema + if not inventory: + raise Error(_('Hardware inventory is empty or missing'), + data=data, node_info=node_info) + + for key in _INVENTORY_MANDATORY_KEYS: + if not inventory.get(key): + raise 
Error(_('Invalid hardware inventory: %s key is missing ' + 'or empty') % key, data=data, node_info=node_info) + + return inventory diff --git a/releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml b/releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml new file mode 100644 index 0000000..e139c27 --- /dev/null +++ b/releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml @@ -0,0 +1,7 @@ +--- +prelude: > + Starting with this release only ironic-python-agent (IPA) is supported + as an introspection ramdisk. +upgrade: + - Support for the old bash-based ramdisk was removed. Please switch to IPA + before upgrading. From b2c276714762c568aa879f938db2f76e665f2603 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Mon, 6 Jun 2016 09:40:27 +0200 Subject: [PATCH 41/83] Add a plugin for capabilities detection Supports boot_mode and CPU flags. Change-Id: Idee87a9fa0c89e51993735e69906f5688bfe23aa Closes-Bug: #1571580 --- config-generator.conf | 1 + doc/source/usage.rst | 52 ++++++++- example.conf | 70 ++++++++---- ironic_inspector/conf.py | 2 +- ironic_inspector/plugins/capabilities.py | 101 ++++++++++++++++++ .../test/unit/test_plugins_capabilities.py | 77 +++++++++++++ .../notes/capabilities-15cc2268d661f0a0.yaml | 4 + setup.cfg | 2 + 8 files changed, 282 insertions(+), 27 deletions(-) create mode 100644 ironic_inspector/plugins/capabilities.py create mode 100644 ironic_inspector/test/unit/test_plugins_capabilities.py create mode 100644 releasenotes/notes/capabilities-15cc2268d661f0a0.yaml diff --git a/config-generator.conf b/config-generator.conf index 054f5f6..f3c5a58 100644 --- a/config-generator.conf +++ b/config-generator.conf @@ -3,6 +3,7 @@ output_file = example.conf namespace = ironic_inspector namespace = ironic_inspector.common.ironic namespace = ironic_inspector.common.swift +namespace = ironic_inspector.plugins.capabilities namespace = ironic_inspector.plugins.discovery namespace = keystonemiddleware.auth_token namespace = oslo.db diff --git 
a/doc/source/usage.rst b/doc/source/usage.rst index 23f49d6..2ee84f5 100644 --- a/doc/source/usage.rst +++ b/doc/source/usage.rst @@ -184,15 +184,22 @@ introspection data. Note that order does matter in this option. These are plugins that are enabled by default and should not be disabled, unless you understand what you're doing: -``ramdisk_error`` - reports error, if ``error`` field is set by the ramdisk, also optionally - stores logs from ``logs`` field, see :ref:`api` for details. ``scheduler`` validates and updates basic hardware scheduling properties: CPU number and architecture, memory and disk size. ``validate_interfaces`` validates network interfaces information. +The following plugins are enabled by default, but can be disabled if not +needed: + +``ramdisk_error`` + reports error, if ``error`` field is set by the ramdisk, also optionally + stores logs from ``logs`` field, see :ref:`api` for details. +``capabilities`` + detect node capabilities: CPU, boot mode, etc. See `Capabilities + Detection`_ for more details. + Here are some plugins that can be additionally enabled: ``example`` @@ -330,3 +337,42 @@ Limitations: * the unprocessed data is never cleaned from the store * check for stored data presence is performed in background; missing data situation still results in a ``202`` response + +Capabilities Detection +~~~~~~~~~~~~~~~~~~~~~~ + +Starting with the Newton release, **Ironic Inspector** can optionally discover +several node capabilities. A recent (Newton or newer) IPA image is required +for it to work. + +Boot mode +^^^^^^^^^ + +The current boot mode (BIOS or UEFI) can be detected and recorded as +``boot_mode`` capability in Ironic. It will make some drivers to change their +behaviour to account for this capability. Set the ``[capabilities]boot_mode`` +configuration option to ``True`` to enable. 
+ +CPU capabilities +^^^^^^^^^^^^^^^^ + +Several CPU flags are detected by default and recorded as following +capabilities: + +* ``cpu_aes`` AES instructions. + +* ``cpu_vt`` virtualization support. + +* ``cpu_txt`` TXT support. + +* ``cpu_hugepages`` huge pages (2 MiB) support. + +* ``cpu_hugepages_1g`` huge pages (1 GiB) support. + +It is possible to define your own rules for detecting CPU capabilities. +Set the ``[capabilities]cpu_flags`` configuration option to a mapping between +a CPU flag and a capability, for example:: + + cpu_flags = aes:cpu_aes,svm:cpu_vt,vmx:cpu_vt + +See the default value of this option for a more detail example. diff --git a/example.conf b/example.conf index 0e05ac5..64c8037 100644 --- a/example.conf +++ b/example.conf @@ -76,10 +76,11 @@ # If set to true, the logging level will be set to DEBUG instead of # the default INFO level. (boolean value) +# Note: This option can be changed without restarting. #debug = false -# If set to false, the logging level will be set to WARNING instead of -# the default INFO level. (boolean value) +# DEPRECATED: If set to false, the logging level will be set to +# WARNING instead of the default INFO level. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. #verbose = true @@ -168,6 +169,20 @@ #fatal_deprecations = false +[capabilities] + +# +# From ironic_inspector.plugins.capabilities +# + +# Whether to store the boot mode (BIOS or UEFI). (boolean value) +#boot_mode = false + +# Mapping between a CPU flag and a capability to set if this flag is +# present. (dict value) +#cpu_flags = aes:cpu_aes,pdpe1gb:cpu_hugepages_1g,pse:cpu_hugepages,smx:cpu_txt,svm:cpu_vt,vmx:cpu_vt + + [cors] # @@ -175,7 +190,9 @@ # # Indicate whether this resource may be shared with the domain -# received in the requests "origin" header. (list value) +# received in the requests "origin" header. Format: +# "://[:]", no trailing slash. 
Example: +# https://horizon.example.com (list value) #allowed_origin = # Indicate that the actual request can include user credentials @@ -184,7 +201,7 @@ # Indicate which headers are safe to expose to the API. Defaults to # HTTP Simple Headers. (list value) -#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma +#expose_headers = # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 @@ -205,7 +222,9 @@ # # Indicate whether this resource may be shared with the domain -# received in the requests "origin" header. (list value) +# received in the requests "origin" header. Format: +# "://[:]", no trailing slash. Example: +# https://horizon.example.com (list value) #allowed_origin = # Indicate that the actual request can include user credentials @@ -214,7 +233,7 @@ # Indicate which headers are safe to expose to the API. Defaults to # HTTP Simple Headers. (list value) -#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma +#expose_headers = # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 @@ -340,8 +359,8 @@ # From ironic_inspector # -# SQLite3 database to store nodes under introspection, required. Do -# not use :memory: here, it won't work. DEPRECATED: use +# DEPRECATED: SQLite3 database to store nodes under introspection, +# required. Do not use :memory: here, it won't work. DEPRECATED: use # [database]/connection. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. @@ -423,8 +442,9 @@ # Domain name to scope to (unknown value) #domain_name = -# Keystone admin endpoint. DEPRECATED: Use [keystone_authtoken] -# section for keystone token validation. (string value) +# DEPRECATED: Keystone admin endpoint. DEPRECATED: Use +# [keystone_authtoken] section for keystone token validation. (string +# value) # Deprecated group/name - [discoverd]/identity_uri # This option is deprecated for removal. 
# Its value may be silently ignored in the future. @@ -445,9 +465,9 @@ # (integer value) #max_retries = 30 -# Keystone authentication endpoint for accessing Ironic API. Use -# [keystone_authtoken] section for keystone token validation. (string -# value) +# DEPRECATED: Keystone authentication endpoint for accessing Ironic +# API. Use [keystone_authtoken] section for keystone token validation. +# (string value) # Deprecated group/name - [discoverd]/os_auth_url # This option is deprecated for removal. # Its value may be silently ignored in the future. @@ -457,8 +477,9 @@ # Ironic endpoint type. (string value) #os_endpoint_type = internalURL -# Password for accessing Ironic API. Use [keystone_authtoken] section -# for keystone token validation. (string value) +# DEPRECATED: Password for accessing Ironic API. Use +# [keystone_authtoken] section for keystone token validation. (string +# value) # Deprecated group/name - [discoverd]/os_password # This option is deprecated for removal. # Its value may be silently ignored in the future. @@ -471,16 +492,18 @@ # Ironic service type. (string value) #os_service_type = baremetal -# Tenant name for accessing Ironic API. Use [keystone_authtoken] -# section for keystone token validation. (string value) +# DEPRECATED: Tenant name for accessing Ironic API. Use +# [keystone_authtoken] section for keystone token validation. (string +# value) # Deprecated group/name - [discoverd]/os_tenant_name # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Use options presented by configured keystone auth plugin. #os_tenant_name = -# User name for accessing Ironic API. Use [keystone_authtoken] section -# for keystone token validation. (string value) +# DEPRECATED: User name for accessing Ironic API. Use +# [keystone_authtoken] section for keystone token validation. (string +# value) # Deprecated group/name - [discoverd]/os_username # This option is deprecated for removal. 
# Its value may be silently ignored in the future. @@ -598,7 +621,8 @@ # Determines the frequency at which the list of revoked tokens is # retrieved from the Identity service (in seconds). A high number of # revocation events combined with a low cache duration may -# significantly reduce performance. (integer value) +# significantly reduce performance. Only valid for PKI tokens. +# (integer value) #revocation_cache_time = 10 # (Optional) If defined, indicate whether token data should be @@ -718,7 +742,7 @@ # the Nova scheduler. Hook 'validate_interfaces' ensures that valid # NIC data was provided by the ramdisk.Do not exclude these two unless # you really know what you're doing. (string value) -#default_processing_hooks = ramdisk_error,root_disk_selection,scheduler,validate_interfaces +#default_processing_hooks = ramdisk_error,root_disk_selection,scheduler,validate_interfaces,capabilities # Comma-separated list of enabled hooks for processing pipeline. The # default for this is $default_processing_hooks, hooks can be added @@ -815,13 +839,13 @@ # (integer value) #max_retries = 2 -# Keystone authentication URL (string value) +# DEPRECATED: Keystone authentication URL (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Use options presented by configured keystone auth plugin. #os_auth_url = -# Keystone authentication API version (string value) +# DEPRECATED: Keystone authentication API version (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Use options presented by configured keystone auth plugin. 
diff --git a/ironic_inspector/conf.py b/ironic_inspector/conf.py index 5fd437d..8f8748a 100644 --- a/ironic_inspector/conf.py +++ b/ironic_inspector/conf.py @@ -79,7 +79,7 @@ PROCESSING_OPTS = [ deprecated_group='discoverd'), cfg.StrOpt('default_processing_hooks', default='ramdisk_error,root_disk_selection,scheduler,' - 'validate_interfaces', + 'validate_interfaces,capabilities', help='Comma-separated list of default hooks for processing ' 'pipeline. Hook \'scheduler\' updates the node with the ' 'minimum properties required by the Nova scheduler. ' diff --git a/ironic_inspector/plugins/capabilities.py b/ironic_inspector/plugins/capabilities.py new file mode 100644 index 0000000..dc9d5b3 --- /dev/null +++ b/ironic_inspector/plugins/capabilities.py @@ -0,0 +1,101 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Gather capabilities from inventory.""" + +from oslo_config import cfg + +from ironic_inspector.common.i18n import _LI, _LW +from ironic_inspector.plugins import base +from ironic_inspector import utils + + +DEFAULT_CPU_FLAGS_MAPPING = { + 'vmx': 'cpu_vt', + 'svm': 'cpu_vt', + 'aes': 'cpu_aes', + 'pse': 'cpu_hugepages', + 'pdpe1gb': 'cpu_hugepages_1g', + 'smx': 'cpu_txt', +} + +CAPABILITIES_OPTS = [ + cfg.BoolOpt('boot_mode', + default=False, + help='Whether to store the boot mode (BIOS or UEFI).'), + cfg.DictOpt('cpu_flags', + default=DEFAULT_CPU_FLAGS_MAPPING, + help='Mapping between a CPU flag and a capability to set ' + 'if this flag is present.'), +] + + +def list_opts(): + return [ + ('capabilities', CAPABILITIES_OPTS) + ] + +CONF = cfg.CONF +CONF.register_opts(CAPABILITIES_OPTS, group='capabilities') +LOG = utils.getProcessingLogger(__name__) + + +class CapabilitiesHook(base.ProcessingHook): + """Processing hook for detecting capabilities.""" + + def _detect_boot_mode(self, inventory, node_info, data=None): + boot_mode = inventory.get('boot', {}).get('current_boot_mode') + if boot_mode is not None: + LOG.info(_LI('Boot mode was %s'), boot_mode, + data=data, node_info=node_info) + return {'boot_mode': boot_mode} + else: + LOG.warning(_LW('No boot mode information available'), + data=data, node_info=node_info) + return {} + + def _detect_cpu_flags(self, inventory, node_info, data=None): + flags = inventory['cpu'].get('flags') + if not flags: + LOG.warning(_LW('No CPU flags available, please update your ' + 'introspection ramdisk'), + data=data, node_info=node_info) + return {} + + flags = set(flags) + caps = {} + for flag, name in CONF.capabilities.cpu_flags.items(): + if flag in flags: + caps[name] = 'true' + + LOG.info(_LI('CPU capabilities: %s'), list(caps), + data=data, node_info=node_info) + return caps + + def before_update(self, introspection_data, node_info, **kwargs): + inventory = utils.get_inventory(introspection_data) + caps = {} + if 
CONF.capabilities.boot_mode: + caps.update(self._detect_boot_mode(inventory, node_info, + introspection_data)) + + caps.update(self._detect_cpu_flags(inventory, node_info, + introspection_data)) + + if caps: + LOG.debug('New capabilities: %s', caps, node_info=node_info, + data=introspection_data) + node_info.update_capabilities(**caps) + else: + LOG.debug('No new capabilities detected', node_info=node_info, + data=introspection_data) diff --git a/ironic_inspector/test/unit/test_plugins_capabilities.py b/ironic_inspector/test/unit/test_plugins_capabilities.py new file mode 100644 index 0000000..41eafaf --- /dev/null +++ b/ironic_inspector/test/unit/test_plugins_capabilities.py @@ -0,0 +1,77 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock + +from oslo_config import cfg + +from ironic_inspector import node_cache +from ironic_inspector.plugins import base +from ironic_inspector.plugins import capabilities +from ironic_inspector.test import base as test_base + + +CONF = cfg.CONF + + +@mock.patch.object(node_cache.NodeInfo, 'update_capabilities', autospec=True) +class TestCapabilitiesHook(test_base.NodeTest): + hook = capabilities.CapabilitiesHook() + + def test_loadable_by_name(self, mock_caps): + base.CONF.set_override('processing_hooks', 'capabilities', + 'processing') + ext = base.processing_hooks_manager()['capabilities'] + self.assertIsInstance(ext.obj, capabilities.CapabilitiesHook) + + def test_no_data(self, mock_caps): + self.hook.before_update(self.data, self.node_info) + self.assertFalse(mock_caps.called) + + def test_boot_mode(self, mock_caps): + CONF.set_override('boot_mode', True, 'capabilities') + self.inventory['boot'] = {'current_boot_mode': 'uefi'} + + self.hook.before_update(self.data, self.node_info) + mock_caps.assert_called_once_with(self.node_info, boot_mode='uefi') + + def test_boot_mode_disabled(self, mock_caps): + self.inventory['boot'] = {'current_boot_mode': 'uefi'} + + self.hook.before_update(self.data, self.node_info) + self.assertFalse(mock_caps.called) + + def test_cpu_flags(self, mock_caps): + self.inventory['cpu']['flags'] = ['fpu', 'vmx', 'aes', 'pse', 'smx'] + + self.hook.before_update(self.data, self.node_info) + mock_caps.assert_called_once_with(self.node_info, + cpu_vt='true', + cpu_hugepages='true', + cpu_txt='true', + cpu_aes='true') + + def test_cpu_no_known_flags(self, mock_caps): + self.inventory['cpu']['flags'] = ['fpu'] + + self.hook.before_update(self.data, self.node_info) + self.assertFalse(mock_caps.called) + + def test_cpu_flags_custom(self, mock_caps): + CONF.set_override('cpu_flags', {'fpu': 'new_cap'}, + 'capabilities') + self.inventory['cpu']['flags'] = ['fpu', 'vmx', 'aes', 'pse'] + + self.hook.before_update(self.data, self.node_info) 
+ mock_caps.assert_called_once_with(self.node_info, + new_cap='true') diff --git a/releasenotes/notes/capabilities-15cc2268d661f0a0.yaml b/releasenotes/notes/capabilities-15cc2268d661f0a0.yaml new file mode 100644 index 0000000..f2b28d6 --- /dev/null +++ b/releasenotes/notes/capabilities-15cc2268d661f0a0.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added a new "capabilities" processing hook detecting the CPU and boot mode + capabilities (the latter disabled by default). diff --git a/setup.cfg b/setup.cfg index 8e0338f..b56a3e2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -31,6 +31,7 @@ ironic_inspector.hooks.processing = example = ironic_inspector.plugins.example:ExampleProcessingHook extra_hardware = ironic_inspector.plugins.extra_hardware:ExtraHardwareHook raid_device = ironic_inspector.plugins.raid_device:RaidDeviceDetection + capabilities = ironic_inspector.plugins.capabilities:CapabilitiesHook # Deprecated name for raid_device, don't confuse with root_disk_selection root_device_hint = ironic_inspector.plugins.raid_device:RootDeviceHintHook ironic_inspector.hooks.node_not_found = @@ -58,6 +59,7 @@ oslo.config.opts = ironic_inspector.common.ironic = ironic_inspector.common.ironic:list_opts ironic_inspector.common.swift = ironic_inspector.common.swift:list_opts ironic_inspector.plugins.discovery = ironic_inspector.plugins.discovery:list_opts + ironic_inspector.plugins.capabilities = ironic_inspector.plugins.capabilities:list_opts oslo.config.opts.defaults = ironic_inspector = ironic_inspector.conf:set_config_defaults From 457c1277de7ce4a9e6953a1630e86ef54be2ac90 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Fri, 3 Jun 2016 16:57:29 +0200 Subject: [PATCH 42/83] Remove deprecated alias "root_device_hint" for "raid_device" hook Deprecated in 3.1.0 (mid-Mitaka). Probably also completely unused. 
Change-Id: Id1ea1e63e212a94dce61bd03c4217611cca3cd2e --- doc/source/usage.rst | 2 +- ironic_inspector/plugins/raid_device.py | 7 ------- ironic_inspector/test/unit/test_plugins_raid_device.py | 9 +++------ .../notes/no-root_device_hint-0e7676d481d503bb.yaml | 3 +++ setup.cfg | 2 -- 5 files changed, 7 insertions(+), 16 deletions(-) create mode 100644 releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml diff --git a/doc/source/usage.rst b/doc/source/usage.rst index 2ee84f5..01bbe4f 100644 --- a/doc/source/usage.rst +++ b/doc/source/usage.rst @@ -204,7 +204,7 @@ Here are some plugins that can be additionally enabled: ``example`` example plugin logging it's input and output. -``raid_device`` (deprecated name ``root_device_hint``) +``raid_device`` gathers block devices from ramdisk and exposes root device in multiple runs. ``extra_hardware`` diff --git a/ironic_inspector/plugins/raid_device.py b/ironic_inspector/plugins/raid_device.py index d7cb6e4..027fadd 100644 --- a/ironic_inspector/plugins/raid_device.py +++ b/ironic_inspector/plugins/raid_device.py @@ -102,10 +102,3 @@ class RaidDeviceDetection(base.ProcessingHook): node_info.patch([{'op': 'add', 'path': '/extra/block_devices', 'value': {'serials': current_devices}}]) - - -class RootDeviceHintHook(RaidDeviceDetection): - def __init__(self): - LOG.warning(_LW('Using the root_device_hint alias for the ' - 'raid_device plugin is deprecated')) - super(RaidDeviceDetection, self).__init__() diff --git a/ironic_inspector/test/unit/test_plugins_raid_device.py b/ironic_inspector/test/unit/test_plugins_raid_device.py index a9a48fb..61785cc 100644 --- a/ironic_inspector/test/unit/test_plugins_raid_device.py +++ b/ironic_inspector/test/unit/test_plugins_raid_device.py @@ -23,12 +23,9 @@ class TestRaidDeviceDetection(test_base.NodeTest): hook = raid_device.RaidDeviceDetection() def test_loadable_by_name(self): - names = ('raid_device', 'root_device_hint') - base.CONF.set_override('processing_hooks', ','.join(names), - 
'processing') - for name in names: - ext = base.processing_hooks_manager()[name] - self.assertIsInstance(ext.obj, raid_device.RaidDeviceDetection) + base.CONF.set_override('processing_hooks', 'raid_device', 'processing') + ext = base.processing_hooks_manager()['raid_device'] + self.assertIsInstance(ext.obj, raid_device.RaidDeviceDetection) def test_missing_local_gb(self): introspection_data = {} diff --git a/releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml b/releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml new file mode 100644 index 0000000..0f87039 --- /dev/null +++ b/releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml @@ -0,0 +1,3 @@ +--- +upgrade: + - Removed the deprecated "root_device_hint" alias for the "raid_device" hook. diff --git a/setup.cfg b/setup.cfg index b56a3e2..2557d1e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,8 +32,6 @@ ironic_inspector.hooks.processing = extra_hardware = ironic_inspector.plugins.extra_hardware:ExtraHardwareHook raid_device = ironic_inspector.plugins.raid_device:RaidDeviceDetection capabilities = ironic_inspector.plugins.capabilities:CapabilitiesHook - # Deprecated name for raid_device, don't confuse with root_disk_selection - root_device_hint = ironic_inspector.plugins.raid_device:RootDeviceHintHook ironic_inspector.hooks.node_not_found = example = ironic_inspector.plugins.example:example_not_found_hook enroll = ironic_inspector.plugins.discovery:enroll_node_not_found_hook From 223ff38dcfc065fcc2f3f6293b87fc0ca84a1a88 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Wed, 8 Jun 2016 09:56:04 +0200 Subject: [PATCH 43/83] Return HTTP 500 from /v1/continue on unexpected exceptions Currently it returns HTTP 400 which is obviously wrong. 
Closes-Bug: #1590302 Change-Id: I56114aa1c0a3248cc80f79d1a5da3ace6c615e52 --- ironic_inspector/process.py | 3 ++- ironic_inspector/test/unit/test_process.py | 6 ++++-- releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml | 4 ++++ 3 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml diff --git a/ironic_inspector/process.py b/ironic_inspector/process.py index 325ed82..83b7ddc 100644 --- a/ironic_inspector/process.py +++ b/ironic_inspector/process.py @@ -234,7 +234,8 @@ def process(introspection_data): 'error': exc} node_info.finished(error=msg) _store_logs(introspection_data, node_info) - raise utils.Error(msg, node_info=node_info, data=introspection_data) + raise utils.Error(msg, node_info=node_info, data=introspection_data, + code=500) if CONF.processing.always_store_ramdisk_logs: _store_logs(introspection_data, node_info) diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py index 6b90b43..befc4ed 100644 --- a/ironic_inspector/test/unit/test_process.py +++ b/ironic_inspector/test/unit/test_process.py @@ -134,9 +134,11 @@ class TestProcess(BaseProcessTest): def test_unexpected_exception(self): self.process_mock.side_effect = RuntimeError('boom') - self.assertRaisesRegexp(utils.Error, 'Unexpected exception', - process.process, self.data) + with self.assertRaisesRegexp(utils.Error, + 'Unexpected exception') as ctx: + process.process(self.data) + self.assertEqual(500, ctx.exception.http_code) self.node_info.finished.assert_called_once_with( error='Unexpected exception RuntimeError during processing: boom') diff --git a/releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml b/releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml new file mode 100644 index 0000000..d5ba268 --- /dev/null +++ b/releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Fixed "/v1/continue" to return HTTP 500 on unexpected 
exceptions, not + HTTP 400. From 21230baaaaf473c0fa09edd1e60aeba49818e0da Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Wed, 8 Jun 2016 17:10:48 +0300 Subject: [PATCH 44/83] Add config fixture to functional tests Changing config options in one test effects other test, this change adds config fixture to base functional class to setup default config options. Change-Id: I926cdbcf56e0300dbff401152e0f1018d43cc55a --- ironic_inspector/test/functional.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/ironic_inspector/test/functional.py b/ironic_inspector/test/functional.py index 20c9cc1..af5beb4 100644 --- a/ironic_inspector/test/functional.py +++ b/ironic_inspector/test/functional.py @@ -24,6 +24,7 @@ import unittest import mock from oslo_config import cfg +from oslo_config import fixture as config_fixture import requests from ironic_inspector.common import ironic as ir_utils @@ -53,6 +54,19 @@ connection = sqlite:///%(db_file)s DEFAULT_SLEEP = 2 +TEST_CONF_FILE = None + + +def get_test_conf_file(): + global TEST_CONF_FILE + if not TEST_CONF_FILE: + d = tempfile.mkdtemp() + TEST_CONF_FILE = os.path.join(d, 'test.conf') + db_file = os.path.join(d, 'test.db') + with open(TEST_CONF_FILE, 'wb') as fp: + content = CONF % {'db_file': db_file} + fp.write(content.encode('utf-8')) + return TEST_CONF_FILE class Base(base.NodeTest): @@ -84,6 +98,10 @@ class Base(base.NodeTest): self.node.power_state = 'power off' + self.cfg = self.useFixture(config_fixture.Config()) + conf_file = get_test_conf_file() + self.cfg.set_config_files([conf_file]) + def call(self, method, endpoint, data=None, expect_error=None, api_version=None): if data is not None: @@ -432,12 +450,7 @@ class Test(Base): def mocked_server(): d = tempfile.mkdtemp() try: - conf_file = os.path.join(d, 'test.conf') - db_file = os.path.join(d, 'test.db') - with open(conf_file, 'wb') as fp: - content = CONF % {'db_file': db_file} - fp.write(content.encode('utf-8')) - + 
conf_file = get_test_conf_file() with mock.patch.object(ir_utils, 'get_client'): dbsync.main(args=['--config-file', conf_file, 'upgrade']) From aa01aa7206695ff8c85b6d489f82b7555625e065 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Wed, 8 Jun 2016 17:26:57 +0200 Subject: [PATCH 45/83] Update terribly outdated installation instructions * Update configuration options to reflect switch to keystoneauth * Add iPXE and UEFI configuration * Mention puppet-ironic and bifrost * Make ordering more logical now that we only have 1 ramdisk * Various small fixes Change-Id: I8c4a64b260db801622bd30d6c4f2c93b41580af0 Closes-Bug: #1416371 --- doc/source/install.rst | 238 ++++++++++++++++++++++++++++------------- 1 file changed, 166 insertions(+), 72 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 0ea3a03..1284802 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -15,6 +15,13 @@ status. Finally, some distributions (e.g. Fedora) provide **ironic-inspector** packaged, some of them - under its old name *ironic-discoverd*. +There are several projects you can use to set up **ironic-inspector** in +production. `puppet-ironic +`_ provides Puppet +manifests, while `bifrost `_ +provides an Ansible-based standalone installer. Refer to Configuration_ +if you plan on installing **ironic-inspector** manually. + .. _PyPI: https://pypi.python.org/pypi/ironic-inspector Note for Ubuntu users @@ -40,6 +47,7 @@ Ironic Version Standalone Inspection Interface Juno 1.0 N/A Kilo 1.0 - 2.2 1.0 - 1.1 Liberty 1.1 - 2.X 2.0 - 2.X +Mitaka+ 2.0 - 2.X 2.0 - 2.X ============== ========== ==================== .. note:: @@ -53,11 +61,10 @@ Copy ``example.conf`` to some permanent place (e.g. ``/etc/ironic-inspector/inspector.conf``). 
Fill in at least these configuration values: -* ``os_username``, ``os_password``, ``os_tenant_name`` - Keystone credentials - to use when accessing other services and check client authentication tokens; +* The ``keystone_authtoken`` section - credentials to use when checking user + authentication. -* ``os_auth_url``, ``identity_uri`` - Keystone endpoints for validating - authentication tokens and checking user roles; +* The ``ironic`` section - credentials to use when accessing the Ironic API. * ``connection`` in the ``database`` section - SQLAlchemy connection string for the database; @@ -75,6 +82,49 @@ for the other possible configuration options. Configuration file contains a password and thus should be owned by ``root`` and should have access rights like ``0600``. +Here is an example *inspector.conf* (adapted from a gate run):: + + [DEFAULT] + debug = false + rootwrap_config = /etc/ironic-inspector/rootwrap.conf + + [database] + connection = mysql+pymysql://root:@127.0.0.1/ironic_inspector?charset=utf8 + + [firewall] + dnsmasq_interface = br-ctlplane + + [ironic] + os_region = RegionOne + project_name = service + password = + username = ironic-inspector + auth_url = http://127.0.0.1/identity + auth_type = password + + [keystone_authtoken] + auth_uri = http://127.0.0.1/identity + project_name = service + password = + username = ironic-inspector + auth_url = http://127.0.0.1/identity_v2_admin + auth_type = password + + [processing] + ramdisk_logs_dir = /var/log/ironic-inspector/ramdisk + store_data = swift + + [swift] + os_region = RegionOne + project_name = service + password = + username = ironic-inspector + auth_url = http://127.0.0.1/identity + auth_type = password + +.. note:: + Set ``debug = true`` if you want to see complete logs. + **ironic-inspector** requires root rights for managing iptables. It gets them by running ``ironic-inspector-rootwrap`` utility with ``sudo``. 
To allow it, copy file ``rootwrap.conf`` and directory ``rootwrap.d`` to the @@ -103,6 +153,41 @@ configuration directory (e.g. ``/etc/ironic-inspector/``) and create file Replace ``stack`` with whatever user you'll be using to run **ironic-inspector**. +Configuring IPA +^^^^^^^^^^^^^^^ + +ironic-python-agent_ is a ramdisk developed for Ironic. During the Liberty +cycle support for **ironic-inspector** was added. This is the default ramdisk +starting with the Mitaka release. + +.. note:: + You need at least 1.5 GiB of RAM on the machines to use IPA built with + diskimage-builder_ and at least 384 MiB to use the *TinyIPA*. + +To build an ironic-python-agent ramdisk, do the following: + +* Get the new enough version of diskimage-builder_:: + + sudo pip install -U "diskimage-builder>=1.1.2" + +* Build the ramdisk:: + + disk-image-create ironic-agent fedora -o ironic-agent + + .. note:: + Replace "fedora" with your distribution of choice. + +* Use the resulting files ``ironic-agent.kernel`` and + ``ironic-agent.initramfs`` in the following instructions to set PXE or iPXE. + +Alternatively, you can download a `prebuilt TinyIPA image +`_ or use +the `other builders +`_. + +.. _diskimage-builder: https://github.com/openstack/diskimage-builder +.. _ironic-python-agent: https://github.com/openstack/ironic-python-agent + Configuring PXE ^^^^^^^^^^^^^^^ @@ -111,10 +196,41 @@ As for PXE boot environment, you'll need: * TFTP server running and accessible (see below for using *dnsmasq*). Ensure ``pxelinux.0`` is present in the TFTP root. + Copy ``ironic-agent.kernel`` and ``ironic-agent.initramfs`` to the TFTP + root as well. 
+ +* Next, set up ``$TFTPROOT/pxelinux.cfg/default`` as follows:: + + default introspect + + label introspect + kernel ironic-agent.kernel + append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://{IP}:5050/v1/continue systemd.journald.forward_to_console=yes + + ipappend 3 + + Replace ``{IP}`` with IP of the machine (do not use loopback interface, it + will be accessed by ramdisk on a booting machine). + + .. note:: + While ``systemd.journald.forward_to_console=yes`` is not actually + required, it will substantially simplify debugging if something + goes wrong. + + IPA is pluggable: you can insert introspection plugins called + *collectors* into it. For example, to enable a very handy ``logs`` collector + (sending ramdisk logs to **ironic-inspector**), modify the ``append`` line in + ``$TFTPROOT/pxelinux.cfg/default``:: + + append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://{IP}:5050/v1/continue ipa-inspection-collectors=default,logs systemd.journald.forward_to_console=yes + + .. note:: + You probably want to always keep the ``default`` collector, as it provides + the basic information required for introspection. * You need PXE boot server (e.g. *dnsmasq*) running on **the same** machine as **ironic-inspector**. Don't do any firewall configuration: - **ironic-inspector** will handle it for you. In **ironic-inspector** + **ironic-inspector** will handle it for you. In the **ironic-inspector** configuration file set ``dnsmasq_interface`` to the interface your PXE boot server listens on. Here is an example *dnsmasq.conf*:: @@ -132,87 +248,65 @@ As for PXE boot environment, you'll need: simultaneously cause conflicts - the same IP address is suggested to several nodes. -* You have to install and configure the ramdisk to be run on target machines - - see `Configuring IPA`_. 
+Configuring iPXE +^^^^^^^^^^^^^^^^ -Here is *inspector.conf* you may end up with:: +iPXE allows better scaling as it primarily uses the HTTP protocol instead of +slow and unreliable TFTP. You still need a TFTP server as a fall back for +nodes not supporting iPXE. To use iPXE you'll need: - [DEFAULT] - debug = false - [ironic] - identity_uri = http://127.0.0.1:35357 - os_auth_url = http://127.0.0.1:5000/v2.0 - os_username = admin - os_password = password - os_tenant_name = admin - [firewall] - dnsmasq_interface = br-ctlplane +* TFTP server running and accessible (see above for using *dnsmasq*). + Ensure ``undionly.kpxe`` is present in the TFTP root. If any of your nodes + boot with UEFI, you'll also need ``ipxe.efi`` there. -.. note:: - Set ``debug = true`` if you want to see complete logs. +* You also need an HTTP server capable of serving static files. + Copy ``ironic-agent.kernel`` and ``ironic-agent.initramfs`` there. -Configuring IPA -^^^^^^^^^^^^^^^ +* Create a file called ``inspector.ipxe`` in the HTTP root (you can name and + place it differently, just don't forget to adjust the *dnsmasq.conf* example + below):: -ironic-python-agent_ is a ramdisk developed for Ironic. During the Liberty -cycle support for **ironic-inspector** was added. This is the default ramdisk -starting with the Mitaka release. + #!ipxe -.. note:: - You need at least 1.5 GiB of RAM on the machines to use this ramdisk, - 2 GiB is recommended. 
+ :retry_dhcp + dhcp || goto retry_dhcp -To build an ironic-python-agent ramdisk, do the following: - -* Get the new enough version of diskimage-builder_:: - - sudo pip install -U "diskimage-builder>=1.1.2" - -* Build the ramdisk:: - - disk-image-create ironic-agent fedora -o ironic-agent + :retry_boot + imgfree + kernel --timeout 30000 http://{IP}:8088/ironic-agent.kernel ipa-inspection-callback-url=http://{IP}>:5050/v1/continue systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd=agent.ramdisk || goto retry_boot + initrd --timeout 30000 http://{IP}:8088/ironic-agent.ramdisk || goto retry_boot + boot .. note:: - Replace "fedora" with your distribution of choice. + Older versions of the iPXE ROM tend to misbehave on unreliable network + connection, thus we use the timeout option with retries. -* Copy resulting files ``ironic-agent.vmlinuz`` and ``ironic-agent.initramfs`` - to the TFTP root directory. + Just like with PXE you can customize the list of collectors by appending + the ``ipa-inspector-collectors`` kernel option, for example:: -Alternatively, you can download a `prebuilt IPA image -`_ or use -the `CoreOS-based IPA builder -`_. + ipa-inspection-collectors=default,logs,extra_hardware -Next, set up ``$TFTPROOT/pxelinux.cfg/default`` as follows:: +* Just as with PXE you'll need a PXE boot server. The configuration, however, + will be different. Here is an example *dnsmasq.conf*:: - default introspect + port=0 + interface={INTERFACE} + bind-interfaces + dhcp-range={DHCP IP RANGE, e.g. 192.168.0.50,192.168.0.150} + enable-tftp + tftp-root={TFTP ROOT, e.g. 
/tftpboot} + dhcp-sequential-ip + dhcp-match=ipxe,175 + dhcp-match=set:efi,option:client-arch,7 + dhcp-boot=tag:ipxe,http://{IP}:8088/inspector.ipxe + dhcp-boot=tag:efi,ipxe.efi + dhcp-boot=undionly.kpxe,localhost.localdomain,{IP} - label introspect - kernel ironic-agent.vmlinuz - append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://{IP}:5050/v1/continue systemd.journald.forward_to_console=yes - - ipappend 3 - -Replace ``{IP}`` with IP of the machine (do not use loopback interface, it -will be accessed by ramdisk on a booting machine). - -.. note:: - While ``systemd.journald.forward_to_console=yes`` is not actually - required, it will substantially simplify debugging if something goes wrong. - -This ramdisk is pluggable: you can insert introspection plugins called -*collectors* into it. For example, to enable a very handy ``logs`` collector -(sending ramdisk logs to **ironic-inspector**), modify the ``append`` line in -``$TFTPROOT/pxelinux.cfg/default``:: - - append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://{IP}:5050/v1/continue ipa-inspection-collectors=default,logs systemd.journald.forward_to_console=yes - -.. note:: - You probably want to always keep ``default`` collector, as it provides the - basic information required for introspection. - -.. _diskimage-builder: https://github.com/openstack/diskimage-builder -.. _ironic-python-agent: https://github.com/openstack/ironic-python-agent + First, we configure the same common parameters as with PXE. Then we define + ``ipxe`` and ``efi`` tags. Nodes already supporting iPXE are ordered to + download and execute ``inspector.ipxe``. Nodes without iPXE booted with UEFI + will get ``ipxe.efi`` firmware to execute, while the remaining will get + ``undionly.kpxe``. 
Managing the **ironic-inspector** database ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From d2eec5506bdd9ebf512351b8595e6ad921ea8ddd Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Thu, 9 Jun 2016 17:57:09 +0300 Subject: [PATCH 46/83] Tempest: don't rely on tempest ironic client Ironic client will be removed from tempest in near future, switch on ironic tempest plugin. Change-Id: Ifd49503f0b69a67155c2576f9ae70a17f0e01058 --- .../inspector_tempest_plugin/services/introspection_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py b/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py index 346e06c..3f43bf5 100644 --- a/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py +++ b/ironic_inspector/test/inspector_tempest_plugin/services/introspection_client.py @@ -12,10 +12,10 @@ import json +from ironic_tempest_plugin.services.baremetal import base from tempest import clients from tempest.common import credentials_factory as common_creds from tempest import config -from tempest.services.baremetal import base CONF = config.CONF From 0d9c0cdc304efc9b6f0d1454900f51d2d914d30f Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Thu, 9 Jun 2016 10:11:15 +0300 Subject: [PATCH 47/83] Fix response code for rule creating API This change introduces new return code (201 instead of 200) for POST /v1/rules endpoint on success rule creation. API less 1.6 continues returning 200. Default API version was changed from minimum to maximum which Inspector can support. 
Change-Id: I911c7c241d16b9948ee4b6db92b127c7f8f374ae --- doc/source/http-api.rst | 8 +++++- ironic_inspector/main.py | 28 ++++++++++++------- ironic_inspector/test/unit/test_main.py | 22 +++++++++++++++ ...es-endpoint-response-d60984c40d927c1f.yaml | 10 +++++++ 4 files changed, 57 insertions(+), 11 deletions(-) create mode 100644 releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml diff --git a/doc/source/http-api.rst b/doc/source/http-api.rst index aeff7ae..095f52e 100644 --- a/doc/source/http-api.rst +++ b/doc/source/http-api.rst @@ -131,7 +131,8 @@ authentication. Response - * 200 - OK + * 200 - OK for API version < 1.6 + * 201 - OK for API version 1.6 and higher * 400 - bad request Response body: JSON dictionary with introspection rule representation (the @@ -282,6 +283,10 @@ major version and is always ``1`` for now, ``Y`` is a minor version. ``X-OpenStack-Ironic-Inspector-API-Maximum-Version`` headers with minimum and maximum API versions supported by the server. + .. note:: + Maximum is server API version used by default. + + API Discovery ~~~~~~~~~~~~~ @@ -328,3 +333,4 @@ Version History * **1.3** endpoint for canceling running introspection * **1.4** endpoint for reapplying the introspection over stored data. * **1.5** support for Ironic node names. +* **1.6** endpoint for rules creating returns 201 instead of 200 on success. 
diff --git a/ironic_inspector/main.py b/ironic_inspector/main.py index de913ff..05d054e 100644 --- a/ironic_inspector/main.py +++ b/ironic_inspector/main.py @@ -47,15 +47,26 @@ app = flask.Flask(__name__) LOG = utils.getProcessingLogger(__name__) MINIMUM_API_VERSION = (1, 0) -CURRENT_API_VERSION = (1, 5) +CURRENT_API_VERSION = (1, 6) _LOGGING_EXCLUDED_KEYS = ('logs',) +def _get_version(): + ver = flask.request.headers.get(conf.VERSION_HEADER, + _DEFAULT_API_VERSION) + try: + requested = tuple(int(x) for x in ver.split('.')) + except (ValueError, TypeError): + return error_response(_('Malformed API version: expected string ' + 'in form of X.Y'), code=400) + return requested + + def _format_version(ver): return '%d.%d' % ver -_DEFAULT_API_VERSION = _format_version(MINIMUM_API_VERSION) +_DEFAULT_API_VERSION = _format_version(CURRENT_API_VERSION) def error_response(exc, code=500): @@ -86,13 +97,7 @@ def convert_exceptions(func): @app.before_request def check_api_version(): - requested = flask.request.headers.get(conf.VERSION_HEADER, - _DEFAULT_API_VERSION) - try: - requested = tuple(int(x) for x in requested.split('.')) - except (ValueError, TypeError): - return error_response(_('Malformed API version: expected string ' - 'in form of X.Y'), code=400) + requested = _get_version() if requested < MINIMUM_API_VERSION or requested > CURRENT_API_VERSION: return error_response(_('Unsupported API version %(requested)s, ' @@ -279,7 +284,10 @@ def api_rules(): actions_json=body.get('actions', []), uuid=body.get('uuid'), description=body.get('description')) - return flask.jsonify(rule_repr(rule, short=False)) + + response_code = (200 if _get_version() < (1, 6) else 201) + return flask.make_response( + flask.jsonify(rule_repr(rule, short=False)), response_code) @app.route('/v1/rules/', methods=['GET', 'DELETE']) diff --git a/ironic_inspector/test/unit/test_main.py b/ironic_inspector/test/unit/test_main.py index daa77d7..0906e03 100644 --- a/ironic_inspector/test/unit/test_main.py 
+++ b/ironic_inspector/test/unit/test_main.py @@ -369,6 +369,28 @@ class TestApiRules(BaseAPITest): **{'as_dict.return_value': exp}) res = self.app.post('/v1/rules', data=json.dumps(data)) + self.assertEqual(201, res.status_code) + create_mock.assert_called_once_with(conditions_json='cond', + actions_json='act', + uuid=self.uuid, + description=None) + self.assertEqual(exp, json.loads(res.data.decode('utf-8'))) + + @mock.patch.object(rules, 'create', autospec=True) + def test_create_api_less_1_6(self, create_mock): + data = {'uuid': self.uuid, + 'conditions': 'cond', + 'actions': 'act'} + exp = data.copy() + exp['description'] = None + create_mock.return_value = mock.Mock(spec=rules.IntrospectionRule, + **{'as_dict.return_value': exp}) + + headers = {conf.VERSION_HEADER: + main._format_version((1, 5))} + + res = self.app.post('/v1/rules', data=json.dumps(data), + headers=headers) self.assertEqual(200, res.status_code) create_mock.assert_called_once_with(conditions_json='cond', actions_json='act', diff --git a/releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml b/releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml new file mode 100644 index 0000000..1cf1379 --- /dev/null +++ b/releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - API "POST /v1/rules" returns 201 response code instead of + 200 on creating success. API version was bumped to 1.6. + API less than 1.6 continues to return 200. + - Default API version was changed from minimum to maximum + which Inspector can support. +fixes: + - Fix response return code for rule creating endpoint, it + returns 201 now instead of 200 on success. 
From fb6bcbec9ce5380f5da17ad58a9b35255ca40dbd Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Fri, 17 Jun 2016 11:50:40 +0300 Subject: [PATCH 48/83] Fix tempest tests Base TempestException was removed from tempest, now exceptions based on tempest.lib TempestException, inherited from restclient exceptions. So inherit inspector tempest exceptions from last one. Change-Id: I8058a964e837dbb4aa5a8b214f216453a18a1713 --- ironic_inspector/test/inspector_tempest_plugin/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ironic_inspector/test/inspector_tempest_plugin/exceptions.py b/ironic_inspector/test/inspector_tempest_plugin/exceptions.py index 7791c40..ac08d54 100644 --- a/ironic_inspector/test/inspector_tempest_plugin/exceptions.py +++ b/ironic_inspector/test/inspector_tempest_plugin/exceptions.py @@ -10,7 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. -from tempest import exceptions +from tempest.lib import exceptions class IntrospectionFailed(exceptions.TempestException): From d3f96f2ad407bc36b9e1943b9483014b97464df9 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 17 Jun 2016 14:15:05 +0000 Subject: [PATCH 49/83] Updated from global requirements Change-Id: Ib4fec709c5d53a2a0ce925ff2132ef24a48205e2 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ef9ebe7..842a1e8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ Flask!=0.11,<1.0,>=0.10 # BSD futurist>=0.11.0 # Apache-2.0 jsonpath-rw<2.0,>=1.2.0 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -keystoneauth1>=2.1.0 # Apache-2.0 +keystoneauth1>=2.7.0 # Apache-2.0 keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD pbr>=1.6 # Apache-2.0 From 33dea6b3815409b8a2342f954b662b0f888e70ff Mon Sep 17 00:00:00 2001 From: dparalen Date: Tue, 21 Jun 2016 16:11:43 +0200 Subject: [PATCH 50/83] 
devstack/plugin.sh: use screen_stop A custom, direct screen call is being used to stop inspector processes. This breaks grenade testing for me as no screen is used in that environment. This patch suggests adopting screen_stop instead. See also project devstack/functions-common:screen_stop Change-Id: I63d0c7400ecdff4333ffc31b6607d94f8cf20e18 --- devstack/plugin.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 4eaeaa4..ab51ad4 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -82,11 +82,11 @@ function start_inspector_dhcp { } function stop_inspector { - screen -S $SCREEN_NAME -p ironic-inspector -X kill + screen_stop ironic-inspector } function stop_inspector_dhcp { - screen -S $SCREEN_NAME -p ironic-inspector-dhcp -X kill + screen_stop ironic-inspector-dhcp } ### Configuration From 066205418341aab40e595f6c8da922a00a0eecdd Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 21 Jun 2016 17:59:48 +0000 Subject: [PATCH 51/83] Updated from global requirements Change-Id: Id0b89350c08c016b273bdb7f697f167a58337f91 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 9878825..0a3f8ed 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,7 +5,7 @@ coverage>=3.6 # Apache-2.0 doc8 # Apache-2.0 hacking<0.11,>=0.10.0 mock>=2.0 # BSD -sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD +sphinx!=1.3b1,<1.3,>=1.2.1 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=1.6.2 # Apache2 fixtures>=3.0.0 # Apache-2.0/BSD From 7cbdd5dc8ad0be4c5a6c303f6908e009361170a6 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 24 Jun 2016 03:12:01 +0000 Subject: [PATCH 52/83] Updated from global requirements Change-Id: Ie2a8b4c3bcc1cd0225d8a01ad57e93c6e7f88233 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 
0a3f8ed..fad950e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,7 +7,7 @@ hacking<0.11,>=0.10.0 mock>=2.0 # BSD sphinx!=1.3b1,<1.3,>=1.2.1 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 -reno>=1.6.2 # Apache2 +reno>=1.8.0 # Apache2 fixtures>=3.0.0 # Apache-2.0/BSD testresources>=0.2.4 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD From 6e458b63326fcb39043cfa17dfe90840b2b17585 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Mon, 27 Jun 2016 13:13:45 +0200 Subject: [PATCH 53/83] Create devstack/example.local.conf and include it in the docs It's convenient to have a downloadable example of local.conf. This change extracts the example from the contributing documentation. Also removes the noop variable. Change-Id: I18a7f9969a9820fa8c736dede5f39c70f9c85b6a --- CONTRIBUTING.rst | 29 +---------------------------- devstack/example.local.conf | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 28 deletions(-) create mode 100644 devstack/example.local.conf diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 9255e3d..a446b78 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -86,35 +86,8 @@ components. There is a plugin for installing **ironic-inspector** on DevStack. Example local.conf ------------------ -:: +.. 
literalinclude:: ../../devstack/example.local.conf - [[local|localrc]] - enable_service ironic ir-api ir-cond - disable_service n-net n-novnc - enable_service neutron q-svc q-agt q-dhcp q-l3 q-meta - enable_service s-proxy s-object s-container s-account - disable_service heat h-api h-api-cfn h-api-cw h-eng - disable_service cinder c-sch c-api c-vol - - enable_plugin ironic https://github.com/openstack/ironic - enable_plugin ironic-inspector https://github.com/openstack/ironic-inspector - - IRONIC_BAREMETAL_BASIC_OPS=True - IRONIC_VM_COUNT=2 - IRONIC_VM_SPECS_RAM=1024 - - IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True - IRONIC_BUILD_DEPLOY_RAMDISK=False - IRONIC_INSPECTOR_BUILD_RAMDISK=False - - VIRT_DRIVER=ironic - - LOGDAYS=1 - LOGFILE=~/logs/stack.sh.log - SCREEN_LOGDIR=~/logs/screen - - DEFAULT_INSTANCE_TYPE=baremetal - TEMPEST_ALLOW_TENANT_ISOLATION=False Notes ----- diff --git a/devstack/example.local.conf b/devstack/example.local.conf new file mode 100644 index 0000000..7ee7f87 --- /dev/null +++ b/devstack/example.local.conf @@ -0,0 +1,26 @@ +[[local|localrc]] +enable_service ironic ir-api ir-cond +disable_service n-net n-novnc +enable_service neutron q-svc q-agt q-dhcp q-l3 q-meta +enable_service s-proxy s-object s-container s-account +disable_service heat h-api h-api-cfn h-api-cw h-eng +disable_service cinder c-sch c-api c-vol + +enable_plugin ironic https://github.com/openstack/ironic +enable_plugin ironic-inspector https://github.com/openstack/ironic-inspector + +IRONIC_BAREMETAL_BASIC_OPS=True +IRONIC_VM_COUNT=2 +IRONIC_VM_SPECS_RAM=1024 + +IRONIC_BUILD_DEPLOY_RAMDISK=False +IRONIC_INSPECTOR_BUILD_RAMDISK=False + +VIRT_DRIVER=ironic + +LOGDAYS=1 +LOGFILE=~/logs/stack.sh.log +SCREEN_LOGDIR=~/logs/screen + +DEFAULT_INSTANCE_TYPE=baremetal +TEMPEST_ALLOW_TENANT_ISOLATION=False From a8cb6bf8388fe31ae13f22f177c3f0abe534c41e Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Wed, 29 Jun 2016 08:13:46 +0200 Subject: [PATCH 54/83] Use run_process in the devstack 
plugin Old commands we use give us problems with grenade. Change-Id: I12ad693870ea013d7da9ffa5e772ddd6630e7895 --- devstack/plugin.sh | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index ab51ad4..80deec9 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -72,21 +72,20 @@ function install_inspector_client { } function start_inspector { - screen_it ironic-inspector \ - "cd $IRONIC_INSPECTOR_DIR && $IRONIC_INSPECTOR_CMD" + run_process ironic-inspector "$IRONIC_INSPECTOR_CMD" } function start_inspector_dhcp { - screen_it ironic-inspector-dhcp \ + run_process ironic-inspector-dhcp \ "sudo dnsmasq --conf-file=$IRONIC_INSPECTOR_DHCP_CONF_FILE" } function stop_inspector { - screen_stop ironic-inspector + stop_process ironic-inspector } function stop_inspector_dhcp { - screen_stop ironic-inspector-dhcp + stop_process ironic-inspector-dhcp } ### Configuration From ca655b566073a2f1ad8a930dcb5c30f916b1da2f Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 30 Jun 2016 18:43:23 +0000 Subject: [PATCH 55/83] Updated from global requirements Change-Id: Iabe042b051fa9c2b1b2b4c77f97ce84af3676d40 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 842a1e8..5646a4b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 -oslo.utils>=3.11.0 # Apache-2.0 +oslo.utils>=3.14.0 # Apache-2.0 six>=1.9.0 # MIT stevedore>=1.10.0 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT From 99c81d9c603a499fb08e0477e7d4e97fc8e34176 Mon Sep 17 00:00:00 2001 From: dparalen Date: Thu, 9 Jun 2016 15:35:30 +0200 Subject: [PATCH 56/83] Introduce upgrade testing with Grenade This change makes it possible to run Grenade jobs on ironic-inspector. 
The only thing tested in this patch is that ironic-inspector actually comes up after upgrade from a previous version. We will make it run actual introspection tests in the next patch. Co-Authored-By: Dmitry Tantsur Change-Id: I79e7ecaa89936144b63a72baf8460ae6ad139890 --- devstack/upgrade/resources.sh | 77 +++++++++++++++++++ devstack/upgrade/settings | 11 +++ devstack/upgrade/shutdown.sh | 29 ++++++++ devstack/upgrade/upgrade.sh | 136 ++++++++++++++++++++++++++++++++++ 4 files changed, 253 insertions(+) create mode 100755 devstack/upgrade/resources.sh create mode 100644 devstack/upgrade/settings create mode 100755 devstack/upgrade/shutdown.sh create mode 100755 devstack/upgrade/upgrade.sh diff --git a/devstack/upgrade/resources.sh b/devstack/upgrade/resources.sh new file mode 100755 index 0000000..1c1bf39 --- /dev/null +++ b/devstack/upgrade/resources.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# Copyright 2016 Intel Corporation +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +## based on Ironic/devstack/upgrade/resources.sh + +set -o errexit + +source $GRENADE_DIR/grenaderc +source $GRENADE_DIR/functions + +source $TOP_DIR/openrc admin admin + +# Inspector relies on a couple of Ironic variables +source $TARGET_RELEASE_DIR/ironic/devstack/lib/ironic + +INSPECTOR_DEVSTACK_DIR=$(cd $(dirname "$0")/.. 
&& pwd) +source $INSPECTOR_DEVSTACK_DIR/plugin.sh + +set -o xtrace + + +function early_create { + : +} + +function create { + : +} + +function verify { + : +} + +function verify_noapi { + : +} + +function destroy { + : +} + +# Dispatcher +case $1 in + "early_create") + early_create + ;; + "create") + create + ;; + "verify_noapi") + verify_noapi + ;; + "verify") + verify + ;; + "destroy") + destroy + ;; + "force_destroy") + set +o errexit + destroy + ;; +esac diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings new file mode 100644 index 0000000..d8bb6f6 --- /dev/null +++ b/devstack/upgrade/settings @@ -0,0 +1,11 @@ +# Enabling Inspector grenade plug-in +# Based on Ironic/devstack/grenade/settings +register_project_for_upgrade ironic-inspector +register_db_to_save ironic_inspector + +# Inspector plugin and service registration +devstack_localrc base enable_plugin ironic-inspector https://github.com/openstack/ironic-inspector +devstack_localrc base enable_service ironic-inspector ironic-inspector-dhcp + +devstack_localrc target enable_plugin ironic-inspector https://github.com/openstack/ironic-inspector +devstack_localrc target enable_service ironic-inspector ironic-inspector-dhcp diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh new file mode 100755 index 0000000..59e0cae --- /dev/null +++ b/devstack/upgrade/shutdown.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# based on Ironic/devstack/upgrade/shutdown.sh + +set -o errexit + +source $GRENADE_DIR/grenaderc +source $GRENADE_DIR/functions + +# We need base DevStack functions for this +source $BASE_DEVSTACK_DIR/functions +source $BASE_DEVSTACK_DIR/stackrc # needed for status directory +source $BASE_DEVSTACK_DIR/lib/tls +source $BASE_DEVSTACK_DIR/lib/apache + +# Inspector relies on a couple of Ironic variables +source $TARGET_RELEASE_DIR/ironic/devstack/lib/ironic + +# Keep track of the DevStack directory +INSPECTOR_DEVSTACK_DIR=$(cd $(dirname "$0")/.. 
&& pwd) +source $INSPECTOR_DEVSTACK_DIR/plugin.sh + + +set -o xtrace + +stop_inspector +if [[ "$IRONIC_INSPECTOR_MANAGE_FIREWALL" == "True" ]]; then + stop_inspector_dhcp +fi diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh new file mode 100755 index 0000000..0387d11 --- /dev/null +++ b/devstack/upgrade/upgrade.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash +## based on Ironic/devstack/upgrade/upgrade.sh + +# ``upgrade-inspector`` + +echo "*********************************************************************" +echo "Begin $0" +echo "*********************************************************************" + +# Clean up any resources that may be in use +cleanup() { + set +o errexit + + echo "*********************************************************************" + echo "ERROR: Abort $0" + echo "*********************************************************************" + + # Kill ourselves to signal any calling process + trap 2; kill -2 $$ +} + +trap cleanup SIGHUP SIGINT SIGTERM + +# Keep track of the grenade directory +RUN_DIR=$(cd $(dirname "$0") && pwd) + +# Source params +source $GRENADE_DIR/grenaderc + +# Import common functions +source $GRENADE_DIR/functions + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Upgrade Inspector +# ================= + +# Duplicate some setup bits from target DevStack +source $TARGET_DEVSTACK_DIR/stackrc +source $TARGET_DEVSTACK_DIR/lib/tls +source $TARGET_DEVSTACK_DIR/lib/nova +source $TARGET_DEVSTACK_DIR/lib/neutron-legacy +source $TARGET_DEVSTACK_DIR/lib/apache +source $TARGET_DEVSTACK_DIR/lib/keystone +source $TARGET_DEVSTACK_DIR/lib/database + +# Inspector relies on couple of Ironic variables +source $TARGET_RELEASE_DIR/ironic/devstack/lib/ironic + +# Keep track of the DevStack directory +INSPECTOR_DEVSTACK_DIR=$(cd $(dirname "$0")/.. 
&& pwd) +INSPECTOR_PLUGIN=$INSPECTOR_DEVSTACK_DIR/plugin.sh +source $INSPECTOR_PLUGIN + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + +initialize_database_backends + +function is_nova_migration { + # Determine whether we're "upgrading" from another compute driver + _ironic_old_driver=$(source $BASE_DEVSTACK_DIR/functions; source $BASE_DEVSTACK_DIR/localrc; echo $VIRT_DRIVER) + [ "$_ironic_old_driver" != "ironic" ] +} + +# Duplicate all required devstack setup that is needed before starting +# Inspector during a sideways upgrade, where we are migrating from an +# devstack environment without Inspector. +function init_inspector { + # We need to source credentials here but doing so in the gate will unset + # HOST_IP. + local tmp_host_ip=$HOST_IP + source $TARGET_DEVSTACK_DIR/openrc admin admin + HOST_IP=$tmp_host_ip + IRONIC_BAREMETAL_BASIC_OPS="True" + $TARGET_DEVSTACK_DIR/tools/install_prereqs.sh + recreate_database ironic_inspector utf8 + $INSPECTOR_PLUGIN stack install + $INSPECTOR_PLUGIN stack post-config + $INSPECTOR_PLUGIN stack extra +} + +function wait_for_keystone { + if ! wait_for_service $SERVICE_TIMEOUT ${KEYSTONE_AUTH_URI}/v$IDENTITY_API_VERSION/; then + die $LINENO "keystone did not start" + fi +} + +# Save current config files for posterity +if [[ -d $IRONIC_INSPECTOR_CONF_DIR ]] && [[ ! -d $SAVE_DIR/etc.inspector ]] ; then + cp -pr $IRONIC_INSPECTOR_CONF_DIR $SAVE_DIR/etc.inspector +fi + +stack_install_service ironic-inspector + +if [[ "$IRONIC_INSPECTOR_MANAGE_FIREWALL" == "True" ]]; then + stack_install_service ironic-inspector-dhcp +fi + + +# FIXME(milan): using Ironic's detection; not sure whether it's needed +# If we are sideways upgrading and migrating from a base deployed with +# VIRT_DRIVER=fake, we need to run Inspector install, config and init +# code from devstack. 
+if is_nova_migration ; then + init_inspector +fi + +sync_inspector_database + +# calls upgrade inspector for specific release +upgrade_project ironic-inspector $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH + + +start_inspector + +if [[ "$IRONIC_INSPECTOR_MANAGE_FIREWALL" == "True" ]]; then + start_inspector_dhcp +fi + +# Don't succeed unless the services come up +ensure_services_started ironic-inspector +ensure_logs_exist ironic-inspector + +if [[ "$IRONIC_INSPECTOR_MANAGE_FIREWALL" == "True" ]]; then + ensure_services_started dnsmasq + ensure_logs_exist ironic-inspector-dhcp +fi + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End $0" +echo "*********************************************************************" From c98d3f479b7ef7f6ad6fce8aa3633f27eed41c2b Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Fri, 1 Jul 2016 13:51:05 +0200 Subject: [PATCH 57/83] Allow customizing ramdisk logs file names and simplify the default The template for ramdisk logs file names can now be changed via the configuration. The default now contains only node UUID and datetime. Also a proper tar.gz extension is appended to avoid confusion. Depends-On: Ie507e2e5c58cffa255bbfb2fa5ffb95cb98ed8c4 Change-Id: I738f9bd35705d0d11c95b0164186ed0b366b5252 --- example.conf | 102 +++++++++--------- ironic_inspector/conf.py | 8 ++ ironic_inspector/process.py | 15 ++- ironic_inspector/test/unit/test_process.py | 20 +++- ...tom-ramdisk-log-name-dac06822c38657e7.yaml | 8 ++ 5 files changed, 96 insertions(+), 57 deletions(-) create mode 100644 releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml diff --git a/example.conf b/example.conf index 64c8037..6132eb4 100644 --- a/example.conf +++ b/example.conf @@ -408,15 +408,15 @@ # From ironic_inspector.common.ironic # -# Authentication URL (unknown value) +# Authentication URL (string value) #auth_url = # Method to use for authentication: noauth or keystone. 
(string value) # Allowed values: keystone, noauth #auth_strategy = keystone -# Authentication type to load (unknown value) -# Deprecated group/name - [DEFAULT]/auth_plugin +# Authentication type to load (string value) +# Deprecated group/name - [ironic]/auth_plugin #auth_type = # PEM encoded Certificate Authority to use when verifying HTTPs @@ -428,18 +428,18 @@ # Optional domain ID to use with v3 and v2 parameters. It will be used # for both the user and project domain in v3 and ignored in v2 -# authentication. (unknown value) +# authentication. (string value) #default_domain_id = # Optional domain name to use with v3 API and v2 parameters. It will # be used for both the user and project domain in v3 and ignored in v2 -# authentication. (unknown value) +# authentication. (string value) #default_domain_name = -# Domain ID to scope to (unknown value) +# Domain ID to scope to (string value) #domain_id = -# Domain name to scope to (unknown value) +# Domain name to scope to (string value) #domain_name = # DEPRECATED: Keystone admin endpoint. DEPRECATED: Use @@ -510,50 +510,50 @@ # Reason: Use options presented by configured keystone auth plugin. #os_username = -# User's password (unknown value) +# User's password (string value) #password = -# Domain ID containing project (unknown value) +# Domain ID containing project (string value) #project_domain_id = -# Domain name containing project (unknown value) +# Domain name containing project (string value) #project_domain_name = -# Project ID to scope to (unknown value) -# Deprecated group/name - [DEFAULT]/tenant-id +# Project ID to scope to (string value) +# Deprecated group/name - [ironic]/tenant-id #project_id = -# Project name to scope to (unknown value) -# Deprecated group/name - [DEFAULT]/tenant-name +# Project name to scope to (string value) +# Deprecated group/name - [ironic]/tenant-name #project_name = # Interval between retries in case of conflict error (HTTP 409). 
# (integer value) #retry_interval = 2 -# Tenant ID (unknown value) +# Tenant ID (string value) #tenant_id = -# Tenant Name (unknown value) +# Tenant Name (string value) #tenant_name = # Timeout value for http requests (integer value) #timeout = -# Trust ID (unknown value) +# Trust ID (string value) #trust_id = -# User's domain id (unknown value) +# User's domain id (string value) #user_domain_id = -# User's domain name (unknown value) +# User's domain name (string value) #user_domain_name = -# User id (unknown value) +# User id (string value) #user_id = -# Username (unknown value) -# Deprecated group/name - [DEFAULT]/user-name +# Username (string value) +# Deprecated group/name - [ironic]/user-name #username = @@ -609,7 +609,7 @@ # Optionally specify a list of memcached server(s) to use for caching. # If left undefined, tokens will instead be cached in-process. (list # value) -# Deprecated group/name - [DEFAULT]/memcache_servers +# Deprecated group/name - [keystone_authtoken]/memcache_servers #memcached_servers = # In order to prevent excessive effort spent validating tokens, the @@ -693,11 +693,11 @@ # value) #hash_algorithms = md5 -# Authentication type to load (unknown value) -# Deprecated group/name - [DEFAULT]/auth_plugin +# Authentication type to load (string value) +# Deprecated group/name - [keystone_authtoken]/auth_plugin #auth_type = -# Config Section from which to load plugin specific options (unknown +# Config Section from which to load plugin specific options (string # value) #auth_section = @@ -785,6 +785,12 @@ # processing. (boolean value) #log_bmc_address = true +# File name template for storing ramdisk logs. The following +# replacements can be used: {uuid} - node UUID or "unknown", {bmc} - +# node BMC address or "unknown", {dt} - current UTC date and time, +# {mac} - PXE booting MAC or "unknown". 
(string value) +#ramdisk_logs_filename_format = {uuid}_bmc_{bmc}-{dt:%Y.%m.%d_%H.%M.%S_%f}.tar.gz + [swift] @@ -792,11 +798,11 @@ # From ironic_inspector.common.swift # -# Authentication URL (unknown value) +# Authentication URL (string value) #auth_url = -# Authentication type to load (unknown value) -# Deprecated group/name - [DEFAULT]/auth_plugin +# Authentication type to load (string value) +# Deprecated group/name - [swift]/auth_plugin #auth_type = # PEM encoded Certificate Authority to use when verifying HTTPs @@ -811,22 +817,22 @@ # Optional domain ID to use with v3 and v2 parameters. It will be used # for both the user and project domain in v3 and ignored in v2 -# authentication. (unknown value) +# authentication. (string value) #default_domain_id = # Optional domain name to use with v3 API and v2 parameters. It will # be used for both the user and project domain in v3 and ignored in v2 -# authentication. (unknown value) +# authentication. (string value) #default_domain_name = # Number of seconds that the Swift object will last before being # deleted. (set to 0 to never delete the object). (integer value) #delete_after = 0 -# Domain ID to scope to (unknown value) +# Domain ID to scope to (string value) #domain_id = -# Domain name to scope to (unknown value) +# Domain name to scope to (string value) #domain_name = # Verify HTTPS connections. (boolean value) @@ -860,44 +866,44 @@ # Swift service type. 
(string value) #os_service_type = object-store -# User's password (unknown value) +# User's password (string value) #password = -# Domain ID containing project (unknown value) +# Domain ID containing project (string value) #project_domain_id = -# Domain name containing project (unknown value) +# Domain name containing project (string value) #project_domain_name = -# Project ID to scope to (unknown value) -# Deprecated group/name - [DEFAULT]/tenant-id +# Project ID to scope to (string value) +# Deprecated group/name - [swift]/tenant-id #project_id = -# Project name to scope to (unknown value) -# Deprecated group/name - [DEFAULT]/tenant-name +# Project name to scope to (string value) +# Deprecated group/name - [swift]/tenant-name #project_name = -# Tenant ID (unknown value) +# Tenant ID (string value) #tenant_id = -# Tenant Name (unknown value) +# Tenant Name (string value) #tenant_name = # Timeout value for http requests (integer value) #timeout = -# Trust ID (unknown value) +# Trust ID (string value) #trust_id = -# User's domain id (unknown value) +# User's domain id (string value) #user_domain_id = -# User's domain name (unknown value) +# User's domain name (string value) #user_domain_name = -# User id (unknown value) +# User id (string value) #user_id = -# Username (unknown value) -# Deprecated group/name - [DEFAULT]/user-name +# Username (string value) +# Deprecated group/name - [swift]/user-name #username = diff --git a/ironic_inspector/conf.py b/ironic_inspector/conf.py index 8f8748a..ecb74af 100644 --- a/ironic_inspector/conf.py +++ b/ironic_inspector/conf.py @@ -129,6 +129,14 @@ PROCESSING_OPTS = [ default=True, help='Whether to log node BMC address with every message ' 'during processing.'), + cfg.StrOpt('ramdisk_logs_filename_format', + default='{uuid}_{dt:%Y%m%d-%H%M%S.%f}.tar.gz', + help='File name template for storing ramdisk logs. 
The ' + 'following replacements can be used: ' + '{uuid} - node UUID or "unknown", ' + '{bmc} - node BMC address or "unknown", ' + '{dt} - current UTC date and time, ' + '{mac} - PXE booting MAC or "unknown".'), ] diff --git a/ironic_inspector/process.py b/ironic_inspector/process.py index 83b7ddc..674a055 100644 --- a/ironic_inspector/process.py +++ b/ironic_inspector/process.py @@ -41,7 +41,6 @@ _CREDENTIALS_WAIT_RETRIES = 10 _CREDENTIALS_WAIT_PERIOD = 3 _STORAGE_EXCLUDED_KEYS = {'logs'} _UNPROCESSED_DATA_STORE_SUFFIX = 'UNPROCESSED' -_DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f' def _store_logs(introspection_data, node_info): @@ -58,10 +57,16 @@ def _store_logs(introspection_data, node_info): data=introspection_data, node_info=node_info) return - time_fmt = datetime.datetime.utcnow().strftime(_DATETIME_FORMAT) - bmc_address = (utils.get_ipmi_address_from_data(introspection_data) - or 'unknown') - file_name = 'bmc_%s_%s' % (bmc_address, time_fmt) + fmt_args = { + 'uuid': node_info.uuid if node_info is not None else 'unknown', + 'mac': (utils.get_pxe_mac(introspection_data) or + 'unknown').replace(':', ''), + 'dt': datetime.datetime.utcnow(), + 'bmc': (utils.get_ipmi_address_from_data(introspection_data) or + 'unknown') + } + + file_name = CONF.processing.ramdisk_logs_filename_format.format(**fmt_args) try: if not os.path.exists(CONF.processing.ramdisk_logs_dir): diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py index befc4ed..3a81631 100644 --- a/ironic_inspector/test/unit/test_process.py +++ b/ironic_inspector/test/unit/test_process.py @@ -258,13 +258,15 @@ class TestStoreLogs(BaseProcessTest): self.logs = b'test logs' self.data['logs'] = base64.b64encode(self.logs) - def _check_contents(self): + def _check_contents(self, name=None): files = os.listdir(self.tempdir) self.assertEqual(1, len(files)) filename = files[0] - self.assertTrue(filename.startswith('bmc_%s_' % self.bmc_address), - '%s does not start with 
bmc_%s' - % (filename, self.bmc_address)) + if name is None: + self.assertTrue(filename.startswith(self.uuid), + '%s does not start with uuid' % filename) + else: + self.assertEqual(name, filename) with open(os.path.join(self.tempdir, filename), 'rb') as fp: self.assertEqual(self.logs, fp.read()) @@ -323,6 +325,16 @@ class TestStoreLogs(BaseProcessTest): self.assertRaises(utils.Error, process.process, self.data) self._check_contents() + def test_store_custom_name(self, hook_mock): + CONF.set_override('ramdisk_logs_filename_format', + '{uuid}-{bmc}-{mac}', + 'processing') + self.process_mock.side_effect = utils.Error('boom') + self.assertRaises(utils.Error, process.process, self.data) + self._check_contents(name='%s-%s-%s' % (self.uuid, + self.bmc_address, + self.pxe_mac.replace(':', ''))) + class TestProcessNode(BaseTest): def setUp(self): diff --git a/releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml b/releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml new file mode 100644 index 0000000..ff15bad --- /dev/null +++ b/releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml @@ -0,0 +1,8 @@ +--- +features: + - File name for stored ramdisk logs can now be customized via + "ramdisk_logs_filename_format" option. +upgrade: + - The default file name for stored ramdisk logs was change to contain only + node UUID (if known) and the current date time. A proper ".tar.gz" + extension is now appended. 
From ba0075e0eaaa83cd8b96b52ab57ebda4825f67cf Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Thu, 7 Jul 2016 12:56:51 +0200 Subject: [PATCH 58/83] [devstack] Do not hardcode coreos ramdisk when building from source Change-Id: Ic0b9abd58a21ff60f486da2c4d1194406ad47813 --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 80deec9..e911c90 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -102,7 +102,7 @@ function prepare_tftp { IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE ipa-debug=1" if [[ "$IRONIC_INSPECTOR_BUILD_RAMDISK" == "True" ]]; then if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! -e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then - build_ipa_coreos_ramdisk "$IRONIC_INSPECTOR_KERNEL_PATH" "$IRONIC_INSPECTOR_INITRAMFS_PATH" + build_ipa_ramdisk "$IRONIC_INSPECTOR_KERNEL_PATH" "$IRONIC_INSPECTOR_INITRAMFS_PATH" fi else # download the agent image tarball From 39644db177a64047397caf524a35032db3f2687e Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Thu, 7 Jul 2016 10:09:05 +0300 Subject: [PATCH 59/83] Make Ironic variables visible inside exercise.sh This patch make Ironic environment variables like PRIVATE_NETWORK_NAME visible during launching tests. Change-Id: If936efe079fd3b88eaa56bc0d96b7ef6854d3e48 --- devstack/exercise.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/devstack/exercise.sh b/devstack/exercise.sh index f11fa83..893aaac 100755 --- a/devstack/exercise.sh +++ b/devstack/exercise.sh @@ -1,6 +1,15 @@ #!/bin/bash -set -eux +set -ex + +# NOTE(vsaienko) this script is launched with sudo. +# Only exported variables are passed here. +# Source to make sure all vars are available. 
+STACK_ROOT="$(dirname "$0")/../../" +source "$STACK_ROOT/devstack/stackrc" +source "$STACK_ROOT/ironic/devstack/lib/ironic" + +set -u INTROSPECTION_SLEEP=${INTROSPECTION_SLEEP:-30} export IRONIC_API_VERSION=${IRONIC_API_VERSION:-latest} From 54cfca7a6b9e3c5df1eb6a1e923bbfe626d1da32 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Fri, 8 Jul 2016 13:58:58 +0200 Subject: [PATCH 60/83] Skip test_init_failed_processing_hook test https://review.openstack.org/#/c/337043/ makes this test to fail, but proper fix in https://review.openstack.org/339457 requires release of stevedore > 1.15.0 not released yet. This patch skips the test temporarily until new release is added in global requirements. Change-Id: Id23efad9c392fc70470d996e37d378efeaf65491 Partial-Bug: #1600141 --- ironic_inspector/test/unit/test_main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ironic_inspector/test/unit/test_main.py b/ironic_inspector/test/unit/test_main.py index 0906e03..cb51afc 100644 --- a/ironic_inspector/test/unit/test_main.py +++ b/ironic_inspector/test/unit/test_main.py @@ -626,6 +626,7 @@ class TestInit(test_base.BaseTest): self.service.init() self.assertFalse(mock_firewall.called) + @unittest.skip('skipped until stevedore > 1.15.0 is released') @mock.patch.object(main.LOG, 'critical') def test_init_failed_processing_hook(self, mock_log, mock_node_cache, mock_get_client, mock_auth, From 604cdd3a221b3cb913869affa19a0ce56c4b6b22 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 9 Jul 2016 19:20:55 +0000 Subject: [PATCH 61/83] Updated from global requirements Change-Id: I2d2c0cb4a04fdc5a4cbf776365c25921103fa49c --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 5646a4b..3101760 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,13 +15,13 @@ pbr>=1.6 # Apache-2.0 python-ironicclient>=1.1.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 oslo.concurrency>=3.8.0 # Apache-2.0 
-oslo.config>=3.10.0 # Apache-2.0 +oslo.config>=3.12.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 -oslo.utils>=3.14.0 # Apache-2.0 +oslo.utils>=3.15.0 # Apache-2.0 six>=1.9.0 # MIT stevedore>=1.10.0 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT From cd08d79e01173b2f9e5a60eef6ef245d3f11454f Mon Sep 17 00:00:00 2001 From: ji-xuepeng Date: Sun, 10 Jul 2016 13:43:36 +0800 Subject: [PATCH 62/83] remove unused LOG This is to remove unused LOG to keep code clean. Change-Id: If329acd9b7d1b9c0f1b7abc282f96d1543324003 --- ironic_inspector/common/swift.py | 3 --- ironic_inspector/plugins/rules.py | 3 --- 2 files changed, 6 deletions(-) diff --git a/ironic_inspector/common/swift.py b/ironic_inspector/common/swift.py index 12ba20a..2b3bd6b 100644 --- a/ironic_inspector/common/swift.py +++ b/ironic_inspector/common/swift.py @@ -16,7 +16,6 @@ import json from oslo_config import cfg -from oslo_log import log import six from swiftclient import client as swift_client from swiftclient import exceptions as swift_exceptions @@ -28,8 +27,6 @@ from ironic_inspector import utils CONF = cfg.CONF -LOG = log.getLogger('ironic_inspector.common.swift') - SWIFT_GROUP = 'swift' SWIFT_OPTS = [ cfg.IntOpt('max_retries', diff --git a/ironic_inspector/plugins/rules.py b/ironic_inspector/plugins/rules.py index 436965f..adc1942 100644 --- a/ironic_inspector/plugins/rules.py +++ b/ironic_inspector/plugins/rules.py @@ -23,9 +23,6 @@ from ironic_inspector.plugins import base from ironic_inspector import utils -LOG = utils.getProcessingLogger(__name__) - - def coerce(value, expected): if isinstance(expected, float): return float(value) From 53f9534ece265de0705b5e051b4f82dc46595e2d Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Fri, 15 Jul 2016 11:32:42 +0800 Subject: [PATCH 63/83] [devstack]Only cleanup tftp directory if ipxe disabled There's no need to cleanup 
ironic-inspector.* if ipxe enabled Change-Id: I07f981773441914105e006ef2133ae4cc05b1df9 --- devstack/plugin.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index e911c90..9a4f8c2 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -274,10 +274,11 @@ function create_ironic_inspector_cache_dir { } function cleanup_inspector { - rm -f $IRONIC_TFTPBOOT_DIR/pxelinux.cfg/default - rm -f $IRONIC_TFTPBOOT_DIR/ironic-inspector.* if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then rm -f $IRONIC_HTTP_DIR/ironic-inspector.* + else + rm -f $IRONIC_TFTPBOOT_DIR/pxelinux.cfg/default + rm -f $IRONIC_TFTPBOOT_DIR/ironic-inspector.* fi sudo rm -f /etc/sudoers.d/ironic-inspector-rootwrap sudo rm -rf $IRONIC_INSPECTOR_AUTH_CACHE_DIR From 8f4157c59cdbfa07284947451af815e5b9c92fa6 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 15 Jul 2016 03:41:53 +0000 Subject: [PATCH 64/83] Updated from global requirements Change-Id: I177abd230c0010ff6d51f612b47f59370f0c1cd4 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3101760..bf10dd6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ alembic>=0.8.4 # MIT Babel>=2.3.4 # BSD eventlet!=0.18.3,>=0.18.2 # MIT Flask!=0.11,<1.0,>=0.10 # BSD -futurist>=0.11.0 # Apache-2.0 +futurist!=0.15.0,>=0.11.0 # Apache-2.0 jsonpath-rw<2.0,>=1.2.0 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT keystoneauth1>=2.7.0 # Apache-2.0 From 5fcb4da558297ad62c28b2519268c46f1dd91221 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Fri, 15 Jul 2016 15:15:22 +0800 Subject: [PATCH 65/83] [devstack]Switch to pip_install_gr for inspector client Currently python-ironic-inspector-client is already in global-requirements Change-Id: I651ccbd22f57f0229f4137607ebf6b93fb96fdad --- devstack/plugin.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/devstack/plugin.sh 
b/devstack/plugin.sh index e911c90..9353831 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -66,8 +66,7 @@ function install_inspector_client { git_clone_by_name python-ironic-inspector-client setup_dev_lib python-ironic-inspector-client else - # TODO(dtantsur): switch to pip_install_gr - pip_install python-ironic-inspector-client + pip_install_gr python-ironic-inspector-client fi } From dc3e7f0966b8f302bc045d2d5c9ddd3cf3f83f85 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Fri, 15 Jul 2016 16:48:11 +0800 Subject: [PATCH 66/83] [devstack]Remove unneeded enable_service in example.local.conf As we use enable_plugin for ironic, there's no need to enable ironic related services again. Change-Id: Iaf7e37b0a3c1d2c7883dddb44c31ee40f07e5b03 --- devstack/example.local.conf | 1 - 1 file changed, 1 deletion(-) diff --git a/devstack/example.local.conf b/devstack/example.local.conf index 7ee7f87..05765b6 100644 --- a/devstack/example.local.conf +++ b/devstack/example.local.conf @@ -1,5 +1,4 @@ [[local|localrc]] -enable_service ironic ir-api ir-cond disable_service n-net n-novnc enable_service neutron q-svc q-agt q-dhcp q-l3 q-meta enable_service s-proxy s-object s-container s-account From fa71b4ec5ccfa5904879f0dd243e3cf7c841f2f7 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sun, 17 Jul 2016 23:53:30 +0000 Subject: [PATCH 67/83] Updated from global requirements Change-Id: I161da80586bb61a8a7e23d4dd8a28350e39d0afd --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index bf10dd6..ad05aa8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,5 +23,5 @@ oslo.middleware>=3.0.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.utils>=3.15.0 # Apache-2.0 six>=1.9.0 # MIT -stevedore>=1.10.0 # Apache-2.0 +stevedore>=1.16.0 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT From 8b83d8b960aef1a2c7f606442ba5fe6d96d089d1 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Mon, 18 Jul 2016 
13:08:15 +0800 Subject: [PATCH 68/83] [doc]Add 'ipa-debug=1' to installation document Change-Id: I76b5f744153ae43c5bc7de5254c302aefb59c737 --- doc/source/install.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 1284802..2cf248e 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -215,7 +215,8 @@ As for PXE boot environment, you'll need: .. note:: While ``systemd.journald.forward_to_console=yes`` is not actually required, it will substantially simplify debugging if something - goes wrong. + goes wrong. You can also enable IPA debug logging by appending + ``ipa-debug=1``. IPA is pluggable: you can insert introspection plugins called *collectors* into it. For example, to enable a very handy ``logs`` collector From 4735ab87f3d15002ead82f6b1a4514f8cc1abe02 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Tue, 19 Jul 2016 19:39:15 +0800 Subject: [PATCH 69/83] Update example.conf Change-Id: I9ca9bc0cb82e388987342a3a597c15524bab8808 --- example.conf | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/example.conf b/example.conf index 6132eb4..eb363d3 100644 --- a/example.conf +++ b/example.conf @@ -92,6 +92,7 @@ # configuration is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) +# Note: This option can be changed without restarting. # Deprecated group/name - [DEFAULT]/log_config #log_config_append = @@ -253,8 +254,12 @@ # From oslo.db # -# The file name to use with SQLite. (string value) +# DEPRECATED: The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Should use config option connection or slave_connection to +# connect the database. #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. 
(boolean value) @@ -563,7 +568,14 @@ # From keystonemiddleware.auth_token # -# Complete public Identity API endpoint. (string value) +# Complete "public" Identity API endpoint. This endpoint should not be +# an "admin" endpoint, as it should be accessible by all end users. +# Unauthenticated clients are redirected to this endpoint to +# authenticate. Although this endpoint should ideally be unversioned, +# client support in the wild varies. If you're using a versioned v2 +# endpoint here, then this should *not* be the same endpoint the +# service user utilizes for validating tokens, because normal end +# users may not be able to reach that endpoint. (string value) #auth_uri = # API version of the admin Identity API endpoint. (string value) @@ -789,7 +801,7 @@ # replacements can be used: {uuid} - node UUID or "unknown", {bmc} - # node BMC address or "unknown", {dt} - current UTC date and time, # {mac} - PXE booting MAC or "unknown". (string value) -#ramdisk_logs_filename_format = {uuid}_bmc_{bmc}-{dt:%Y.%m.%d_%H.%M.%S_%f}.tar.gz +#ramdisk_logs_filename_format = {uuid}_{dt:%Y%m%d-%H%M%S.%f}.tar.gz [swift] From a97c8cb6a35e84f470c95d93a241842c36cb8644 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Tue, 19 Jul 2016 19:42:50 +0800 Subject: [PATCH 70/83] Add config to skip power off after introspection This adds configuration option 'processing.power_off' defaulting to True, which will prevent powering off the node after introspection Change-Id: I16eb6b73fd57e84175bbce81c79e432ed8d1d3fa Closes-Bug: #1488534 --- example.conf | 3 +++ ironic_inspector/conf.py | 3 +++ ironic_inspector/process.py | 3 ++- ironic_inspector/test/unit/test_process.py | 8 ++++++++ ...support-for-long-running-ramdisk-ffee3c177c56cebb.yaml | 4 ++++ 5 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml diff --git a/example.conf b/example.conf index eb363d3..0286c2b 100644 --- a/example.conf +++ 
b/example.conf @@ -803,6 +803,9 @@ # {mac} - PXE booting MAC or "unknown". (string value) #ramdisk_logs_filename_format = {uuid}_{dt:%Y%m%d-%H%M%S.%f}.tar.gz +# Whether to power off a node after introspection. (boolean value) +#power_off = true + [swift] diff --git a/ironic_inspector/conf.py b/ironic_inspector/conf.py index ecb74af..07cea1f 100644 --- a/ironic_inspector/conf.py +++ b/ironic_inspector/conf.py @@ -137,6 +137,9 @@ PROCESSING_OPTS = [ '{bmc} - node BMC address or "unknown", ' '{dt} - current UTC date and time, ' '{mac} - PXE booting MAC or "unknown".'), + cfg.BoolOpt('power_off', + default=True, + help='Whether to power off a node after introspection.'), ] diff --git a/ironic_inspector/process.py b/ironic_inspector/process.py index 674a055..083fee4 100644 --- a/ironic_inspector/process.py +++ b/ironic_inspector/process.py @@ -281,7 +281,8 @@ def _process_node(node, introspection_data, node_info): resp['ipmi_username'] = new_username resp['ipmi_password'] = new_password else: - utils.executor().submit(_finish, ironic, node_info, introspection_data) + utils.executor().submit(_finish, ironic, node_info, introspection_data, + power_off=CONF.processing.power_off) return resp diff --git a/ironic_inspector/test/unit/test_process.py b/ironic_inspector/test/unit/test_process.py index 3a81631..ee726e9 100644 --- a/ironic_inspector/test/unit/test_process.py +++ b/ironic_inspector/test/unit/test_process.py @@ -478,6 +478,14 @@ class TestProcessNode(BaseTest): self.assertTrue(self.cli.node.set_power_state.called) finished_mock.assert_called_once_with(self.node_info) + @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) + def test_no_power_off(self, finished_mock): + CONF.set_override('power_off', False, 'processing') + process._process_node(self.node, self.data, self.node_info) + + self.assertFalse(self.cli.node.set_power_state.called) + finished_mock.assert_called_once_with(self.node_info) + @mock.patch.object(process.swift, 'SwiftAPI', 
autospec=True) def test_store_data(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') diff --git a/releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml b/releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml new file mode 100644 index 0000000..aad718e --- /dev/null +++ b/releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add configuration option `processing.power_off` defaulting to True, + which allows to leave nodes powered on after introspection. From 7b29eaf4a2ed9e53686afc7b6c47ead12e4988f3 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Wed, 20 Jul 2016 10:56:33 +0800 Subject: [PATCH 71/83] Fix improperly placed firewall.update_filters when aborting Currently, firewall.update_filters() is called before introspection finished, which has no effect as it white-lists MAC addresses that are under introspection. Change-Id: I789e39a86dc72470b80167e53f1755b506ca6f44 Closes-Bug: #1548806 --- ironic_inspector/introspect.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ironic_inspector/introspect.py b/ironic_inspector/introspect.py index f819c8f..e698298 100644 --- a/ironic_inspector/introspect.py +++ b/ironic_inspector/introspect.py @@ -201,15 +201,6 @@ def _abort(node_info, ironic): node_info.release_lock() return - # block this node from PXE Booting the introspection image - try: - firewall.update_filters(ironic) - except Exception as exc: - # Note(mkovacik): this will be retried in firewall update - # periodic task; we continue aborting - LOG.warning(_LW('Failed to update firewall filters: %s'), exc, - node_info=node_info) - # finish the introspection LOG.debug('Forcing power-off', node_info=node_info) try: @@ -219,4 +210,13 @@ def _abort(node_info, ironic): node_info=node_info) node_info.finished(error=_('Canceled by operator')) + + # block this node from PXE Booting the introspection image + 
try: + firewall.update_filters(ironic) + except Exception as exc: + # Note(mkovacik): this will be retried in firewall update + # periodic task; we continue aborting + LOG.warning(_LW('Failed to update firewall filters: %s'), exc, + node_info=node_info) LOG.info(_LI('Introspection aborted'), node_info=node_info) From 316e5d86e4823ec7ca2bbb44fd025b6c43382259 Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Tue, 19 Jul 2016 19:35:18 +0300 Subject: [PATCH 72/83] Tempest: wrap instance actions into inspector methods Commit I4fe31ecae3393abc2779a5e80e348899f9113f1b broke the inspector tempest tests: it changed the boot_instance and terminate_instance signatures. This change redefines the action methods. Change-Id: If6a9b300bd22e7b62b7e53763cb0328ad30f11c7 --- .../test/inspector_tempest_plugin/tests/manager.py | 6 ++++++ .../test/inspector_tempest_plugin/tests/test_basic.py | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py b/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py index dd2a18e..b62473c 100644 --- a/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py +++ b/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py @@ -94,6 +94,12 @@ class InspectorScenarioTest(BaremetalScenarioTest): return os.path.join(base_path, "inspector_tempest_plugin", "rules", rule_file) + def boot_instance(self): + return super(InspectorScenarioTest, self).boot_instance() + + def terminate_instance(self, instance): + return super(InspectorScenarioTest, self).terminate_instance(instance) + # TODO(aarefiev): switch to call_until_true def wait_for_introspection_finished(self, node_ids): """Waits for introspection of baremetal nodes to finish.
diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py b/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py index 6830b78..0e11cf5 100644 --- a/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py +++ b/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py @@ -145,5 +145,5 @@ class InspectorBasicTest(manager.InspectorScenarioTest): self.wait_for_nova_aware_of_bvms() self.add_keypair() - self.boot_instance() - self.terminate_instance() + ins, _node = self.boot_instance() + self.terminate_instance(ins) From 17e01aab465565f64ba42b22f415c5e688c6152d Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Wed, 20 Jul 2016 23:50:03 +0800 Subject: [PATCH 73/83] Remove redundant white space Change-Id: Ifdad27359118d86362708e0e47e69ae5920f0993 --- ironic_inspector/node_cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ironic_inspector/node_cache.py b/ironic_inspector/node_cache.py index e06a084..2b17375 100644 --- a/ironic_inspector/node_cache.py +++ b/ironic_inspector/node_cache.py @@ -586,7 +586,7 @@ def clean_up(): return uuids -def create_node(driver, ironic=None, **attributes): +def create_node(driver, ironic=None, **attributes): """Create ironic node and cache it. * Create new node in ironic. From 823f6d26a20a1d00b8b0ca788a2d45279189ab59 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Fri, 1 Jul 2016 14:22:51 +0200 Subject: [PATCH 74/83] Add a simple smoke test to be run in the grenade gate This test only runs introspection on one node, nothing else. Also make sure tempest gets our and ironic plugin. 
Depends-On: Ia2a5b9cc535c7c46728eee6284a36340745e9043 Change-Id: Id12b6cc75977c32f0a9e1ada8ff954b8f4bc2e41 --- devstack/upgrade/settings | 3 + .../inspector_tempest_plugin/tests/manager.py | 46 +++++++++++ .../tests/test_basic.py | 81 ++++++++----------- 3 files changed, 81 insertions(+), 49 deletions(-) diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings index d8bb6f6..3efcc8c 100644 --- a/devstack/upgrade/settings +++ b/devstack/upgrade/settings @@ -1,3 +1,6 @@ +# Enable our tests; also enable ironic tempest plugin as we depend on it. +export TEMPEST_PLUGINS="/opt/stack/new/ironic /opt/stack/new/ironic-inspector" + # Enabling Inspector grenade plug-in # Based on Ironic/devstack/grenade/settings register_project_for_upgrade ironic-inspector diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py b/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py index b62473c..6d0f7d2 100644 --- a/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py +++ b/ironic_inspector/test/inspector_tempest_plugin/tests/manager.py @@ -14,11 +14,17 @@ import os import time +import tempest from tempest import config +from tempest.lib.common.api_version_utils import LATEST_MICROVERSION from ironic_inspector.test.inspector_tempest_plugin import exceptions from ironic_inspector.test.inspector_tempest_plugin.services import \ introspection_client +from ironic_tempest_plugin.tests.api.admin.api_microversion_fixture import \ + APIMicroversionFixture as IronicMicroversionFixture +from ironic_tempest_plugin.tests.scenario.baremetal_manager import \ + BaremetalProvisionStates from ironic_tempest_plugin.tests.scenario.baremetal_manager import \ BaremetalScenarioTest @@ -29,8 +35,12 @@ CONF = config.CONF class InspectorScenarioTest(BaremetalScenarioTest): """Provide harness to do Inspector scenario tests.""" + wait_provisioning_state_interval = 15 + credentials = ['primary', 'admin'] + ironic_api_version = LATEST_MICROVERSION + @classmethod def 
setup_clients(cls): super(InspectorScenarioTest, cls).setup_clients() @@ -39,7 +49,15 @@ class InspectorScenarioTest(BaremetalScenarioTest): def setUp(self): super(InspectorScenarioTest, self).setUp() + # we rely on the 'available' provision_state; using latest + # microversion + self.useFixture(IronicMicroversionFixture(self.ironic_api_version)) self.flavor = self.baremetal_flavor() + self.node_ids = {node['uuid'] for node in + self.node_filter(filter=lambda node: + node['provision_state'] == + BaremetalProvisionStates.AVAILABLE)} + self.rule_purge() def item_filter(self, list_method, show_method, filter=lambda item: True, items=None): @@ -144,3 +162,31 @@ class InspectorScenarioTest(BaremetalScenarioTest): {'stats': stats, 'timeout': timeout}) raise exceptions.HypervisorUpdateTimeout(message) + + def node_cleanup(self, node_id): + if (self.node_show(node_id)['provision_state'] == + BaremetalProvisionStates.AVAILABLE): + return + try: + self.baremetal_client.set_node_provision_state(node_id, 'provide') + except tempest.lib.exceptions.RestClientException: + # maybe node already cleaning or available + pass + + self.wait_provisioning_state( + node_id, [BaremetalProvisionStates.AVAILABLE, + BaremetalProvisionStates.NOSTATE], + timeout=CONF.baremetal.unprovision_timeout, + interval=self.wait_provisioning_state_interval) + + def introspect_node(self, node_id): + # in case there are properties remove those + patch = {('properties/%s' % key): None for key in + self.node_show(node_id)['properties']} + # reset any previous rule result + patch['extra/rule_success'] = None + self.node_update(node_id, patch) + + self.baremetal_client.set_node_provision_state(node_id, 'manage') + self.baremetal_client.set_node_provision_state(node_id, 'inspect') + self.addCleanup(self.node_cleanup, node_id) diff --git a/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py b/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py index 0e11cf5..053bc9f 100644 --- 
a/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py +++ b/ironic_inspector/test/inspector_tempest_plugin/tests/test_basic.py @@ -10,63 +10,15 @@ # License for the specific language governing permissions and limitations # under the License. -import tempest - from tempest.config import CONF from tempest import test # noqa from ironic_inspector.test.inspector_tempest_plugin.tests import manager -from ironic_tempest_plugin.tests.api.admin.api_microversion_fixture import \ - APIMicroversionFixture as IronicMicroversionFixture from ironic_tempest_plugin.tests.scenario.baremetal_manager import \ BaremetalProvisionStates -from tempest.lib.common.api_version_utils import LATEST_MICROVERSION class InspectorBasicTest(manager.InspectorScenarioTest): - wait_provisioning_state_interval = 15 - - def node_cleanup(self, node_id): - if (self.node_show(node_id)['provision_state'] == - BaremetalProvisionStates.AVAILABLE): - return - try: - self.baremetal_client.set_node_provision_state(node_id, 'provide') - except tempest.lib.exceptions.RestClientException: - # maybe node already cleaning or available - pass - - self.wait_provisioning_state( - node_id, [BaremetalProvisionStates.AVAILABLE, - BaremetalProvisionStates.NOSTATE], - timeout=CONF.baremetal.unprovision_timeout, - interval=self.wait_provisioning_state_interval) - - def introspect_node(self, node_id): - # in case there are properties remove those - patch = {('properties/%s' % key): None for key in - self.node_show(node_id)['properties']} - # reset any previous rule result - patch['extra/rule_success'] = None - self.node_update(node_id, patch) - - self.baremetal_client.set_node_provision_state(node_id, 'manage') - self.baremetal_client.set_node_provision_state(node_id, 'inspect') - self.addCleanup(self.node_cleanup, node_id) - - def setUp(self): - super(InspectorBasicTest, self).setUp() - # we rely on the 'available' provision_state; using latest - # microversion - 
self.useFixture(IronicMicroversionFixture(LATEST_MICROVERSION)) - # avoid testing nodes that aren't available - self.node_ids = {node['uuid'] for node in - self.node_filter(filter=lambda node: - node['provision_state'] == - BaremetalProvisionStates.AVAILABLE)} - if not self.node_ids: - self.skipTest('no available nodes detected') - self.rule_purge() def verify_node_introspection_data(self, node): self.assertEqual('yes', node['extra']['rule_success']) @@ -98,7 +50,7 @@ class InspectorBasicTest(manager.InspectorScenarioTest): @test.services('baremetal', 'compute', 'image', 'network', 'object_storage') def test_baremetal_introspection(self): - """This smoke test case follows this basic set of operations: + """This smoke test case follows this set of operations: * Fetches expected properties from baremetal flavor * Removes all properties from nodes @@ -147,3 +99,34 @@ class InspectorBasicTest(manager.InspectorScenarioTest): self.add_keypair() ins, _node = self.boot_instance() self.terminate_instance(ins) + + +class InspectorSmokeTest(manager.InspectorScenarioTest): + + @test.idempotent_id('a702d1f1-88e4-42ce-88ef-cba2d9e3312e') + @test.attr(type='smoke') + @test.services('baremetal', 'compute', 'image', + 'network', 'object_storage') + def test_baremetal_introspection(self): + """This smoke test case follows this very basic set of operations: + + * Fetches expected properties from baremetal flavor + * Removes all properties from one node + * Sets the node to manageable state + * Inspects the node + * Sets the node to available state + + """ + # NOTE(dtantsur): we can't silently skip this test because it runs in + # grenade with several other tests, and we won't have any indication + # that it was not run. 
+ assert self.node_ids, "No available nodes" + node_id = next(iter(self.node_ids)) + self.introspect_node(node_id) + + # settle down introspection + self.wait_for_introspection_finished([node_id]) + self.wait_provisioning_state( + node_id, 'manageable', + timeout=CONF.baremetal_introspection.ironic_sync_timeout, + interval=self.wait_provisioning_state_interval) From 7a2e1a248aebcfa7293a0ba2272debf879483371 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Mon, 25 Jul 2016 11:32:05 +0800 Subject: [PATCH 75/83] Combine multiple warning logs into one in create_ports Change-Id: Icb2b000ce3f0f5db89e90c05cf1bc42e511fd36d --- ironic_inspector/node_cache.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ironic_inspector/node_cache.py b/ironic_inspector/node_cache.py index 2b17375..2be0709 100644 --- a/ironic_inspector/node_cache.py +++ b/ironic_inspector/node_cache.py @@ -227,12 +227,15 @@ class NodeInfo(object): A warning is issued if port already exists on a node. """ + existing_macs = [] for mac in macs: if mac not in self.ports(): self._create_port(mac) else: - LOG.warning(_LW('Port %s already exists, skipping'), - mac, node_info=self) + existing_macs.append(mac) + if existing_macs: + LOG.warning(_LW('Did not create ports %s as they already exist'), + existing_macs, node_info=self) def ports(self): """Get Ironic port objects associated with the cached node record. From 4af672b8493a3580b4ff94d0416da7f839036231 Mon Sep 17 00:00:00 2001 From: Sam Betts Date: Wed, 25 May 2016 15:19:56 +0100 Subject: [PATCH 76/83] Add GenericLocalLinkConnectionHook processing hook For Ironic multi-tenant networking support, we need to be able to discover the node's local connectivity. To do this IPA can try to pull LLDP packets for every NIC. This patch adds a processing hook to handle the data from these packets in ironic-inspector so that we can populate the correct fields in Ironic.
The generic lldp hook only handles the mandatory fields port id and chassis id, set on port_id and switch_id in local_link_connection. Further LLDP fields should be handled by additional vendor specific LLDP processing hooks, that populate the switch_info field in a non-generic way. Change-Id: I884eaaa9cc54cd08c21147da438b1dabc10d3a40 Related-Bug: #1526403 Depends-On: Ie655fd59b06de7b84fba3b438d5e4c2ecd8075c3 Depends-On: Idae9b1ede1797029da1bd521501b121957ca1f1a --- doc/source/usage.rst | 6 + ironic_inspector/node_cache.py | 52 ++++--- .../plugins/local_link_connection.py | 122 ++++++++++++++++ ironic_inspector/test/base.py | 6 +- .../test_plugins_local_link_connection.py | 138 ++++++++++++++++++ .../test/unit/test_plugins_rules.py | 7 +- .../add-lldp-plugin-4645596cb8b39fd3.yaml | 5 + setup.cfg | 1 + 8 files changed, 313 insertions(+), 24 deletions(-) create mode 100644 ironic_inspector/plugins/local_link_connection.py create mode 100644 ironic_inspector/test/unit/test_plugins_local_link_connection.py create mode 100644 releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml diff --git a/doc/source/usage.rst b/doc/source/usage.rst index 01bbe4f..790e2f4 100644 --- a/doc/source/usage.rst +++ b/doc/source/usage.rst @@ -214,6 +214,12 @@ Here are some plugins that can be additionally enabled: then the new format will be stored in the 'extra' key. The 'data' key is then deleted from the introspection data, as unless converted it's assumed unusable by introspection rules. +``local_link_connection`` + Processes LLDP data returned from inspection specifically looking for the + port ID and chassis ID, if found it configures the local link connection + information on the nodes Ironic ports with that data. To enable LLDP in the + inventory from IPA ``ipa-collect-lldp=1`` should be passed as a kernel + parameter to the IPA ramdisk. Refer to :ref:`contributing_link` for information on how to write your own plugin. 
diff --git a/ironic_inspector/node_cache.py b/ironic_inspector/node_cache.py index 2b17375..fe09301 100644 --- a/ironic_inspector/node_cache.py +++ b/ironic_inspector/node_cache.py @@ -216,25 +216,26 @@ class NodeInfo(object): self._attributes = None self._ironic = None - def node(self): + def node(self, ironic=None): """Get Ironic node object associated with the cached node record.""" if self._node is None: - self._node = ir_utils.get_node(self.uuid, ironic=self.ironic) + ironic = ironic or self.ironic + self._node = ir_utils.get_node(self.uuid, ironic=ironic) return self._node - def create_ports(self, macs): + def create_ports(self, macs, ironic=None): """Create one or several ports for this node. A warning is issued if port already exists on a node. """ for mac in macs: if mac not in self.ports(): - self._create_port(mac) + self._create_port(mac, ironic) else: LOG.warning(_LW('Port %s already exists, skipping'), mac, node_info=self) - def ports(self): + def ports(self, ironic=None): """Get Ironic port objects associated with the cached node record. This value is cached as well, use invalidate_cache() to clean. @@ -242,13 +243,15 @@ class NodeInfo(object): :return: dict MAC -> port object """ if self._ports is None: + ironic = ironic or self.ironic self._ports = {p.address: p for p in - self.ironic.node.list_ports(self.uuid, limit=0)} + ironic.node.list_ports(self.uuid, limit=0)} return self._ports - def _create_port(self, mac): + def _create_port(self, mac, ironic=None): + ironic = ironic or self.ironic try: - port = self.ironic.port.create(node_uuid=self.uuid, address=mac) + port = ironic.port.create(node_uuid=self.uuid, address=mac) except exceptions.Conflict: LOG.warning(_LW('Port %s already exists, skipping'), mac, node_info=self) @@ -258,14 +261,16 @@ class NodeInfo(object): else: self._ports[mac] = port - def patch(self, patches): + def patch(self, patches, ironic=None): """Apply JSON patches to a node. Refreshes cached node instance. 
:param patches: JSON patches to apply + :param ironic: Ironic client to use instead of self.ironic :raises: ironicclient exceptions """ + ironic = ironic or self.ironic # NOTE(aarefiev): support path w/o ahead forward slash # as Ironic cli does for patch in patches: @@ -273,14 +278,16 @@ class NodeInfo(object): patch['path'] = '/' + patch['path'] LOG.debug('Updating node with patches %s', patches, node_info=self) - self._node = self.ironic.node.update(self.uuid, patches) + self._node = ironic.node.update(self.uuid, patches) - def patch_port(self, port, patches): + def patch_port(self, port, patches, ironic=None): """Apply JSON patches to a port. :param port: port object or its MAC :param patches: JSON patches to apply + :param ironic: Ironic client to use instead of self.ironic """ + ironic = ironic or self.ironic ports = self.ports() if isinstance(port, str): port = ports[port] @@ -288,39 +295,45 @@ class NodeInfo(object): LOG.debug('Updating port %(mac)s with patches %(patches)s', {'mac': port.address, 'patches': patches}, node_info=self) - new_port = self.ironic.port.update(port.uuid, patches) + new_port = ironic.port.update(port.uuid, patches) ports[port.address] = new_port - def update_properties(self, **props): + def update_properties(self, ironic=None, **props): """Update properties on a node. :param props: properties to update + :param ironic: Ironic client to use instead of self.ironic """ + ironic = ironic or self.ironic patches = [{'op': 'add', 'path': '/properties/%s' % k, 'value': v} for k, v in props.items()] - self.patch(patches) + self.patch(patches, ironic) - def update_capabilities(self, **caps): + def update_capabilities(self, ironic=None, **caps): """Update capabilities on a node. 
- :param props: capabilities to update + :param caps: capabilities to update + :param ironic: Ironic client to use instead of self.ironic """ existing = ir_utils.capabilities_to_dict( self.node().properties.get('capabilities')) existing.update(caps) self.update_properties( + ironic=ironic, capabilities=ir_utils.dict_to_capabilities(existing)) - def delete_port(self, port): + def delete_port(self, port, ironic=None): """Delete port. :param port: port object or its MAC + :param ironic: Ironic client to use instead of self.ironic """ + ironic = ironic or self.ironic ports = self.ports() if isinstance(port, str): port = ports[port] - self.ironic.port.delete(port.uuid) + ironic.port.delete(port.uuid) del ports[port.address] def get_by_path(self, path): @@ -350,6 +363,7 @@ class NodeInfo(object): :raises: KeyError if value is not found and default is not set :raises: everything that patch() may raise """ + ironic = kwargs.pop("ironic", None) or self.ironic try: value = self.get_by_path(path) op = 'replace' @@ -363,7 +377,7 @@ class NodeInfo(object): ref_value = copy.deepcopy(value) value = func(value) if value != ref_value: - self.patch([{'op': op, 'path': path, 'value': value}]) + self.patch([{'op': op, 'path': path, 'value': value}], ironic) def add_node(uuid, **attributes): diff --git a/ironic_inspector/plugins/local_link_connection.py b/ironic_inspector/plugins/local_link_connection.py new file mode 100644 index 0000000..3f5480d --- /dev/null +++ b/ironic_inspector/plugins/local_link_connection.py @@ -0,0 +1,122 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generic LLDP Processing Hook""" + +import binascii + +from ironicclient import exc as client_exc +import netaddr +from oslo_config import cfg + +from ironic_inspector.common.i18n import _LW, _LE +from ironic_inspector.common import ironic +from ironic_inspector.plugins import base +from ironic_inspector import utils + +LOG = utils.getProcessingLogger(__name__) + +# NOTE(sambetts) Constants defined according to IEEE standard for LLDP +# http://standards.ieee.org/getieee802/download/802.1AB-2009.pdf +LLDP_TLV_TYPE_CHASSIS_ID = 1 +LLDP_TLV_TYPE_PORT_ID = 2 +PORT_ID_SUBTYPE_MAC = 3 +PORT_ID_SUBTYPE_IFNAME = 5 +PORT_ID_SUBTYPE_LOCAL = 7 +STRING_PORT_SUBTYPES = [PORT_ID_SUBTYPE_IFNAME, PORT_ID_SUBTYPE_LOCAL] +CHASSIS_ID_SUBTYPE_MAC = 4 + +CONF = cfg.CONF + +REQUIRED_IRONIC_VERSION = '1.19' + + +class GenericLocalLinkConnectionHook(base.ProcessingHook): + """Process mandatory LLDP packet fields + + Non-vendor specific LLDP packet fields processed for each NIC found for a + baremetal node, port ID and chassis ID. These fields if found and if valid + will be saved into the local link connection info port id and switch id + fields on the Ironic port that represents that NIC. 
+ """ + + def _get_local_link_patch(self, tlv_type, tlv_value, port): + try: + data = bytearray(binascii.unhexlify(tlv_value)) + except TypeError: + LOG.warning(_LW("TLV value for TLV type %d not in correct" + "format, ensure TLV value is in " + "hexidecimal format when sent to " + "inspector"), tlv_type) + return + + item = value = None + if tlv_type == LLDP_TLV_TYPE_PORT_ID: + # Check to ensure the port id is an allowed type + item = "port_id" + if data[0] in STRING_PORT_SUBTYPES: + value = data[1:].decode() + if data[0] == PORT_ID_SUBTYPE_MAC: + value = str(netaddr.EUI( + binascii.hexlify(data[1:]).decode())) + elif tlv_type == LLDP_TLV_TYPE_CHASSIS_ID: + # Check to ensure the chassis id is the allowed type + if data[0] == CHASSIS_ID_SUBTYPE_MAC: + item = "switch_id" + value = str(netaddr.EUI( + binascii.hexlify(data[1:]).decode())) + + if item and value: + if (not CONF.processing.overwrite_existing and + item in port.local_link_connection): + return + return {'op': 'add', + 'path': '/local_link_connection/%s' % item, + 'value': value} + + def before_update(self, introspection_data, node_info, **kwargs): + """Process LLDP data and patch Ironic port local link connection""" + inventory = utils.get_inventory(introspection_data) + + ironic_ports = node_info.ports() + + for iface in inventory['interfaces']: + if iface['name'] not in introspection_data['all_interfaces']: + continue + port = ironic_ports[iface['mac_address']] + + lldp_data = iface.get('lldp') + if lldp_data is None: + LOG.warning(_LW("No LLDP Data found for interface %s"), iface) + continue + + patches = [] + for tlv_type, tlv_value in lldp_data: + patch = self._get_local_link_patch(tlv_type, tlv_value, port) + if patch is not None: + patches.append(patch) + + try: + # NOTE(sambetts) We need a newer version of Ironic API for this + # transaction, so create a new ironic client and explicitly + # pass it into the function. 
+ cli = ironic.get_client(api_version=REQUIRED_IRONIC_VERSION) + node_info.patch_port(iface['mac_address'], patches, ironic=cli) + except client_exc.NotAcceptable: + LOG.error(_LE("Unable to set Ironic port local link " + "connection information because Ironic does not " + "support the required version")) + # NOTE(sambetts) May as well break out out of the loop here + # because Ironic version is not going to change for the other + # interfaces. + break diff --git a/ironic_inspector/test/base.py b/ironic_inspector/test/base.py index 9a6bf7a..a6c77e1 100644 --- a/ironic_inspector/test/base.py +++ b/ironic_inspector/test/base.py @@ -71,9 +71,11 @@ class BaseTest(fixtures.TestWithFixtures): def assertCalledWithPatch(self, expected, mock_call): def _get_patch_param(call): try: - return call[0][1] + if isinstance(call[0][1], list): + return call[0][1] except IndexError: - return call[0][0] + pass + return call[0][0] actual = sum(map(_get_patch_param, mock_call.call_args_list), []) self.assertPatchEqual(actual, expected) diff --git a/ironic_inspector/test/unit/test_plugins_local_link_connection.py b/ironic_inspector/test/unit/test_plugins_local_link_connection.py new file mode 100644 index 0000000..759c4a7 --- /dev/null +++ b/ironic_inspector/test/unit/test_plugins_local_link_connection.py @@ -0,0 +1,138 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock + +from oslo_config import cfg + +from ironic_inspector import node_cache +from ironic_inspector.plugins import local_link_connection +from ironic_inspector.test import base as test_base +from ironic_inspector import utils + + +class TestGenericLocalLinkConnectionHook(test_base.NodeTest): + hook = local_link_connection.GenericLocalLinkConnectionHook() + + def setUp(self): + super(TestGenericLocalLinkConnectionHook, self).setUp() + self.data = { + 'inventory': { + 'interfaces': [{ + 'name': 'em1', 'mac_address': '11:11:11:11:11:11', + 'ipv4_address': '1.1.1.1', + 'lldp': [ + (0, ''), + (1, '04885a92ec5459'), + (2, '0545746865726e6574312f3138'), + (3, '0078')] + }], + 'cpu': 1, + 'disks': 1, + 'memory': 1 + }, + 'all_interfaces': { + 'em1': {}, + } + } + + llc = { + 'port_id': '56' + } + + ports = [mock.Mock(spec=['address', 'uuid', 'local_link_connection'], + address=a, local_link_connection=llc) + for a in ('11:11:11:11:11:11',)] + self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, + node=self.node, ports=ports) + + @mock.patch.object(node_cache.NodeInfo, 'patch_port') + def test_expected_data(self, mock_patch): + patches = [ + {'path': '/local_link_connection/port_id', + 'value': 'Ethernet1/18', 'op': 'add'}, + {'path': '/local_link_connection/switch_id', + 'value': '88-5A-92-EC-54-59', 'op': 'add'}, + ] + self.hook.before_update(self.data, self.node_info) + self.assertCalledWithPatch(patches, mock_patch) + + @mock.patch.object(node_cache.NodeInfo, 'patch_port') + def test_invalid_chassis_id_subtype(self, mock_patch): + # First byte of TLV value is processed to calculate the subtype for the + # chassis ID, Subtype 5 ('05...') isn't a subtype supported by this + # plugin, so we expect it to skip this TLV. 
+ self.data['inventory']['interfaces'][0]['lldp'][1] = ( + 1, '05885a92ec5459') + patches = [ + {'path': '/local_link_connection/port_id', + 'value': 'Ethernet1/18', 'op': 'add'}, + ] + self.hook.before_update(self.data, self.node_info) + self.assertCalledWithPatch(patches, mock_patch) + + @mock.patch.object(node_cache.NodeInfo, 'patch_port') + def test_invalid_port_id_subtype(self, mock_patch): + # First byte of TLV value is processed to calculate the subtype for the + # port ID, Subtype 6 ('06...') isn't a subtype supported by this + # plugin, so we expect it to skip this TLV. + self.data['inventory']['interfaces'][0]['lldp'][2] = ( + 2, '0645746865726e6574312f3138') + patches = [ + {'path': '/local_link_connection/switch_id', + 'value': '88-5A-92-EC-54-59', 'op': 'add'} + ] + self.hook.before_update(self.data, self.node_info) + self.assertCalledWithPatch(patches, mock_patch) + + @mock.patch.object(node_cache.NodeInfo, 'patch_port') + def test_port_id_subtype_mac(self, mock_patch): + self.data['inventory']['interfaces'][0]['lldp'][2] = ( + 2, '03885a92ec5458') + patches = [ + {'path': '/local_link_connection/port_id', + 'value': '88-5A-92-EC-54-58', 'op': 'add'}, + {'path': '/local_link_connection/switch_id', + 'value': '88-5A-92-EC-54-59', 'op': 'add'} + ] + self.hook.before_update(self.data, self.node_info) + self.assertCalledWithPatch(patches, mock_patch) + + @mock.patch.object(node_cache.NodeInfo, 'patch_port') + def test_lldp_none(self, mock_patch): + self.data['inventory']['interfaces'][0]['lldp'] = None + patches = [] + self.hook.before_update(self.data, self.node_info) + self.assertCalledWithPatch(patches, mock_patch) + + @mock.patch.object(node_cache.NodeInfo, 'patch_port') + def test_interface_not_in_all_interfaces(self, mock_patch): + self.data['all_interfaces'] = {} + patches = [] + self.hook.before_update(self.data, self.node_info) + self.assertCalledWithPatch(patches, mock_patch) + + def test_no_inventory(self): + del self.data['inventory'] + 
self.assertRaises(utils.Error, self.hook.before_update, + self.data, self.node_info) + + @mock.patch.object(node_cache.NodeInfo, 'patch_port') + def test_no_overwrite(self, mock_patch): + cfg.CONF.set_override('overwrite_existing', False, group='processing') + patches = [ + {'path': '/local_link_connection/switch_id', + 'value': '88-5A-92-EC-54-59', 'op': 'add'} + ] + self.hook.before_update(self.data, self.node_info) + self.assertCalledWithPatch(patches, mock_patch) diff --git a/ironic_inspector/test/unit/test_plugins_rules.py b/ironic_inspector/test/unit/test_plugins_rules.py index b9f94f4..71b9c3d 100644 --- a/ironic_inspector/test/unit/test_plugins_rules.py +++ b/ironic_inspector/test/unit/test_plugins_rules.py @@ -179,7 +179,7 @@ class TestSetCapabilityAction(test_base.NodeTest): self.act.apply(self.node_info, self.params) mock_patch.assert_called_once_with( [{'op': 'add', 'path': '/properties/capabilities', - 'value': 'cap1:val'}]) + 'value': 'cap1:val'}], mock.ANY) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_apply_with_existing(self, mock_patch): @@ -203,7 +203,7 @@ class TestExtendAttributeAction(test_base.NodeTest): def test_apply(self, mock_patch): self.act.apply(self.node_info, self.params) mock_patch.assert_called_once_with( - [{'op': 'add', 'path': '/extra/value', 'value': [42]}]) + [{'op': 'add', 'path': '/extra/value', 'value': [42]}], mock.ANY) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_apply_non_empty(self, mock_patch): @@ -211,7 +211,8 @@ class TestExtendAttributeAction(test_base.NodeTest): self.act.apply(self.node_info, self.params) mock_patch.assert_called_once_with( - [{'op': 'replace', 'path': '/extra/value', 'value': [0, 42]}]) + [{'op': 'replace', 'path': '/extra/value', 'value': [0, 42]}], + mock.ANY) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_apply_unique_with_existing(self, mock_patch): diff --git a/releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml 
b/releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml new file mode 100644 index 0000000..eecd281 --- /dev/null +++ b/releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml @@ -0,0 +1,5 @@ +--- +features: + - Added GenericLocalLinkConnectionHook processing plugin to process LLDP data + returned during inspection and set port ID and switch ID in an Ironic + node's port local link connection information using that data. diff --git a/setup.cfg b/setup.cfg index 2557d1e..6d64b72 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,6 +32,7 @@ ironic_inspector.hooks.processing = extra_hardware = ironic_inspector.plugins.extra_hardware:ExtraHardwareHook raid_device = ironic_inspector.plugins.raid_device:RaidDeviceDetection capabilities = ironic_inspector.plugins.capabilities:CapabilitiesHook + local_link_connection = ironic_inspector.plugins.local_link_connection:GenericLocalLinkConnectionHook ironic_inspector.hooks.node_not_found = example = ironic_inspector.plugins.example:example_not_found_hook enroll = ironic_inspector.plugins.discovery:enroll_node_not_found_hook From 52922bc2f0d367368a19b59092c4ecd06771abb4 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Tue, 2 Aug 2016 12:20:47 +0200 Subject: [PATCH 77/83] Provide meaningful error messages in functional tests The default python-requests error message does not contain response body, so it's not possible to figure out the actual cause of the failure. Change it to a custom exception including the cause. 
Change-Id: I83ea035ab2b3ace4dbdf5b17cd5d632c4c704003 --- ironic_inspector/test/functional.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/ironic_inspector/test/functional.py b/ironic_inspector/test/functional.py index af5beb4..5e27b85 100644 --- a/ironic_inspector/test/functional.py +++ b/ironic_inspector/test/functional.py @@ -69,6 +69,10 @@ def get_test_conf_file(): return TEST_CONF_FILE +def get_error(response): + return response.json()['error']['message'] + + class Base(base.NodeTest): ROOT_URL = 'http://127.0.0.1:5050' IS_FUNCTIONAL = True @@ -115,7 +119,11 @@ class Base(base.NodeTest): if expect_error: self.assertEqual(expect_error, res.status_code) else: - res.raise_for_status() + if res.status_code >= 400: + msg = ('%(meth)s %(url)s failed with code %(code)s: %(msg)s' % + {'meth': method.upper(), 'url': endpoint, + 'code': res.status_code, 'msg': get_error(res)}) + raise AssertionError(msg) return res def call_introspect(self, uuid, new_ipmi_username=None, From 4cf7ee10d152e6431a800cf2ad32ccdd75dc7a97 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Wed, 20 Jul 2016 10:53:00 +0300 Subject: [PATCH 78/83] Use OSC in exercise.sh This patch replaces project client calls by OSC. 
Change-Id: I0f6da0b85bfaf210d854e5bddbda4079fa16a14e --- devstack/exercise.sh | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/devstack/exercise.sh b/devstack/exercise.sh index 893aaac..999103d 100755 --- a/devstack/exercise.sh +++ b/devstack/exercise.sh @@ -73,7 +73,7 @@ function curl_ins { curl -f -H "X-Auth-Token: $token" -X $1 $args "http://127.0.0.1:5050/$2" } -nodes=$(ironic node-list | tail -n +4 | head -n -1 | tr '|' ' ' | awk '{ print $1; }') +nodes=$(openstack baremetal node list -f value -c UUID) if [ -z "$nodes" ]; then echo "No nodes found in Ironic" exit 1 @@ -81,10 +81,10 @@ fi for uuid in $nodes; do for p in cpus cpu_arch memory_mb local_gb; do - ironic node-update $uuid remove properties/$p > /dev/null || true + openstack baremetal node unset --property $p $uuid > /dev/null || true done - if ! ironic node-show $uuid | grep provision_state | grep -iq manageable; then - ironic node-set-provision-state $uuid manage + if [[ "$(openstack baremetal node show $uuid -f value -c provision_state)" != "manageable" ]]; then + openstack baremetal node manage $uuid fi done @@ -92,7 +92,7 @@ openstack baremetal introspection rule purge openstack baremetal introspection rule import "$rules_file" for uuid in $nodes; do - ironic node-set-provision-state $uuid inspect + openstack baremetal node inspect $uuid done current_nodes=$nodes @@ -139,12 +139,12 @@ function wait_for_provision_state { local max_attempts=${3:-6} for attempt in $(seq 1 $max_attempts); do - local current=$(ironic node-show $uuid | grep ' provision_state ' | awk '{ print $4; }') + local current=$(openstack baremetal node show $uuid -f value -c provision_state) if [ "$current" != "$expected" ]; then if [ "$attempt" -eq "$max_attempts" ]; then echo "Expected provision_state $expected, got $current:" - ironic node-show $uuid + openstack baremetal node show $uuid exit 1 fi else @@ -186,7 +186,7 @@ for uuid in $nodes; do openstack service list | grep 
swift && test_swift wait_for_provision_state $uuid manageable - ironic node-set-provision-state $uuid provide + openstack baremetal node provide $uuid done # Cleaning kicks in here, we have to wait until it finishes (~ 2 minutes) @@ -197,11 +197,11 @@ done echo "Wait until nova becomes aware of bare metal instances" for attempt in {1..24}; do - if [ $(nova hypervisor-stats | grep ' vcpus ' | head -n1 | awk '{ print $4; }') -ge $expected_cpus ]; then + if [ $(openstack hypervisor stats show -f value -c vcpus) -ge $expected_cpus ]; then break elif [ "$attempt" -eq 24 ]; then echo "Timeout while waiting for nova hypervisor-stats, current:" - nova hypervisor-stats + openstack hypervisor stats show exit 1 fi sleep 5 @@ -210,7 +210,8 @@ done echo "Try nova boot for one instance" image=$(openstack image list --property disk_format=ami -f value -c ID | head -n1) -net_id=$(neutron net-list | egrep "$PRIVATE_NETWORK_NAME"'[^-]' | awk '{ print $2 }') +net_id=$(openstack network show "$PRIVATE_NETWORK_NAME" -f value -c id) +# TODO(vsaienko) replace by openstack create with --wait flag uuid=$(nova boot --flavor baremetal --nic net-id=$net_id --image $image testing | grep " id " | awk '{ print $4 }') for attempt in {1..30}; do @@ -218,8 +219,8 @@ for attempt in {1..30}; do if [ "$status" = "ERROR" ]; then echo "Instance failed to boot" # Some debug output - nova show $uuid - nova hypervisor-stats + openstack server show $uuid + openstack hypervisor stats show exit 1 elif [ "$status" != "ACTIVE" ]; then if [ "$attempt" -eq 30 ]; then @@ -232,6 +233,6 @@ for attempt in {1..30}; do sleep 30 done -nova delete $uuid +openstack server delete $uuid echo "Validation passed" From 73ead226415eb20c9ac21f5a6270b90517213a78 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 2 Aug 2016 15:09:42 +0000 Subject: [PATCH 79/83] Updated from global requirements Change-Id: I3cffd9a46e6908289a2135aa12923d7b748c0875 --- requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/requirements.txt b/requirements.txt index ad05aa8..bc58c05 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ Flask!=0.11,<1.0,>=0.10 # BSD futurist!=0.15.0,>=0.11.0 # Apache-2.0 jsonpath-rw<2.0,>=1.2.0 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -keystoneauth1>=2.7.0 # Apache-2.0 +keystoneauth1>=2.10.0 # Apache-2.0 keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD pbr>=1.6 # Apache-2.0 @@ -20,8 +20,8 @@ oslo.db>=4.1.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 -oslo.rootwrap>=2.0.0 # Apache-2.0 -oslo.utils>=3.15.0 # Apache-2.0 +oslo.rootwrap>=5.0.0 # Apache-2.0 +oslo.utils>=3.16.0 # Apache-2.0 six>=1.9.0 # MIT stevedore>=1.16.0 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT From 9b4116a1ae930f21a18490d8a477a6d08dc76946 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Mon, 1 Aug 2016 15:22:00 +0800 Subject: [PATCH 80/83] Check whether action value is string before calling format() AttributeError: 'int' object has no attribute 'format' will be raised if we pass an integer value, so we should check whether the value is a string or not. 
Change-Id: I14ed4d404a9be1233493083bef49218cf0f45867 Closes-Bug: #1608393 --- ironic_inspector/rules.py | 3 ++- ironic_inspector/test/unit/test_rules.py | 14 ++++++++++++++ ...tted-value-from-nonstring-3d851cb42ce3a0ac.yaml | 5 +++++ 3 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml diff --git a/ironic_inspector/rules.py b/ironic_inspector/rules.py index 23d6033..60b752a 100644 --- a/ironic_inspector/rules.py +++ b/ironic_inspector/rules.py @@ -19,6 +19,7 @@ import jsonschema from oslo_db import exception as db_exc from oslo_utils import timeutils from oslo_utils import uuidutils +import six from sqlalchemy import orm from ironic_inspector.common.i18n import _, _LE, _LI @@ -202,7 +203,7 @@ class IntrospectionRule(object): ext = ext_mgr[act.action].obj for formatted_param in ext.FORMATTED_PARAMS: value = act.params.get(formatted_param) - if not value: + if not value or not isinstance(value, six.string_types): continue # NOTE(aarefiev): verify provided value with introspection diff --git a/ironic_inspector/test/unit/test_rules.py b/ironic_inspector/test/unit/test_rules.py index 8524d97..b90fae7 100644 --- a/ironic_inspector/test/unit/test_rules.py +++ b/ironic_inspector/test/unit/test_rules.py @@ -419,6 +419,20 @@ class TestApplyActions(BaseTest): self.assertRaises(utils.Error, self.rule.apply_actions, self.node_info, data=self.data) + def test_apply_data_non_format_value(self, mock_ext_mgr): + self.rule = rules.create(actions_json=[ + {'action': 'set-attribute', + 'path': '/driver_info/ipmi_address', + 'value': 1}], + conditions_json=self.conditions_json + ) + mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock + + self.rule.apply_actions(self.node_info, data=self.data) + + self.assertEqual(1, self.act_mock.apply.call_count) + self.assertFalse(self.act_mock.rollback.called) + def test_rollback(self, mock_ext_mgr): 
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock diff --git a/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml b/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml new file mode 100644 index 0000000..90d9069 --- /dev/null +++ b/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml @@ -0,0 +1,5 @@ +--- +fixes: + This fixes setting non string 'value' field for rule's + actions. As non string value is obviously not a formatted + value, this adds a check to avoid AttributeError exception. From c3f38feb2e9451e9a9f5be5762056d39c6e85505 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 4 Aug 2016 02:35:39 +0000 Subject: [PATCH 81/83] Updated from global requirements Change-Id: I7646183ec9fd2cbec7aa0bccccfdb95a63374178 --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index bc58c05..f75e042 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,10 +12,10 @@ keystoneauth1>=2.10.0 # Apache-2.0 keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD pbr>=1.6 # Apache-2.0 -python-ironicclient>=1.1.0 # Apache-2.0 +python-ironicclient>=1.6.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config>=3.12.0 # Apache-2.0 +oslo.config>=3.14.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 From 522113da4390347a9cd55e89a7eddf6e14527eff Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Fri, 8 Jul 2016 11:11:13 +0200 Subject: [PATCH 82/83] Add callback function to manage missing hooks Before https://review.openstack.org/#/c/337043/, stevedore.NamedExtensionManager returned KeyError when calling with non-existing names and name_order=True. 
After the mentioned change this is no longer true, so we used the newly added on_missing_entrypoints_callback option to customize the behavior in these cases and make it raise a custom exception MissingHookError. Change-Id: I1f1edc0b7a82a16bf9be4113db61ee1cd0080db4 Closes-Bug: #1600141 --- ironic_inspector/main.py | 5 +++-- ironic_inspector/plugins/base.py | 11 +++++++++++ ironic_inspector/test/unit/test_main.py | 1 - 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/ironic_inspector/main.py b/ironic_inspector/main.py index 05d054e..130c810 100644 --- a/ironic_inspector/main.py +++ b/ironic_inspector/main.py @@ -411,8 +411,9 @@ class Service(object): hooks = [ext.name for ext in plugins_base.processing_hooks_manager()] except KeyError as exc: - # stevedore raises KeyError on missing hook + # callback function raises MissingHookError derived from KeyError + # on missing hook LOG.critical(_LC('Hook %s failed to load or was not found'), + LOG.critical(_LC('Hook(s) %s failed to load or was not found'), str(exc)) sys.exit(1) diff --git a/ironic_inspector/plugins/base.py b/ironic_inspector/plugins/base.py index 218f7f4..368a0ab 100644 --- a/ironic_inspector/plugins/base.py +++ b/ironic_inspector/plugins/base.py @@ -149,6 +149,12 @@ _CONDITIONS_MGR = None _ACTIONS_MGR = None +def missing_entrypoints_callback(names): + """Raise MissingHookError with comma-separated list of missing hooks""" + missing_names = ', '.join(names) + raise MissingHookError(missing_names) + + def processing_hooks_manager(*args): """Create a Stevedore extension manager for processing hooks.
@@ -164,6 +170,7 @@ def processing_hooks_manager(*args): names=names, invoke_on_load=True, invoke_args=args, + on_missing_entrypoints_callback=missing_entrypoints_callback, name_order=True) return _HOOKS_MGR @@ -204,3 +211,7 @@ def rule_actions_manager(): 'actions is deprecated (action "%s")'), act.name) return _ACTIONS_MGR + + +class MissingHookError(KeyError): + """Exception when hook is not found when processing it.""" diff --git a/ironic_inspector/test/unit/test_main.py b/ironic_inspector/test/unit/test_main.py index cb51afc..0906e03 100644 --- a/ironic_inspector/test/unit/test_main.py +++ b/ironic_inspector/test/unit/test_main.py @@ -626,7 +626,6 @@ class TestInit(test_base.BaseTest): self.service.init() self.assertFalse(mock_firewall.called) - @unittest.skip('skipped until stevedore > 1.15.0 is released') @mock.patch.object(main.LOG, 'critical') def test_init_failed_processing_hook(self, mock_log, mock_node_cache, mock_get_client, mock_auth, From 625add4f416e8916874d95698d5d411000895a07 Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Thu, 11 Aug 2016 12:16:53 +0300 Subject: [PATCH 83/83] Fix release notes formatting Change-Id: I7ecc9ed5382f72a6cd440ac5c02d4f0eee6060db --- ...eck-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml b/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml index 90d9069..c98628c 100644 --- a/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml +++ b/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml @@ -1,5 +1,5 @@ --- fixes: - This fixes setting non string 'value' field for rule's - actions. As non string value is obviously not a formatted - value, this adds a check to avoid AttributeError exception. + - Fix setting non string 'value' field for rule's actions. 
As + non string value is obviously not a formatted value, add the + check to avoid AttributeError exception.