ironic-inspector 4.2.0 release

meta:version: 4.2.0
 meta:diff-start: -
 meta:series: newton
 meta:release-type: release
 meta:announce: openstack-announce@lists.openstack.org
 meta:pypi: no
 meta:first: no
 meta:release:Author: Jim Rollenhagen <jim@jimrollenhagen.com>
 meta:release:Commit: Jim Rollenhagen <jim@jimrollenhagen.com>
 meta:release:Change-Id: I85c67e643719736fe3bacc4b8774c6d10e3edd8f
 meta:release:Code-Review+1: Jay Faulkner <jay@jvf.cc>
 meta:release:Code-Review+1: Dmitry Tantsur <divius.inside@gmail.com>
 meta:release:Code-Review+2: Davanum Srinivas (dims) <davanum@gmail.com>
 meta:release:Workflow+1: Davanum Srinivas (dims) <davanum@gmail.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJX48jgAAoJENljH+rwzGIn9kUIAI1hvVDmAWTwOBbrW+DFiWn/
 qd9qJaP21tZwAkMx7tLirMx4Qk8oDnkWlcWkCrPtIJRCHV6Hh5gbWRaQPQnWobkI
 IacOKLpev2IuehKeNbPZxlyBZOvCGle5pS/JO8mrJ/tsjCft3DdceuhcuYSPzh5T
 lrKqz0R9UilSMzMZh5b3WRbt6egk4YBbZXH00ICK9KCCGCImm6bbdOwwQKYPA2GZ
 hfD16OxwnV8r5G4Yga20kg8TitFveEoYr/HGJXBG7YbaCQWhyplnUhtQKh/ls2y/
 1sYe0hYzY2LR/XyajRbF+qKVBWKjqIQVfpYXxga1ZReCPo/vcw3ZQgvcIR+QusI=
 =1FYM
 -----END PGP SIGNATURE-----
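
The tag is GPG-signed; assuming the release key is present in the local keyring, the signature can be checked with:

    git tag -v 4.2.0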

Merge tag '4.2.0' into debian/newton

ironic-inspector 4.2.0 release

  * New upstream release.
  * Fixed (build-)depends for this release.

Change-Id: Ide4ea295d6c8699003a27665883df7b523ac0d5c
Commit 9a09c18024
Author: Thomas Goirand
Date: 2016-09-28 09:50:16 +02:00
43 changed files with 833 additions and 367 deletions

View File

@ -5,6 +5,7 @@ namespace = ironic_inspector.common.ironic
namespace = ironic_inspector.common.swift
namespace = ironic_inspector.plugins.capabilities
namespace = ironic_inspector.plugins.discovery
namespace = ironic_inspector.plugins.pci_devices
namespace = keystonemiddleware.auth_token
namespace = oslo.db
namespace = oslo.log
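
These namespace entries feed oslo-config-generator when the sample configuration is regenerated. As a sketch (output path assumed), the new plugin's options alone could be rendered with:

    oslo-config-generator --namespace ironic_inspector.plugins.pci_devices \
        --output-file example-pci.conf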

debian/changelog vendored
View File

@ -1,9 +1,14 @@
ironic-inspector (4.1.0-2) UNRELEASED; urgency=medium
ironic-inspector (4.2.0-1) UNRELEASED; urgency=medium
[ Ondřej Nový ]
* d/s/options: extend-diff-ignore of .gitreview
* d/control: Use correct branch in Vcs-* fields
-- Ondřej Nový <onovy@debian.org> Mon, 26 Sep 2016 19:02:35 +0200
[ Thomas Goirand ]
* New upstream release.
* Fixed (build-)depends for this release.
-- Thomas Goirand <zigo@debian.org> Wed, 28 Sep 2016 09:51:51 +0200
ironic-inspector (4.1.0-1) experimental; urgency=medium

debian/control vendored
View File

@ -24,17 +24,17 @@ Build-Depends-Indep: alembic (>= 0.8.4),
python-keystoneauth1 (>= 2.10.0),
python-keystonemiddleware (>= 4.0.0),
python-mock (>= 2.0),
python-netaddr (>= 0.7.12),
python-netaddr (>= 0.7.13),
python-oslo.concurrency (>= 3.8.0),
python-oslo.config (>= 1:3.14.0),
python-oslo.db (>= 4.1.0),
python-oslo.db (>= 4.10.0),
python-oslo.i18n (>= 2.1.0),
python-oslo.log (>= 1.14.0),
python-oslo.middleware (>= 3.0.0),
python-oslo.rootwrap (>= 5.0.0),
python-oslo.utils (>= 3.16.0),
python-oslosphinx (>= 2.5.0),
python-oslotest (>= 1.10.0),
python-oslotest (>= 1:1.10.0),
python-six (>= 1.9.0),
python-sqlalchemy (>= 1.0.10),
python-stevedore (>= 1.16.0),
@ -58,10 +58,10 @@ Depends: alembic (>= 0.8.4),
python-jsonschema,
python-keystoneauth1 (>= 2.10.0),
python-keystonemiddleware (>= 4.0.0),
python-netaddr (>= 0.7.12),
python-netaddr (>= 0.7.13),
python-oslo.concurrency (>= 3.8.0),
python-oslo.config (>= 1:3.14.0),
python-oslo.db (>= 4.1.0),
python-oslo.db (>= 4.10.0),
python-oslo.i18n (>= 2.1.0),
python-oslo.log (>= 1.14.0),
python-oslo.middleware (>= 3.0.0),

View File

@ -287,7 +287,7 @@ function get_ini {
local option=${3:?option not specified}
cat <<_GET_INI | python -
import ConfigParser
from six.moves import configparser as ConfigParser
cp = ConfigParser.ConfigParser()
cp.read("$file")
assert "$section" in cp.sections(), '$section not in $file'

View File

@ -18,7 +18,7 @@ IRONIC_INSPECTOR_URI="http://$IRONIC_INSPECTOR_HOST:$IRONIC_INSPECTOR_PORT"
IRONIC_INSPECTOR_BUILD_RAMDISK=$(trueorfalse False IRONIC_INSPECTOR_BUILD_RAMDISK)
IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe.vmlinuz}
IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz}
IRONIC_INSPECTOR_COLLECTORS=${IRONIC_INSPECTOR_COLLECTORS:-default,logs}
IRONIC_INSPECTOR_COLLECTORS=${IRONIC_INSPECTOR_COLLECTORS:-default,logs,pci-devices}
IRONIC_INSPECTOR_RAMDISK_LOGDIR=${IRONIC_INSPECTOR_RAMDISK_LOGDIR:-$IRONIC_INSPECTOR_DATA_DIR/ramdisk-logs}
IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS=${IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS:-True}
IRONIC_INSPECTOR_TIMEOUT=${IRONIC_INSPECTOR_TIMEOUT:-600}
@ -327,6 +327,13 @@ elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
start_inspector_dhcp
fi
start_inspector
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
if is_service_enabled tempest; then
echo_summary "Configuring Tempest for Ironic Inspector"
if [ -n "$IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK" ]; then
iniset $TEMPEST_CONFIG baremetal_introspection auto_discovery_feature True
fi
fi
fi
if [[ "$1" == "unstack" ]]; then

View File

@ -199,6 +199,12 @@ needed:
``capabilities``
detect node capabilities: CPU, boot mode, etc. See `Capabilities
Detection`_ for more details.
``pci_devices``
gathers the list of all PCI devices returned by the ramdisk and compares
them to those defined in the ``alias`` field(s) of the ``pci_devices``
section of the configuration file (see the sample section below). The
recognized PCI devices and their count are then stored in node properties.
This information can later be used in nova flavors for node scheduling.
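A hypothetical ``[pci_devices]`` section illustrating the expected format
(the vendor and product IDs below are examples only)::

    [pci_devices]
    alias = {"vendor_id": "8086", "product_id": "1572", "name": "nic_10g"}
    alias = {"vendor_id": "15b3", "product_id": "1013", "name": "nic_25g"}

Since ``alias`` is a multi-valued option, it may be repeated once per device.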
Here are some plugins that can be additionally enabled:

View File

@ -299,11 +299,11 @@
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool. (integer
# value)
# Maximum number of SQL connections to keep open in a pool. Setting a
# value of 0 indicates no limit. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size = <None>
#max_pool_size = 5
# Maximum number of database connection retries during startup. Set to
# -1 to specify an infinite retry count. (integer value)
@ -325,6 +325,8 @@
# Verbosity of SQL debugging information: 0=None, 100=Everything.
# (integer value)
# Minimum value: 0
# Maximum value: 100
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug = 0
@ -594,7 +596,11 @@
# Identity API Server. (integer value)
#http_request_max_retries = 3
# Env key for the swift cache. (string value)
# Request environment key where the Swift cache object is stored. When
# auth_token middleware is deployed with a Swift cache, use this
# option to have the middleware share a caching backend with swift.
# Otherwise, use the ``memcached_servers`` option instead. (string
# value)
#cache = <None>
# Required if identity server requires client certificate (string
@ -714,6 +720,18 @@
#auth_section = <None>
[pci_devices]
#
# From ironic_inspector.plugins.pci_devices
#
# An alias for PCI device identified by 'vendor_id' and 'product_id'
# fields. Format: {"vendor_id": "1234", "product_id": "5678", "name":
# "pci_dev1"} (multi valued)
#alias =
[processing]
#
@ -754,7 +772,7 @@
# the Nova scheduler. Hook 'validate_interfaces' ensures that valid
# NIC data was provided by the ramdisk. Do not exclude these two unless
# you really know what you're doing. (string value)
#default_processing_hooks = ramdisk_error,root_disk_selection,scheduler,validate_interfaces,capabilities
#default_processing_hooks = ramdisk_error,root_disk_selection,scheduler,validate_interfaces,capabilities,pci_devices
# Comma-separated list of enabled hooks for processing pipeline. The
# default for this is $default_processing_hooks, hooks can be added

View File

@ -34,74 +34,75 @@ IRONIC_GROUP = 'ironic'
IRONIC_OPTS = [
cfg.StrOpt('os_region',
help='Keystone region used to get Ironic endpoints.'),
help=_('Keystone region used to get Ironic endpoints.')),
cfg.StrOpt('os_auth_url',
default='',
help='Keystone authentication endpoint for accessing Ironic '
'API. Use [keystone_authtoken] section for keystone '
'token validation.',
help=_('Keystone authentication endpoint for accessing Ironic '
'API. Use [keystone_authtoken] section for keystone '
'token validation.'),
deprecated_group='discoverd',
deprecated_for_removal=True,
deprecated_reason='Use options presented by configured '
'keystone auth plugin.'),
deprecated_reason=_('Use options presented by configured '
'keystone auth plugin.')),
cfg.StrOpt('os_username',
default='',
help='User name for accessing Ironic API. '
'Use [keystone_authtoken] section for keystone '
'token validation.',
help=_('User name for accessing Ironic API. '
'Use [keystone_authtoken] section for keystone '
'token validation.'),
deprecated_group='discoverd',
deprecated_for_removal=True,
deprecated_reason='Use options presented by configured '
'keystone auth plugin.'),
deprecated_reason=_('Use options presented by configured '
'keystone auth plugin.')),
cfg.StrOpt('os_password',
default='',
help='Password for accessing Ironic API. '
'Use [keystone_authtoken] section for keystone '
'token validation.',
help=_('Password for accessing Ironic API. '
'Use [keystone_authtoken] section for keystone '
'token validation.'),
secret=True,
deprecated_group='discoverd',
deprecated_for_removal=True,
deprecated_reason='Use options presented by configured '
'keystone auth plugin.'),
deprecated_reason=_('Use options presented by configured '
'keystone auth plugin.')),
cfg.StrOpt('os_tenant_name',
default='',
help='Tenant name for accessing Ironic API. '
'Use [keystone_authtoken] section for keystone '
'token validation.',
help=_('Tenant name for accessing Ironic API. '
'Use [keystone_authtoken] section for keystone '
'token validation.'),
deprecated_group='discoverd',
deprecated_for_removal=True,
deprecated_reason='Use options presented by configured '
'keystone auth plugin.'),
deprecated_reason=_('Use options presented by configured '
'keystone auth plugin.')),
cfg.StrOpt('identity_uri',
default='',
help='Keystone admin endpoint. '
'DEPRECATED: Use [keystone_authtoken] section for '
'keystone token validation.',
help=_('Keystone admin endpoint. '
'DEPRECATED: Use [keystone_authtoken] section for '
'keystone token validation.'),
deprecated_group='discoverd',
deprecated_for_removal=True),
cfg.StrOpt('auth_strategy',
default='keystone',
choices=('keystone', 'noauth'),
help='Method to use for authentication: noauth or keystone.'),
help=_('Method to use for authentication: noauth or '
'keystone.')),
cfg.StrOpt('ironic_url',
default='http://localhost:6385/',
help='Ironic API URL, used to set Ironic API URL when '
'auth_strategy option is noauth to work with standalone '
'Ironic without keystone.'),
help=_('Ironic API URL, used to set Ironic API URL when '
'auth_strategy option is noauth to work with standalone '
'Ironic without keystone.')),
cfg.StrOpt('os_service_type',
default='baremetal',
help='Ironic service type.'),
help=_('Ironic service type.')),
cfg.StrOpt('os_endpoint_type',
default='internalURL',
help='Ironic endpoint type.'),
help=_('Ironic endpoint type.')),
cfg.IntOpt('retry_interval',
default=2,
help='Interval between retries in case of conflict error '
'(HTTP 409).'),
help=_('Interval between retries in case of conflict error '
'(HTTP 409).')),
cfg.IntOpt('max_retries',
default=30,
help='Maximum number of retries in case of conflict error '
'(HTTP 409).'),
help=_('Maximum number of retries in case of conflict error '
'(HTTP 409).')),
]
@ -147,8 +148,11 @@ def get_ipmi_address(node):
ip = socket.gethostbyname(value)
return ip
except socket.gaierror:
msg = ('Failed to resolve the hostname (%s) for node %s')
raise utils.Error(msg % (value, node.uuid), node_info=node)
msg = _('Failed to resolve the hostname (%(value)s)'
' for node %(uuid)s')
raise utils.Error(msg % {'value': value,
'uuid': node.uuid},
node_info=node)
def get_client(token=None,

View File

@ -31,35 +31,37 @@ SWIFT_GROUP = 'swift'
SWIFT_OPTS = [
cfg.IntOpt('max_retries',
default=2,
help='Maximum number of times to retry a Swift request, '
'before failing.'),
help=_('Maximum number of times to retry a Swift request, '
'before failing.')),
cfg.IntOpt('delete_after',
default=0,
help='Number of seconds that the Swift object will last before '
'being deleted. (set to 0 to never delete the object).'),
help=_('Number of seconds that the Swift object will last '
'before being deleted. (set to 0 to never delete the '
'object).')),
cfg.StrOpt('container',
default='ironic-inspector',
help='Default Swift container to use when creating objects.'),
help=_('Default Swift container to use when creating '
'objects.')),
cfg.StrOpt('os_auth_version',
default='2',
help='Keystone authentication API version',
help=_('Keystone authentication API version'),
deprecated_for_removal=True,
deprecated_reason='Use options presented by configured '
'keystone auth plugin.'),
deprecated_reason=_('Use options presented by configured '
'keystone auth plugin.')),
cfg.StrOpt('os_auth_url',
default='',
help='Keystone authentication URL',
help=_('Keystone authentication URL'),
deprecated_for_removal=True,
deprecated_reason='Use options presented by configured '
'keystone auth plugin.'),
deprecated_reason=_('Use options presented by configured '
'keystone auth plugin.')),
cfg.StrOpt('os_service_type',
default='object-store',
help='Swift service type.'),
help=_('Swift service type.')),
cfg.StrOpt('os_endpoint_type',
default='internalURL',
help='Swift endpoint type.'),
help=_('Swift endpoint type.')),
cfg.StrOpt('os_region',
help='Keystone region to get endpoint for.'),
help=_('Keystone region to get endpoint for.')),
]
# NOTE(pas-ha) these old options conflict with options exported by
@ -68,14 +70,14 @@ SWIFT_OPTS = [
LEGACY_OPTS = [
cfg.StrOpt('username',
default='',
help='User name for accessing Swift API.'),
help=_('User name for accessing Swift API.')),
cfg.StrOpt('password',
default='',
help='Password for accessing Swift API.',
help=_('Password for accessing Swift API.'),
secret=True),
cfg.StrOpt('tenant_name',
default='',
help='Tenant name for accessing Swift API.'),
help=_('Tenant name for accessing Swift API.')),
]
CONF.register_opts(SWIFT_OPTS, group=SWIFT_GROUP)

View File

@ -14,6 +14,8 @@
from oslo_config import cfg
from oslo_middleware import cors
from ironic_inspector.common.i18n import _
MIN_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Minimum-Version'
MAX_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Maximum-Version'
@ -27,194 +29,195 @@ VALID_STORE_DATA_VALUES = ('none', 'swift')
FIREWALL_OPTS = [
cfg.BoolOpt('manage_firewall',
default=True,
help='Whether to manage firewall rules for PXE port.',
help=_('Whether to manage firewall rules for PXE port.'),
deprecated_group='discoverd'),
cfg.StrOpt('dnsmasq_interface',
default='br-ctlplane',
help='Interface on which dnsmasq listens, the default is for '
'VM\'s.',
help=_('Interface on which dnsmasq listens, the default is for '
'VM\'s.'),
deprecated_group='discoverd'),
cfg.IntOpt('firewall_update_period',
default=15,
help='Amount of time in seconds, after which repeat periodic '
'update of firewall.',
help=_('Amount of time in seconds, after which repeat periodic '
'update of firewall.'),
deprecated_group='discoverd'),
cfg.StrOpt('firewall_chain',
default='ironic-inspector',
help='iptables chain name to use.'),
help=_('iptables chain name to use.')),
]
PROCESSING_OPTS = [
cfg.StrOpt('add_ports',
default='pxe',
help='Which MAC addresses to add as ports during '
'introspection. Possible values: '
'all (all MAC addresses), active (MAC addresses of NIC with IP '
'addresses), pxe (only MAC address of NIC node PXE booted '
'from, falls back to "active" if PXE MAC is not supplied '
'by the ramdisk).',
help=_('Which MAC addresses to add as ports during '
'introspection. Possible values: all '
'(all MAC addresses), active (MAC addresses of NIC with '
'IP addresses), pxe (only MAC address of NIC node PXE '
'booted from, falls back to "active" if PXE MAC is not '
'supplied by the ramdisk).'),
choices=VALID_ADD_PORTS_VALUES,
deprecated_group='discoverd'),
cfg.StrOpt('keep_ports',
default='all',
help='Which ports (already present on a node) to keep after '
'introspection. Possible values: '
'all (do not delete anything), present (keep ports which MACs '
'were present in introspection data), added (keep only MACs '
'that we added during introspection).',
help=_('Which ports (already present on a node) to keep after '
'introspection. Possible values: all (do not delete '
'anything), present (keep ports which MACs were present '
'in introspection data), added (keep only MACs that we '
'added during introspection).'),
choices=VALID_KEEP_PORTS_VALUES,
deprecated_group='discoverd'),
cfg.BoolOpt('overwrite_existing',
default=True,
help='Whether to overwrite existing values in node database. '
'Disable this option to make introspection a '
'non-destructive operation.',
help=_('Whether to overwrite existing values in node '
'database. Disable this option to make '
'introspection a non-destructive operation.'),
deprecated_group='discoverd'),
cfg.BoolOpt('enable_setting_ipmi_credentials',
default=False,
help='Whether to enable setting IPMI credentials during '
'introspection. This is an experimental and not well '
'tested feature, use at your own risk.',
help=_('Whether to enable setting IPMI credentials during '
'introspection. This is an experimental and not well '
'tested feature, use at your own risk.'),
deprecated_group='discoverd'),
cfg.StrOpt('default_processing_hooks',
default='ramdisk_error,root_disk_selection,scheduler,'
'validate_interfaces,capabilities',
help='Comma-separated list of default hooks for processing '
'pipeline. Hook \'scheduler\' updates the node with the '
'minimum properties required by the Nova scheduler. '
'Hook \'validate_interfaces\' ensures that valid NIC '
'data was provided by the ramdisk.'
'Do not exclude these two unless you really know what '
'you\'re doing.'),
'validate_interfaces,capabilities,pci_devices',
help=_('Comma-separated list of default hooks for processing '
'pipeline. Hook \'scheduler\' updates the node with the '
'minimum properties required by the Nova scheduler. '
'Hook \'validate_interfaces\' ensures that valid NIC '
'data was provided by the ramdisk. '
'Do not exclude these two unless you really know what '
'you\'re doing.')),
cfg.StrOpt('processing_hooks',
default='$default_processing_hooks',
help='Comma-separated list of enabled hooks for processing '
'pipeline. The default for this is '
'$default_processing_hooks, hooks can be added before '
'or after the defaults like this: '
'"prehook,$default_processing_hooks,posthook".',
help=_('Comma-separated list of enabled hooks for processing '
'pipeline. The default for this is '
'$default_processing_hooks, hooks can be added before '
'or after the defaults like this: '
'"prehook,$default_processing_hooks,posthook".'),
deprecated_group='discoverd'),
cfg.StrOpt('ramdisk_logs_dir',
help='If set, logs from ramdisk will be stored in this '
'directory.',
help=_('If set, logs from ramdisk will be stored in this '
'directory.'),
deprecated_group='discoverd'),
cfg.BoolOpt('always_store_ramdisk_logs',
default=False,
help='Whether to store ramdisk logs even if it did not return '
'an error message (dependent upon "ramdisk_logs_dir" option '
'being set).',
help=_('Whether to store ramdisk logs even if it did not '
'return an error message (dependent upon '
'"ramdisk_logs_dir" option being set).'),
deprecated_group='discoverd'),
cfg.StrOpt('node_not_found_hook',
default=None,
help='The name of the hook to run when inspector receives '
'inspection information from a node it isn\'t already '
'aware of. This hook is ignored by default.'),
help=_('The name of the hook to run when inspector receives '
'inspection information from a node it isn\'t already '
'aware of. This hook is ignored by default.')),
cfg.StrOpt('store_data',
default='none',
choices=VALID_STORE_DATA_VALUES,
help='Method for storing introspection data. If set to \'none'
'\', introspection data will not be stored.'),
help=_('Method for storing introspection data. If set to \'none'
'\', introspection data will not be stored.')),
cfg.StrOpt('store_data_location',
default=None,
help='Name of the key to store the location of stored data in '
'the extra column of the Ironic database.'),
help=_('Name of the key to store the location of stored data '
'in the extra column of the Ironic database.')),
cfg.BoolOpt('disk_partitioning_spacing',
default=True,
help='Whether to leave 1 GiB of disk size untouched for '
'partitioning. Only has effect when used with the IPA '
'as a ramdisk, for older ramdisk local_gb is '
'calculated on the ramdisk side.'),
help=_('Whether to leave 1 GiB of disk size untouched for '
'partitioning. Only has effect when used with the IPA '
'as a ramdisk, for older ramdisk local_gb is '
'calculated on the ramdisk side.')),
cfg.BoolOpt('log_bmc_address',
default=True,
help='Whether to log node BMC address with every message '
'during processing.'),
help=_('Whether to log node BMC address with every message '
'during processing.')),
cfg.StrOpt('ramdisk_logs_filename_format',
default='{uuid}_{dt:%Y%m%d-%H%M%S.%f}.tar.gz',
help='File name template for storing ramdisk logs. The '
'following replacements can be used: '
'{uuid} - node UUID or "unknown", '
'{bmc} - node BMC address or "unknown", '
'{dt} - current UTC date and time, '
'{mac} - PXE booting MAC or "unknown".'),
help=_('File name template for storing ramdisk logs. The '
'following replacements can be used: '
'{uuid} - node UUID or "unknown", '
'{bmc} - node BMC address or "unknown", '
'{dt} - current UTC date and time, '
'{mac} - PXE booting MAC or "unknown".')),
cfg.BoolOpt('power_off',
default=True,
help='Whether to power off a node after introspection.'),
help=_('Whether to power off a node after introspection.')),
]
DISCOVERD_OPTS = [
cfg.StrOpt('database',
default='',
help='SQLite3 database to store nodes under introspection, '
'required. Do not use :memory: here, it won\'t work. '
'DEPRECATED: use [database]/connection.',
help=_('SQLite3 database to store nodes under introspection, '
'required. Do not use :memory: here, it won\'t work. '
'DEPRECATED: use [database]/connection.'),
deprecated_for_removal=True),
]
SERVICE_OPTS = [
cfg.StrOpt('listen_address',
default='0.0.0.0',
help='IP to listen on.',
help=_('IP to listen on.'),
deprecated_group='discoverd'),
cfg.PortOpt('listen_port',
default=5050,
help='Port to listen on.',
help=_('Port to listen on.'),
deprecated_group='discoverd'),
cfg.StrOpt('auth_strategy',
default='keystone',
choices=('keystone', 'noauth'),
help='Authentication method used on the ironic-inspector '
'API. Either "noauth" or "keystone" are currently valid '
'options. "noauth" will disable all authentication.'),
help=_('Authentication method used on the ironic-inspector '
'API. Either "noauth" or "keystone" are currently valid '
'options. "noauth" will disable all authentication.')),
cfg.BoolOpt('authenticate',
default=None,
help='DEPRECATED: use auth_strategy.',
help=_('DEPRECATED: use auth_strategy.'),
deprecated_group='discoverd',
deprecated_for_removal=True),
cfg.IntOpt('timeout',
default=3600,
help='Timeout after which introspection is considered failed, '
'set to 0 to disable.',
help=_('Timeout after which introspection is considered '
'failed, set to 0 to disable.'),
deprecated_group='discoverd'),
cfg.IntOpt('node_status_keep_time',
default=604800,
help='For how much time (in seconds) to keep status '
'information about nodes after introspection was '
'finished for them. Default value is 1 week.',
help=_('For how much time (in seconds) to keep status '
'information about nodes after introspection was '
'finished for them. Default value is 1 week.'),
deprecated_group='discoverd'),
cfg.IntOpt('clean_up_period',
default=60,
help='Amount of time in seconds, after which repeat clean up '
'of timed out nodes and old nodes status information.',
help=_('Amount of time in seconds, after which repeat clean up '
'of timed out nodes and old nodes status information.'),
deprecated_group='discoverd'),
cfg.BoolOpt('use_ssl',
default=False,
help='SSL Enabled/Disabled'),
help=_('SSL Enabled/Disabled')),
cfg.StrOpt('ssl_cert_path',
default='',
help='Path to SSL certificate'),
help=_('Path to SSL certificate')),
cfg.StrOpt('ssl_key_path',
default='',
help='Path to SSL key'),
help=_('Path to SSL key')),
cfg.IntOpt('max_concurrency',
default=1000, min=2,
help='The green thread pool size.'),
help=_('The green thread pool size.')),
cfg.IntOpt('introspection_delay',
default=5,
help='Delay (in seconds) between two introspections.'),
help=_('Delay (in seconds) between two introspections.')),
cfg.StrOpt('introspection_delay_drivers',
default='^.*_ssh$',
help='Only node with drivers matching this regular expression '
'will be affected by introspection_delay setting.'),
help=_('Only node with drivers matching this regular '
'expression will be affected by introspection_delay '
'setting.')),
cfg.ListOpt('ipmi_address_fields',
default=['ilo_address', 'drac_host', 'cimc_address'],
help='Ironic driver_info fields that are equivalent '
'to ipmi_address.'),
help=_('Ironic driver_info fields that are equivalent '
'to ipmi_address.')),
cfg.StrOpt('rootwrap_config',
default="/etc/ironic-inspector/rootwrap.conf",
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
help=_('Path to the rootwrap configuration file to use for '
'running commands as root')),
]
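
Most of this change wraps option help strings in ``_()`` so they are picked up for translation. A minimal sketch of what such an i18n module provides (ironic_inspector's real one lives in ironic_inspector/common/i18n.py; contents assumed):

    import oslo_i18n

    _translators = oslo_i18n.TranslatorFactory(domain='ironic-inspector')
    _ = _translators.primary         # user-facing messages
    _LI = _translators.log_info      # log messages, info level
    _LW = _translators.log_warning   # log messages, warning level
    _LE = _translators.log_error     # log messages, error level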

View File

@ -46,7 +46,8 @@ def _iptables(*args, **kwargs):
except subprocess.CalledProcessError as exc:
output = exc.output.replace('\n', '. ')
if ignore:
LOG.debug('Ignoring failed iptables %s: %s', args, output)
LOG.debug('Ignoring failed iptables %(args)s: %(output)s',
{'args': args, 'output': output})
else:
LOG.error(_LE('iptables %(iptables)s failed: %(exc)s') %
{'iptables': args, 'exc': output})

View File

@ -514,7 +514,7 @@ def find_node(**attributes):
% (name, value))
value_list = []
for v in value:
value_list.append('name="%s" AND value="%s"' % (name, v))
value_list.append("name='%s' AND value='%s'" % (name, v))
stmt = ('select distinct uuid from attributes where ' +
' OR '.join(value_list))
rows = (db.model_query(db.Attribute.uuid).from_statement(
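
With hypothetical attribute values, the statement built above now renders with single-quoted string literals, e.g.:

    select distinct uuid from attributes where name='mac' AND value='11:22:33:44:55:66'

Standard SQL reserves double quotes for identifiers, so single quotes are the portable choice for string values.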

View File

@ -15,7 +15,7 @@
from oslo_config import cfg
from ironic_inspector.common.i18n import _LI, _LW
from ironic_inspector.common.i18n import _, _LI, _LW
from ironic_inspector.plugins import base
from ironic_inspector import utils
@ -32,11 +32,11 @@ DEFAULT_CPU_FLAGS_MAPPING = {
CAPABILITIES_OPTS = [
cfg.BoolOpt('boot_mode',
default=False,
help='Whether to store the boot mode (BIOS or UEFI).'),
help=_('Whether to store the boot mode (BIOS or UEFI).')),
cfg.DictOpt('cpu_flags',
default=DEFAULT_CPU_FLAGS_MAPPING,
help='Mapping between a CPU flag and a capability to set '
'if this flag is present.'),
help=_('Mapping between a CPU flag and a capability to set '
'if this flag is present.')),
]

View File

@ -24,8 +24,8 @@ from ironic_inspector import utils
DISCOVERY_OPTS = [
cfg.StrOpt('enroll_node_driver',
default='fake',
help='The name of the Ironic driver used by the enroll '
'hook when creating a new node in Ironic.'),
help=_('The name of the Ironic driver used by the enroll '
'hook when creating a new node in Ironic.')),
]

View File

@ -20,15 +20,11 @@ is stored in the 'inspector' container.
import json
from oslo_config import cfg
from ironic_inspector.common.i18n import _LW
from ironic_inspector.common import swift
from ironic_inspector.plugins import base
from ironic_inspector import utils
CONF = cfg.CONF
LOG = utils.getProcessingLogger(__name__)
EDEPLOY_ITEM_SIZE = 4

View File

@ -0,0 +1,87 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gather and distinguish PCI devices from inventory."""
import collections
import json
from oslo_config import cfg
from ironic_inspector.common.i18n import _, _LI, _LW, _LE
from ironic_inspector.plugins import base
from ironic_inspector import utils
PCI_DEVICES_OPTS = [
cfg.MultiStrOpt('alias',
default=[],
help=_('An alias for PCI device identified by '
'\'vendor_id\' and \'product_id\' fields. Format: '
'{"vendor_id": "1234", "product_id": "5678", '
'"name": "pci_dev1"}')),
]
def list_opts():
return [
('pci_devices', PCI_DEVICES_OPTS)
]
CONF = cfg.CONF
CONF.register_opts(PCI_DEVICES_OPTS, group='pci_devices')
LOG = utils.getProcessingLogger(__name__)
def _parse_pci_alias_entry():
parsed_pci_devices = []
for pci_alias_entry in CONF.pci_devices.alias:
try:
parsed_entry = json.loads(pci_alias_entry)
if set(parsed_entry) != {'vendor_id', 'product_id', 'name'}:
raise KeyError(_LE("The 'alias' entry should contain "
"exactly 'vendor_id', 'product_id' and "
"'name' keys"))
parsed_pci_devices.append(parsed_entry)
except (ValueError, KeyError) as ex:
LOG.error(_LE("Error parsing 'alias' option: %s"), ex)
return {(dev['vendor_id'], dev['product_id']): dev['name']
for dev in parsed_pci_devices}
class PciDevicesHook(base.ProcessingHook):
"""Processing hook for counting and distinguishing various PCI devices.
That information can later be used by nova for node scheduling.
"""
aliases = _parse_pci_alias_entry()
def _found_pci_devices_count(self, found_pci_devices):
return collections.Counter([(dev['vendor_id'], dev['product_id'])
for dev in found_pci_devices
if (dev['vendor_id'], dev['product_id'])
in self.aliases])
def before_update(self, introspection_data, node_info, **kwargs):
if 'pci_devices' not in introspection_data:
if CONF.pci_devices.alias:
LOG.warning(_LW('No PCI devices information was received from '
'the ramdisk.'))
return
alias_count = {self.aliases[id_pair]: count for id_pair, count in
self._found_pci_devices_count(
introspection_data['pci_devices']).items()}
if alias_count:
node_info.update_capabilities(**alias_count)
LOG.info(_LI('Found the following PCI devices: %s'),
alias_count)
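
The capabilities this hook stores (for example ``pci_dev1:2``) can then be matched during scheduling. A hedged example, assuming nova's ComputeCapabilitiesFilter is enabled and an alias named pci_dev1 is configured:

    openstack flavor set baremetal --property capabilities:pci_dev1="2"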

View File

@ -92,6 +92,11 @@ def _find_node_info(introspection_data, failures):
if not_found_hook is None:
failures.append(_('Look up error: %s') % exc)
return
LOG.debug('Running node_not_found_hook %s',
CONF.processing.node_not_found_hook,
data=introspection_data)
# NOTE(sambetts): If not_found_hook is not none it means that we were
# unable to find the node in the node cache and there is a node not
# found hook defined so we should try to send the introspection data
@ -110,6 +115,8 @@ def _find_node_info(introspection_data, failures):
def _run_pre_hooks(introspection_data, failures):
hooks = plugins_base.processing_hooks_manager()
for hook_ext in hooks:
LOG.debug('Running pre-processing hook %s', hook_ext.name,
data=introspection_data)
# NOTE(dtantsur): catch exceptions, so that we have a chance to update
# node introspection status after look up
try:
@ -251,6 +258,8 @@ def _run_post_hooks(node_info, introspection_data):
hooks = plugins_base.processing_hooks_manager()
for hook_ext in hooks:
LOG.debug('Running post-processing hook %s', hook_ext.name,
node_info=node_info, data=introspection_data)
hook_ext.obj.before_update(introspection_data, node_info)
@ -376,11 +385,14 @@ def _reapply(node_info):
# runs in background
try:
introspection_data = _get_unprocessed_data(node_info.uuid)
except Exception:
except Exception as exc:
LOG.exception(_LE('Encountered exception while fetching '
'stored introspection data'),
node_info=node_info)
node_info.release_lock()
msg = (_('Unexpected exception %(exc_class)s while fetching '
'unprocessed introspection data from Swift: %(error)s') %
{'exc_class': exc.__class__.__name__, 'error': exc})
node_info.finished(error=msg)
return
failures = []

View File

@ -12,7 +12,6 @@
# limitations under the License.
import fixtures
import futurist
import mock
from oslo_concurrency import lockutils
@ -21,6 +20,7 @@ from oslo_config import fixture as config_fixture
from oslo_log import log
from oslo_utils import units
from oslo_utils import uuidutils
from oslotest import base as test_base
from ironic_inspector.common import i18n
# Import configuration options
@ -33,7 +33,7 @@ from ironic_inspector import utils
CONF = cfg.CONF
class BaseTest(fixtures.TestWithFixtures):
class BaseTest(test_base.BaseTestCase):
IS_FUNCTIONAL = False

View File

@ -127,41 +127,41 @@ class Base(base.NodeTest):
return res
def call_introspect(self, uuid, new_ipmi_username=None,
new_ipmi_password=None):
new_ipmi_password=None, **kwargs):
endpoint = '/v1/introspection/%s' % uuid
if new_ipmi_password:
endpoint += '?new_ipmi_password=%s' % new_ipmi_password
if new_ipmi_username:
endpoint += '&new_ipmi_username=%s' % new_ipmi_username
return self.call('post', endpoint)
return self.call('post', endpoint, **kwargs)
def call_get_status(self, uuid):
return self.call('get', '/v1/introspection/%s' % uuid).json()
def call_get_status(self, uuid, **kwargs):
return self.call('get', '/v1/introspection/%s' % uuid, **kwargs).json()
def call_abort_introspect(self, uuid):
return self.call('post', '/v1/introspection/%s/abort' % uuid)
def call_abort_introspect(self, uuid, **kwargs):
return self.call('post', '/v1/introspection/%s/abort' % uuid, **kwargs)
def call_reapply(self, uuid):
def call_reapply(self, uuid, **kwargs):
return self.call('post', '/v1/introspection/%s/data/unprocessed' %
uuid)
uuid, **kwargs)
def call_continue(self, data):
return self.call('post', '/v1/continue', data=data).json()
def call_continue(self, data, **kwargs):
return self.call('post', '/v1/continue', data=data, **kwargs).json()
def call_add_rule(self, data):
return self.call('post', '/v1/rules', data=data).json()
def call_add_rule(self, data, **kwargs):
return self.call('post', '/v1/rules', data=data, **kwargs).json()
def call_list_rules(self):
return self.call('get', '/v1/rules').json()['rules']
def call_list_rules(self, **kwargs):
return self.call('get', '/v1/rules', **kwargs).json()['rules']
def call_delete_rules(self):
self.call('delete', '/v1/rules')
def call_delete_rules(self, **kwargs):
self.call('delete', '/v1/rules', **kwargs)
def call_delete_rule(self, uuid):
self.call('delete', '/v1/rules/' + uuid)
def call_delete_rule(self, uuid, **kwargs):
self.call('delete', '/v1/rules/' + uuid, **kwargs)
def call_get_rule(self, uuid):
return self.call('get', '/v1/rules/' + uuid).json()
def call_get_rule(self, uuid, **kwargs):
return self.call('get', '/v1/rules/' + uuid, **kwargs).json()
class Test(Base):
@ -380,7 +380,7 @@ class Test(Base):
res = self.call_abort_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.assertEqual(res.status_code, 202)
self.assertEqual(202, res.status_code)
status = self.call_get_status(self.uuid)
self.assertTrue(status['finished'])
self.assertEqual('Canceled by operator', status['error'])
@ -489,4 +489,4 @@ def mocked_server():
if __name__ == '__main__':
with mocked_server():
unittest.main()
unittest.main(verbosity=2)

View File

@ -14,6 +14,15 @@ from oslo_config import cfg
from tempest import config # noqa
service_available_group = cfg.OptGroup(name="service_available",
title="Available OpenStack Services")
ServiceAvailableGroup = [
cfg.BoolOpt("ironic-inspector",
default=True,
help="Whether or not ironic-inspector is expected to be"
" available"),
]
baremetal_introspection_group = cfg.OptGroup(
name="baremetal_introspection",
@ -45,8 +54,20 @@ BaremetalIntrospectionGroup = [
default=300,
help="Time out for wait until nova becomes aware of "
"bare metal instances"),
# NOTE(aarefiev): status_check_period default is 60s, but checking
# node state takes some time (API call), so races appear here;
# 80s should be enough to make one more check.
cfg.IntOpt('ironic_sync_timeout',
default=60,
default=80,
help="Time it might take for Ironic--Inspector "
"sync to happen"),
cfg.IntOpt('discovery_timeout',
default=300,
help="Time to wait until new node would enrolled in "
"ironic"),
cfg.BoolOpt('auto_discovery_feature',
default=False,
help="Is the auto-discovery feature enabled. Enroll hook "
"should be specified in node_not_found_hook - processing "
"section of inspector.conf"),
]
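
The new options are read from tempest.conf; a hypothetical snippet enabling the discovery test path:

    [service_available]
    ironic-inspector = True

    [baremetal_introspection]
    auto_discovery_feature = True
    discovery_timeout = 300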

View File

@ -13,6 +13,7 @@
import os
from oslo_config import cfg
from tempest import config as tempest_config
from tempest.test_discover import plugins
@ -28,10 +29,19 @@ class InspectorTempestPlugin(plugins.TempestPlugin):
return full_test_dir, base_path
def register_opts(self, conf):
tempest_config.register_opt_group(
conf, config.service_available_group,
config.ServiceAvailableGroup)
tempest_config.register_opt_group(
conf, config.baremetal_introspection_group,
config.BaremetalIntrospectionGroup)
# FIXME(dtantsur): pretend like Neutron does not exist due to random
# failures, see https://bugs.launchpad.net/bugs/1621791.
cfg.CONF.set_override('neutron', False, 'service_available')
def get_opt_lists(self):
return [(config.baremetal_introspection_group.name,
config.BaremetalIntrospectionGroup)]
return [
(config.baremetal_introspection_group.name,
config.BaremetalIntrospectionGroup),
('service_available', config.ServiceAvailableGroup)
]

View File

@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
from ironic_tempest_plugin.services.baremetal import base
from tempest import clients
from tempest.common import credentials_factory as common_creds
@ -47,13 +45,10 @@ class BaremetalIntrospectionClient(base.BaremetalClient):
return self._delete_request('rules', uuid=None)
@base.handle_errors
def import_rule(self, rule_path):
"""Import introspection rules from a json file."""
with open(rule_path, 'r') as fp:
rules = json.load(fp)
if not isinstance(rules, list):
rules = [rules]
def create_rules(self, rules):
"""Create introspection rules."""
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
self._create_request('rules', rule)
@ -68,3 +63,13 @@ class BaremetalIntrospectionClient(base.BaremetalClient):
return self._show_request('introspection', uuid=uuid,
uri='/%s/introspection/%s/data' %
(self.uri_prefix, uuid))
@base.handle_errors
def start_introspection(self, uuid):
"""Start introspection for a node."""
resp, _body = self.post(url=('/%s/introspection/%s' %
(self.uri_prefix, uuid)),
body=None)
self.expected_success(202, resp.status)
return resp

View File

@ -10,13 +10,16 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import six
import time
import tempest
from tempest import config
from tempest.lib.common.api_version_utils import LATEST_MICROVERSION
from tempest.lib import exceptions as lib_exc
from tempest import test
from ironic_inspector.test.inspector_tempest_plugin import exceptions
from ironic_inspector.test.inspector_tempest_plugin.services import \
@ -69,16 +72,28 @@ class InspectorScenarioTest(BaremetalScenarioTest):
def node_list(self):
return self.baremetal_client.list_nodes()[1]['nodes']
def node_port_list(self, node_uuid):
return self.baremetal_client.list_node_ports(node_uuid)[1]['ports']
def node_update(self, uuid, patch):
return self.baremetal_client.update_node(uuid, **patch)
def node_show(self, uuid):
return self.baremetal_client.show_node(uuid)[1]
def node_delete(self, uuid):
return self.baremetal_client.delete_node(uuid)
def node_filter(self, filter=lambda node: True, nodes=None):
return self.item_filter(self.node_list, self.node_show,
filter=filter, items=nodes)
def node_set_power_state(self, uuid, state):
self.baremetal_client.set_node_power_state(uuid, state)
def node_set_provision_state(self, uuid, state):
self.baremetal_client.set_node_provision_state(uuid, state)
def hypervisor_stats(self):
return (self.admin_manager.hypervisor_client.
show_hypervisor_statistics())
@ -90,7 +105,12 @@ class InspectorScenarioTest(BaremetalScenarioTest):
self.introspection_client.purge_rules()
def rule_import(self, rule_path):
self.introspection_client.import_rule(rule_path)
with open(rule_path, 'r') as fp:
rules = json.load(fp)
self.introspection_client.create_rules(rules)
def rule_import_from_dict(self, rules):
self.introspection_client.create_rules(rules)
def introspection_status(self, uuid):
return self.introspection_client.get_status(uuid)[1]
@ -98,6 +118,9 @@ class InspectorScenarioTest(BaremetalScenarioTest):
def introspection_data(self, uuid):
return self.introspection_client.get_data(uuid)[1]
def introspection_start(self, uuid):
return self.introspection_client.start_introspection(uuid)
def baremetal_flavor(self):
flavor_id = CONF.compute.flavor_ref
flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
@ -118,11 +141,31 @@ class InspectorScenarioTest(BaremetalScenarioTest):
def terminate_instance(self, instance):
return super(InspectorScenarioTest, self).terminate_instance(instance)
def wait_for_node(self, node_name):
def check_node():
try:
self.node_show(node_name)
except lib_exc.NotFound:
return False
return True
if not test.call_until_true(
check_node,
duration=CONF.baremetal_introspection.discovery_timeout,
sleep_for=20):
msg = ("Timed out waiting for node %s " % node_name)
raise lib_exc.TimeoutException(msg)
inspected_node = self.node_show(self.node_info['name'])
self.wait_for_introspection_finished(inspected_node['uuid'])
# TODO(aarefiev): switch to call_until_true
def wait_for_introspection_finished(self, node_ids):
"""Waits for introspection of baremetal nodes to finish.
"""
if isinstance(node_ids, six.text_type):
node_ids = [node_ids]
start = int(time.time())
not_introspected = {node_id for node_id in node_ids}

View File

@ -105,8 +105,7 @@ class InspectorSmokeTest(manager.InspectorScenarioTest):
@test.idempotent_id('a702d1f1-88e4-42ce-88ef-cba2d9e3312e')
@test.attr(type='smoke')
@test.services('baremetal', 'compute', 'image',
'network', 'object_storage')
@test.services('baremetal', 'object_storage')
def test_baremetal_introspection(self):
"""This smoke test case follows this very basic set of operations:

View File

@ -0,0 +1,147 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from ironic_tempest_plugin.tests.scenario import baremetal_manager
from tempest import config
from tempest import test # noqa
from ironic_inspector.test.inspector_tempest_plugin.tests import manager
CONF = config.CONF
ProvisionStates = baremetal_manager.BaremetalProvisionStates
class InspectorDiscoveryTest(manager.InspectorScenarioTest):
@classmethod
def skip_checks(cls):
super(InspectorDiscoveryTest, cls).skip_checks()
if not CONF.baremetal_introspection.auto_discovery_feature:
msg = ("Please, provide a value for node_not_found_hook in "
"processing section of inspector.conf for enable "
"auto-discovery feature.")
raise cls.skipException(msg)
def setUp(self):
super(InspectorDiscoveryTest, self).setUp()
discovered_node = self._get_discovery_node()
self.node_info = self._get_node_info(discovered_node)
rule = self._generate_discovery_rule(self.node_info)
self.rule_import_from_dict(rule)
self.addCleanup(self.rule_purge)
def _get_node_info(self, node_uuid):
node = self.node_show(node_uuid)
ports = self.node_port_list(node_uuid)
node['port_macs'] = [port['address'] for port in ports]
return node
def _get_discovery_node(self):
nodes = self.node_list()
discovered_node = None
for node in nodes:
if (node['provision_state'] == ProvisionStates.AVAILABLE or
node['provision_state'] == ProvisionStates.ENROLL or
node['provision_state'] is ProvisionStates.NOSTATE):
discovered_node = node['uuid']
break
self.assertIsNotNone(discovered_node)
return discovered_node
def _generate_discovery_rule(self, node):
rule = dict()
rule["description"] = "Node %s discovery rule" % node['name']
rule["actions"] = [
{"action": "set-attribute", "path": "/name",
"value": "%s" % node['name']},
{"action": "set-attribute", "path": "/driver",
"value": "%s" % node['driver']},
]
for key, value in node['driver_info'].items():
rule["actions"].append(
{"action": "set-attribute", "path": "/driver_info/%s" % key,
"value": "%s" % value})
rule["conditions"] = [
{"op": "eq", "field": "data://auto_discovered", "value": True}
]
return rule
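# With hypothetical node values, the helper above yields a rule such as:
# {"description": "Node node-0 discovery rule",
#  "actions": [
#      {"action": "set-attribute", "path": "/name", "value": "node-0"},
#      {"action": "set-attribute", "path": "/driver",
#       "value": "pxe_ipmitool"},
#      {"action": "set-attribute", "path": "/driver_info/ipmi_address",
#       "value": "192.0.2.10"}],
#  "conditions": [
#      {"op": "eq", "field": "data://auto_discovered", "value": True}]}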
def verify_node_introspection_data(self, node):
data = self.introspection_data(node['uuid'])
self.assertEqual(data['cpu_arch'],
self.flavor['properties']['cpu_arch'])
self.assertEqual(int(data['memory_mb']),
int(self.flavor['ram']))
self.assertEqual(int(data['cpus']), int(self.flavor['vcpus']))
def verify_node_flavor(self, node):
expected_cpus = self.flavor['vcpus']
expected_memory_mb = self.flavor['ram']
expected_cpu_arch = self.flavor['properties']['cpu_arch']
disk_size = self.flavor['disk']
ephemeral_size = self.flavor['OS-FLV-EXT-DATA:ephemeral']
expected_local_gb = disk_size + ephemeral_size
self.assertEqual(expected_cpus,
int(node['properties']['cpus']))
self.assertEqual(expected_memory_mb,
int(node['properties']['memory_mb']))
self.assertEqual(expected_local_gb,
int(node['properties']['local_gb']))
self.assertEqual(expected_cpu_arch,
node['properties']['cpu_arch'])
def verify_node_driver_info(self, node_info, inspected_node):
for key in node_info['driver_info']:
self.assertEqual(six.text_type(node_info['driver_info'][key]),
inspected_node['driver_info'].get(key))
@test.idempotent_id('dd3abe5e-0d23-488d-bb4e-344cdeff7dcb')
@test.services('baremetal', 'compute')
def test_berametal_auto_discovery(self):
"""This test case follows this set of operations:
* Choose appropriate node, based on provision state;
* Get node info;
* Generate discovery rule;
* Delete discovered node from ironic;
* Start baremetal vm via virsh;
* Waiting for node introspection;
* Verify introspected node.
"""
# NOTE(aarefiev): workaround for infra: the 'tempest' user doesn't
# have virsh privileges, so let's power on the node via ironic
# and then delete it. Because the node is blacklisted in inspector
# we can't just power it on, therefore start introspection is used
# to whitelist the discovered node first.
self.baremetal_client.set_node_provision_state(
self.node_info['uuid'], 'manage')
self.introspection_start(self.node_info['uuid'])
self.wait_power_state(
self.node_info['uuid'],
baremetal_manager.BaremetalPowerStates.POWER_ON)
self.node_delete(self.node_info['uuid'])
self.wait_for_node(self.node_info['name'])
inspected_node = self.node_show(self.node_info['name'])
self.verify_node_flavor(inspected_node)
self.verify_node_introspection_data(inspected_node)
self.verify_node_driver_info(self.node_info, inspected_node)

View File

@ -75,7 +75,7 @@ class TestGetIpmiAddress(base.BaseTest):
node = mock.Mock(spec=['driver_info', 'uuid'],
driver_info={'ipmi_address': '192.168.1.1'})
ip = ir_utils.get_ipmi_address(node)
self.assertEqual(ip, '192.168.1.1')
self.assertEqual('192.168.1.1', ip)
@mock.patch('socket.gethostbyname')
def test_good_hostname_resolves(self, mock_socket):
@ -84,7 +84,7 @@ class TestGetIpmiAddress(base.BaseTest):
mock_socket.return_value = '192.168.1.1'
ip = ir_utils.get_ipmi_address(node)
mock_socket.assert_called_once_with('www.example.com')
self.assertEqual(ip, '192.168.1.1')
self.assertEqual('192.168.1.1', ip)
@mock.patch('socket.gethostbyname')
def test_bad_hostname_errors(self, mock_socket):
@ -101,7 +101,7 @@ class TestGetIpmiAddress(base.BaseTest):
self.cfg.config(ipmi_address_fields=['foo', 'bar', 'baz'])
ip = ir_utils.get_ipmi_address(node)
self.assertEqual(ip, '192.168.1.1')
self.assertEqual('192.168.1.1', ip)
def test_ipmi_bridging_enabled(self):
node = mock.Mock(spec=['driver_info', 'uuid'],

View File

@ -188,14 +188,14 @@ class TestIntrospect(BaseTest):
def test_failed_to_get_node(self, client_mock, add_mock, filters_mock):
cli = client_mock.return_value
cli.node.get.side_effect = exceptions.NotFound()
self.assertRaisesRegexp(utils.Error,
'Node %s was not found' % self.uuid,
introspect.introspect, self.uuid)
self.assertRaisesRegex(utils.Error,
'Node %s was not found' % self.uuid,
introspect.introspect, self.uuid)
cli.node.get.side_effect = exceptions.BadRequest()
self.assertRaisesRegexp(utils.Error,
'%s: Bad Request' % self.uuid,
introspect.introspect, self.uuid)
self.assertRaisesRegex(utils.Error,
'%s: Bad Request' % self.uuid,
introspect.introspect, self.uuid)
self.assertEqual(0, self.node_info.ports.call_count)
self.assertEqual(0, filters_mock.call_count)
@ -210,7 +210,7 @@ class TestIntrospect(BaseTest):
cli.node.validate.return_value = mock.Mock(power={'result': False,
'reason': 'oops'})
self.assertRaisesRegexp(
self.assertRaisesRegex(
utils.Error,
'Failed validation of power interface',
introspect.introspect, self.uuid)
@ -227,7 +227,7 @@ class TestIntrospect(BaseTest):
cli = client_mock.return_value
cli.node.get.return_value = self.node
self.assertRaisesRegexp(
self.assertRaisesRegex(
utils.Error, 'Invalid provision state for introspection: "active"',
introspect.introspect, self.uuid)
@ -365,9 +365,9 @@ class TestSetIpmiCredentials(BaseTest):
'processing')
self._prepare(client_mock)
self.assertRaisesRegexp(utils.Error, 'disabled',
introspect.introspect, self.uuid,
new_ipmi_credentials=self.new_creds)
self.assertRaisesRegex(utils.Error, 'disabled',
introspect.introspect, self.uuid,
new_ipmi_credentials=self.new_creds)
def test_no_username(self, client_mock, add_mock, filters_mock):
self._prepare(client_mock)
@ -446,8 +446,8 @@ class TestAbort(BaseTest):
exc = utils.Error('Not found.', code=404)
get_mock.side_effect = exc
self.assertRaisesRegexp(utils.Error, str(exc),
introspect.abort, self.uuid)
self.assertRaisesRegex(utils.Error, str(exc),
introspect.abort, self.uuid)
self.assertEqual(0, filters_mock.call_count)
self.assertEqual(0, cli.node.set_power_state.call_count)
@ -459,8 +459,8 @@ class TestAbort(BaseTest):
self.node_info.acquire_lock.return_value = False
self.node_info.started_at = time.time()
self.assertRaisesRegexp(utils.Error, 'Node is locked, please, '
'retry later', introspect.abort, self.uuid)
self.assertRaisesRegex(utils.Error, 'Node is locked, please, '
'retry later', introspect.abort, self.uuid)
self.assertEqual(0, filters_mock.call_count)
self.assertEqual(0, cli.node.set_power_state.call_count)

View File

@ -21,7 +21,6 @@ from ironic_inspector.common import keystone
from ironic_inspector.test import base
CONF = cfg.CONF
TESTGROUP = 'keystone_test'

View File

@ -584,6 +584,7 @@ class TestInit(test_base.BaseTest):
@mock.patch.object(firewall, 'clean_up', lambda: None)
def tearDown(self):
self.service.shutdown()
super(TestInit, self).tearDown()
def test_ok(self, mock_node_cache, mock_get_client, mock_auth,
mock_firewall):

View File

@ -265,10 +265,9 @@ class MigrationCheckersMixin(object):
self.assertIsInstance(rules.c.description.type, sqlalchemy.types.Text)
self.assertIn('disabled', col_names)
# in some backends bool type is integer
self.assertTrue(isinstance(rules.c.disabled.type,
sqlalchemy.types.Boolean) or
isinstance(rules.c.disabled.type,
sqlalchemy.types.Integer))
self.assertIsInstance(rules.c.disabled.type,
(sqlalchemy.types.Boolean,
sqlalchemy.types.Integer))
conditions = db_utils.get_table(engine, 'rule_conditions')
col_names = [column.name for column in conditions.c]

View File

@ -33,12 +33,12 @@ class TestWithValidation(test_base.BaseTest):
def test_required_missing(self):
err_re = 'missing required parameter\(s\): x'
self.assertRaisesRegexp(ValueError, err_re, self.test.validate, {})
self.assertRaisesRegexp(ValueError, err_re, self.test.validate,
{'x': None})
self.assertRaisesRegexp(ValueError, err_re, self.test.validate,
{'y': 1, 'z': 2})
self.assertRaisesRegex(ValueError, err_re, self.test.validate, {})
self.assertRaisesRegex(ValueError, err_re, self.test.validate,
{'x': None})
self.assertRaisesRegex(ValueError, err_re, self.test.validate,
{'y': 1, 'z': 2})
def test_unexpected(self):
self.assertRaisesRegexp(ValueError, 'unexpected parameter\(s\): foo',
self.test.validate, {'foo': 'bar', 'x': 42})
self.assertRaisesRegex(ValueError, 'unexpected parameter\(s\): foo',
self.test.validate, {'foo': 'bar', 'x': 42})

View File

@ -75,7 +75,7 @@ class TestExtraHardware(test_base.NodeTest):
[{'op': 'add', 'path': '/extra/hardware_swift_object',
'value': name}])
self.assertFalse('data' in introspection_data)
self.assertNotIn('data', introspection_data)
def test_no_data_recieved(self, patch_mock, swift_mock):
introspection_data = {'cats': 'meow'}

View File

@ -0,0 +1,102 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock

from ironic_inspector import node_cache
from ironic_inspector.plugins import base
from ironic_inspector.plugins import pci_devices
from ironic_inspector.test import base as test_base


class TestPciDevicesHook(test_base.NodeTest):
    hook = pci_devices.PciDevicesHook()

    def test_parse_pci_alias_entry(self):
        pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",'
                     ' "name": "baz1"}',
                     '{"vendor_id": "foo2", "product_id": "bar2",'
                     ' "name": "baz2"}']
        valid_pci_entry = {("foo1", "bar1"): "baz1", ("foo2", "bar2"): "baz2"}
        base.CONF.set_override('alias', pci_alias, 'pci_devices')
        parsed_pci_entry = pci_devices._parse_pci_alias_entry()
        self.assertDictEqual(valid_pci_entry, parsed_pci_entry)

    def test_parse_pci_alias_entry_no_entries(self):
        pci_alias = []
        base.CONF.set_override('alias', pci_alias, 'pci_devices')
        parsed_pci_alias = pci_devices._parse_pci_alias_entry()
        self.assertFalse(parsed_pci_alias)

    @mock.patch('ironic_inspector.plugins.pci_devices.LOG')
    def test_parse_pci_alias_entry_invalid_json(self, mock_oslo_log):
        pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",'
                     ' "name": "baz1"}', '{"invalid" = "entry"}']
        base.CONF.set_override('alias', pci_alias, 'pci_devices')
        valid_pci_alias = {("foo1", "bar1"): "baz1"}
        parsed_pci_alias = pci_devices._parse_pci_alias_entry()
        self.assertDictEqual(valid_pci_alias, parsed_pci_alias)
        mock_oslo_log.error.assert_called_once()

    @mock.patch('ironic_inspector.plugins.pci_devices.LOG')
    def test_parse_pci_alias_entry_invalid_keys(self, mock_oslo_log):
        pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",'
                     ' "name": "baz1"}', '{"invalid": "keys"}']
        base.CONF.set_override('alias', pci_alias, 'pci_devices')
        valid_pci_alias = {("foo1", "bar1"): "baz1"}
        parsed_pci_alias = pci_devices._parse_pci_alias_entry()
        self.assertDictEqual(valid_pci_alias, parsed_pci_alias)
        mock_oslo_log.error.assert_called_once()

    @mock.patch.object(hook, 'aliases', {("1234", "5678"): "pci_dev1",
                                         ("9876", "5432"): "pci_dev2"})
    @mock.patch.object(node_cache.NodeInfo, 'update_capabilities',
                       autospec=True)
    def test_before_update(self, mock_update_props):
        self.data['pci_devices'] = [
            {"vendor_id": "1234", "product_id": "5678"},
            {"vendor_id": "1234", "product_id": "5678"},
            {"vendor_id": "1234", "product_id": "7890"},
            {"vendor_id": "9876", "product_id": "5432"}
        ]
        expected_pci_devices_count = {"pci_dev1": 2, "pci_dev2": 1}
        self.hook.before_update(self.data, self.node_info)
        mock_update_props.assert_called_once_with(self.node_info,
                                                  **expected_pci_devices_count)

    @mock.patch('ironic_inspector.plugins.pci_devices.LOG')
    @mock.patch.object(node_cache.NodeInfo, 'update_capabilities',
                       autospec=True)
    def test_before_update_no_pci_info_from_ipa(self, mock_update_props,
                                                mock_oslo_log):
        pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",'
                     ' "name": "baz1"}']
        base.CONF.set_override('alias', pci_alias, 'pci_devices')
        self.hook.before_update(self.data, self.node_info)
        mock_oslo_log.warning.assert_called_once()
        self.assertFalse(mock_update_props.called)

    @mock.patch.object(pci_devices, '_parse_pci_alias_entry')
    @mock.patch('ironic_inspector.plugins.pci_devices.LOG')
    @mock.patch.object(node_cache.NodeInfo, 'update_capabilities',
                       autospec=True)
    def test_before_update_no_match(self, mock_update_props, mock_oslo_log,
                                    mock_parse_pci_alias):
        self.data['pci_devices'] = [
            {"vendor_id": "1234", "product_id": "5678"},
            {"vendor_id": "1234", "product_id": "7890"},
        ]
        mock_parse_pci_alias.return_value = {("9876", "5432"): "pci_dev"}
        self.hook.before_update(self.data, self.node_info)
        self.assertFalse(mock_update_props.called)
        self.assertFalse(mock_oslo_log.info.called)
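
For orientation, here is a minimal sketch of the plugin behaviour these tests pin down. It is written against the test expectations only, not the actual contents of ironic_inspector/plugins/pci_devices.py; helper names and log messages are assumptions.

import collections
import json

from oslo_config import cfg
from oslo_log import log

from ironic_inspector.plugins import base

CONF = cfg.CONF
LOG = log.getLogger(__name__)


def _parse_pci_alias_entry():
    parsed_pci_devices = {}
    for pci_alias_entry in CONF.pci_devices.alias:
        try:
            parsed_entry = json.loads(pci_alias_entry)
            if set(parsed_entry) != {'vendor_id', 'product_id', 'name'}:
                raise KeyError('unexpected alias keys')
            parsed_pci_devices[(parsed_entry['vendor_id'],
                                parsed_entry['product_id'])] = \
                parsed_entry['name']
        except (ValueError, KeyError) as exc:
            # One error log per bad entry, as the invalid_json/invalid_keys
            # tests assert; the bad entry is skipped, valid ones are kept.
            LOG.error('Error parsing [pci_devices] alias entry %(entry)s: '
                      '%(exc)s', {'entry': pci_alias_entry, 'exc': exc})
    return parsed_pci_devices


class PciDevicesHook(base.ProcessingHook):
    """Count PCI devices matching configured aliases into capabilities."""

    aliases = _parse_pci_alias_entry()

    def before_update(self, introspection_data, node_info, **kwargs):
        if 'pci_devices' not in introspection_data:
            if CONF.pci_devices.alias:
                LOG.warning('No PCI device information in introspection '
                            'data; is the IPA pci-devices collector enabled?')
            return
        counts = collections.Counter(
            self.aliases[(dev['vendor_id'], dev['product_id'])]
            for dev in introspection_data['pci_devices']
            if (dev['vendor_id'], dev['product_id']) in self.aliases)
        if counts:
            # Capabilities such as pci_dev1=2 end up on the node, as
            # test_before_update expects.
            node_info.update_capabilities(**counts)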


@@ -144,8 +144,8 @@ class TestFailAction(test_base.BaseTest):
self.assertRaises(ValueError, self.act.validate, {})
def test_apply(self):
self.assertRaisesRegexp(utils.Error, 'boom',
self.act.apply, None, {'message': 'boom'})
self.assertRaisesRegex(utils.Error, 'boom',
self.act.apply, None, {'message': 'boom'})
class TestSetAttributeAction(test_base.NodeTest):
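
The recurring edit in this and the following test files, assertRaisesRegexp to assertRaisesRegex, tracks unittest itself: the method was renamed in Python 3.2 and the old spelling survives only as a deprecated alias that emits a DeprecationWarning (on Python 2 the test base classes are assumed to provide the new name). A minimal illustration:

import unittest


class RegexDemo(unittest.TestCase):
    def test_boom(self):
        # Preferred spelling; the pattern is matched with re.search against
        # the string representation of the raised exception.
        with self.assertRaisesRegex(ValueError, 'boom'):
            raise ValueError('boom')


if __name__ == '__main__':
    unittest.main()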


@@ -39,9 +39,9 @@ class TestSchedulerHook(test_base.NodeTest):
def test_no_root_disk(self):
del self.inventory['disks']
self.assertRaisesRegexp(utils.Error, 'disks key is missing or empty',
self.hook.before_update, self.data,
self.node_info)
self.assertRaisesRegex(utils.Error, 'disks key is missing or empty',
self.hook.before_update, self.data,
self.node_info)
@mock.patch.object(node_cache.NodeInfo, 'patch')
def test_ok(self, mock_patch):
@@ -111,16 +111,16 @@ class TestValidateInterfacesHook(test_base.NodeTest):
self.assertRaises(SystemExit, std_plugins.ValidateInterfacesHook)
def test_no_interfaces(self):
self.assertRaisesRegexp(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_processing, {})
self.assertRaisesRegexp(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_processing, {'inventory': {}})
self.assertRaisesRegex(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_processing, {})
self.assertRaisesRegex(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_processing, {'inventory': {}})
del self.inventory['interfaces']
self.assertRaisesRegexp(utils.Error,
'interfaces key is missing or empty',
self.hook.before_processing, self.data)
self.assertRaisesRegex(utils.Error,
'interfaces key is missing or empty',
self.hook.before_processing, self.data)
def test_only_pxe(self):
self.hook.before_processing(self.data)
@@ -139,8 +139,8 @@ class TestValidateInterfacesHook(test_base.NodeTest):
def test_only_pxe_not_found(self):
self.data['boot_interface'] = 'aa:bb:cc:dd:ee:ff'
self.assertRaisesRegexp(utils.Error, 'No suitable interfaces',
self.hook.before_processing, self.data)
self.assertRaisesRegex(utils.Error, 'No suitable interfaces',
self.hook.before_processing, self.data)
def test_only_pxe_no_boot_interface(self):
del self.data['boot_interface']
@@ -179,8 +179,8 @@ class TestValidateInterfacesHook(test_base.NodeTest):
# empty
{},
]
self.assertRaisesRegexp(utils.Error, 'No interfaces supplied',
self.hook.before_processing, self.data)
self.assertRaisesRegex(utils.Error, 'No interfaces supplied',
self.hook.before_processing, self.data)
def test_skipped_interfaces(self):
CONF.set_override('add_ports', 'all', 'processing')
@@ -197,8 +197,8 @@ class TestValidateInterfacesHook(test_base.NodeTest):
{'name': 'em4', 'mac_address': 'foobar',
'ipv4_address': '2.2.2.2'},
]
self.assertRaisesRegexp(utils.Error, 'No suitable interfaces found',
self.hook.before_processing, self.data)
self.assertRaisesRegex(utils.Error, 'No suitable interfaces found',
self.hook.before_processing, self.data)
@mock.patch.object(node_cache.NodeInfo, 'delete_port', autospec=True)
def test_keep_all(self, mock_delete_port):
@@ -252,10 +252,10 @@ class TestRootDiskSelection(test_base.NodeTest):
del self.data['inventory']
del self.data['root_disk']
self.assertRaisesRegexp(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_update,
self.data, self.node_info)
self.assertRaisesRegex(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_update,
self.data, self.node_info)
self.assertNotIn('local_gb', self.data)
self.assertNotIn('root_disk', self.data)
@@ -264,10 +264,10 @@ class TestRootDiskSelection(test_base.NodeTest):
self.node.properties['root_device'] = {'size': 10}
self.inventory['disks'] = []
self.assertRaisesRegexp(utils.Error,
'disks key is missing or empty',
self.hook.before_update,
self.data, self.node_info)
self.assertRaisesRegex(utils.Error,
'disks key is missing or empty',
self.hook.before_update,
self.data, self.node_info)
def test_one_matches(self):
self.node.properties['root_device'] = {'size': 10}
@@ -289,10 +289,10 @@ class TestRootDiskSelection(test_base.NodeTest):
'model': 'Model 42'}
del self.data['root_disk']
self.assertRaisesRegexp(utils.Error,
'No disks satisfied root device hints',
self.hook.before_update,
self.data, self.node_info)
self.assertRaisesRegex(utils.Error,
'No disks satisfied root device hints',
self.hook.before_update,
self.data, self.node_info)
self.assertNotIn('local_gb', self.data)
self.assertNotIn('root_disk', self.data)
@@ -305,10 +305,10 @@ class TestRootDiskSelection(test_base.NodeTest):
def test_size_invalid(self):
for bad_size in ('foo', None, {}):
self.node.properties['root_device'] = {'size': bad_size}
self.assertRaisesRegexp(utils.Error,
'Invalid root device size hint',
self.hook.before_update,
self.data, self.node_info)
self.assertRaisesRegex(utils.Error,
'Invalid root device size hint',
self.hook.before_update,
self.data, self.node_info)
class TestRamdiskError(test_base.InventoryTest):
@@ -319,6 +319,6 @@ class TestRamdiskError(test_base.InventoryTest):
self.data['error'] = self.msg
def test_no_logs(self):
self.assertRaisesRegexp(utils.Error,
self.msg,
process.process, self.data)
self.assertRaisesRegex(utils.Error,
self.msg,
process.process, self.data)


@@ -100,42 +100,42 @@ class TestProcess(BaseProcessTest):
def test_not_found_in_cache(self):
self.find_mock.side_effect = utils.Error('not found')
self.assertRaisesRegexp(utils.Error,
'not found',
process.process, self.data)
self.assertRaisesRegex(utils.Error,
'not found',
process.process, self.data)
self.assertFalse(self.cli.node.get.called)
self.assertFalse(self.process_mock.called)
def test_not_found_in_ironic(self):
self.cli.node.get.side_effect = exceptions.NotFound()
self.assertRaisesRegexp(utils.Error,
'Node %s was not found' % self.uuid,
process.process, self.data)
self.assertRaisesRegex(utils.Error,
'Node %s was not found' % self.uuid,
process.process, self.data)
self.cli.node.get.assert_called_once_with(self.uuid)
self.assertFalse(self.process_mock.called)
self.node_info.finished.assert_called_once_with(error=mock.ANY)
def test_already_finished(self):
self.node_info.finished_at = time.time()
self.assertRaisesRegexp(utils.Error, 'already finished',
process.process, self.data)
self.assertRaisesRegex(utils.Error, 'already finished',
process.process, self.data)
self.assertFalse(self.process_mock.called)
self.assertFalse(self.find_mock.return_value.finished.called)
def test_expected_exception(self):
self.process_mock.side_effect = utils.Error('boom')
self.assertRaisesRegexp(utils.Error, 'boom',
process.process, self.data)
self.assertRaisesRegex(utils.Error, 'boom',
process.process, self.data)
self.node_info.finished.assert_called_once_with(error='boom')
def test_unexpected_exception(self):
self.process_mock.side_effect = RuntimeError('boom')
with self.assertRaisesRegexp(utils.Error,
'Unexpected exception') as ctx:
with self.assertRaisesRegex(utils.Error,
'Unexpected exception') as ctx:
process.process(self.data)
self.assertEqual(500, ctx.exception.http_code)
@@ -149,8 +149,8 @@ class TestProcess(BaseProcessTest):
patcher.start()
self.addCleanup(lambda p=patcher: p.stop())
self.assertRaisesRegexp(utils.Error, 'Unexpected exception',
process.process, self.data)
self.assertRaisesRegex(utils.Error, 'Unexpected exception',
process.process, self.data)
self.node_info.finished.assert_called_once_with(
error=mock.ANY)
@@ -167,17 +167,17 @@ class TestProcess(BaseProcessTest):
patcher.start()
self.addCleanup(lambda p=patcher: p.stop())
self.assertRaisesRegexp(utils.Error, 'Unexpected exception',
process.process, self.data)
self.assertRaisesRegex(utils.Error, 'Unexpected exception',
process.process, self.data)
self.assertFalse(self.node_info.finished.called)
def test_error_if_node_not_found_hook(self):
plugins_base._NOT_FOUND_HOOK_MGR = None
self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM')
self.assertRaisesRegexp(utils.Error,
'Look up error: BOOM',
process.process, self.data)
self.assertRaisesRegex(utils.Error,
'Look up error: BOOM',
process.process, self.data)
@mock.patch.object(example_plugin, 'example_not_found_hook',
@@ -199,9 +199,9 @@ class TestNodeNotFoundHook(BaseProcessTest):
plugins_base._NOT_FOUND_HOOK_MGR = None
self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM')
hook_mock.return_value = None
self.assertRaisesRegexp(utils.Error,
'Node not found hook returned nothing',
process.process, self.data)
self.assertRaisesRegex(utils.Error,
'Node not found hook returned nothing',
process.process, self.data)
hook_mock.assert_called_once_with(self.data)
def test_node_not_found_hook_exception(self, hook_mock):
@@ -209,9 +209,9 @@ class TestNodeNotFoundHook(BaseProcessTest):
plugins_base._NOT_FOUND_HOOK_MGR = None
self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM')
hook_mock.side_effect = Exception('Hook Error')
self.assertRaisesRegexp(utils.Error,
'Node not found hook failed: Hook Error',
process.process, self.data)
self.assertRaisesRegex(utils.Error,
'Node not found hook failed: Hook Error',
process.process, self.data)
hook_mock.assert_called_once_with(self.data)
@@ -563,12 +563,9 @@ class TestReapply(BaseTest):
@prepare_mocks
def test_locking_failed(self, pop_mock, reapply_mock):
pop_mock.return_value.acquire_lock.return_value = False
exc = utils.Error('Node locked, please, try again later')
with self.assertRaises(type(exc)) as cm:
process.reapply(self.uuid)
self.assertEqual(str(exc), str(cm.exception))
self.assertRaisesRegex(utils.Error,
'Node locked, please, try again later',
process.reapply, self.uuid)
pop_mock.assert_called_once_with(self.uuid, locked=False)
pop_mock.return_value.acquire_lock.assert_called_once_with(
@@ -656,6 +653,8 @@ class TestReapplyNode(BaseTest):
swift_mock, apply_mock,
post_hook_mock, ):
exc = Exception('Oops')
expected_error = ('Unexpected exception Exception while fetching '
'unprocessed introspection data from Swift: Oops')
swift_mock.get_object.side_effect = exc
with mock.patch.object(process.LOG, 'exception',
autospec=True) as log_mock:
@@ -669,7 +668,8 @@ class TestReapplyNode(BaseTest):
self.assertFalse(swift_mock.create_object.called)
self.assertFalse(apply_mock.called)
self.assertFalse(post_hook_mock.called)
self.assertFalse(finished_mock.called)
finished_mock.assert_called_once_with(self.node_info,
expected_error)
@prepare_mocks
def test_prehook_failure(self, finished_mock, swift_mock,
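
A hedged sketch of the behaviour change the TestReapplyNode hunks above exercise; the helper names below are assumptions, and the real code lives in ironic_inspector/process.py:

from oslo_log import log

LOG = log.getLogger(__name__)


def _reapply(node_info):
    try:
        # _fetch_unprocessed is a hypothetical stand-in for the Swift call
        # that swift_mock.get_object replaces in the tests above.
        introspection_data = _fetch_unprocessed(node_info.uuid)
    except Exception as exc:
        msg = ('Unexpected exception %(exc_class)s while fetching '
               'unprocessed introspection data from Swift: %(error)s' %
               {'exc_class': exc.__class__.__name__, 'error': exc})
        LOG.exception(msg)
        # The fix: the node now moves to the error state instead of being
        # left untouched when the fetch fails.
        node_info.finished(msg)
        return
    _reapply_with_data(node_info, introspection_data)  # hypothetical continuation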


@@ -55,9 +55,9 @@ class TestCreateRule(BaseTest):
def test_duplicate_uuid(self):
rules.create([], self.actions_json, uuid=self.uuid)
self.assertRaisesRegexp(utils.Error, 'already exists',
rules.create, [], self.actions_json,
uuid=self.uuid)
self.assertRaisesRegex(utils.Error, 'already exists',
rules.create, [], self.actions_json,
uuid=self.uuid)
def test_with_conditions(self):
rule = rules.create(self.conditions_json, self.actions_json)
@@ -72,62 +72,62 @@ class TestCreateRule(BaseTest):
def test_invalid_condition(self):
del self.conditions_json[0]['op']
self.assertRaisesRegexp(utils.Error,
'Validation failed for conditions',
rules.create,
self.conditions_json, self.actions_json)
self.assertRaisesRegex(utils.Error,
'Validation failed for conditions',
rules.create,
self.conditions_json, self.actions_json)
self.conditions_json[0]['op'] = 'foobar'
self.assertRaisesRegexp(utils.Error,
'Validation failed for conditions',
rules.create,
self.conditions_json, self.actions_json)
self.assertRaisesRegex(utils.Error,
'Validation failed for conditions',
rules.create,
self.conditions_json, self.actions_json)
def test_invalid_condition_field(self):
self.conditions_json[0]['field'] = '!*!'
self.assertRaisesRegexp(utils.Error,
'Unable to parse field JSON path',
rules.create,
self.conditions_json, self.actions_json)
self.assertRaisesRegex(utils.Error,
'Unable to parse field JSON path',
rules.create,
self.conditions_json, self.actions_json)
def test_invalid_condition_parameters(self):
self.conditions_json[0]['foo'] = 'bar'
self.assertRaisesRegexp(utils.Error,
'Invalid parameters for operator',
rules.create,
self.conditions_json, self.actions_json)
self.assertRaisesRegex(utils.Error,
'Invalid parameters for operator',
rules.create,
self.conditions_json, self.actions_json)
def test_no_actions(self):
self.assertRaisesRegexp(utils.Error,
'Validation failed for actions',
rules.create,
self.conditions_json, [])
self.assertRaisesRegex(utils.Error,
'Validation failed for actions',
rules.create,
self.conditions_json, [])
def test_invalid_action(self):
del self.actions_json[0]['action']
self.assertRaisesRegexp(utils.Error,
'Validation failed for actions',
rules.create,
self.conditions_json, self.actions_json)
self.assertRaisesRegex(utils.Error,
'Validation failed for actions',
rules.create,
self.conditions_json, self.actions_json)
self.actions_json[0]['action'] = 'foobar'
self.assertRaisesRegexp(utils.Error,
'Validation failed for actions',
rules.create,
self.conditions_json, self.actions_json)
self.assertRaisesRegex(utils.Error,
'Validation failed for actions',
rules.create,
self.conditions_json, self.actions_json)
def test_invalid_action_parameters(self):
self.actions_json[0]['foo'] = 'bar'
self.assertRaisesRegexp(utils.Error,
'Invalid parameters for action',
rules.create,
self.conditions_json, self.actions_json)
self.assertRaisesRegex(utils.Error,
'Invalid parameters for action',
rules.create,
self.conditions_json, self.actions_json)
class TestGetRule(BaseTest):


@@ -0,0 +1,4 @@
---
fixes:
  - Use only single quotes for strings inside SQL statements. Fixes a crash
    when PostgreSQL is used as a database backend.
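
Background for this note (an illustration; the table and column names below are assumptions, not code from this commit): PostgreSQL reserves double quotes for identifiers, so a double-quoted value is parsed as a column reference and the statement fails, while single quotes always denote a string literal.

broken = 'UPDATE nodes SET error = "boom"'   # PostgreSQL: column "boom" does not exist
fixed = "UPDATE nodes SET error = 'boom'"    # string literal on every backend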


@@ -0,0 +1,7 @@
---
features:
  - Adds new processing hook pci_devices for setting node
    capabilities based on PCI devices present on a node
    and rules in the [pci_devices] alias configuration
    option. Requires "pci-devices" collector to be enabled
    in IPA.
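
A configuration sketch of what the note describes, with the alias values borrowed from the new unit tests (real vendor/product IDs would come from the node's hardware):

[pci_devices]
alias = {"vendor_id": "1234", "product_id": "5678", "name": "pci_dev1"}
alias = {"vendor_id": "9876", "product_id": "5432", "name": "pci_dev2"}

The option is multi-valued, one JSON object per line. With an inventory reporting two 1234/5678 devices and one 9876/5432 device, the node would gain capabilities pci_dev1=2 and pci_dev2=1, as test_before_update above demonstrates. How the "pci-devices" collector is enabled in IPA is deployment-specific (commonly via the ipa-inspection-collectors kernel parameter; an assumption, not part of this change).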


@@ -0,0 +1,3 @@
fixes:
  - Set the node to the error state when it
    failed to get data from swift.


@@ -10,13 +10,13 @@ jsonpath-rw<2.0,>=1.2.0 # Apache-2.0
jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
keystoneauth1>=2.10.0 # Apache-2.0
keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0
netaddr!=0.7.16,>=0.7.12 # BSD
netaddr!=0.7.16,>=0.7.13 # BSD
pbr>=1.6 # Apache-2.0
python-ironicclient>=1.6.0 # Apache-2.0
python-swiftclient>=2.2.0 # Apache-2.0
oslo.concurrency>=3.8.0 # Apache-2.0
oslo.config>=3.14.0 # Apache-2.0
oslo.db>=4.1.0 # Apache-2.0
oslo.db!=4.13.1,!=4.13.2,>=4.10.0 # Apache-2.0
oslo.i18n>=2.1.0 # Apache-2.0
oslo.log>=1.14.0 # Apache-2.0
oslo.middleware>=3.0.0 # Apache-2.0


@@ -2,7 +2,7 @@
name = ironic-inspector
summary = Hardware introspection for OpenStack Bare Metal
description-file = README.rst
home-page = https://launchpad.net/ironic-inspector
home-page = http://docs.openstack.org/developer/ironic-inspector/
license = Apache-2
classifier =
Environment :: Console
@@ -33,6 +33,7 @@ ironic_inspector.hooks.processing =
raid_device = ironic_inspector.plugins.raid_device:RaidDeviceDetection
capabilities = ironic_inspector.plugins.capabilities:CapabilitiesHook
local_link_connection = ironic_inspector.plugins.local_link_connection:GenericLocalLinkConnectionHook
pci_devices = ironic_inspector.plugins.pci_devices:PciDevicesHook
ironic_inspector.hooks.node_not_found =
example = ironic_inspector.plugins.example:example_not_found_hook
enroll = ironic_inspector.plugins.discovery:enroll_node_not_found_hook
@@ -59,6 +60,7 @@ oslo.config.opts =
ironic_inspector.common.swift = ironic_inspector.common.swift:list_opts
ironic_inspector.plugins.discovery = ironic_inspector.plugins.discovery:list_opts
ironic_inspector.plugins.capabilities = ironic_inspector.plugins.capabilities:list_opts
ironic_inspector.plugins.pci_devices = ironic_inspector.plugins.pci_devices:list_opts
oslo.config.opts.defaults =
ironic_inspector = ironic_inspector.conf:set_config_defaults
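
The pci_devices lines added above register the hook and its options as stevedore entry points. A minimal sketch of resolving such an entry point by name (illustrative; not the inspector's actual plugin-loading code):

from stevedore import named

# Load the named extension from the processing-hooks namespace declared
# in setup.cfg and instantiate it.
mgr = named.NamedExtensionManager(
    namespace='ironic_inspector.hooks.processing',
    names=['pci_devices'],
    invoke_on_load=True)
hook = mgr['pci_devices'].obj  # an instance of PciDevicesHook

Operators would then opt into the hook through the inspector's [processing] processing_hooks option (an assumption based on how the other hooks in this namespace are enabled).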

tox.ini

@@ -14,31 +14,13 @@ setenv = PYTHONDONTWRITEBYTECODE=1
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
[testenv:venv]
# NOTE(amrith) The setting of the install_command in this location
# is only required because currently infra does not actually
# support constraints files for the environment job, and while
# the environment variable UPPER_CONSTRAINTS_FILE is set, there's
# no file there. It can be removed when infra changes this.
install_command = pip install -U {opts} {packages}
commands = {posargs}
[testenv:releasenotes]
# NOTE(amrith) The setting of the install_command in this location
# is only required because currently infra does not actually
# support constraints files for the release notes job, and while
# the environment variable UPPER_CONSTRAINTS_FILE is set, there's
# no file there. It can be removed when infra changes this.
install_command = pip install -U {opts} {packages}
envdir = {toxworkdir}/venv
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:cover]
# NOTE(amrith) The setting of the install_command in this location
# is only required because currently infra does not actually
# support constraints files for the cover job, and while
# the environment variable UPPER_CONSTRAINTS_FILE is set, there's
# no file there. It can be removed when infra changes this.
install_command = pip install -U {opts} {packages}
commands =
coverage run --branch --include "ironic_inspector*" -m unittest discover ironic_inspector.test.unit
coverage report -m