From b12d184b73f6decb20f733ae642a54ae49e90b88 Mon Sep 17 00:00:00 2001 From: Aparna Date: Mon, 18 Apr 2016 11:06:31 +0000 Subject: [PATCH 001/166] Deployment vmedia operations to run when cleaning The virtual media operations in task.driver.boot.prepare_ramdisk() should be performed while the provision state of the node is in 'cleaning'. Change-Id: I4f563586523ea6e4a5a630c5fe44f70fe473bdf8 Closes-Bug: #1570283 --- ironic/drivers/modules/ilo/boot.py | 7 +++--- ironic/drivers/modules/irmc/boot.py | 7 +++--- .../unit/drivers/modules/ilo/test_boot.py | 23 +++++++++++++++++-- .../unit/drivers/modules/irmc/test_boot.py | 21 ++++++++++++----- .../notes/bug-1570283-6cdc62e4ef43cb02.yaml | 4 ++++ 5 files changed, 48 insertions(+), 14 deletions(-) create mode 100644 releasenotes/notes/bug-1570283-6cdc62e4ef43cb02.yaml diff --git a/ironic/drivers/modules/ilo/boot.py b/ironic/drivers/modules/ilo/boot.py index 481e3ebd3..1a5c5dae4 100644 --- a/ironic/drivers/modules/ilo/boot.py +++ b/ironic/drivers/modules/ilo/boot.py @@ -295,10 +295,11 @@ class IloVirtualMediaBoot(base.BootInterface): node = task.node # NOTE(TheJulia): If this method is being called by something - # aside from a deployment, such as conductor takeover, we should - # treat this as a no-op and move on otherwise we would modify + # aside from deployment and clean, such as conductor takeover, we + # should treat this as a no-op and move on otherwise we would modify # the state of the node due to virtual media operations. 
- if node.provision_state != states.DEPLOYING: + if (node.provision_state != states.DEPLOYING and + node.provision_state != states.CLEANING): return # Clear ilo_boot_iso if it's a glance image to force recreate diff --git a/ironic/drivers/modules/irmc/boot.py b/ironic/drivers/modules/irmc/boot.py index 89c58a8d5..0c39d7bb6 100644 --- a/ironic/drivers/modules/irmc/boot.py +++ b/ironic/drivers/modules/irmc/boot.py @@ -604,10 +604,11 @@ class IRMCVirtualMediaBoot(base.BootInterface): """ # NOTE(TheJulia): If this method is being called by something - # aside from a deployment, such as conductor takeover, we should - # treat this as a no-op and move on otherwise we would modify + # aside from deployment and clean, such as conductor takeover, we + # should treat this as a no-op and move on otherwise we would modify # the state of the node due to virtual media operations. - if task.node.provision_state != states.DEPLOYING: + if (task.node.provision_state != states.DEPLOYING and + task.node.provision_state != states.CLEANING): return deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task) diff --git a/ironic/tests/unit/drivers/modules/ilo/test_boot.py b/ironic/tests/unit/drivers/modules/ilo/test_boot.py index 93ea84c4b..6cf6009f1 100644 --- a/ironic/tests/unit/drivers/modules/ilo/test_boot.py +++ b/ironic/tests/unit/drivers/modules/ilo/test_boot.py @@ -502,8 +502,8 @@ class IloVirtualMediaBootTestCase(db_base.DbTestCase): @mock.patch.object(service_utils, 'is_glance_image', spec_set=True, autospec=True) - def test_prepare_ramdisk_not_deploying(self, mock_is_image): - """Ensure ramdisk build operations are blocked when not deploying""" + def test_prepare_ramdisk_not_deploying_not_cleaning(self, mock_is_image): + """Ensure deploy ops are blocked when not deploying and not cleaning""" for state in states.STABLE_STATES: mock_is_image.reset_mock() @@ -534,6 +534,25 @@ class IloVirtualMediaBootTestCase(db_base.DbTestCase): self.assertEqual('http://mybootiso', 
self.node.instance_info['ilo_boot_iso']) + def test_prepare_ramdisk_glance_image_cleaning(self): + self.node.provision_state = states.CLEANING + self.node.save() + self._test_prepare_ramdisk( + ilo_boot_iso='swift:abcdef', + image_source='6b2f0c0c-79e8-4db6-842e-43c9764204af') + self.node.refresh() + self.assertNotIn('ilo_boot_iso', self.node.instance_info) + + def test_prepare_ramdisk_not_a_glance_image_cleaning(self): + self.node.provision_state = states.CLEANING + self.node.save() + self._test_prepare_ramdisk( + ilo_boot_iso='http://mybootiso', + image_source='http://myimage') + self.node.refresh() + self.assertEqual('http://mybootiso', + self.node.instance_info['ilo_boot_iso']) + @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True, autospec=True) @mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True, diff --git a/ironic/tests/unit/drivers/modules/irmc/test_boot.py b/ironic/tests/unit/drivers/modules/irmc/test_boot.py index a607282ea..a06ca4e7a 100644 --- a/ironic/tests/unit/drivers/modules/irmc/test_boot.py +++ b/ironic/tests/unit/drivers/modules/irmc/test_boot.py @@ -900,14 +900,13 @@ class IRMCVirtualMediaBootTestCase(db_base.DbTestCase): spec_set=True, autospec=True) @mock.patch.object(deploy_utils, 'get_single_nic_with_vif_port_id', spec_set=True, autospec=True) - def test_prepare_ramdisk(self, - get_single_nic_with_vif_port_id_mock, - _setup_deploy_iso_mock): + def _test_prepare_ramdisk(self, + get_single_nic_with_vif_port_id_mock, + _setup_deploy_iso_mock): instance_info = self.node.instance_info instance_info['irmc_boot_iso'] = 'glance://abcdef' instance_info['image_source'] = '6b2f0c0c-79e8-4db6-842e-43c9764204af' self.node.instance_info = instance_info - self.node.provision_state = states.DEPLOYING self.node.save() ramdisk_params = {'a': 'b'} @@ -924,10 +923,20 @@ class IRMCVirtualMediaBootTestCase(db_base.DbTestCase): self.assertEqual('glance://abcdef', self.node.instance_info['irmc_boot_iso']) + def 
test_prepare_ramdisk_glance_image_deploying(self): + self.node.provision_state = states.DEPLOYING + self.node.save() + self._test_prepare_ramdisk() + + def test_prepare_ramdisk_glance_image_cleaning(self): + self.node.provision_state = states.CLEANING + self.node.save() + self._test_prepare_ramdisk() + @mock.patch.object(irmc_boot, '_setup_deploy_iso', spec_set=True, autospec=True) - def test_prepare_ramdisk_not_deploying(self, mock_is_image): - """Ensure ramdisk build operations are blocked when not deploying""" + def test_prepare_ramdisk_not_deploying_not_cleaning(self, mock_is_image): + """Ensure deploy ops are blocked when not deploying and not cleaning""" for state in states.STABLE_STATES: mock_is_image.reset_mock() diff --git a/releasenotes/notes/bug-1570283-6cdc62e4ef43cb02.yaml b/releasenotes/notes/bug-1570283-6cdc62e4ef43cb02.yaml new file mode 100644 index 000000000..7d6ec791a --- /dev/null +++ b/releasenotes/notes/bug-1570283-6cdc62e4ef43cb02.yaml @@ -0,0 +1,4 @@ +--- +fixes: + -Fixes the issue of not attaching virtual media during cleaning operation + for vmedia based drivers. From b9cf7d42b1dd861bfd812de3928f58fe6aa46ce8 Mon Sep 17 00:00:00 2001 From: vsaienko Date: Fri, 11 Dec 2015 11:41:00 +0200 Subject: [PATCH 002/166] Add Link-Local-Connection info to ironic port This patch introduce new variable called IRONIC_USE_LINK_LOCAL. Link-Local-Connection information is used by Neutron to bind_port on the switch. 
Change-Id: Icfb9257bded5b234a9d05f2b2de6d6580f2f42aa Partial-bug: #1526403 --- devstack/lib/ironic | 24 +++++++++++++++++++- devstack/tools/ironic/scripts/create-node.sh | 3 ++- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 2682d2bf8..080672b7b 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -207,6 +207,11 @@ IRONIC_AUTH_STRATEGY=${IRONIC_AUTH_STRATEGY:-keystone} IRONIC_TERMINAL_SSL=$(trueorfalse False IRONIC_TERMINAL_SSL) IRONIC_TERMINAL_CERT_DIR=${IRONIC_TERMINAL_CERT_DIR:-$IRONIC_DATA_DIR/terminal_cert/} +# This flag is used to allow adding Link-Local-Connection info +# to ironic port-create command. LLC info is obtained from +# IRONIC_{VM,HW}_NODES_FILE +IRONIC_USE_LINK_LOCAL=$(trueorfalse False IRONIC_USE_LINK_LOCAL) + # get_pxe_boot_file() - Get the PXE/iPXE boot file path function get_pxe_boot_file { local relpath=syslinux/pxelinux.0 @@ -832,6 +837,21 @@ function enroll_nodes { vbmc_port=$(echo $hardware_info | awk '{print $2}') node_options+=" -i ipmi_port=$vbmc_port" fi + # Local-link-connection options + if [[ "${IRONIC_USE_LINK_LOCAL}" == "True" ]]; then + local llc_opts="" + local switch_info + local switch_id + local port_id + + switch_info=$(echo $hardware_info |awk '{print $3}') + switch_id=$(echo $hardware_info |awk '{print $4}') + port_id=$(echo $hardware_info |awk '{print $5}') + + llc_opts="-l switch_id=${switch_id} -l switch_info=${switch_info} -l port_id=${port_id}" + + local ironic_api_version='--ironic-api-version latest' + fi else # Currently we require all hardware platform have same CPU/RAM/DISK info # in future, this can be enhanced to support different type, and then @@ -887,7 +907,9 @@ function enroll_nodes { ironic node-update $node_id add properties/root_device='{"vendor": "0x1af4"}' fi - ironic port-create --address $mac_address --node $node_id + # In case we using portgroups, we should API version that support them. 
+ # Othervise API will return 406 ERROR + ironic $ironic_api_version port-create --address $mac_address --node $node_id $llc_opts total_nodes=$((total_nodes+1)) total_cpus=$((total_cpus+$ironic_node_cpu)) diff --git a/devstack/tools/ironic/scripts/create-node.sh b/devstack/tools/ironic/scripts/create-node.sh index 0b0278b71..7850ff4ca 100755 --- a/devstack/tools/ironic/scripts/create-node.sh +++ b/devstack/tools/ironic/scripts/create-node.sh @@ -78,4 +78,5 @@ fi # echo mac VM_MAC=$(virsh dumpxml $NAME | grep "mac address" | head -1 | cut -d\' -f2) -echo $VM_MAC $VBMC_PORT +switch_id=$(ip link show dev $BRIDGE | egrep -o "ether [A-Za-z0-9:]+"|sed "s/ether\ //") +echo $VM_MAC $VBMC_PORT $BRIDGE $switch_id ovs-$NAME From 9a1aeb76da2ed53e042a94ead8640af9374a10bf Mon Sep 17 00:00:00 2001 From: Yuiko Takada Mori Date: Mon, 6 Jun 2016 14:39:20 +0900 Subject: [PATCH 003/166] Fix parameter create-node.sh When IRONIC_VM_LOG_CONSOLE is set in devstack/local.conf, devstack/tools/ironic/scripts/create-node.sh fails with below error message. expected syntax: --format This error occurs because when IRONIC_VM_LOG_CONSOLE is set False, log_arg is passed with empty string to devstack/tools/ironic/scripts/create-node.sh. This patch set fixes parameter order of devstack/tools/ironic/scripts/create-node.sh, and also create-node.sh has too many parameters, so fix to use getopts. 
Change-Id: Ibdfff27e2c0119f8261f2324233acbeea6612d79 Closes-bug: #1589370 --- devstack/lib/ironic | 10 +++---- devstack/tools/ironic/scripts/create-node.sh | 30 ++++++++++++-------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 0b356f868..729039e75 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -751,7 +751,7 @@ function create_bridge_and_vms { # Call libvirt setup scripts in a new shell to ensure any new group membership sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network.sh" if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then - local log_arg="$IRONIC_VM_LOG_DIR" + local log_arg="-l $IRONIC_VM_LOG_DIR" if [[ "$IRONIC_VM_LOG_ROTATE" == "True" ]] ; then setup_qemu_log_hook @@ -763,10 +763,10 @@ function create_bridge_and_vms { local vbmc_port=$IRONIC_VBMC_PORT_RANGE_START local vm_name for vm_name in $(_ironic_bm_vm_names); do - sudo -E su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-node.sh $vm_name \ - $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \ - $IRONIC_VM_SPECS_CPU_ARCH $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR \ - $vbmc_port $log_arg $IRONIC_VM_SPECS_DISK_FORMAT" >> $IRONIC_VM_MACS_CSV_FILE + sudo -E su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-node.sh -n $vm_name \ + -c $IRONIC_VM_SPECS_CPU -m $IRONIC_VM_SPECS_RAM -d $IRONIC_VM_SPECS_DISK \ + -a $IRONIC_VM_SPECS_CPU_ARCH -b $IRONIC_VM_NETWORK_BRIDGE -e $IRONIC_VM_EMULATOR \ + -p $vbmc_port -f $IRONIC_VM_SPECS_DISK_FORMAT $log_arg" >> $IRONIC_VM_MACS_CSV_FILE vbmc_port=$((vbmc_port+1)) done local ironic_net_id diff --git a/devstack/tools/ironic/scripts/create-node.sh b/devstack/tools/ironic/scripts/create-node.sh index 0b0278b71..5a96b45d3 100755 --- a/devstack/tools/ironic/scripts/create-node.sh +++ b/devstack/tools/ironic/scripts/create-node.sh @@ -9,18 +9,24 @@ set -ex # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. 
&& pwd) -NAME=$1 -CPU=$2 -MEM=$(( 1024 * $3 )) -# Extra G to allow fuzz for partition table : flavor size and registered size -# need to be different to actual size. -DISK=$(( $4 + 1)) -ARCH=$5 -BRIDGE=$6 -EMULATOR=$7 -VBMC_PORT=$8 -LOGDIR=$9 -DISK_FORMAT=${10} +while getopts "n:c:m:d:a:b:e:p:v:f:l:" arg; do + case $arg in + n) NAME=$OPTARG;; + c) CPU=$OPTARG;; + m) MEM=$(( 1024 * OPTARG ));; + # Extra G to allow fuzz for partition table : flavor size and registered + # size need to be different to actual size. + d) DISK=$(( OPTARG + 1 ));; + a) ARCH=$OPTARG;; + b) BRIDGE=$OPTARG;; + e) EMULATOR=$OPTARG;; + p) VBMC_PORT=$OPTARG;; + f) DISK_FORMAT=$OPTARG;; + l) LOGDIR=$OPTARG;; + esac +done + +shift $(( $OPTIND - 1 )) LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"virtio"} LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} From 5f2236467209ca64578e119608e9b9aeddf982f2 Mon Sep 17 00:00:00 2001 From: Ruby Loo Date: Mon, 20 Jun 2016 21:11:29 -0400 Subject: [PATCH 004/166] Fix test in test_agent_client.py In test_agent_client.py, the MockNode's driver_internal_info field was incorrectly set. 'clean_version' is not a valid key for driver_internal_info; it should be 'hardware_manager_version'. That change allows us to improve TestAgentClient.test_execute_clean_step(), where the value for 'clean_version' parameter comes from driver_internal_info['hardware_manager_version']. 
See: https://github.com/openstack/ironic/blob/ba83fb1bb45b63e056c87c3bbf42cabaade7865c/ironic/drivers/modules/agent_client.py#L224 Change-Id: Ie6d7f1eb81acd0b2cb437708a5cf20cd7d7b7241 --- ironic/tests/unit/drivers/modules/test_agent_client.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ironic/tests/unit/drivers/modules/test_agent_client.py b/ironic/tests/unit/drivers/modules/test_agent_client.py index 6bc9652e9..a61c3066f 100644 --- a/ironic/tests/unit/drivers/modules/test_agent_client.py +++ b/ironic/tests/unit/drivers/modules/test_agent_client.py @@ -40,7 +40,7 @@ class MockNode(object): self.uuid = 'uuid' self.driver_internal_info = { 'agent_url': "http://127.0.0.1:9999", - 'clean_version': {'generic': '1'} + 'hardware_manager_version': {'generic': '1'} } self.instance_info = {} @@ -309,7 +309,8 @@ class TestAgentClient(base.TestCase): 'step': step, 'node': self.node.as_dict(), 'ports': [], - 'clean_version': None + 'clean_version': + self.node.driver_internal_info['hardware_manager_version'] } self.client.execute_clean_step(step, self.node, From d52077f4fe8c668b258702e8298a4beaa19476d8 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Tue, 26 Apr 2016 10:19:04 +0800 Subject: [PATCH 005/166] Clear target_power_state on conductor startup During clearing locks, also clear target_power_state. As nodes may locked in powering process, sync_power_state task will sync the power_state field, but nobody handles target_power_state. 
Change-Id: I2293e03c05e13c716f78533680d128ba45ccda02 Closes-Bug: #1567255 --- ironic/conductor/base_manager.py | 2 ++ ironic/db/sqlalchemy/api.py | 18 ++++++++++++++++++ ironic/tests/unit/db/test_conductor.py | 18 ++++++++++++++++++ ...de-target-power-state-de1f25be46d3e6d7.yaml | 4 ++++ 4 files changed, 42 insertions(+) create mode 100644 releasenotes/notes/clear-node-target-power-state-de1f25be46d3e6d7.yaml diff --git a/ironic/conductor/base_manager.py b/ironic/conductor/base_manager.py index 4d1a132e5..3d0a48cfa 100644 --- a/ironic/conductor/base_manager.py +++ b/ironic/conductor/base_manager.py @@ -139,6 +139,8 @@ class BaseConductorManager(object): self._periodic_task_callables, executor_factory=periodics.ExistingExecutor(self._executor)) + # clear all target_power_state with locks by this conductor + self.dbapi.clear_node_target_power_state(self.host) # clear all locks held by this conductor before registering self.dbapi.clear_node_reservations_for_conductor(self.host) try: diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py index 02bf4782c..c89d17d7a 100644 --- a/ironic/db/sqlalchemy/api.py +++ b/ironic/db/sqlalchemy/api.py @@ -755,6 +755,24 @@ class Connection(api.Connection): _LW('Cleared reservations held by %(hostname)s: ' '%(nodes)s'), {'hostname': hostname, 'nodes': nodes}) + def clear_node_target_power_state(self, hostname): + nodes = [] + with _session_for_write(): + query = (model_query(models.Node) + .filter_by(reservation=hostname)) + query = query.filter(models.Node.target_power_state != sql.null()) + nodes = [node['uuid'] for node in query] + query.update({'target_power_state': None, + 'last_error': "Pending power operation was aborted " + "due to conductor restart"}) + + if nodes: + nodes = ', '.join(nodes) + LOG.warning( + _LW('Cleared target_power_state of the locked nodes in ' + 'powering process, their power state can be incorrect: ' + '%(nodes)s'), {'nodes': nodes}) + def get_active_driver_dict(self, interval=None): if 
interval is None: interval = CONF.conductor.heartbeat_timeout diff --git a/ironic/tests/unit/db/test_conductor.py b/ironic/tests/unit/db/test_conductor.py index a6a4e1638..09341bf9c 100644 --- a/ironic/tests/unit/db/test_conductor.py +++ b/ironic/tests/unit/db/test_conductor.py @@ -110,6 +110,24 @@ class DbConductorTestCase(base.DbTestCase): self.assertEqual('hostname2', node2.reservation) self.assertIsNone(node3.reservation) + def test_clear_node_target_power_state(self): + node1 = self.dbapi.create_node({'reservation': 'hostname1', + 'target_power_state': 'power on'}) + node2 = self.dbapi.create_node({'reservation': 'hostname2', + 'target_power_state': 'power on'}) + node3 = self.dbapi.create_node({'reservation': None, + 'target_power_state': 'power on'}) + self.dbapi.clear_node_target_power_state('hostname1') + node1 = self.dbapi.get_node_by_id(node1.id) + node2 = self.dbapi.get_node_by_id(node2.id) + node3 = self.dbapi.get_node_by_id(node3.id) + self.assertIsNone(node1.target_power_state) + self.assertIn('power operation was aborted', node1.last_error) + self.assertEqual('power on', node2.target_power_state) + self.assertIsNone(node2.last_error) + self.assertEqual('power on', node3.target_power_state) + self.assertIsNone(node3.last_error) + @mock.patch.object(timeutils, 'utcnow', autospec=True) def test_get_active_driver_dict_one_host_no_driver(self, mock_utcnow): h = 'fake-host' diff --git a/releasenotes/notes/clear-node-target-power-state-de1f25be46d3e6d7.yaml b/releasenotes/notes/clear-node-target-power-state-de1f25be46d3e6d7.yaml new file mode 100644 index 000000000..d9319ad19 --- /dev/null +++ b/releasenotes/notes/clear-node-target-power-state-de1f25be46d3e6d7.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Clear target_power_state of the nodes locked by the conductor on its + startup. 
From 146f6b47810e1d4b5c29af664c71fb73d76b7f8c Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 21 Jun 2016 17:59:45 +0000 Subject: [PATCH 006/166] Updated from global requirements Change-Id: Ia70565f6277eaadc15bcdd3f8e0c8de50563f219 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index a470e5ed5..2ae19801d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -21,7 +21,7 @@ WebTest>=2.0 # MIT bashate>=0.2 # Apache-2.0 # Doc requirements -sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD +sphinx!=1.3b1,<1.3,>=1.2.1 # BSD sphinxcontrib-pecanwsme>=0.8 # Apache-2.0 sphinxcontrib-seqdiag # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 From e84051e5b28684c1b983196ffb0c6aca1edd29d2 Mon Sep 17 00:00:00 2001 From: stephane Date: Wed, 15 Jun 2016 16:47:56 -0700 Subject: [PATCH 007/166] Restore diskimage-builder install In the change I0fc25c64339bc4c1f03ccb35cbc4efad4a7ad966, we lost the ability to install diskimage-builder if not present. Add this back if we're using dib to build IPA. Change-Id: Ic4e59f6ad18af44cec33b770d1df91f5e9eade12 Closes-Bug: 1593005 --- devstack/lib/ironic | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 2682d2bf8..fdf83fe6b 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -1054,6 +1054,12 @@ function build_ipa_dib_ramdisk { local ramdisk_path=$2 local tempdir tempdir=$(mktemp -d --tmpdir=${DEST}) + + # install diskimage-builder if not present + if ! 
$(type -P disk-image-create > /dev/null); then + install_diskimage_builder + fi + echo "Building IPA ramdisk with DIB options: $IRONIC_DIB_RAMDISK_OPTIONS" disk-image-create "$IRONIC_DIB_RAMDISK_OPTIONS" \ -o "$tempdir/ironic-agent" \ From 2eab1ee09ac525e086b59c43c2581b9087b5a257 Mon Sep 17 00:00:00 2001 From: Shivanand Tendulker Date: Wed, 15 Jun 2016 01:29:12 -0700 Subject: [PATCH 008/166] Add validation of 'ilo_deploy_iso' in deploy.validate() iLO virtual media based drivers use ISO image supplied as 'ilo_deploy_iso' in node's 'driver_info' to boot node during provisioning process. This parameter was not getting validated during deploy.validate() call. This change fixes the issue. Change-Id: I8fc4a89621e09281349f88c32ed77d24aa11355b Closes-Bug: #1592335 --- ironic/drivers/modules/ilo/deploy.py | 58 ++++++++++++ .../unit/drivers/modules/ilo/test_deploy.py | 94 +++++++++++++++++++ .../notes/bug-1592335-7c5835868fe364ea.yaml | 5 + 3 files changed, 157 insertions(+) create mode 100644 releasenotes/notes/bug-1592335-7c5835868fe364ea.yaml diff --git a/ironic/drivers/modules/ilo/deploy.py b/ironic/drivers/modules/ilo/deploy.py index 937c98996..270f0aac9 100644 --- a/ironic/drivers/modules/ilo/deploy.py +++ b/ironic/drivers/modules/ilo/deploy.py @@ -20,8 +20,10 @@ from oslo_log import log as logging from ironic.common import boot_devices from ironic.common import exception +from ironic.common.glance_service import service_utils from ironic.common.i18n import _ from ironic.common.i18n import _LW +from ironic.common import image_service from ironic.common import states from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils @@ -143,11 +145,53 @@ def _disable_secure_boot_if_supported(task): task.node.uuid) +def _validate(task): + """Validate the prerequisites for virtual media based deploy. + + This method validates whether the 'driver_info' property of the + supplied node contains the required information for this driver. 
+ + :param task: a TaskManager instance containing the node to act on. + :raises: InvalidParameterValue if any parameters are incorrect + :raises: MissingParameterValue if some mandatory information + is missing on the node + """ + node = task.node + ilo_common.parse_driver_info(node) + if 'ilo_deploy_iso' not in node.driver_info: + raise exception.MissingParameterValue(_( + "Missing 'ilo_deploy_iso' parameter in node's 'driver_info'.")) + deploy_iso = node.driver_info['ilo_deploy_iso'] + if not service_utils.is_glance_image(deploy_iso): + try: + image_service.HttpImageService().validate_href(deploy_iso) + except exception.ImageRefValidationFailed: + raise exception.InvalidParameterValue(_( + "Virtual media deploy accepts only Glance images or " + "HTTP(S) as driver_info['ilo_deploy_iso']. Either '%s' " + "is not a glance UUID or not a valid HTTP(S) URL or " + "the given URL is not reachable.") % deploy_iso) + + class IloVirtualMediaIscsiDeploy(iscsi_deploy.ISCSIDeploy): def get_properties(self): return {} + def validate(self, task): + """Validate the prerequisites for virtual media based deploy. + + This method validates whether the 'driver_info' property of the + supplied node contains the required information for this driver. + + :param task: a TaskManager instance containing the node to act on. + :raises: InvalidParameterValue if any parameters are incorrect + :raises: MissingParameterValue if some mandatory information + is missing on the node + """ + _validate(task) + super(IloVirtualMediaIscsiDeploy, self).validate(task) + @task_manager.require_exclusive_lock def tear_down(self, task): """Tear down a previous deployment on the task's node. @@ -200,6 +244,20 @@ class IloVirtualMediaAgentDeploy(agent.AgentDeploy): """ return ilo_boot.COMMON_PROPERTIES + def validate(self, task): + """Validate the prerequisites for virtual media based deploy. 
+ + This method validates whether the 'driver_info' property of the + supplied node contains the required information for this driver. + + :param task: a TaskManager instance containing the node to act on. + :raises: InvalidParameterValue if any parameters are incorrect + :raises: MissingParameterValue if some mandatory information + is missing on the node + """ + _validate(task) + super(IloVirtualMediaAgentDeploy, self).validate(task) + @task_manager.require_exclusive_lock def tear_down(self, task): """Tear down a previous deployment on the task's node. diff --git a/ironic/tests/unit/drivers/modules/ilo/test_deploy.py b/ironic/tests/unit/drivers/modules/ilo/test_deploy.py index d8d53edc7..44451005c 100644 --- a/ironic/tests/unit/drivers/modules/ilo/test_deploy.py +++ b/ironic/tests/unit/drivers/modules/ilo/test_deploy.py @@ -20,6 +20,8 @@ import six from ironic.common import boot_devices from ironic.common import exception +from ironic.common.glance_service import service_utils +from ironic.common import image_service from ironic.common import states from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils @@ -188,6 +190,72 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase): self.assertIsNone(bootmode) self.assertNotIn('deploy_boot_mode', task.node.instance_info) + @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True, + autospec=True) + def test__validate_MissingParam(self, mock_parse_driver_info): + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.assertRaisesRegex(exception.MissingParameterValue, + "Missing 'ilo_deploy_iso'", + ilo_deploy._validate, task) + mock_parse_driver_info.assert_called_once_with(task.node) + + @mock.patch.object(service_utils, 'is_glance_image', spec_set=True, + autospec=True) + @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True, + autospec=True) + def test__validate_valid_uuid(self, mock_parse_driver_info, + 
mock_is_glance_image): + mock_is_glance_image.return_value = True + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + deploy_iso = '8a81759a-f29b-454b-8ab3-161c6ca1882c' + task.node.driver_info['ilo_deploy_iso'] = deploy_iso + ilo_deploy._validate(task) + mock_parse_driver_info.assert_called_once_with(task.node) + mock_is_glance_image.assert_called_once_with(deploy_iso) + + @mock.patch.object(image_service.HttpImageService, 'validate_href', + spec_set=True, autospec=True) + @mock.patch.object(service_utils, 'is_glance_image', spec_set=True, + autospec=True) + @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True, + autospec=True) + def test__validate_InvalidParam(self, mock_parse_driver_info, + mock_is_glance_image, + mock_validate_href): + deploy_iso = 'http://abc.org/image/qcow2' + mock_validate_href.side_effect = iter( + [exception.ImageRefValidationFailed( + image_href='http://abc.org/image/qcow2', reason='fail')]) + mock_is_glance_image.return_value = False + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + task.node.driver_info['ilo_deploy_iso'] = deploy_iso + self.assertRaisesRegex(exception.InvalidParameterValue, + "Virtual media deploy accepts", + ilo_deploy._validate, task) + mock_parse_driver_info.assert_called_once_with(task.node) + mock_validate_href.assert_called_once_with(mock.ANY, deploy_iso) + + @mock.patch.object(image_service.HttpImageService, 'validate_href', + spec_set=True, autospec=True) + @mock.patch.object(service_utils, 'is_glance_image', spec_set=True, + autospec=True) + @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True, + autospec=True) + def test__validate_valid_url(self, mock_parse_driver_info, + mock_is_glance_image, + mock_validate_href): + deploy_iso = 'http://abc.org/image/deploy.iso' + mock_is_glance_image.return_value = False + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + 
task.node.driver_info['ilo_deploy_iso'] = deploy_iso + ilo_deploy._validate(task) + mock_parse_driver_info.assert_called_once_with(task.node) + mock_validate_href.assert_called_once_with(mock.ANY, deploy_iso) + class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase): @@ -197,6 +265,19 @@ class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase): self.node = obj_utils.create_test_node( self.context, driver='iscsi_ilo', driver_info=INFO_DICT) + @mock.patch.object(iscsi_deploy.ISCSIDeploy, 'validate', spec_set=True, + autospec=True) + @mock.patch.object(ilo_deploy, '_validate', spec_set=True, + autospec=True) + def test_validate(self, + mock_validate, + iscsi_validate): + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + task.driver.deploy.validate(task) + mock_validate.assert_called_once_with(task) + iscsi_validate.assert_called_once_with(mock.ANY, task) + @mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True, autospec=True) @mock.patch.object(iscsi_deploy.ISCSIDeploy, 'tear_down', spec_set=True, @@ -314,6 +395,19 @@ class IloVirtualMediaAgentDeployTestCase(db_base.DbTestCase): self.node = obj_utils.create_test_node( self.context, driver='agent_ilo', driver_info=INFO_DICT) + @mock.patch.object(agent.AgentDeploy, 'validate', spec_set=True, + autospec=True) + @mock.patch.object(ilo_deploy, '_validate', spec_set=True, + autospec=True) + def test_validate(self, + mock_validate, + agent_validate): + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + task.driver.deploy.validate(task) + mock_validate.assert_called_once_with(task) + agent_validate.assert_called_once_with(mock.ANY, task) + @mock.patch.object(agent.AgentDeploy, 'tear_down', spec_set=True, autospec=True) @mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True, diff --git a/releasenotes/notes/bug-1592335-7c5835868fe364ea.yaml b/releasenotes/notes/bug-1592335-7c5835868fe364ea.yaml new file mode 100644 index 
000000000..371d9eb31 --- /dev/null +++ b/releasenotes/notes/bug-1592335-7c5835868fe364ea.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - A node using 'agent_ilo' or 'iscsi_ilo' driver has + their 'driver_info/ilo_deploy_iso' field validated + during node validate. This closes bug #1592335. From 2e0716720a91030d9600d40dc949c27ff62bc43b Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Wed, 15 Jun 2016 19:45:20 +0300 Subject: [PATCH 009/166] Properly set ephemeral size in agent drivers This commit ensures that ephemeral_mb will be passed to an IPA ramdisk, as it is what is expected by both IPA and ironic-lib. This change also deduplicates some of the logic between iscsi_deploy.build_instance_info_for_deploy and deploy_utils.parse_instance_info methods. Closes-bug: #1592895 Change-Id: I0c0ea7a887f9aeb9fc4280aa86098d59ee8cad20 --- ironic/drivers/modules/deploy_utils.py | 10 +++++++--- ironic/drivers/modules/iscsi_deploy.py | 6 +++--- ironic/tests/unit/drivers/modules/test_deploy_utils.py | 7 ++++--- .../fix-ipa-ephemeral-partition-1f1e020727a49078.yaml | 5 +++++ 4 files changed, 19 insertions(+), 9 deletions(-) create mode 100644 releasenotes/notes/fix-ipa-ephemeral-partition-1f1e020727a49078.yaml diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py index 205fb39fa..38a16c2f8 100644 --- a/ironic/drivers/modules/deploy_utils.py +++ b/ironic/drivers/modules/deploy_utils.py @@ -1123,7 +1123,9 @@ def parse_instance_info(node): " in node's instance_info") check_for_missing_params(i_info, error_msg) - i_info['swap_mb'] = int(info.get('swap_mb', 0)) + # NOTE(vdrok): We're casting disk layout parameters to int only after + # ensuring that it is possible + i_info['swap_mb'] = info.get('swap_mb', 0) i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0) err_msg_invalid = _("Cannot validate parameter for driver deploy. " "Invalid parameter %(param)s. 
Reason: %(reason)s") @@ -1136,10 +1138,12 @@ def parse_instance_info(node): {'param': param, 'reason': reason}) - i_info['root_mb'] = 1024 * int(info.get('root_gb')) + i_info['root_mb'] = 1024 * int(i_info['root_gb']) + i_info['swap_mb'] = int(i_info['swap_mb']) + i_info['ephemeral_mb'] = 1024 * int(i_info['ephemeral_gb']) if iwdi: - if int(i_info['swap_mb']) > 0 or int(i_info['ephemeral_gb']) > 0: + if i_info['swap_mb'] > 0 or i_info['ephemeral_mb'] > 0: err_msg_invalid = _("Cannot deploy whole disk image with " "swap or ephemeral size set") raise exception.InvalidParameterValue(err_msg_invalid) diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py index 82d05b28e..041813157 100644 --- a/ironic/drivers/modules/iscsi_deploy.py +++ b/ironic/drivers/modules/iscsi_deploy.py @@ -207,9 +207,9 @@ def get_deploy_info(node, address, iqn, port=None, lun='1'): is_whole_disk_image = node.driver_internal_info['is_whole_disk_image'] if not is_whole_disk_image: - params.update({'root_mb': 1024 * int(i_info['root_gb']), - 'swap_mb': int(i_info['swap_mb']), - 'ephemeral_mb': 1024 * int(i_info['ephemeral_gb']), + params.update({'root_mb': i_info['root_mb'], + 'swap_mb': i_info['swap_mb'], + 'ephemeral_mb': i_info['ephemeral_mb'], 'preserve_ephemeral': i_info['preserve_ephemeral'], 'boot_option': deploy_utils.get_boot_option(node), 'boot_mode': _get_boot_mode(node)}) diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index 07ac3827b..c44ad90ac 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -2125,6 +2125,7 @@ class InstanceInfoTestCase(db_base.DbTestCase): def test_parse_instance_info_valid_ephemeral_gb(self): ephemeral_gb = 10 + ephemeral_mb = 1024 * ephemeral_gb ephemeral_fmt = 'test-fmt' info = dict(INST_INFO_DICT) info['ephemeral_gb'] = ephemeral_gb @@ -2134,7 +2135,7 @@ class 
InstanceInfoTestCase(db_base.DbTestCase): driver_internal_info=DRV_INTERNAL_INFO_DICT, ) data = utils.parse_instance_info(node) - self.assertEqual(ephemeral_gb, data['ephemeral_gb']) + self.assertEqual(ephemeral_mb, data['ephemeral_mb']) self.assertEqual(ephemeral_fmt, data['ephemeral_format']) def test_parse_instance_info_unicode_swap_mb(self): @@ -2308,9 +2309,9 @@ class InstanceInfoTestCase(db_base.DbTestCase): ) instance_info = utils.parse_instance_info(node) self.assertIsNotNone(instance_info['image_source']) - self.assertIsNotNone(instance_info['root_gb']) + self.assertIsNotNone(instance_info['root_mb']) self.assertEqual(0, instance_info['swap_mb']) - self.assertEqual(0, instance_info['ephemeral_gb']) + self.assertEqual(0, instance_info['ephemeral_mb']) self.assertIsNone(instance_info['configdrive']) def test_parse_instance_info_whole_disk_image_missing_root(self): diff --git a/releasenotes/notes/fix-ipa-ephemeral-partition-1f1e020727a49078.yaml b/releasenotes/notes/fix-ipa-ephemeral-partition-1f1e020727a49078.yaml new file mode 100644 index 000000000..ff392b614 --- /dev/null +++ b/releasenotes/notes/fix-ipa-ephemeral-partition-1f1e020727a49078.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Fixed a bug where the ironic python agent ramdisk was not creating an + ephemeral partition because the ephemeral partition size was not being + passed correctly to the agent. 
From 9721dadf47ab6dee8c8d1fabf206fdc2307f62a3 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 23 Jun 2016 08:40:33 +0000 Subject: [PATCH 010/166] Updated from global requirements Change-Id: I06dafe2611e2adb42226c62e4d4c2df21638a7d7 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 94e53393b..7eeadb951 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ eventlet!=0.18.3,>=0.18.2 # MIT WebOb>=1.2.3 # MIT greenlet>=0.3.2 # MIT netaddr!=0.7.16,>=0.7.12 # BSD -paramiko>=2.0 # LGPL +paramiko>=2.0 # LGPLv2.1+ python-neutronclient>=4.2.0 # Apache-2.0 python-glanceclient>=2.0.0 # Apache-2.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.7.0 # Apache-2.0 From e6c1c1645795277f946929148f65c1e9257fd862 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 17 May 2016 13:59:07 +0300 Subject: [PATCH 011/166] Create common neutron module Move _build_client logic to ironic.common.neutron module. In the future, this module will contain common functions for Neutron. Change-Id: I7b344d71d0f9ae34f7423099631bd25b5c5359bd --- etc/ironic/ironic.conf.sample | 103 ++++++++++++++------- ironic/common/neutron.py | 79 ++++++++++++++++ ironic/conf/opts.py | 4 +- ironic/dhcp/neutron.py | 77 ++-------------- ironic/tests/unit/common/test_neutron.py | 109 +++++++++++++++++++++++ ironic/tests/unit/dhcp/test_neutron.py | 69 -------------- 6 files changed, 270 insertions(+), 171 deletions(-) create mode 100644 ironic/common/neutron.py create mode 100644 ironic/tests/unit/common/test_neutron.py diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 797238952..7f62ff60b 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -234,11 +234,6 @@ # Allowed values: redis, dummy #rpc_zmq_matchmaker = redis -# Type of concurrency used. 
Either "native" or "eventlet" -# (string value) -# Allowed values: eventlet, native -#rpc_zmq_concurrency = eventlet - # Number of ZeroMQ contexts, defaults to 1. (integer value) #rpc_zmq_contexts = 1 @@ -268,13 +263,17 @@ # Expiration timeout in seconds of a name service record about # existing target ( < 0 means no timeout). (integer value) -#zmq_target_expire = 120 +#zmq_target_expire = 300 + +# Update period in seconds of a name service record about +# existing target. (integer value) +#zmq_target_update = 180 # Use PUB/SUB pattern for fanout methods. PUB/SUB always uses # proxy. (boolean value) #use_pub_sub = true -# Use ROUTER remote proxy for direct methods. (boolean value) +# Use ROUTER remote proxy. (boolean value) #use_router_proxy = true # Minimal port number for random ports range. (port value) @@ -299,12 +298,14 @@ #rpc_response_timeout = 60 # A URL representing the messaging driver to use and its full -# configuration. If not set, we fall back to the rpc_backend -# option and driver specific configuration. (string value) +# configuration. (string value) #transport_url = -# The messaging driver to use, defaults to rabbit. Other -# drivers include amqp and zmq. (string value) +# DEPRECATED: The messaging driver to use, defaults to rabbit. +# Other drivers include amqp and zmq. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #rpc_backend = rabbit # The default exchange under which topics are scoped. May be @@ -1319,7 +1320,7 @@ # Optionally specify a list of memcached server(s) to use for # caching. If left undefined, tokens will instead be cached # in-process. 
(list value) -# Deprecated group/name - [DEFAULT]/memcache_servers +# Deprecated group/name - [keystone_authtoken]/memcache_servers #memcached_servers = # In order to prevent excessive effort spent validating @@ -1410,7 +1411,7 @@ #hash_algorithms = md5 # Authentication type to load (unknown value) -# Deprecated group/name - [DEFAULT]/auth_plugin +# Deprecated group/name - [keystone_authtoken]/auth_plugin #auth_type = # Config Section from which to load plugin specific options @@ -1424,19 +1425,34 @@ # From oslo.messaging # -# Host to locate redis. (string value) +# DEPRECATED: Host to locate redis. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #host = 127.0.0.1 -# Use this port to connect to redis host. (port value) +# DEPRECATED: Use this port to connect to redis host. (port +# value) # Minimum value: 0 # Maximum value: 65535 +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #port = 6379 -# Password for Redis server (optional). (string value) +# DEPRECATED: Password for Redis server (optional). (string +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #password = -# List of Redis Sentinel hosts (fault tolerance mode) e.g. -# [host:port, host1:port ... ] (list value) +# DEPRECATED: List of Redis Sentinel hosts (fault tolerance +# mode) e.g. [host:port, host1:port ... ] (list +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #sentinel_hosts = # Redis replica set name. (string value) @@ -1476,10 +1492,10 @@ # value) #retries = 3 -# Default authentication strategy to use when connecting to -# neutron. 
Running neutron in noauth mode (related to but not -# affected by this setting) is insecure and should only be -# used for testing. (string value) +# Authentication strategy to use when connecting to neutron. +# Running neutron in noauth mode (related to but not affected +# by this setting) is insecure and should only be used for +# testing. (string value) # Allowed values: keystone, noauth #auth_strategy = keystone @@ -1687,7 +1703,7 @@ # How long to wait a missing client beforce abandoning to send # it its replies. This value should not be longer than # rpc_response_timeout. (integer value) -# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout #kombu_missing_consumer_retry_timeout = 60 # Determines how the next RabbitMQ node is chosen in case the @@ -1697,40 +1713,59 @@ # Allowed values: round-robin, shuffle #kombu_failover_strategy = round-robin -# The RabbitMQ broker address where a single node is used. -# (string value) +# DEPRECATED: The RabbitMQ broker address where a single node +# is used. (string value) # Deprecated group/name - [DEFAULT]/rabbit_host +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #rabbit_host = localhost -# The RabbitMQ broker port where a single node is used. (port -# value) +# DEPRECATED: The RabbitMQ broker port where a single node is +# used. (port value) # Minimum value: 0 # Maximum value: 65535 # Deprecated group/name - [DEFAULT]/rabbit_port +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #rabbit_port = 5672 -# RabbitMQ HA cluster host:port pairs. (list value) +# DEPRECATED: RabbitMQ HA cluster host:port pairs. (list +# value) # Deprecated group/name - [DEFAULT]/rabbit_hosts +# This option is deprecated for removal. +# Its value may be silently ignored in the future. 
+# Reason: Replaced by [DEFAULT]/transport_url #rabbit_hosts = $rabbit_host:$rabbit_port # Connect over SSL for RabbitMQ. (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_use_ssl #rabbit_use_ssl = false -# The RabbitMQ userid. (string value) +# DEPRECATED: The RabbitMQ userid. (string value) # Deprecated group/name - [DEFAULT]/rabbit_userid +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #rabbit_userid = guest -# The RabbitMQ password. (string value) +# DEPRECATED: The RabbitMQ password. (string value) # Deprecated group/name - [DEFAULT]/rabbit_password +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #rabbit_password = guest # The RabbitMQ login method. (string value) # Deprecated group/name - [DEFAULT]/rabbit_login_method #rabbit_login_method = AMQPLAIN -# The RabbitMQ virtual host. (string value) +# DEPRECATED: The RabbitMQ virtual host. (string value) # Deprecated group/name - [DEFAULT]/rabbit_virtual_host +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url #rabbit_virtual_host = / # How frequently to retry connecting with RabbitMQ. (integer @@ -1815,6 +1850,10 @@ # error (floating point value) #host_connection_reconnect_delay = 0.25 +# Connection factory implementation (string value) +# Allowed values: new, single, read_write +#connection_factory = single + # Maximum number of connections to keep queued. (integer # value) #pool_max_size = 30 @@ -1840,7 +1879,7 @@ # Persist notification messages. 
(boolean value) #notification_persistence = false -# Exchange name for for sending notifications (string value) +# Exchange name for sending notifications (string value) #default_notification_exchange = ${control_exchange}_notification # Max number of not acknowledged message which RabbitMQ can diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py new file mode 100644 index 000000000..dcd75e6e0 --- /dev/null +++ b/ironic/common/neutron.py @@ -0,0 +1,79 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutronclient.v2_0 import client as clientv20 +from oslo_config import cfg +from oslo_log import log + +from ironic.common.i18n import _ +from ironic.common import keystone + + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF +CONF.import_opt('my_ip', 'ironic.netconf') + +neutron_opts = [ + cfg.StrOpt('url', + default='http://$my_ip:9696', + help=_('URL for connecting to neutron.')), + cfg.IntOpt('url_timeout', + default=30, + help=_('Timeout value for connecting to neutron in seconds.')), + cfg.IntOpt('port_setup_delay', + default=0, + min=0, + help=_('Delay value to wait for Neutron agents to setup ' + 'sufficient DHCP configuration for port.')), + cfg.IntOpt('retries', + default=3, + help=_('Client retries in the case of a failed request.')), + cfg.StrOpt('auth_strategy', + default='keystone', + choices=['keystone', 'noauth'], + help=_('Authentication strategy to use when connecting to ' + 'neutron. 
Running neutron in noauth mode (related to ' + 'but not affected by this setting) is insecure and ' + 'should only be used for testing.')), + cfg.StrOpt('cleaning_network_uuid', + help=_('UUID of the network to create Neutron ports on, when ' + 'booting to a ramdisk for cleaning using Neutron DHCP.')) +] + +CONF.register_opts(neutron_opts, group='neutron') + + +def get_client(token=None): + params = { + 'timeout': CONF.neutron.url_timeout, + 'retries': CONF.neutron.retries, + 'insecure': CONF.keystone_authtoken.insecure, + 'ca_cert': CONF.keystone_authtoken.certfile, + } + + if CONF.neutron.auth_strategy == 'noauth': + params['endpoint_url'] = CONF.neutron.url + params['auth_strategy'] = 'noauth' + else: + params['endpoint_url'] = ( + CONF.neutron.url or + keystone.get_service_url(service_type='network')) + params['username'] = CONF.keystone_authtoken.admin_user + params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name + params['password'] = CONF.keystone_authtoken.admin_password + params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '') + if CONF.keystone.region_name: + params['region_name'] = CONF.keystone.region_name + params['token'] = token + + return clientv20.Client(**params) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 9914d1f28..8541d95c7 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -21,11 +21,11 @@ import ironic.common.hash_ring import ironic.common.image_service import ironic.common.images import ironic.common.keystone +import ironic.common.neutron import ironic.common.paths import ironic.common.service import ironic.common.swift import ironic.common.utils -import ironic.dhcp.neutron import ironic.drivers.modules.agent import ironic.drivers.modules.agent_base_vendor import ironic.drivers.modules.agent_client @@ -97,7 +97,7 @@ _opts = [ ironic.drivers.modules.irmc.common.opts)), ('iscsi', ironic.drivers.modules.iscsi_deploy.iscsi_opts), ('keystone', ironic.common.keystone.keystone_opts), - ('neutron', 
ironic.dhcp.neutron.neutron_opts), + ('neutron', ironic.common.neutron.neutron_opts), ('oneview', ironic.drivers.modules.oneview.common.opts), ('pxe', itertools.chain( ironic.drivers.modules.iscsi_deploy.pxe_opts, diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py index 7d55cefd9..25f833ff0 100644 --- a/ironic/dhcp/neutron.py +++ b/ironic/dhcp/neutron.py @@ -17,7 +17,6 @@ import time from neutronclient.common import exceptions as neutron_client_exc -from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils @@ -26,74 +25,16 @@ from ironic.common import exception from ironic.common.i18n import _ from ironic.common.i18n import _LE from ironic.common.i18n import _LW -from ironic.common import keystone from ironic.common import network +from ironic.common import neutron from ironic.dhcp import base from ironic.drivers.modules import ssh -from ironic.objects.port import Port - - -neutron_opts = [ - cfg.StrOpt('url', - default='http://$my_ip:9696', - help=_('URL for connecting to neutron.')), - cfg.IntOpt('url_timeout', - default=30, - help=_('Timeout value for connecting to neutron in seconds.')), - cfg.IntOpt('port_setup_delay', - default=0, - min=0, - help=_('Delay value to wait for Neutron agents to setup ' - 'sufficient DHCP configuration for port.')), - cfg.IntOpt('retries', - default=3, - help=_('Client retries in the case of a failed request.')), - cfg.StrOpt('auth_strategy', - default='keystone', - choices=['keystone', 'noauth'], - help=_('Default authentication strategy to use when connecting ' - 'to neutron. 
' - 'Running neutron in noauth mode (related to but not ' - 'affected by this setting) is insecure and should only ' - 'be used for testing.')), - cfg.StrOpt('cleaning_network_uuid', - help=_('UUID of the network to create Neutron ports on, when ' - 'booting to a ramdisk for cleaning using Neutron DHCP.')) -] +from ironic import objects CONF = cfg.CONF -CONF.import_opt('my_ip', 'ironic.netconf') -CONF.register_opts(neutron_opts, group='neutron') LOG = logging.getLogger(__name__) -def _build_client(token=None): - """Utility function to create Neutron client.""" - params = { - 'timeout': CONF.neutron.url_timeout, - 'retries': CONF.neutron.retries, - 'insecure': CONF.keystone_authtoken.insecure, - 'ca_cert': CONF.keystone_authtoken.certfile, - } - - if CONF.neutron.auth_strategy == 'noauth': - params['endpoint_url'] = CONF.neutron.url - params['auth_strategy'] = 'noauth' - else: - params['endpoint_url'] = ( - CONF.neutron.url or - keystone.get_service_url(service_type='network')) - params['username'] = CONF.keystone_authtoken.admin_user - params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name - params['password'] = CONF.keystone_authtoken.admin_password - params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '') - if CONF.keystone.region_name: - params['region_name'] = CONF.keystone.region_name - params['token'] = token - - return clientv20.Client(**params) - - class NeutronDHCPApi(base.BaseDHCP): """API for communicating to neutron 2.x API.""" @@ -122,7 +63,7 @@ class NeutronDHCPApi(base.BaseDHCP): """ port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}} try: - _build_client(token).update_port(port_id, port_req_body) + neutron.get_client(token).update_port(port_id, port_req_body) except neutron_client_exc.NeutronClientException: LOG.exception(_LE("Failed to update Neutron port %s."), port_id) raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id) @@ -137,7 +78,7 @@ class NeutronDHCPApi(base.BaseDHCP): """ port_req_body = {'port': 
{'mac_address': address}} try: - _build_client(token).update_port(port_id, port_req_body) + neutron.get_client(token).update_port(port_id, port_req_body) except neutron_client_exc.NeutronClientException: LOG.exception(_LE("Failed to update MAC address on Neutron " "port %s."), port_id) @@ -267,7 +208,7 @@ class NeutronDHCPApi(base.BaseDHCP): vif = p_obj.extra.get('vif_port_id') if not vif: obj_name = 'portgroup' - if isinstance(p_obj, Port): + if isinstance(p_obj, objects.Port): obj_name = 'port' LOG.warning(_LW("No VIFs found for node %(node)s when attempting " "to get IP address for %(obj_name)s: %(obj_id)."), @@ -300,7 +241,7 @@ class NeutronDHCPApi(base.BaseDHCP): if failures: obj_name = 'portgroups' - if isinstance(pobj_list[0], Port): + if isinstance(pobj_list[0], objects.Port): obj_name = 'ports' LOG.warning(_LW( @@ -319,7 +260,7 @@ class NeutronDHCPApi(base.BaseDHCP): :returns: List of IP addresses associated with task's ports/portgroups. """ - client = _build_client(task.context.auth_token) + client = neutron.get_client(task.context.auth_token) port_ip_addresses = self._get_ip_addresses(task, task.ports, client) portgroup_ip_addresses = self._get_ip_addresses( @@ -337,7 +278,7 @@ class NeutronDHCPApi(base.BaseDHCP): if not CONF.neutron.cleaning_network_uuid: raise exception.InvalidParameterValue(_('Valid cleaning network ' 'UUID not provided')) - neutron_client = _build_client(task.context.auth_token) + neutron_client = neutron.get_client(task.context.auth_token) body = { 'port': { 'network_id': CONF.neutron.cleaning_network_uuid, @@ -373,7 +314,7 @@ class NeutronDHCPApi(base.BaseDHCP): :param task: a TaskManager instance. 
""" - neutron_client = _build_client(task.context.auth_token) + neutron_client = neutron.get_client(task.context.auth_token) macs = [p.address for p in task.ports] params = { 'network_id': CONF.neutron.cleaning_network_uuid diff --git a/ironic/tests/unit/common/test_neutron.py b/ironic/tests/unit/common/test_neutron.py new file mode 100644 index 000000000..c55c115a3 --- /dev/null +++ b/ironic/tests/unit/common/test_neutron.py @@ -0,0 +1,109 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from neutronclient.v2_0 import client +from oslo_config import cfg + +from ironic.common import neutron +from ironic.tests import base + + +class TestNeutronClient(base.TestCase): + + def setUp(self): + super(TestNeutronClient, self).setUp() + self.config(url='test-url', + url_timeout=30, + retries=2, + group='neutron') + self.config(insecure=False, + certfile='test-file', + admin_user='test-admin-user', + admin_tenant_name='test-admin-tenant', + admin_password='test-admin-password', + auth_uri='test-auth-uri', + group='keystone_authtoken') + + @mock.patch.object(client.Client, "__init__") + def test_get_neutron_client_with_token(self, mock_client_init): + token = 'test-token-123' + expected = {'timeout': 30, + 'retries': 2, + 'insecure': False, + 'ca_cert': 'test-file', + 'token': token, + 'endpoint_url': 'test-url', + 'username': 'test-admin-user', + 'tenant_name': 'test-admin-tenant', + 'password': 'test-admin-password', + 'auth_url': 'test-auth-uri'} + + 
mock_client_init.return_value = None + neutron.get_client(token=token) + mock_client_init.assert_called_once_with(**expected) + + @mock.patch.object(client.Client, "__init__") + def test_get_neutron_client_without_token(self, mock_client_init): + expected = {'timeout': 30, + 'retries': 2, + 'insecure': False, + 'ca_cert': 'test-file', + 'token': None, + 'endpoint_url': 'test-url', + 'username': 'test-admin-user', + 'tenant_name': 'test-admin-tenant', + 'password': 'test-admin-password', + 'auth_url': 'test-auth-uri'} + + mock_client_init.return_value = None + neutron.get_client(token=None) + mock_client_init.assert_called_once_with(**expected) + + @mock.patch.object(client.Client, "__init__") + def test_get_neutron_client_with_region(self, mock_client_init): + expected = {'timeout': 30, + 'retries': 2, + 'insecure': False, + 'ca_cert': 'test-file', + 'token': None, + 'endpoint_url': 'test-url', + 'username': 'test-admin-user', + 'tenant_name': 'test-admin-tenant', + 'password': 'test-admin-password', + 'auth_url': 'test-auth-uri', + 'region_name': 'test-region'} + + self.config(region_name='test-region', + group='keystone') + mock_client_init.return_value = None + neutron.get_client(token=None) + mock_client_init.assert_called_once_with(**expected) + + @mock.patch.object(client.Client, "__init__") + def test_get_neutron_client_noauth(self, mock_client_init): + self.config(auth_strategy='noauth', group='neutron') + expected = {'ca_cert': 'test-file', + 'insecure': False, + 'endpoint_url': 'test-url', + 'timeout': 30, + 'retries': 2, + 'auth_strategy': 'noauth'} + + mock_client_init.return_value = None + neutron.get_client(token=None) + mock_client_init.assert_called_once_with(**expected) + + def test_out_range_auth_strategy(self): + self.assertRaises(ValueError, cfg.CONF.set_override, + 'auth_strategy', 'fake', 'neutron', + enforce_type=True) diff --git a/ironic/tests/unit/dhcp/test_neutron.py b/ironic/tests/unit/dhcp/test_neutron.py index 2647639e7..658d33497 
100644 --- a/ironic/tests/unit/dhcp/test_neutron.py +++ b/ironic/tests/unit/dhcp/test_neutron.py @@ -66,75 +66,6 @@ class TestNeutron(db_base.DbTestCase): dhcp_factory.DHCPFactory._dhcp_provider = None - @mock.patch.object(client.Client, "__init__") - def test__build_client_with_token(self, mock_client_init): - token = 'test-token-123' - expected = {'timeout': 30, - 'retries': 2, - 'insecure': False, - 'ca_cert': 'test-file', - 'token': token, - 'endpoint_url': 'test-url', - 'username': 'test-admin-user', - 'tenant_name': 'test-admin-tenant', - 'password': 'test-admin-password', - 'auth_url': 'test-auth-uri'} - - mock_client_init.return_value = None - neutron._build_client(token=token) - mock_client_init.assert_called_once_with(**expected) - - @mock.patch.object(client.Client, "__init__") - def test__build_client_without_token(self, mock_client_init): - expected = {'timeout': 30, - 'retries': 2, - 'insecure': False, - 'ca_cert': 'test-file', - 'token': None, - 'endpoint_url': 'test-url', - 'username': 'test-admin-user', - 'tenant_name': 'test-admin-tenant', - 'password': 'test-admin-password', - 'auth_url': 'test-auth-uri'} - - mock_client_init.return_value = None - neutron._build_client(token=None) - mock_client_init.assert_called_once_with(**expected) - - @mock.patch.object(client.Client, "__init__") - def test__build_client_with_region(self, mock_client_init): - expected = {'timeout': 30, - 'retries': 2, - 'insecure': False, - 'ca_cert': 'test-file', - 'token': None, - 'endpoint_url': 'test-url', - 'username': 'test-admin-user', - 'tenant_name': 'test-admin-tenant', - 'password': 'test-admin-password', - 'auth_url': 'test-auth-uri', - 'region_name': 'test-region'} - - self.config(region_name='test-region', - group='keystone') - mock_client_init.return_value = None - neutron._build_client(token=None) - mock_client_init.assert_called_once_with(**expected) - - @mock.patch.object(client.Client, "__init__") - def test__build_client_noauth(self, mock_client_init): - 
self.config(auth_strategy='noauth', group='neutron') - expected = {'ca_cert': 'test-file', - 'insecure': False, - 'endpoint_url': 'test-url', - 'timeout': 30, - 'retries': 2, - 'auth_strategy': 'noauth'} - - mock_client_init.return_value = None - neutron._build_client(token=None) - mock_client_init.assert_called_once_with(**expected) - @mock.patch.object(client.Client, 'update_port') @mock.patch.object(client.Client, "__init__") def test_update_port_dhcp_opts(self, mock_client_init, mock_update_port): From b0565fa5ea71d5fa3aca4e10681483aa8469220b Mon Sep 17 00:00:00 2001 From: Mathieu Mitchell Date: Tue, 21 Jun 2016 23:46:45 -0400 Subject: [PATCH 012/166] Use PRIVATE_NETWORK_NAME for devstack plugin The devstack plugin hardcodes "private" for the default cleaning network and when looking up the network to bind onto for IPA-conductor communication. Ensure we are using the devstack PRIVATE_NETWORK_NAME variable instead of hardcoding the commonly used name. Change-Id: I6f344edc75ad1e2ac29bd59a9804ab3cad6cf2b2 Partial-bug: #1595598 --- devstack/lib/ironic | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 97f04da8c..c1441fca8 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -118,7 +118,7 @@ IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm} IRONIC_VM_NETWORK_RANGE=${IRONIC_VM_NETWORK_RANGE:-192.0.2.0/24} IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv} IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys} -IRONIC_CLEAN_NET_NAME=${IRONIC_CLEAN_NET_NAME:-private} +IRONIC_CLEAN_NET_NAME=${IRONIC_CLEAN_NET_NAME:-$PRIVATE_NETWORK_NAME} IRONIC_EXTRA_PXE_PARAMS=${IRONIC_EXTRA_PXE_PARAMS:-} IRONIC_TTY_DEV=${IRONIC_TTY_DEV:-ttyS0} @@ -749,7 +749,7 @@ function create_bridge_and_vms { vbmc_port=$((vbmc_port+1)) done local ironic_net_id - ironic_net_id=$(openstack network show "private" -c id -f value) + 
ironic_net_id=$(openstack network show "$PRIVATE_NETWORK_NAME" -c id -f value) create_ovs_taps $ironic_net_id } From 8e95cdf8ec5f80cc8551fe124128461d68d3ac54 Mon Sep 17 00:00:00 2001 From: Ruby Loo Date: Thu, 16 Jun 2016 20:15:17 -0400 Subject: [PATCH 013/166] Replace dict.get(key) in api & conductor tests This replaces dict.get(key) with dict[key] in tests/unit/api & tests/unit/conductor, since we should be explicit about whether we expect the key to exist or not. In particular, we shouldn't be doing self.assertIsNone(dict.get(key)) since it will pass if 1. the value is None or 2. if the key isn't in the dict. For the case of 2, .assertIsNone() is replaced with .assertNotIn(). Change-Id: Ia305a221389e39d759b0881620c200c625fc98e1 Closes-Bug: #1590808 --- ironic/tests/unit/api/v1/test_nodes.py | 16 ++++++---------- ironic/tests/unit/conductor/test_manager.py | 6 +++--- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/ironic/tests/unit/api/v1/test_nodes.py b/ironic/tests/unit/api/v1/test_nodes.py index 9a100a8ce..7e21f21fe 100644 --- a/ironic/tests/unit/api/v1/test_nodes.py +++ b/ironic/tests/unit/api/v1/test_nodes.py @@ -536,8 +536,7 @@ class TestListNodes(test_api_base.BaseApiTest): def test_associated_nodes_insensitive(self): associated_nodes = (self - ._create_association_test_nodes() - .get('associated')) + ._create_association_test_nodes()['associated']) data = self.get_json('/nodes?associated=true') data1 = self.get_json('/nodes?associated=True') @@ -555,9 +554,8 @@ class TestListNodes(test_api_base.BaseApiTest): self.assertTrue(response.json['error_message']) def test_unassociated_nodes_insensitive(self): - unassociated_nodes = (self - ._create_association_test_nodes() - .get('unassociated')) + unassociated_nodes = ( + self._create_association_test_nodes()['unassociated']) data = self.get_json('/nodes?associated=false') data1 = self.get_json('/nodes?associated=FALSE') @@ -568,9 +566,8 @@ class TestListNodes(test_api_base.BaseApiTest): 
self.assertEqual(sorted(unassociated_nodes), sorted(uuids)) def test_unassociated_nodes_with_limit(self): - unassociated_nodes = (self - ._create_association_test_nodes() - .get('unassociated')) + unassociated_nodes = ( + self._create_association_test_nodes()['unassociated']) data = self.get_json('/nodes?associated=False&limit=2') @@ -585,8 +582,7 @@ class TestListNodes(test_api_base.BaseApiTest): def test_detail_with_association_filter(self): associated_nodes = (self - ._create_association_test_nodes() - .get('associated')) + ._create_association_test_nodes()['associated']) data = self.get_json('/nodes/detail?associated=true') self.assertIn('driver', data['nodes'][0]) self.assertEqual(len(associated_nodes), len(data['nodes'])) diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py index bcd4ff528..ac287fe39 100644 --- a/ironic/tests/unit/conductor/test_manager.py +++ b/ironic/tests/unit/conductor/test_manager.py @@ -1491,7 +1491,7 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, # Node will be moved to CLEANING self.assertEqual(states.CLEANING, node.provision_state) self.assertEqual(states.MANAGEABLE, node.target_provision_state) - self.assertIsNone(node.driver_internal_info.get('clean_steps')) + self.assertNotIn('clean_steps', node.driver_internal_info) self.assertIsNone(node.last_error) @mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker') @@ -1719,8 +1719,8 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, self.assertEqual(states.AVAILABLE, node.provision_state) self.assertEqual(states.NOSTATE, node.target_provision_state) self.assertEqual({}, node.clean_step) - self.assertIsNone(node.driver_internal_info.get('clean_steps')) - self.assertIsNone(node.driver_internal_info.get('clean_step_index')) + self.assertNotIn('clean_steps', node.driver_internal_info) + self.assertNotIn('clean_step_index', node.driver_internal_info) 
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare_cleaning') def __do_node_clean_prepare_clean_fail(self, mock_prep, clean_steps=None): From 474251ef7363fccbe8aaf6277c7f0149cd57beac Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 24 Jun 2016 08:40:10 +0000 Subject: [PATCH 014/166] Updated from global requirements Change-Id: I628ea6406dc0ba1e4e041f5733b4fdbb79508cb2 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 2ae19801d..2972b5e9f 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -25,5 +25,5 @@ sphinx!=1.3b1,<1.3,>=1.2.1 # BSD sphinxcontrib-pecanwsme>=0.8 # Apache-2.0 sphinxcontrib-seqdiag # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 -reno>=1.6.2 # Apache2 +reno>=1.8.0 # Apache2 os-api-ref>=0.1.0 # Apache-2.0 From 0c50ee3d26ab1ac66452aec63a9efe6e9cb8f2d0 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Tue, 5 Apr 2016 11:13:55 -0700 Subject: [PATCH 015/166] Centralize config options - [iboot] Nova style refactor of config options in Ironic. 
Change-Id: Id472e88bd550088c8b0d4f3ed7e7c04b05fa8b8a Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 ++ ironic/conf/iboot.py | 42 +++++++++++++++++++++++++++++++++ ironic/conf/opts.py | 3 +-- ironic/drivers/modules/iboot.py | 23 +----------------- 4 files changed, 46 insertions(+), 24 deletions(-) create mode 100644 ironic/conf/iboot.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index ec66aacd4..b01109e74 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -21,6 +21,7 @@ from ironic.conf import conductor from ironic.conf import console from ironic.conf import database from ironic.conf import dhcp +from ironic.conf import iboot CONF = cfg.CONF @@ -30,3 +31,4 @@ conductor.register_opts(CONF) console.register_opts(CONF) database.register_opts(CONF) dhcp.register_opts(CONF) +iboot.register_opts(CONF) diff --git a/ironic/conf/iboot.py b/ironic/conf/iboot.py new file mode 100644 index 000000000..305fba395 --- /dev/null +++ b/ironic/conf/iboot.py @@ -0,0 +1,42 @@ +# Copyright 2016 Intel Corporation +# Copyright 2014 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.IntOpt('max_retry', + default=3, + help=_('Maximum retries for iBoot operations')), + cfg.IntOpt('retry_interval', + default=1, + help=_('Time (in seconds) between retry attempts for iBoot ' + 'operations')), + cfg.IntOpt('reboot_delay', + default=5, + min=0, + help=_('Time (in seconds) to sleep between when rebooting ' + '(powering off and on again).')) +] + +opt_group = cfg.OptGroup(name='iboot', + title='Options for the iBoot power driver') + + +def register_opts(conf): + conf.register_group(opt_group) + conf.register_opts(opts, group=opt_group) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 8541d95c7..c91de6fef 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -32,7 +32,6 @@ import ironic.drivers.modules.agent_client import ironic.drivers.modules.amt.common import ironic.drivers.modules.amt.power import ironic.drivers.modules.deploy_utils -import ironic.drivers.modules.iboot import ironic.drivers.modules.ilo.common import ironic.drivers.modules.ilo.deploy import ironic.drivers.modules.ilo.management @@ -84,7 +83,7 @@ _opts = [ ('glance', itertools.chain( ironic.common.glance_service.v2.image_service.glance_opts, ironic.common.image_service.glance_opts)), - ('iboot', ironic.drivers.modules.iboot.opts), + ('iboot', ironic.conf.iboot.opts), ('ilo', itertools.chain( ironic.drivers.modules.ilo.common.opts, ironic.drivers.modules.ilo.deploy.clean_opts, diff --git a/ironic/drivers/modules/iboot.py b/ironic/drivers/modules/iboot.py index 6f7b8bcc6..5b62d5237 100644 --- a/ironic/drivers/modules/iboot.py +++ b/ironic/drivers/modules/iboot.py @@ -21,7 +21,6 @@ Ironic iBoot PDU power manager. 
import time -from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import importutils @@ -32,31 +31,11 @@ from ironic.common.i18n import _LW from ironic.common import states from ironic.common import utils from ironic.conductor import task_manager +from ironic.conf import CONF from ironic.drivers import base iboot = importutils.try_import('iboot') -opts = [ - cfg.IntOpt('max_retry', - default=3, - help=_('Maximum retries for iBoot operations')), - cfg.IntOpt('retry_interval', - default=1, - help=_('Time (in seconds) between retry attempts for iBoot ' - 'operations')), - cfg.IntOpt('reboot_delay', - default=5, - min=0, - help=_('Time (in seconds) to sleep between when rebooting ' - '(powering off and on again).')) -] - -CONF = cfg.CONF -opt_group = cfg.OptGroup(name='iboot', - title='Options for the iBoot power driver') -CONF.register_group(opt_group) -CONF.register_opts(opts, opt_group) - LOG = logging.getLogger(__name__) REQUIRED_PROPERTIES = { From dd0420c787161f2043dde0f924babe9446d28cf9 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Mon, 27 Jun 2016 09:29:26 +0200 Subject: [PATCH 016/166] Drop references to RPC calls from user-visible errors Presence of the RPC is the implementation detail. We should not present users with messages like "RPC failed to ". Change-Id: Ie78c7af2988007f59849ce936f5ee8ccd1f0e60b --- ironic/conductor/manager.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py index 8e0a9ff05..c53dc228f 100644 --- a/ironic/conductor/manager.py +++ b/ironic/conductor/manager.py @@ -446,8 +446,8 @@ class ConductorManager(base_manager.BaseConductorManager): except (exception.InvalidParameterValue, exception.MissingParameterValue) as e: raise exception.InstanceDeployFailure( - _("RPC do_node_deploy failed to validate deploy or " - "power info for node %(node_uuid)s. 
Error: %(msg)s") % + _("Failed to validate deploy or power info for node " + "%(node_uuid)s. Error: %(msg)s") % {'node_uuid': node.uuid, 'msg': e}) LOG.debug("do_node_deploy Calling event: %(event)s for node: " @@ -644,8 +644,8 @@ class ConductorManager(base_manager.BaseConductorManager): try: task.driver.power.validate(task) except exception.InvalidParameterValue as e: - msg = (_('RPC do_node_clean failed to validate power info.' - ' Cannot clean node %(node)s. Error: %(msg)s') % + msg = (_('Failed to validate power info. ' + 'Cannot clean node %(node)s. Error: %(msg)s') % {'node': node.uuid, 'msg': e}) raise exception.InvalidParameterValue(msg) @@ -1994,8 +1994,8 @@ class ConductorManager(base_manager.BaseConductorManager): task.driver.inspect.validate(task) except (exception.InvalidParameterValue, exception.MissingParameterValue) as e: - error = (_("RPC inspect_hardware failed to validate " - "inspection or power info. Error: %(msg)s") + error = (_("Failed to validate inspection or power info. " + "Error: %(msg)s") % {'msg': e}) raise exception.HardwareInspectionFailure(error=error) From 088f09903b310fe66cb76f1c5bd62f1ac076dd53 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Fri, 17 Jun 2016 09:42:33 +0200 Subject: [PATCH 017/166] Add dbapi and objects functions to get a node by associated MAC addresses Adds a new dbapi call get_node_by_port_addresses and associated objects call Node.get_by_port_addresses. The logic is the same as in "lookup" agent passthru. Will be used for a new lookup endpoint. 
Change-Id: Ia5549fb16cd363f3492b9ca0400177c92a1aea19 Partial-Bug: #1570841 --- ironic/db/api.py | 9 ++++ ironic/db/sqlalchemy/api.py | 17 ++++++- ironic/objects/node.py | 16 ++++++- ironic/tests/unit/db/test_nodes.py | 54 +++++++++++++++++++++++ ironic/tests/unit/objects/test_node.py | 11 +++++ ironic/tests/unit/objects/test_objects.py | 2 +- 6 files changed, 106 insertions(+), 3 deletions(-) diff --git a/ironic/db/api.py b/ironic/db/api.py index f8ab5a686..c0fd52c59 100644 --- a/ironic/db/api.py +++ b/ironic/db/api.py @@ -604,3 +604,12 @@ class Connection(object): :param tag: A tag string. :returns: True if the tag exists otherwise False. """ + + @abc.abstractmethod + def get_node_by_port_addresses(self, addresses): + """Find a node by any matching port address. + + :param addresses: list of port addresses (e.g. MACs). + :returns: Node object. + :raises: NodeNotFound if none or several nodes are found. + """ diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py index 02bf4782c..0ac46b9fb 100644 --- a/ironic/db/sqlalchemy/api.py +++ b/ironic/db/sqlalchemy/api.py @@ -28,7 +28,7 @@ from oslo_log import log from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils -from sqlalchemy.orm.exc import NoResultFound +from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound from sqlalchemy.orm import joinedload from sqlalchemy import sql @@ -843,3 +843,18 @@ class Connection(api.Connection): def node_tag_exists(self, node_id, tag): q = model_query(models.NodeTag).filter_by(node_id=node_id, tag=tag) return model_query(q.exists()).scalar() + + def get_node_by_port_addresses(self, addresses): + q = model_query(models.Node).distinct().join(models.Port) + q = q.filter(models.Port.address.in_(addresses)) + + try: + return q.one() + except NoResultFound: + raise exception.NodeNotFound( + _('Node with port addresses %s was not found') + % addresses) + except MultipleResultsFound: + raise exception.NodeNotFound( + 
_('Multiple nodes with port addresses %s were found') + % addresses) diff --git a/ironic/objects/node.py b/ironic/objects/node.py index 2e5049b7a..aa4b851ae 100644 --- a/ironic/objects/node.py +++ b/ironic/objects/node.py @@ -45,7 +45,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat): # Version 1.13: Add touch_provisioning() # Version 1.14: Add _validate_property_values() and make create() # and save() validate the input of property values. - VERSION = '1.14' + # Version 1.15: Add get_by_port_addresses + VERSION = '1.15' dbapi = db_api.get_instance() @@ -364,3 +365,16 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat): def touch_provisioning(self, context=None): """Touch the database record to mark the provisioning as alive.""" self.dbapi.touch_node_provisioning(self.id) + + @classmethod + def get_by_port_addresses(cls, context, addresses): + """Get a node by associated port addresses. + + :param context: Security context. + :param addresses: A list of port addresses. + :raises: NodeNotFound if the node is not found. + :returns: a :class:`Node` object. 
+ """ + db_node = cls.dbapi.get_node_by_port_addresses(addresses) + node = Node._from_db_object(cls(context), db_node) + return node diff --git a/ironic/tests/unit/db/test_nodes.py b/ironic/tests/unit/db/test_nodes.py index 33522a1e4..3852f7bf6 100644 --- a/ironic/tests/unit/db/test_nodes.py +++ b/ironic/tests/unit/db/test_nodes.py @@ -570,3 +570,57 @@ class DbNodeTestCase(base.DbTestCase): self.assertRaises( exception.NodeNotFound, self.dbapi.touch_node_provisioning, uuidutils.generate_uuid()) + + def test_get_node_by_port_addresses(self): + wrong_node = utils.create_test_node( + driver='driver-one', + uuid=uuidutils.generate_uuid()) + node = utils.create_test_node( + driver='driver-two', + uuid=uuidutils.generate_uuid()) + addresses = [] + for i in (1, 2, 3): + address = '52:54:00:cf:2d:4%s' % i + utils.create_test_port(uuid=uuidutils.generate_uuid(), + node_id=node.id, address=address) + if i > 1: + addresses.append(address) + utils.create_test_port(uuid=uuidutils.generate_uuid(), + node_id=wrong_node.id, + address='aa:bb:cc:dd:ee:ff') + + res = self.dbapi.get_node_by_port_addresses(addresses) + self.assertEqual(node.uuid, res.uuid) + + def test_get_node_by_port_addresses_not_found(self): + node = utils.create_test_node( + driver='driver', + uuid=uuidutils.generate_uuid()) + utils.create_test_port(uuid=uuidutils.generate_uuid(), + node_id=node.id, + address='aa:bb:cc:dd:ee:ff') + + self.assertRaisesRegexp(exception.NodeNotFound, + 'was not found', + self.dbapi.get_node_by_port_addresses, + ['11:22:33:44:55:66']) + + def test_get_node_by_port_addresses_multiple_found(self): + node1 = utils.create_test_node( + driver='driver', + uuid=uuidutils.generate_uuid()) + node2 = utils.create_test_node( + driver='driver', + uuid=uuidutils.generate_uuid()) + addresses = ['52:54:00:cf:2d:4%s' % i for i in (1, 2)] + utils.create_test_port(uuid=uuidutils.generate_uuid(), + node_id=node1.id, + address=addresses[0]) + utils.create_test_port(uuid=uuidutils.generate_uuid(), + 
node_id=node2.id, + address=addresses[1]) + + self.assertRaisesRegexp(exception.NodeNotFound, + 'Multiple nodes', + self.dbapi.get_node_by_port_addresses, + addresses) diff --git a/ironic/tests/unit/objects/test_node.py b/ironic/tests/unit/objects/test_node.py index b7ef8073a..8d972d180 100644 --- a/ironic/tests/unit/objects/test_node.py +++ b/ironic/tests/unit/objects/test_node.py @@ -54,6 +54,17 @@ class TestNodeObject(base.DbTestCase): self.assertRaises(exception.InvalidIdentity, objects.Node.get, self.context, 'not-a-uuid') + def test_get_by_port_addresses(self): + with mock.patch.object(self.dbapi, 'get_node_by_port_addresses', + autospec=True) as mock_get_node: + mock_get_node.return_value = self.fake_node + + node = objects.Node.get_by_port_addresses(self.context, + ['aa:bb:cc:dd:ee:ff']) + + mock_get_node.assert_called_once_with(['aa:bb:cc:dd:ee:ff']) + self.assertEqual(self.context, node._context) + def test_save(self): uuid = self.fake_node['uuid'] with mock.patch.object(self.dbapi, 'get_node_by_uuid', diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py index 1564466c2..ae67537bd 100644 --- a/ironic/tests/unit/objects/test_objects.py +++ b/ironic/tests/unit/objects/test_objects.py @@ -404,7 +404,7 @@ class TestObject(_LocalTest, _TestObject): # version bump. It is md5 hash of object fields and remotable methods. # The fingerprint values should only be changed if there is a version bump. 
expected_object_fingerprints = { - 'Node': '1.14-9ee8ab283b06398545880dfdedb49891', + 'Node': '1.15-9ee8ab283b06398545880dfdedb49891', 'MyObj': '1.5-4f5efe8f0fcaf182bbe1c7fe3ba858db', 'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905', 'Port': '1.5-a224755c3da5bc5cf1a14a11c0d00f3f', From 13d25d4788ac5009f9afe73111bdc6766ff271fb Mon Sep 17 00:00:00 2001 From: Sam Betts Date: Fri, 10 Jun 2016 12:00:10 +0100 Subject: [PATCH 018/166] Fail early if ramdisk type is dib, and not building Currently if IRONIC_BUILD_DEPLOY_RAMDISK is False, and ramdisk type is DIB, we won't fail until the script attempts to run wget using an empty address, this results in a hard to debug error. This patch ensures that if we are not building the ramdisk but the ramdisk type is DIB that the whole script fails early with an error message describing how to correct the problem. Change-Id: Ib9f31a3356ed64631129e5880e2657b0cf2014d2 --- devstack/lib/ironic | 48 +++++++++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 17 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 87fabd96c..6c4f62ced 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -130,9 +130,15 @@ IRONIC_VM_LOG_ROTATE=$(trueorfalse True IRONIC_VM_LOG_ROTATE) # Whether to build the ramdisk or download a prebuilt one. IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse True IRONIC_BUILD_DEPLOY_RAMDISK) -# Ironic IPA ramdisk type, supported types are: coreos, tinyipa and dib. +# Ironic IPA ramdisk type, supported types are: +IRONIC_SUPPORTED_RAMDISK_TYPES_RE="^(coreos|tinyipa|dib)$" IRONIC_RAMDISK_TYPE=${IRONIC_RAMDISK_TYPE:-tinyipa} +# Confirm we have a supported ramdisk type or fail early. +if [[ ! "$IRONIC_RAMDISK_TYPE" =~ $IRONIC_SUPPORTED_RAMDISK_TYPES_RE ]]; then + die $LINENO "Unrecognized IRONIC_RAMDISK_TYPE: $IRONIC_RAMDISK_TYPE. Expected 'coreos', 'tinyipa' or 'dib'" +fi + # If present, these files are used as deploy ramdisk/kernel. 
# (The value must be an absolute path) IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-} @@ -142,22 +148,30 @@ IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-} IPA_DOWNLOAD_BRANCH=${IPA_DOWNLOAD_BRANCH:-master} IPA_DOWNLOAD_BRANCH=$(echo $IPA_DOWNLOAD_BRANCH | tr / -) -case $IRONIC_RAMDISK_TYPE in - coreos) - IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-https://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe-${IPA_DOWNLOAD_BRANCH}.vmlinuz} - IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-https://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem-${IPA_DOWNLOAD_BRANCH}.cpio.gz} - ;; - tinyipa) - IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/tinyipa-${IPA_DOWNLOAD_BRANCH}.vmlinuz} - IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/tinyipa-${IPA_DOWNLOAD_BRANCH}.gz} - ;; - dib) - echo "IRONIC_RAMDISK_TYPE setting 'dib' has no pre-built images" - ;; - *) - die $LINENO "Unrecognised IRONIC_RAMDISK_TYPE: $IRONIC_RAMDISK_TYPE. Expected 'coreos', 'tinyipa' or 'dib'" - ;; -esac +# Configure URLs required to download ramdisk if we're not building it, and +# IRONIC_DEPLOY_RAMDISK/KERNEL or the RAMDISK/KERNEL_URLs have not been +# preconfigured. +if [[ "$IRONIC_BUILD_DEPLOY_RAMDISK" == "False" && \ + ! 
(-e "$IRONIC_DEPLOY_RAMDISK" && -e "$IRONIC_DEPLOY_KERNEL") && \ + (-z "$IRONIC_AGENT_KERNEL_URL" || -z "$IRONIC_AGENT_RAMDISK_URL") ]]; then + case $IRONIC_RAMDISK_TYPE in + coreos) + IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-https://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe-${IPA_DOWNLOAD_BRANCH}.vmlinuz} + IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-https://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem-${IPA_DOWNLOAD_BRANCH}.cpio.gz} + ;; + tinyipa) + IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/tinyipa-${IPA_DOWNLOAD_BRANCH}.vmlinuz} + IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/tinyipa-${IPA_DOWNLOAD_BRANCH}.gz} + ;; + dib) + die "IRONIC_RAMDISK_TYPE 'dib' has no official pre-built "\ + "images. To fix this select a different ramdisk type, set "\ + "IRONIC_BUILD_DEPLOY_RAMDISK=True, or manually configure "\ + "IRONIC_DEPLOY_RAMDISK(_URL) and IRONIC_DEPLOY_KERNEL(_URL) "\ + "to use your own pre-built ramdisk." + ;; + esac +fi # This refers the options for disk-image-create and the platform on which # to build the dib based ironic-python-agent ramdisk. From f5e6e79e45c0c99038648dee29495b4ce2c6d326 Mon Sep 17 00:00:00 2001 From: Edan David Date: Mon, 27 Jun 2016 08:55:01 -0400 Subject: [PATCH 019/166] Remove unnecessary calls to dict.keys() It is preferred not to use the keys method when not needed because the keys() method of dict objects behaves differently under Python 2 and Python 3 and in most cases using keys is inefficient. 
Change-Id: Iee5b2822933ec1b91e847e47c71682cadc26967f --- ironic/drivers/modules/iscsi_deploy.py | 2 +- ironic/tests/base.py | 2 +- ironic/tests/unit/api/test_root.py | 2 +- ironic/tests/unit/api/v1/test_chassis.py | 8 ++++---- ironic/tests/unit/api/v1/test_nodes.py | 8 ++++---- ironic/tests/unit/drivers/modules/amt/test_common.py | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py index 041813157..2bd7deca2 100644 --- a/ironic/drivers/modules/iscsi_deploy.py +++ b/ironic/drivers/modules/iscsi_deploy.py @@ -272,7 +272,7 @@ def continue_deploy(task, **kwargs): if LOG.isEnabledFor(logging.logging.DEBUG): log_params = { k: params[k] if k != 'configdrive' else '***' - for k in params.keys() + for k in params } LOG.debug('Continuing deployment for node %(node)s, params %(params)s', {'node': node.uuid, 'params': log_params}) diff --git a/ironic/tests/base.py b/ironic/tests/base.py index ad9374be4..dd5834939 100644 --- a/ironic/tests/base.py +++ b/ironic/tests/base.py @@ -130,7 +130,7 @@ class TestCase(testtools.TestCase): # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite - for key in [k for k in self.__dict__.keys() if k[0] != '_']: + for key in [k for k in self.__dict__ if k[0] != '_']: del self.__dict__[key] def config(self, **kw): diff --git a/ironic/tests/unit/api/test_root.py b/ironic/tests/unit/api/test_root.py index 2aa4e25ba..3f41242f9 100644 --- a/ironic/tests/unit/api/test_root.py +++ b/ironic/tests/unit/api/test_root.py @@ -41,7 +41,7 @@ class TestV1Root(base.BaseApiTest): data = self.get_json('/') self.assertEqual('v1', data['id']) # Check fields are not empty - for f in data.keys(): + for f in data: self.assertNotIn(f, ['', []]) # Check if all known resources are present and there are no extra ones. 
not_resources = ('id', 'links', 'media_types') diff --git a/ironic/tests/unit/api/v1/test_chassis.py b/ironic/tests/unit/api/v1/test_chassis.py index cd78ac9ce..b3be071cc 100644 --- a/ironic/tests/unit/api/v1/test_chassis.py +++ b/ironic/tests/unit/api/v1/test_chassis.py @@ -137,7 +137,7 @@ class TestListChassis(test_api_base.BaseApiTest): uuid = uuidutils.generate_uuid() obj_utils.create_test_chassis(self.context, uuid=uuid) data = self.get_json('/chassis/%s' % uuid) - self.assertIn('links', data.keys()) + self.assertIn('links', data) self.assertEqual(2, len(data['links'])) self.assertIn(uuid, data['links'][0]['href']) for l in data['links']: @@ -201,7 +201,7 @@ class TestListChassis(test_api_base.BaseApiTest): def test_nodes_subresource_link(self): chassis = obj_utils.create_test_chassis(self.context) data = self.get_json('/chassis/%s' % chassis.uuid) - self.assertIn('nodes', data.keys()) + self.assertIn('nodes', data) def test_nodes_subresource(self): chassis = obj_utils.create_test_chassis(self.context) @@ -213,12 +213,12 @@ class TestListChassis(test_api_base.BaseApiTest): data = self.get_json('/chassis/%s/nodes' % chassis.uuid) self.assertEqual(2, len(data['nodes'])) - self.assertNotIn('next', data.keys()) + self.assertNotIn('next', data) # Test collection pagination data = self.get_json('/chassis/%s/nodes?limit=1' % chassis.uuid) self.assertEqual(1, len(data['nodes'])) - self.assertIn('next', data.keys()) + self.assertIn('next', data) def test_nodes_subresource_no_uuid(self): response = self.get_json('/chassis/nodes', expect_errors=True) diff --git a/ironic/tests/unit/api/v1/test_nodes.py b/ironic/tests/unit/api/v1/test_nodes.py index 7e21f21fe..940f0233b 100644 --- a/ironic/tests/unit/api/v1/test_nodes.py +++ b/ironic/tests/unit/api/v1/test_nodes.py @@ -336,7 +336,7 @@ class TestListNodes(test_api_base.BaseApiTest): uuid = uuidutils.generate_uuid() obj_utils.create_test_node(self.context, uuid=uuid) data = self.get_json('/nodes/%s' % uuid) - 
self.assertIn('links', data.keys()) + self.assertIn('links', data) self.assertEqual(2, len(data['links'])) self.assertIn(uuid, data['links'][0]['href']) for l in data['links']: @@ -406,7 +406,7 @@ class TestListNodes(test_api_base.BaseApiTest): def test_ports_subresource_link(self): node = obj_utils.create_test_node(self.context) data = self.get_json('/nodes/%s' % node.uuid) - self.assertIn('ports', data.keys()) + self.assertIn('ports', data) def test_ports_subresource(self): node = obj_utils.create_test_node(self.context) @@ -418,12 +418,12 @@ class TestListNodes(test_api_base.BaseApiTest): data = self.get_json('/nodes/%s/ports' % node.uuid) self.assertEqual(2, len(data['ports'])) - self.assertNotIn('next', data.keys()) + self.assertNotIn('next', data) # Test collection pagination data = self.get_json('/nodes/%s/ports?limit=1' % node.uuid) self.assertEqual(1, len(data['ports'])) - self.assertIn('next', data.keys()) + self.assertIn('next', data) def test_ports_subresource_noid(self): node = obj_utils.create_test_node(self.context) diff --git a/ironic/tests/unit/drivers/modules/amt/test_common.py b/ironic/tests/unit/drivers/modules/amt/test_common.py index d82a9690a..18197455d 100644 --- a/ironic/tests/unit/drivers/modules/amt/test_common.py +++ b/ironic/tests/unit/drivers/modules/amt/test_common.py @@ -112,7 +112,7 @@ class AMTCommonMethodsTestCase(db_base.DbTestCase): class AMTCommonClientTestCase(base.TestCase): def setUp(self): super(AMTCommonClientTestCase, self).setUp() - self.info = {key[4:]: INFO_DICT[key] for key in INFO_DICT.keys()} + self.info = {key[4:]: INFO_DICT[key] for key in INFO_DICT} def test_wsman_get(self, mock_client_pywsman): namespace = resource_uris.CIM_AssociatedPowerManagementService From 97f96642a9a85e3d6d638ddabb0586ec5902b614 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Wed, 22 Jun 2016 17:58:35 +0200 Subject: [PATCH 020/166] Introduce new driver call and RPC for heartbeat Change-Id: Iec31feb07b85f9ed668d354967f8d265233a2bc1 
Partial-Bug: #1570841 --- ironic/conductor/manager.py | 21 ++++++++++++++++++++- ironic/conductor/rpcapi.py | 15 ++++++++++++++- ironic/drivers/base.py | 13 ++++++++++++- ironic/tests/unit/conductor/test_manager.py | 13 +++++++++++++ ironic/tests/unit/conductor/test_rpcapi.py | 7 +++++++ ironic/tests/unit/drivers/test_base.py | 12 ++++++++++++ 6 files changed, 78 insertions(+), 3 deletions(-) diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py index 8e0a9ff05..2e60415f0 100644 --- a/ironic/conductor/manager.py +++ b/ironic/conductor/manager.py @@ -81,7 +81,7 @@ class ConductorManager(base_manager.BaseConductorManager): """Ironic Conductor manager main class.""" # NOTE(rloo): This must be in sync with rpcapi.ConductorAPI's. - RPC_API_VERSION = '1.33' + RPC_API_VERSION = '1.34' target = messaging.Target(version=RPC_API_VERSION) @@ -2093,6 +2093,25 @@ class ConductorManager(base_manager.BaseConductorManager): return driver.raid.get_logical_disk_properties() + @messaging.expected_exceptions(exception.NoFreeConductorWorker) + def heartbeat(self, context, node_id, callback_url): + """Process a heartbeat from the ramdisk. + + :param context: request context. + :param node_id: node id or uuid. + :param callback_url: URL to reach back to the ramdisk. + :raises: NoFreeConductorWorker if there are no conductors to process + this heartbeat request. + """ + LOG.debug('RPC heartbeat called for node %s', node_id) + + # NOTE(dtantsur): we acquire a shared lock to begin with, drivers are + # free to promote it to an exclusive one. + with task_manager.acquire(context, node_id, shared=True, + purpose='heartbeat') as task: + task.spawn_after(self._spawn_worker, task.driver.deploy.heartbeat, + task, callback_url) + def _object_dispatch(self, target, method, context, args, kwargs): """Dispatch a call to an object method. 
diff --git a/ironic/conductor/rpcapi.py b/ironic/conductor/rpcapi.py index 794636c5f..9b4b5c765 100644 --- a/ironic/conductor/rpcapi.py +++ b/ironic/conductor/rpcapi.py @@ -80,11 +80,12 @@ class ConductorAPI(object): | object_backport_versions | 1.32 - Add do_node_clean | 1.33 - Added update and destroy portgroup. + | 1.34 - Added heartbeat """ # NOTE(rloo): This must be in sync with manager.ConductorManager's. - RPC_API_VERSION = '1.33' + RPC_API_VERSION = '1.34' def __init__(self, topic=None): super(ConductorAPI, self).__init__() @@ -646,6 +647,18 @@ class ConductorAPI(object): return cctxt.call(context, 'do_node_clean', node_id=node_id, clean_steps=clean_steps) + def heartbeat(self, context, node_id, callback_url, topic=None): + """Process a node heartbeat. + + :param context: request context. + :param node_id: node ID or UUID. + :param callback_url: URL to reach back to the ramdisk. + :param topic: RPC topic. Defaults to self.topic. + """ + cctxt = self.client.prepare(topic=topic or self.topic, version='1.34') + return cctxt.call(context, 'heartbeat', node_id=node_id, + callback_url=callback_url) + def object_class_action_versions(self, context, objname, objmethod, object_versions, args, kwargs): """Perform an action on a VersionedObject class. diff --git a/ironic/drivers/base.py b/ironic/drivers/base.py index 7a079b318..962048c23 100644 --- a/ironic/drivers/base.py +++ b/ironic/drivers/base.py @@ -30,7 +30,7 @@ from oslo_utils import excutils import six from ironic.common import exception -from ironic.common.i18n import _, _LE +from ironic.common.i18n import _, _LE, _LW from ironic.common import raid LOG = logging.getLogger(__name__) @@ -375,6 +375,17 @@ class DeployInterface(BaseInterface): """ pass + def heartbeat(self, task, callback_url): + """Record a heartbeat for the node. + + :param task: a TaskManager instance containing the node to act on. + :param callback_url: a URL to use to call to the ramdisk. 
+ :return: None + """ + LOG.warning(_LW('Got heartbeat message from node %(node)s, but ' + 'the driver %(driver)s does not support heartbeating'), + {'node': task.node.uuid, 'driver': task.node.driver}) + @six.add_metaclass(abc.ABCMeta) class BootInterface(object): diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py index ac287fe39..b5266d982 100644 --- a/ironic/tests/unit/conductor/test_manager.py +++ b/ironic/tests/unit/conductor/test_manager.py @@ -5016,3 +5016,16 @@ class DoNodeAdoptionTestCase( self.assertEqual(states.MANAGEABLE, node.provision_state) self.assertEqual(states.NOSTATE, node.target_provision_state) self.assertIsNone(node.last_error) + + @mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker') + def test_heartbeat(self, mock_spawn): + """Test heartbeating.""" + node = obj_utils.create_test_node( + self.context, driver='fake', + provision_state=states.DEPLOYING, + target_provision_state=states.ACTIVE) + + self._start_service() + self.service.heartbeat(self.context, node.uuid, 'http://callback') + mock_spawn.assert_called_with(self.driver.deploy.heartbeat, + mock.ANY, 'http://callback') diff --git a/ironic/tests/unit/conductor/test_rpcapi.py b/ironic/tests/unit/conductor/test_rpcapi.py index 908d68f83..99f57f364 100644 --- a/ironic/tests/unit/conductor/test_rpcapi.py +++ b/ironic/tests/unit/conductor/test_rpcapi.py @@ -393,3 +393,10 @@ class RPCAPITestCase(base.DbTestCase): 'call', version='1.33', portgroup=self.fake_portgroup) + + def test_heartbeat(self): + self._test_rpcapi('heartbeat', + 'call', + node_id='fake-node', + callback_url='http://ramdisk.url:port', + version='1.34') diff --git a/ironic/tests/unit/drivers/test_base.py b/ironic/tests/unit/drivers/test_base.py index 3276023ca..d6b48250e 100644 --- a/ironic/tests/unit/drivers/test_base.py +++ b/ironic/tests/unit/drivers/test_base.py @@ -20,6 +20,7 @@ import mock from ironic.common import exception from ironic.common import 
raid from ironic.drivers import base as driver_base +from ironic.drivers.modules import fake from ironic.tests import base @@ -389,3 +390,14 @@ class RAIDInterfaceTestCase(base.TestCase): raid_interface = MyRAIDInterface() raid_interface.get_logical_disk_properties() get_properties_mock.assert_called_once_with(raid_schema) + + +class TestDeployInterface(base.TestCase): + @mock.patch.object(driver_base.LOG, 'warning', autospec=True) + def test_warning_on_heartbeat(self, mock_log): + # NOTE(dtantsur): FakeDeploy does not override heartbeat + deploy = fake.FakeDeploy() + deploy.heartbeat(mock.Mock(node=mock.Mock(uuid='uuid', + driver='driver')), + 'url') + self.assertTrue(mock_log.called) From ba30be8238701a2949d19b119a5f582a738c95bc Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Tue, 5 Apr 2016 15:10:19 -0700 Subject: [PATCH 021/166] Centralize config options - [ilo] Nova style refactor of config options in Ironic. Change-Id: I6a84ba4ba1a7721c03efaf682b94b89e935da4a4 Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 + ironic/conf/ilo.py | 81 ++++++++++++++++++++++++ ironic/conf/opts.py | 10 +-- ironic/drivers/modules/ilo/common.py | 30 +-------- ironic/drivers/modules/ilo/deploy.py | 14 +--- ironic/drivers/modules/ilo/management.py | 30 +-------- ironic/drivers/modules/ilo/power.py | 17 +---- 7 files changed, 88 insertions(+), 96 deletions(-) create mode 100644 ironic/conf/ilo.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index b01109e74..80109c9a5 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -22,6 +22,7 @@ from ironic.conf import console from ironic.conf import database from ironic.conf import dhcp from ironic.conf import iboot +from ironic.conf import ilo CONF = cfg.CONF @@ -32,3 +33,4 @@ console.register_opts(CONF) database.register_opts(CONF) dhcp.register_opts(CONF) iboot.register_opts(CONF) +ilo.register_opts(CONF) diff --git a/ironic/conf/ilo.py b/ironic/conf/ilo.py new file mode 100644 index 
000000000..2276e94ed --- /dev/null +++ b/ironic/conf/ilo.py @@ -0,0 +1,81 @@ +# Copyright 2016 Intel Corporation +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.IntOpt('client_timeout', + default=60, + help=_('Timeout (in seconds) for iLO operations')), + cfg.PortOpt('client_port', + default=443, + help=_('Port to be used for iLO operations')), + cfg.StrOpt('swift_ilo_container', + default='ironic_ilo_container', + help=_('The Swift iLO container to store data.')), + cfg.IntOpt('swift_object_expiry_timeout', + default=900, + help=_('Amount of time in seconds for Swift objects to ' + 'auto-expire.')), + cfg.BoolOpt('use_web_server_for_images', + default=False, + help=_('Set this to True to use http web server to host ' + 'floppy images and generated boot ISO. This ' + 'requires http_root and http_url to be configured ' + 'in the [deploy] section of the config file. If this ' + 'is set to False, then Ironic will use Swift ' + 'to host the floppy images and generated ' + 'boot_iso.')), + cfg.IntOpt('clean_priority_erase_devices', + help=_('Priority for erase devices clean step. If unset, ' + 'it defaults to 10. 
If set to 0, the step will be ' + 'disabled and will not run during cleaning.')), + cfg.IntOpt('clean_priority_reset_ilo', + default=0, + help=_('Priority for reset_ilo clean step.')), + cfg.IntOpt('clean_priority_reset_bios_to_default', + default=10, + help=_('Priority for reset_bios_to_default clean step.')), + cfg.IntOpt('clean_priority_reset_secure_boot_keys_to_default', + default=20, + help=_('Priority for reset_secure_boot_keys clean step. This ' + 'step will reset the secure boot keys to manufacturing ' + 'defaults.')), + cfg.IntOpt('clean_priority_clear_secure_boot_keys', + default=0, + help=_('Priority for clear_secure_boot_keys clean step. This ' + 'step is not enabled by default. It can be enabled to ' + 'clear all secure boot keys enrolled with iLO.')), + cfg.IntOpt('clean_priority_reset_ilo_credential', + default=30, + help=_('Priority for reset_ilo_credential clean step. This ' + 'step requires "ilo_change_password" parameter to be ' + 'updated in nodes\'s driver_info with the new ' + 'password.')), + cfg.IntOpt('power_retry', + default=6, + help=_('Number of times a power operation needs to be ' + 'retried')), + cfg.IntOpt('power_wait', + default=2, + help=_('Amount of time in seconds to wait in between power ' + 'operations')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='ilo') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index c91de6fef..a3113c7ce 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -32,10 +32,6 @@ import ironic.drivers.modules.agent_client import ironic.drivers.modules.amt.common import ironic.drivers.modules.amt.power import ironic.drivers.modules.deploy_utils -import ironic.drivers.modules.ilo.common -import ironic.drivers.modules.ilo.deploy -import ironic.drivers.modules.ilo.management -import ironic.drivers.modules.ilo.power import ironic.drivers.modules.image_cache import ironic.drivers.modules.inspector import ironic.drivers.modules.ipminative @@ -84,11 +80,7 @@ _opts = [ 
ironic.common.glance_service.v2.image_service.glance_opts, ironic.common.image_service.glance_opts)), ('iboot', ironic.conf.iboot.opts), - ('ilo', itertools.chain( - ironic.drivers.modules.ilo.common.opts, - ironic.drivers.modules.ilo.deploy.clean_opts, - ironic.drivers.modules.ilo.management.clean_step_opts, - ironic.drivers.modules.ilo.power.opts)), + ('ilo', ironic.conf.ilo.opts), ('inspector', ironic.drivers.modules.inspector.inspector_opts), ('ipmi', ironic.drivers.modules.ipminative.opts), ('irmc', itertools.chain( diff --git a/ironic/drivers/modules/ilo/common.py b/ironic/drivers/modules/ilo/common.py index ee8a57897..f48407a70 100644 --- a/ironic/drivers/modules/ilo/common.py +++ b/ironic/drivers/modules/ilo/common.py @@ -21,7 +21,6 @@ import shutil import tempfile from ironic_lib import utils as ironic_utils -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils import six @@ -39,6 +38,7 @@ from ironic.common import images from ironic.common import swift from ironic.common import utils from ironic.conductor import utils as manager_utils +from ironic.conf import CONF from ironic.drivers.modules import deploy_utils ilo_client = importutils.try_import('proliantutils.ilo.client') @@ -48,34 +48,6 @@ STANDARD_LICENSE = 1 ESSENTIALS_LICENSE = 2 ADVANCED_LICENSE = 3 -opts = [ - cfg.IntOpt('client_timeout', - default=60, - help=_('Timeout (in seconds) for iLO operations')), - cfg.PortOpt('client_port', - default=443, - help=_('Port to be used for iLO operations')), - cfg.StrOpt('swift_ilo_container', - default='ironic_ilo_container', - help=_('The Swift iLO container to store data.')), - cfg.IntOpt('swift_object_expiry_timeout', - default=900, - help=_('Amount of time in seconds for Swift objects to ' - 'auto-expire.')), - cfg.BoolOpt('use_web_server_for_images', - default=False, - help=_('Set this to True to use http web server to host ' - 'floppy images and generated boot ISO. 
This ' - 'requires http_root and http_url to be configured ' - 'in the [deploy] section of the config file. If this ' - 'is set to False, then Ironic will use Swift ' - 'to host the floppy images and generated ' - 'boot_iso.')), -] - -CONF = cfg.CONF -CONF.register_opts(opts, group='ilo') - LOG = logging.getLogger(__name__) REQUIRED_PROPERTIES = { diff --git a/ironic/drivers/modules/ilo/deploy.py b/ironic/drivers/modules/ilo/deploy.py index 270f0aac9..a285ed4e1 100644 --- a/ironic/drivers/modules/ilo/deploy.py +++ b/ironic/drivers/modules/ilo/deploy.py @@ -15,7 +15,6 @@ iLO Deploy Driver(s) and supporting methods. """ -from oslo_config import cfg from oslo_log import log as logging from ironic.common import boot_devices @@ -27,6 +26,7 @@ from ironic.common import image_service from ironic.common import states from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils +from ironic.conf import CONF from ironic.drivers.modules import agent from ironic.drivers.modules import deploy_utils from ironic.drivers.modules.ilo import boot as ilo_boot @@ -36,20 +36,8 @@ from ironic.drivers.modules import pxe LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -clean_opts = [ - cfg.IntOpt('clean_priority_erase_devices', - help=_('Priority for erase devices clean step. If unset, ' - 'it defaults to 10. 
If set to 0, the step will be ' - 'disabled and will not run during cleaning.')) -] - CONF.import_opt('pxe_append_params', 'ironic.drivers.modules.iscsi_deploy', group='pxe') -CONF.import_opt('swift_ilo_container', 'ironic.drivers.modules.ilo.common', - group='ilo') -CONF.register_opts(clean_opts, group='ilo') def _prepare_agent_vmedia_boot(task): diff --git a/ironic/drivers/modules/ilo/management.py b/ironic/drivers/modules/ilo/management.py index 8e5abdc1a..cf5362df7 100644 --- a/ironic/drivers/modules/ilo/management.py +++ b/ironic/drivers/modules/ilo/management.py @@ -15,7 +15,6 @@ iLO Management Interface """ -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils @@ -25,6 +24,7 @@ from ironic.common import boot_devices from ironic.common import exception from ironic.common.i18n import _, _LE, _LI, _LW from ironic.conductor import task_manager +from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules.ilo import common as ilo_common from ironic.drivers.modules.ilo import firmware_processor @@ -45,34 +45,6 @@ BOOT_DEVICE_ILO_TO_GENERIC = { MANAGEMENT_PROPERTIES = ilo_common.REQUIRED_PROPERTIES.copy() MANAGEMENT_PROPERTIES.update(ilo_common.CLEAN_PROPERTIES) -clean_step_opts = [ - cfg.IntOpt('clean_priority_reset_ilo', - default=0, - help=_('Priority for reset_ilo clean step.')), - cfg.IntOpt('clean_priority_reset_bios_to_default', - default=10, - help=_('Priority for reset_bios_to_default clean step.')), - cfg.IntOpt('clean_priority_reset_secure_boot_keys_to_default', - default=20, - help=_('Priority for reset_secure_boot_keys clean step. This ' - 'step will reset the secure boot keys to manufacturing ' - 'defaults.')), - cfg.IntOpt('clean_priority_clear_secure_boot_keys', - default=0, - help=_('Priority for clear_secure_boot_keys clean step. This ' - 'step is not enabled by default. 
It can be enabled to ' - 'clear all secure boot keys enrolled with iLO.')), - cfg.IntOpt('clean_priority_reset_ilo_credential', - default=30, - help=_('Priority for reset_ilo_credential clean step. This ' - 'step requires "ilo_change_password" parameter to be ' - 'updated in nodes\'s driver_info with the new ' - 'password.')), -] - -CONF = cfg.CONF -CONF.register_opts(clean_step_opts, group='ilo') - def _execute_ilo_clean_step(node, step, *args, **kwargs): """Executes a particular clean step. diff --git a/ironic/drivers/modules/ilo/power.py b/ironic/drivers/modules/ilo/power.py index 6c8d044d0..53cd4082c 100644 --- a/ironic/drivers/modules/ilo/power.py +++ b/ironic/drivers/modules/ilo/power.py @@ -16,7 +16,6 @@ iLO Power Driver """ -from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import importutils @@ -28,26 +27,12 @@ from ironic.common.i18n import _LE from ironic.common import states from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils +from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules.ilo import common as ilo_common ilo_error = importutils.try_import('proliantutils.exception') - -opts = [ - cfg.IntOpt('power_retry', - default=6, - help=_('Number of times a power operation needs to be ' - 'retried')), - cfg.IntOpt('power_wait', - default=2, - help=_('Amount of time in seconds to wait in between power ' - 'operations')), -] - -CONF = cfg.CONF -CONF.register_opts(opts, group='ilo') - LOG = logging.getLogger(__name__) From fffd74176940003e6d815138ea2c33a655e7118b Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Tue, 5 Apr 2016 15:37:42 -0700 Subject: [PATCH 022/166] Centralize config options - [inspector] Nova style refactor of config options in Ironic. 
Change-Id: I34bc2e5fb54661389d62fa51dc05f72022f482a6 Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 ++ ironic/conf/inspector.py | 33 +++++++++++++++++++++++++++++ ironic/conf/opts.py | 3 +-- ironic/drivers/modules/inspector.py | 17 +-------------- 4 files changed, 37 insertions(+), 18 deletions(-) create mode 100644 ironic/conf/inspector.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 80109c9a5..b2bcda686 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -23,6 +23,7 @@ from ironic.conf import database from ironic.conf import dhcp from ironic.conf import iboot from ironic.conf import ilo +from ironic.conf import inspector CONF = cfg.CONF @@ -34,3 +35,4 @@ database.register_opts(CONF) dhcp.register_opts(CONF) iboot.register_opts(CONF) ilo.register_opts(CONF) +inspector.register_opts(CONF) diff --git a/ironic/conf/inspector.py b/ironic/conf/inspector.py new file mode 100644 index 000000000..05eeb7533 --- /dev/null +++ b/ironic/conf/inspector.py @@ -0,0 +1,33 @@ +# Copyright 2016 Intel Corporation + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.BoolOpt('enabled', default=False, + help=_('whether to enable inspection using ironic-inspector')), + cfg.StrOpt('service_url', + help=_('ironic-inspector HTTP endpoint. 
If this is not set, ' + 'the ironic-inspector client default ' + '(http://127.0.0.1:5050) will be used.')), + cfg.IntOpt('status_check_period', default=60, + help=_('period (in seconds) to check status of nodes ' + 'on inspection')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='inspector') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index a3113c7ce..c681e134f 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -33,7 +33,6 @@ import ironic.drivers.modules.amt.common import ironic.drivers.modules.amt.power import ironic.drivers.modules.deploy_utils import ironic.drivers.modules.image_cache -import ironic.drivers.modules.inspector import ironic.drivers.modules.ipminative import ironic.drivers.modules.irmc.boot import ironic.drivers.modules.irmc.common @@ -81,7 +80,7 @@ _opts = [ ironic.common.image_service.glance_opts)), ('iboot', ironic.conf.iboot.opts), ('ilo', ironic.conf.ilo.opts), - ('inspector', ironic.drivers.modules.inspector.inspector_opts), + ('inspector', ironic.conf.inspector.opts), ('ipmi', ironic.drivers.modules.ipminative.opts), ('irmc', itertools.chain( ironic.drivers.modules.irmc.boot.opts, diff --git a/ironic/drivers/modules/inspector.py b/ironic/drivers/modules/inspector.py index 7aaa74530..907ad2196 100644 --- a/ironic/drivers/modules/inspector.py +++ b/ironic/drivers/modules/inspector.py @@ -17,7 +17,6 @@ Modules required to work with ironic_inspector: import eventlet from futurist import periodics -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils @@ -28,26 +27,12 @@ from ironic.common.i18n import _LI from ironic.common import keystone from ironic.common import states from ironic.conductor import task_manager +from ironic.conf import CONF from ironic.drivers import base LOG = logging.getLogger(__name__) - -inspector_opts = [ - cfg.BoolOpt('enabled', default=False, - help=_('whether to enable inspection using ironic-inspector')), - cfg.StrOpt('service_url', 
- help=_('ironic-inspector HTTP endpoint. If this is not set, ' - 'the ironic-inspector client default ' - '(http://127.0.0.1:5050) will be used.')), - cfg.IntOpt('status_check_period', default=60, - help=_('period (in seconds) to check status of nodes ' - 'on inspection')), -] - -CONF = cfg.CONF -CONF.register_opts(inspector_opts, group='inspector') CONF.import_opt('auth_strategy', 'ironic.api.app') client = importutils.try_import('ironic_inspector_client') From 700ad0567b0f93bbb3c527093156e7c4f09f9402 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Tue, 5 Apr 2016 16:12:02 -0700 Subject: [PATCH 023/166] Centralize config options - [ipmi] Nova style refactor of config options in Ironic. Change-Id: I3ca346cca5161baeaefc1f976ae5ec38c0122eab Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 ++ ironic/conf/ipmi.py | 42 ++++++++++++++++++++++++++++ ironic/conf/opts.py | 3 +- ironic/drivers/modules/ipminative.py | 22 +-------------- 4 files changed, 46 insertions(+), 23 deletions(-) create mode 100644 ironic/conf/ipmi.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index b2bcda686..fa62be1d9 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -24,6 +24,7 @@ from ironic.conf import dhcp from ironic.conf import iboot from ironic.conf import ilo from ironic.conf import inspector +from ironic.conf import ipmi CONF = cfg.CONF @@ -36,3 +37,4 @@ dhcp.register_opts(CONF) iboot.register_opts(CONF) ilo.register_opts(CONF) inspector.register_opts(CONF) +ipmi.register_opts(CONF) diff --git a/ironic/conf/ipmi.py b/ironic/conf/ipmi.py new file mode 100644 index 000000000..4dad3c2d6 --- /dev/null +++ b/ironic/conf/ipmi.py @@ -0,0 +1,42 @@ +# Copyright 2016 Intel Corporation +# +# Copyright 2013 International Business Machines Corporation +# All Rights Reserved. +# Copyright 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.IntOpt('retry_timeout', + default=60, + help=_('Maximum time in seconds to retry IPMI operations. ' + 'There is a tradeoff when setting this value. Setting ' + 'this too low may cause older BMCs to crash and require ' + 'a hard reset. However, setting too high can cause the ' + 'sync power state periodic task to hang when there are ' + 'slow or unresponsive BMCs.')), + cfg.IntOpt('min_command_interval', + default=5, + help=_('Minimum time, in seconds, between IPMI operations ' + 'sent to a server. There is a risk with some hardware ' + 'that setting this too low may cause the BMC to crash. 
' + 'Recommended setting is 5 seconds.')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='ipmi') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index c681e134f..ab1c96129 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -33,7 +33,6 @@ import ironic.drivers.modules.amt.common import ironic.drivers.modules.amt.power import ironic.drivers.modules.deploy_utils import ironic.drivers.modules.image_cache -import ironic.drivers.modules.ipminative import ironic.drivers.modules.irmc.boot import ironic.drivers.modules.irmc.common import ironic.drivers.modules.iscsi_deploy @@ -81,7 +80,7 @@ _opts = [ ('iboot', ironic.conf.iboot.opts), ('ilo', ironic.conf.ilo.opts), ('inspector', ironic.conf.inspector.opts), - ('ipmi', ironic.drivers.modules.ipminative.opts), + ('ipmi', ironic.conf.ipmi.opts), ('irmc', itertools.chain( ironic.drivers.modules.irmc.boot.opts, ironic.drivers.modules.irmc.common.opts)), diff --git a/ironic/drivers/modules/ipminative.py b/ironic/drivers/modules/ipminative.py index 920d97369..07a038858 100644 --- a/ironic/drivers/modules/ipminative.py +++ b/ironic/drivers/modules/ipminative.py @@ -22,7 +22,6 @@ Ironic Native IPMI power manager. import os from ironic_lib import utils as ironic_utils -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils @@ -36,6 +35,7 @@ from ironic.common.i18n import _LW from ironic.common import states from ironic.common import utils from ironic.conductor import task_manager +from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules import console_utils from ironic.drivers import utils as driver_utils @@ -45,26 +45,6 @@ if pyghmi: from pyghmi import exceptions as pyghmi_exception from pyghmi.ipmi import command as ipmi_command -opts = [ - cfg.IntOpt('retry_timeout', - default=60, - help=_('Maximum time in seconds to retry IPMI operations. 
' - 'There is a tradeoff when setting this value. Setting ' - 'this too low may cause older BMCs to crash and require ' - 'a hard reset. However, setting too high can cause the ' - 'sync power state periodic task to hang when there are ' - 'slow or unresponsive BMCs.')), - cfg.IntOpt('min_command_interval', - default=5, - help=_('Minimum time, in seconds, between IPMI operations ' - 'sent to a server. There is a risk with some hardware ' - 'that setting this too low may cause the BMC to crash. ' - 'Recommended setting is 5 seconds.')), -] - -CONF = cfg.CONF -CONF.register_opts(opts, group='ipmi') - LOG = logging.getLogger(__name__) REQUIRED_PROPERTIES = {'ipmi_address': _("IP of the node's BMC. Required."), From 038d600b8e8286500dadcbaa07d2f7116433b063 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Wed, 6 Apr 2016 16:33:33 -0700 Subject: [PATCH 024/166] Centralize config options - [irmc] Nova style refactor of config options in Ironic. Change-Id: Ia5d4e49c0a8d2de47d34ef9f0b4dfa740c500efa Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 + ironic/conf/irmc.py | 73 +++++++++++++++++++++++++++ ironic/conf/opts.py | 6 +-- ironic/drivers/modules/irmc/boot.py | 29 +---------- ironic/drivers/modules/irmc/common.py | 37 +------------- 5 files changed, 78 insertions(+), 69 deletions(-) create mode 100644 ironic/conf/irmc.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index fa62be1d9..6345b2493 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -25,6 +25,7 @@ from ironic.conf import iboot from ironic.conf import ilo from ironic.conf import inspector from ironic.conf import ipmi +from ironic.conf import irmc CONF = cfg.CONF @@ -38,3 +39,4 @@ iboot.register_opts(CONF) ilo.register_opts(CONF) inspector.register_opts(CONF) ipmi.register_opts(CONF) +irmc.register_opts(CONF) diff --git a/ironic/conf/irmc.py b/ironic/conf/irmc.py new file mode 100644 index 000000000..59f8c9144 --- /dev/null +++ b/ironic/conf/irmc.py @@ -0,0 +1,73 
@@ +# Copyright 2016 Intel Corporation +# Copyright 2015 FUJITSU LIMITED +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.StrOpt('remote_image_share_root', + default='/remote_image_share_root', + help=_('Ironic conductor node\'s "NFS" or "CIFS" root path')), + cfg.StrOpt('remote_image_server', + help=_('IP of remote image server')), + cfg.StrOpt('remote_image_share_type', + default='CIFS', + choices=['CIFS', 'NFS'], + ignore_case=True, + help=_('Share type of virtual media')), + cfg.StrOpt('remote_image_share_name', + default='share', + help=_('share name of remote_image_server')), + cfg.StrOpt('remote_image_user_name', + help=_('User name of remote_image_server')), + cfg.StrOpt('remote_image_user_password', secret=True, + help=_('Password of remote_image_user_name')), + cfg.StrOpt('remote_image_user_domain', + default='', + help=_('Domain name of remote_image_user_name')), + cfg.PortOpt('port', + default=443, + choices=[443, 80], + help=_('Port to be used for iRMC operations')), + cfg.StrOpt('auth_method', + default='basic', + choices=['basic', 'digest'], + help=_('Authentication method to be used for iRMC ' + 'operations')), + cfg.IntOpt('client_timeout', + default=60, + help=_('Timeout (in seconds) for iRMC operations')), + cfg.StrOpt('sensor_method', + default='ipmitool', + choices=['ipmitool', 'scci'], + help=_('Sensor data retrieval method.')), + cfg.StrOpt('snmp_version', + 
default='v2c', + choices=['v1', 'v2c', 'v3'], + help=_('SNMP protocol version')), + cfg.PortOpt('snmp_port', + default=161, + help=_('SNMP port')), + cfg.StrOpt('snmp_community', + default='public', + help=_('SNMP community. Required for versions "v1" and "v2c"')), + cfg.StrOpt('snmp_security', + help=_('SNMP security name. Required for version "v3"')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='irmc') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index ab1c96129..068064a56 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -33,8 +33,6 @@ import ironic.drivers.modules.amt.common import ironic.drivers.modules.amt.power import ironic.drivers.modules.deploy_utils import ironic.drivers.modules.image_cache -import ironic.drivers.modules.irmc.boot -import ironic.drivers.modules.irmc.common import ironic.drivers.modules.iscsi_deploy import ironic.drivers.modules.oneview.common import ironic.drivers.modules.pxe @@ -81,9 +79,7 @@ _opts = [ ('ilo', ironic.conf.ilo.opts), ('inspector', ironic.conf.inspector.opts), ('ipmi', ironic.conf.ipmi.opts), - ('irmc', itertools.chain( - ironic.drivers.modules.irmc.boot.opts, - ironic.drivers.modules.irmc.common.opts)), + ('irmc', ironic.conf.irmc.opts), ('iscsi', ironic.drivers.modules.iscsi_deploy.iscsi_opts), ('keystone', ironic.common.keystone.keystone_opts), ('neutron', ironic.common.neutron.neutron_opts), diff --git a/ironic/drivers/modules/irmc/boot.py b/ironic/drivers/modules/irmc/boot.py index 0c39d7bb6..c4bc59473 100644 --- a/ironic/drivers/modules/irmc/boot.py +++ b/ironic/drivers/modules/irmc/boot.py @@ -21,7 +21,6 @@ import shutil import tempfile from ironic_lib import utils as ironic_utils -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils @@ -34,6 +33,7 @@ from ironic.common.i18n import _LI from ironic.common import images from ironic.common import states from ironic.conductor import utils as manager_utils +from ironic.conf import 
CONF from ironic.drivers import base from ironic.drivers.modules import deploy_utils from ironic.drivers.modules.irmc import common as irmc_common @@ -41,39 +41,12 @@ from ironic.drivers.modules.irmc import common as irmc_common scci = importutils.try_import('scciclient.irmc.scci') -CONF = cfg.CONF - try: if CONF.debug: scci.DEBUG = True except Exception: pass -opts = [ - cfg.StrOpt('remote_image_share_root', - default='/remote_image_share_root', - help=_('Ironic conductor node\'s "NFS" or "CIFS" root path')), - cfg.StrOpt('remote_image_server', - help=_('IP of remote image server')), - cfg.StrOpt('remote_image_share_type', - default='CIFS', - choices=['CIFS', 'NFS'], - ignore_case=True, - help=_('Share type of virtual media')), - cfg.StrOpt('remote_image_share_name', - default='share', - help=_('share name of remote_image_server')), - cfg.StrOpt('remote_image_user_name', - help=_('User name of remote_image_server')), - cfg.StrOpt('remote_image_user_password', secret=True, - help=_('Password of remote_image_user_name')), - cfg.StrOpt('remote_image_user_domain', - default='', - help=_('Domain name of remote_image_user_name')), -] - -CONF.register_opts(opts, group='irmc') - LOG = logging.getLogger(__name__) REQUIRED_PROPERTIES = { diff --git a/ironic/drivers/modules/irmc/common.py b/ironic/drivers/modules/irmc/common.py index 252a6a24a..ba735299c 100644 --- a/ironic/drivers/modules/irmc/common.py +++ b/ironic/drivers/modules/irmc/common.py @@ -17,49 +17,14 @@ Common functionalities shared between different iRMC modules. 
""" import six -from oslo_config import cfg from oslo_utils import importutils from ironic.common import exception from ironic.common.i18n import _ +from ironic.conf import CONF scci = importutils.try_import('scciclient.irmc.scci') -opts = [ - cfg.PortOpt('port', - default=443, - choices=[443, 80], - help=_('Port to be used for iRMC operations')), - cfg.StrOpt('auth_method', - default='basic', - choices=['basic', 'digest'], - help=_('Authentication method to be used for iRMC ' - 'operations')), - cfg.IntOpt('client_timeout', - default=60, - help=_('Timeout (in seconds) for iRMC operations')), - cfg.StrOpt('sensor_method', - default='ipmitool', - choices=['ipmitool', 'scci'], - help=_('Sensor data retrieval method.')), - cfg.StrOpt('snmp_version', - default='v2c', - choices=['v1', 'v2c', 'v3'], - help=_('SNMP protocol version')), - cfg.PortOpt('snmp_port', - default=161, - help=_('SNMP port')), - cfg.StrOpt('snmp_community', - default='public', - help=_('SNMP community. Required for versions "v1" and "v2c"')), - cfg.StrOpt('snmp_security', - help=_('SNMP security name. Required for version "v3"')), -] - -CONF = cfg.CONF -CONF.register_opts(opts, group='irmc') - - REQUIRED_PROPERTIES = { 'irmc_address': _("IP address or hostname of the iRMC. Required."), 'irmc_username': _("Username for the iRMC with administrator privileges. " From d6a9f08051531b18aa9f89afe9e9d9dcbdc94139 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Thu, 7 Apr 2016 15:00:25 -0700 Subject: [PATCH 025/166] Centralize config options - [keystone] Nova style refactor of config options in Ironic. 
Change-Id: Ic95322f3a0c7bc36ed7f8def0c1aba7f4e813882 Partial-Bug: #1561100 --- ironic/common/keystone.py | 11 +---------- ironic/conf/__init__.py | 2 ++ ironic/conf/keystone.py | 27 +++++++++++++++++++++++++++ ironic/conf/opts.py | 3 +-- 4 files changed, 31 insertions(+), 12 deletions(-) create mode 100644 ironic/conf/keystone.py diff --git a/ironic/common/keystone.py b/ironic/common/keystone.py index 46a5e64db..8f62123b3 100644 --- a/ironic/common/keystone.py +++ b/ironic/common/keystone.py @@ -14,21 +14,12 @@ from keystoneclient import exceptions as ksexception from oslo_concurrency import lockutils -from oslo_config import cfg from six.moves.urllib import parse from ironic.common import exception from ironic.common.i18n import _ +from ironic.conf import CONF -CONF = cfg.CONF - -keystone_opts = [ - cfg.StrOpt('region_name', - help=_('The region used for getting endpoints of OpenStack' - ' services.')), -] - -CONF.register_opts(keystone_opts, group='keystone') CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token') _KS_CLIENT = None diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 6345b2493..926d32650 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -26,6 +26,7 @@ from ironic.conf import ilo from ironic.conf import inspector from ironic.conf import ipmi from ironic.conf import irmc +from ironic.conf import keystone CONF = cfg.CONF @@ -40,3 +41,4 @@ ilo.register_opts(CONF) inspector.register_opts(CONF) ipmi.register_opts(CONF) irmc.register_opts(CONF) +keystone.register_opts(CONF) diff --git a/ironic/conf/keystone.py b/ironic/conf/keystone.py new file mode 100644 index 000000000..981e648a7 --- /dev/null +++ b/ironic/conf/keystone.py @@ -0,0 +1,27 @@ +# Copyright 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.StrOpt('region_name', + help=_('The region used for getting endpoints of OpenStack' + ' services.')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='keystone') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 068064a56..ee2276df7 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -20,7 +20,6 @@ import ironic.common.glance_service.v2.image_service import ironic.common.hash_ring import ironic.common.image_service import ironic.common.images -import ironic.common.keystone import ironic.common.neutron import ironic.common.paths import ironic.common.service @@ -81,7 +80,7 @@ _opts = [ ('ipmi', ironic.conf.ipmi.opts), ('irmc', ironic.conf.irmc.opts), ('iscsi', ironic.drivers.modules.iscsi_deploy.iscsi_opts), - ('keystone', ironic.common.keystone.keystone_opts), + ('keystone', ironic.conf.keystone.opts), ('neutron', ironic.common.neutron.neutron_opts), ('oneview', ironic.drivers.modules.oneview.common.opts), ('pxe', itertools.chain( From de43a3308435f1d28e06f4cbf1f52ef3eed02ad0 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Thu, 7 Apr 2016 16:02:34 -0700 Subject: [PATCH 026/166] Centralize config options - [oneview] Nova style refactor of config options in Ironic. 
Change-Id: I47c6dea0a255b335c5f8001acfdaf152d55bd994 Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 ++ ironic/conf/oneview.py | 41 ++++++++++++++++++++++++ ironic/conf/opts.py | 3 +- ironic/drivers/modules/oneview/common.py | 23 +------------ 4 files changed, 45 insertions(+), 24 deletions(-) create mode 100644 ironic/conf/oneview.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 926d32650..9b5fa53d7 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -27,6 +27,7 @@ from ironic.conf import inspector from ironic.conf import ipmi from ironic.conf import irmc from ironic.conf import keystone +from ironic.conf import oneview CONF = cfg.CONF @@ -42,3 +43,4 @@ inspector.register_opts(CONF) ipmi.register_opts(CONF) irmc.register_opts(CONF) keystone.register_opts(CONF) +oneview.register_opts(CONF) diff --git a/ironic/conf/oneview.py b/ironic/conf/oneview.py new file mode 100644 index 000000000..47f5bd786 --- /dev/null +++ b/ironic/conf/oneview.py @@ -0,0 +1,41 @@ +# Copyright 2016 Intel Corporation +# Copyright 2015 Hewlett Packard Development Company, LP +# Copyright 2015 Universidade Federal de Campina Grande +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.StrOpt('manager_url', + help=_('URL where OneView is available')), + cfg.StrOpt('username', + help=_('OneView username to be used')), + cfg.StrOpt('password', + secret=True, + help=_('OneView password to be used')), + cfg.BoolOpt('allow_insecure_connections', + default=False, + help=_('Option to allow insecure connection with OneView')), + cfg.StrOpt('tls_cacert_file', + help=_('Path to CA certificate')), + cfg.IntOpt('max_polling_attempts', + default=12, + help=_('Max connection retries to check changes on OneView')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='oneview') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index ee2276df7..691aa8d2f 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -33,7 +33,6 @@ import ironic.drivers.modules.amt.power import ironic.drivers.modules.deploy_utils import ironic.drivers.modules.image_cache import ironic.drivers.modules.iscsi_deploy -import ironic.drivers.modules.oneview.common import ironic.drivers.modules.pxe import ironic.drivers.modules.seamicro import ironic.drivers.modules.snmp @@ -82,7 +81,7 @@ _opts = [ ('iscsi', ironic.drivers.modules.iscsi_deploy.iscsi_opts), ('keystone', ironic.conf.keystone.opts), ('neutron', ironic.common.neutron.neutron_opts), - ('oneview', ironic.drivers.modules.oneview.common.opts), + ('oneview', ironic.conf.oneview.opts), ('pxe', itertools.chain( ironic.drivers.modules.iscsi_deploy.pxe_opts, ironic.drivers.modules.pxe.pxe_opts)), diff --git a/ironic/drivers/modules/oneview/common.py b/ironic/drivers/modules/oneview/common.py index 05af0bdd2..275c65fcd 100644 --- a/ironic/drivers/modules/oneview/common.py +++ b/ironic/drivers/modules/oneview/common.py @@ -14,7 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils @@ -23,6 +22,7 @@ from ironic.common.i18n import _ from ironic.common.i18n import _LE from ironic.common.i18n import _LW from ironic.common import states +from ironic.conf import CONF from ironic.drivers import utils @@ -32,27 +32,6 @@ client = importutils.try_import('oneview_client.client') oneview_states = importutils.try_import('oneview_client.states') oneview_exceptions = importutils.try_import('oneview_client.exceptions') -opts = [ - cfg.StrOpt('manager_url', - help=_('URL where OneView is available')), - cfg.StrOpt('username', - help=_('OneView username to be used')), - cfg.StrOpt('password', - secret=True, - help=_('OneView password to be used')), - cfg.BoolOpt('allow_insecure_connections', - default=False, - help=_('Option to allow insecure connection with OneView')), - cfg.StrOpt('tls_cacert_file', - help=_('Path to CA certificate')), - cfg.IntOpt('max_polling_attempts', - default=12, - help=_('Max connection retries to check changes on OneView')), -] - -CONF = cfg.CONF -CONF.register_opts(opts, group='oneview') - REQUIRED_ON_DRIVER_INFO = { 'server_hardware_uri': _("Server Hardware URI. Required in driver_info."), } From 31e80ab9bd474c7b3333efde02fd2838919fa369 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Thu, 7 Apr 2016 16:33:43 -0700 Subject: [PATCH 027/166] Centralize config options - [seamicro] Nova style refactor of config options in Ironic. 
Change-Id: Ia8e38f448253b58ed63edd1c6aa64da324578cda Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 ++ ironic/conf/opts.py | 3 +-- ironic/conf/seamicro.py | 34 ++++++++++++++++++++++++++++++ ironic/drivers/modules/seamicro.py | 17 +-------------- 4 files changed, 38 insertions(+), 18 deletions(-) create mode 100644 ironic/conf/seamicro.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 9b5fa53d7..7e5703b91 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -28,6 +28,7 @@ from ironic.conf import ipmi from ironic.conf import irmc from ironic.conf import keystone from ironic.conf import oneview +from ironic.conf import seamicro CONF = cfg.CONF @@ -44,3 +45,4 @@ ipmi.register_opts(CONF) irmc.register_opts(CONF) keystone.register_opts(CONF) oneview.register_opts(CONF) +seamicro.register_opts(CONF) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 691aa8d2f..07853b871 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -34,7 +34,6 @@ import ironic.drivers.modules.deploy_utils import ironic.drivers.modules.image_cache import ironic.drivers.modules.iscsi_deploy import ironic.drivers.modules.pxe -import ironic.drivers.modules.seamicro import ironic.drivers.modules.snmp import ironic.drivers.modules.ssh import ironic.drivers.modules.virtualbox @@ -85,7 +84,7 @@ _opts = [ ('pxe', itertools.chain( ironic.drivers.modules.iscsi_deploy.pxe_opts, ironic.drivers.modules.pxe.pxe_opts)), - ('seamicro', ironic.drivers.modules.seamicro.opts), + ('seamicro', ironic.conf.seamicro.opts), ('snmp', ironic.drivers.modules.snmp.opts), ('ssh', ironic.drivers.modules.ssh.libvirt_opts), ('swift', ironic.common.swift.swift_opts), diff --git a/ironic/conf/seamicro.py b/ironic/conf/seamicro.py new file mode 100644 index 000000000..821112245 --- /dev/null +++ b/ironic/conf/seamicro.py @@ -0,0 +1,34 @@ +# Copyright 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this 
file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.IntOpt('max_retry', + default=3, + help=_('Maximum retries for SeaMicro operations')), + cfg.IntOpt('action_timeout', + default=10, + help=_('Seconds to wait for power action to be completed')) +] + +opt_group = cfg.OptGroup(name='seamicro', + title='Options for the seamicro power driver') + + +def register_opts(conf): + conf.register_group(opt_group) + conf.register_opts(opts, group=opt_group) diff --git a/ironic/drivers/modules/seamicro.py b/ironic/drivers/modules/seamicro.py index 82e100514..02b05378e 100644 --- a/ironic/drivers/modules/seamicro.py +++ b/ironic/drivers/modules/seamicro.py @@ -21,7 +21,6 @@ Provides vendor passthru methods for SeaMicro specific functionality. 
import os import re -from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import importutils @@ -36,6 +35,7 @@ from ironic.common.i18n import _LW from ironic.common import states from ironic.common import utils from ironic.conductor import task_manager +from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules import console_utils @@ -44,21 +44,6 @@ if seamicroclient: from seamicroclient import client as seamicro_client from seamicroclient import exceptions as seamicro_client_exception -opts = [ - cfg.IntOpt('max_retry', - default=3, - help=_('Maximum retries for SeaMicro operations')), - cfg.IntOpt('action_timeout', - default=10, - help=_('Seconds to wait for power action to be completed')) -] - -CONF = cfg.CONF -opt_group = cfg.OptGroup(name='seamicro', - title='Options for the seamicro power driver') -CONF.register_group(opt_group) -CONF.register_opts(opts, opt_group) - LOG = logging.getLogger(__name__) _BOOT_DEVICES_MAP = { From 86666e7bc0b4030fdce84d6fed1762ad164de46f Mon Sep 17 00:00:00 2001 From: PollyZ Date: Thu, 23 Jun 2016 11:46:54 -0500 Subject: [PATCH 028/166] Improve tests for driver's parse_driver_info() There are some unit tests for drivers that test internal.parse_driver_info methods, but they only check if the information is returned using assertIsNotNone instead of the actual value of the information. Update those tests to check the expected value using assertEqual. 
Closes-Bug #1594410 Change-Id: I67148d72e961d1b674de8dfe1b7a5078416a7a89 --- ironic/tests/unit/db/utils.py | 2 +- .../unit/drivers/modules/amt/test_common.py | 11 +++--- .../unit/drivers/modules/cimc/test_common.py | 10 +++-- .../unit/drivers/modules/drac/test_common.py | 13 +++---- .../unit/drivers/modules/ilo/test_common.py | 14 ++++--- .../unit/drivers/modules/irmc/test_common.py | 20 +++++----- .../tests/unit/drivers/modules/test_iboot.py | 10 ++--- .../unit/drivers/modules/test_ipminative.py | 11 +++--- .../unit/drivers/modules/test_seamicro.py | 11 +++--- ironic/tests/unit/drivers/modules/test_ssh.py | 39 ++++++++++--------- .../unit/drivers/modules/ucs/test_helper.py | 9 +++-- 11 files changed, 80 insertions(+), 70 deletions(-) diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py index 0513def1c..5d57bf2c0 100644 --- a/ironic/tests/unit/db/utils.py +++ b/ironic/tests/unit/db/utils.py @@ -102,7 +102,7 @@ def get_test_ilo_info(): def get_test_drac_info(): return { "drac_host": "1.2.3.4", - "drac_port": "443", + "drac_port": 443, "drac_path": "/wsman", "drac_protocol": "https", "drac_username": "admin", diff --git a/ironic/tests/unit/drivers/modules/amt/test_common.py b/ironic/tests/unit/drivers/modules/amt/test_common.py index d82a9690a..82e24f8ee 100644 --- a/ironic/tests/unit/drivers/modules/amt/test_common.py +++ b/ironic/tests/unit/drivers/modules/amt/test_common.py @@ -47,11 +47,12 @@ class AMTCommonMethodsTestCase(db_base.DbTestCase): def test_parse_driver_info(self): info = amt_common.parse_driver_info(self.node) - self.assertIsNotNone(info['address']) - self.assertIsNotNone(info['username']) - self.assertIsNotNone(info['password']) - self.assertIsNotNone(info['protocol']) - self.assertIsNotNone(info['uuid']) + self.assertEqual(b'1.2.3.4', info['address']) + self.assertEqual(b'admin', info['username']) + self.assertEqual(b'fake', info['password']) + self.assertEqual(INFO_DICT['amt_protocol'], info['protocol']) + 
self.assertEqual('1be26c0b-03f2-4d2e-ae87-c02d7f33c123', + info['uuid']) def test_parse_driver_info_missing_address(self): del self.node.driver_info['amt_address'] diff --git a/ironic/tests/unit/drivers/modules/cimc/test_common.py b/ironic/tests/unit/drivers/modules/cimc/test_common.py index 6dff4153b..1fadd8354 100644 --- a/ironic/tests/unit/drivers/modules/cimc/test_common.py +++ b/ironic/tests/unit/drivers/modules/cimc/test_common.py @@ -26,6 +26,8 @@ from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils from ironic.tests.unit.objects import utils as obj_utils +INFO_DICT = db_utils.get_test_cimc_info() + imcsdk = importutils.try_import('ImcSdk') CONF = cfg.CONF @@ -39,7 +41,7 @@ class CIMCBaseTestCase(db_base.DbTestCase): self.node = obj_utils.create_test_node( self.context, driver='fake_cimc', - driver_info=db_utils.get_test_cimc_info(), + driver_info=INFO_DICT, instance_uuid=uuidutils.generate_uuid()) CONF.set_override('max_retry', 2, 'cimc') CONF.set_override('action_interval', 0, 'cimc') @@ -50,9 +52,9 @@ class ParseDriverInfoTestCase(CIMCBaseTestCase): def test_parse_driver_info(self): info = cimc_common.parse_driver_info(self.node) - self.assertIsNotNone(info['cimc_address']) - self.assertIsNotNone(info['cimc_username']) - self.assertIsNotNone(info['cimc_password']) + self.assertEqual(INFO_DICT['cimc_address'], info['cimc_address']) + self.assertEqual(INFO_DICT['cimc_username'], info['cimc_username']) + self.assertEqual(INFO_DICT['cimc_password'], info['cimc_password']) def test_parse_driver_info_missing_address(self): del self.node.driver_info['cimc_address'] diff --git a/ironic/tests/unit/drivers/modules/drac/test_common.py b/ironic/tests/unit/drivers/modules/drac/test_common.py index 4bd480a56..480005fee 100644 --- a/ironic/tests/unit/drivers/modules/drac/test_common.py +++ b/ironic/tests/unit/drivers/modules/drac/test_common.py @@ -34,13 +34,12 @@ class DracCommonMethodsTestCase(db_base.DbTestCase): 
driver='fake_drac', driver_info=INFO_DICT) info = drac_common.parse_driver_info(node) - - self.assertIsNotNone(info['drac_host']) - self.assertIsNotNone(info['drac_port']) - self.assertIsNotNone(info['drac_path']) - self.assertIsNotNone(info['drac_protocol']) - self.assertIsNotNone(info['drac_username']) - self.assertIsNotNone(info['drac_password']) + self.assertEqual(INFO_DICT['drac_host'], info['drac_host']) + self.assertEqual(INFO_DICT['drac_port'], info['drac_port']) + self.assertEqual(INFO_DICT['drac_path'], info['drac_path']) + self.assertEqual(INFO_DICT['drac_protocol'], info['drac_protocol']) + self.assertEqual(INFO_DICT['drac_username'], info['drac_username']) + self.assertEqual(INFO_DICT['drac_password'], info['drac_password']) def test_parse_driver_info_missing_host(self): node = obj_utils.create_test_node(self.context, diff --git a/ironic/tests/unit/drivers/modules/ilo/test_common.py b/ironic/tests/unit/drivers/modules/ilo/test_common.py index e42e99f1f..5dc492973 100644 --- a/ironic/tests/unit/drivers/modules/ilo/test_common.py +++ b/ironic/tests/unit/drivers/modules/ilo/test_common.py @@ -40,6 +40,8 @@ from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils from ironic.tests.unit.objects import utils as obj_utils +INFO_DICT = db_utils.get_test_ilo_info() + ilo_client = importutils.try_import('proliantutils.ilo.client') ilo_error = importutils.try_import('proliantutils.exception') @@ -57,16 +59,16 @@ class IloValidateParametersTestCase(db_base.DbTestCase): super(IloValidateParametersTestCase, self).setUp() self.node = obj_utils.create_test_node( self.context, driver='fake_ilo', - driver_info=db_utils.get_test_ilo_info()) + driver_info=INFO_DICT) def test_parse_driver_info(self): info = ilo_common.parse_driver_info(self.node) - self.assertIsNotNone(info['ilo_address']) - self.assertIsNotNone(info['ilo_username']) - self.assertIsNotNone(info['ilo_password']) - self.assertIsNotNone(info['client_timeout']) - 
self.assertIsNotNone(info['client_port']) + self.assertEqual(INFO_DICT['ilo_address'], info['ilo_address']) + self.assertEqual(INFO_DICT['ilo_username'], info['ilo_username']) + self.assertEqual(INFO_DICT['ilo_password'], info['ilo_password']) + self.assertEqual(60, info['client_timeout']) + self.assertEqual(443, info['client_port']) def test_parse_driver_info_missing_address(self): del self.node.driver_info['ilo_address'] diff --git a/ironic/tests/unit/drivers/modules/irmc/test_common.py b/ironic/tests/unit/drivers/modules/irmc/test_common.py index dcb46646f..14f5aa3da 100644 --- a/ironic/tests/unit/drivers/modules/irmc/test_common.py +++ b/ironic/tests/unit/drivers/modules/irmc/test_common.py @@ -43,16 +43,16 @@ class IRMCValidateParametersTestCase(db_base.DbTestCase): def test_parse_driver_info(self): info = irmc_common.parse_driver_info(self.node) - self.assertIsNotNone(info['irmc_address']) - self.assertIsNotNone(info['irmc_username']) - self.assertIsNotNone(info['irmc_password']) - self.assertIsNotNone(info['irmc_client_timeout']) - self.assertIsNotNone(info['irmc_port']) - self.assertIsNotNone(info['irmc_auth_method']) - self.assertIsNotNone(info['irmc_sensor_method']) - self.assertIsNotNone(info['irmc_snmp_version']) - self.assertIsNotNone(info['irmc_snmp_port']) - self.assertIsNotNone(info['irmc_snmp_community']) + self.assertEqual('1.2.3.4', info['irmc_address']) + self.assertEqual('admin0', info['irmc_username']) + self.assertEqual('fake0', info['irmc_password']) + self.assertEqual(60, info['irmc_client_timeout']) + self.assertEqual(80, info['irmc_port']) + self.assertEqual('digest', info['irmc_auth_method']) + self.assertEqual('ipmitool', info['irmc_sensor_method']) + self.assertEqual('v2c', info['irmc_snmp_version']) + self.assertEqual(161, info['irmc_snmp_port']) + self.assertEqual('public', info['irmc_snmp_community']) self.assertFalse(info['irmc_snmp_security']) def test_parse_driver_option_default(self): diff --git 
a/ironic/tests/unit/drivers/modules/test_iboot.py b/ironic/tests/unit/drivers/modules/test_iboot.py index e0141b51a..529dc9ba9 100644 --- a/ironic/tests/unit/drivers/modules/test_iboot.py +++ b/ironic/tests/unit/drivers/modules/test_iboot.py @@ -48,11 +48,11 @@ class IBootPrivateMethodTestCase(db_base.DbTestCase): driver='fake_iboot', driver_info=INFO_DICT) info = iboot._parse_driver_info(node) - self.assertIsNotNone(info['address']) - self.assertIsNotNone(info['username']) - self.assertIsNotNone(info['password']) - self.assertIsNotNone(info['port']) - self.assertIsNotNone(info['relay_id']) + self.assertEqual(INFO_DICT['iboot_address'], info['address']) + self.assertEqual(INFO_DICT['iboot_username'], info['username']) + self.assertEqual(INFO_DICT['iboot_password'], info['password']) + self.assertEqual(9100, info['port']) + self.assertEqual(1, info['relay_id']) def test__parse_driver_info_good_with_explicit_port(self): info = dict(INFO_DICT) diff --git a/ironic/tests/unit/drivers/modules/test_ipminative.py b/ironic/tests/unit/drivers/modules/test_ipminative.py index d70b48c0d..ca0e3e6b1 100644 --- a/ironic/tests/unit/drivers/modules/test_ipminative.py +++ b/ironic/tests/unit/drivers/modules/test_ipminative.py @@ -51,11 +51,12 @@ class IPMINativePrivateMethodTestCase(db_base.DbTestCase): def test__parse_driver_info(self): # make sure we get back the expected things - self.assertIsNotNone(self.info['address']) - self.assertIsNotNone(self.info['username']) - self.assertIsNotNone(self.info['password']) - self.assertIsNotNone(self.info['uuid']) - self.assertIsNotNone(self.info['force_boot_device']) + self.assertEqual('1.2.3.4', self.info['address']) + self.assertEqual('admin', self.info['username']) + self.assertEqual('fake', self.info['password']) + self.assertEqual('1be26c0b-03f2-4d2e-ae87-c02d7f33c123', + self.info['uuid']) + self.assertEqual(False, self.info['force_boot_device']) # make sure error is raised when info, eg. 
username, is missing info = dict(INFO_DICT) diff --git a/ironic/tests/unit/drivers/modules/test_seamicro.py b/ironic/tests/unit/drivers/modules/test_seamicro.py index efd645e21..707f74c29 100644 --- a/ironic/tests/unit/drivers/modules/test_seamicro.py +++ b/ironic/tests/unit/drivers/modules/test_seamicro.py @@ -87,11 +87,12 @@ class SeaMicroValidateParametersTestCase(db_base.DbTestCase): driver='fake_seamicro', driver_info=INFO_DICT) info = seamicro._parse_driver_info(node) - self.assertIsNotNone(info['api_endpoint']) - self.assertIsNotNone(info['username']) - self.assertIsNotNone(info['password']) - self.assertIsNotNone(info['server_id']) - self.assertIsNotNone(info['uuid']) + self.assertEqual('http://1.2.3.4', info['api_endpoint']) + self.assertEqual('admin', info['username']) + self.assertEqual('fake', info['password']) + self.assertEqual('0/0', info['server_id']) + self.assertEqual('1be26c0b-03f2-4d2e-ae87-c02d7f33c123', + info['uuid']) def test__parse_driver_info_missing_api_endpoint(self): # make sure error is raised when info is missing diff --git a/ironic/tests/unit/drivers/modules/test_ssh.py b/ironic/tests/unit/drivers/modules/test_ssh.py index 64337c8e5..49786942a 100644 --- a/ironic/tests/unit/drivers/modules/test_ssh.py +++ b/ironic/tests/unit/drivers/modules/test_ssh.py @@ -50,13 +50,14 @@ class SSHValidateParametersTestCase(db_base.DbTestCase): driver='fake_ssh', driver_info=db_utils.get_test_ssh_info('password')) info = ssh._parse_driver_info(node) - self.assertIsNotNone(info['host']) - self.assertIsNotNone(info['username']) - self.assertIsNotNone(info['password']) - self.assertIsNotNone(info['port']) - self.assertIsNotNone(info['virt_type']) + self.assertEqual('1.2.3.4', info['host']) + self.assertEqual('admin', info['username']) + self.assertEqual('fake', info['password']) + self.assertEqual(22, info['port']) + self.assertEqual('virsh', info['virt_type']) self.assertIsNotNone(info['cmd_set']) - self.assertIsNotNone(info['uuid']) + 
self.assertEqual('1be26c0b-03f2-4d2e-ae87-c02d7f33c123', + info['uuid']) def test__parse_driver_info_good_key(self): # make sure we get back the expected things @@ -65,13 +66,14 @@ class SSHValidateParametersTestCase(db_base.DbTestCase): driver='fake_ssh', driver_info=db_utils.get_test_ssh_info('key')) info = ssh._parse_driver_info(node) - self.assertIsNotNone(info['host']) - self.assertIsNotNone(info['username']) - self.assertIsNotNone(info['key_contents']) - self.assertIsNotNone(info['port']) - self.assertIsNotNone(info['virt_type']) + self.assertEqual('1.2.3.4', info['host']) + self.assertEqual('admin', info['username']) + self.assertEqual('--BEGIN PRIVATE ...blah', info['key_contents']) + self.assertEqual(22, info['port']) + self.assertEqual('virsh', info['virt_type']) self.assertIsNotNone(info['cmd_set']) - self.assertIsNotNone(info['uuid']) + self.assertEqual('1be26c0b-03f2-4d2e-ae87-c02d7f33c123', + info['uuid']) def test__parse_driver_info_good_file(self): # make sure we get back the expected things @@ -85,13 +87,14 @@ class SSHValidateParametersTestCase(db_base.DbTestCase): driver='fake_ssh', driver_info=d_info) info = ssh._parse_driver_info(node) - self.assertIsNotNone(info['host']) - self.assertIsNotNone(info['username']) - self.assertIsNotNone(info['key_filename']) - self.assertIsNotNone(info['port']) - self.assertIsNotNone(info['virt_type']) + self.assertEqual('1.2.3.4', info['host']) + self.assertEqual('admin', info['username']) + self.assertEqual(key_path, info['key_filename']) + self.assertEqual(22, info['port']) + self.assertEqual('virsh', info['virt_type']) self.assertIsNotNone(info['cmd_set']) - self.assertIsNotNone(info['uuid']) + self.assertEqual('1be26c0b-03f2-4d2e-ae87-c02d7f33c123', + info['uuid']) def test__parse_driver_info_bad_file(self): # A filename that doesn't exist errors. 
diff --git a/ironic/tests/unit/drivers/modules/ucs/test_helper.py b/ironic/tests/unit/drivers/modules/ucs/test_helper.py index cd78fa72c..43b1de9c8 100644 --- a/ironic/tests/unit/drivers/modules/ucs/test_helper.py +++ b/ironic/tests/unit/drivers/modules/ucs/test_helper.py @@ -46,10 +46,11 @@ class UcsValidateParametersTestCase(db_base.DbTestCase): def test_parse_driver_info(self): info = ucs_helper.parse_driver_info(self.node) - self.assertIsNotNone(info['ucs_address']) - self.assertIsNotNone(info['ucs_username']) - self.assertIsNotNone(info['ucs_password']) - self.assertIsNotNone(info['ucs_service_profile']) + self.assertEqual(INFO_DICT['ucs_address'], info['ucs_address']) + self.assertEqual(INFO_DICT['ucs_username'], info['ucs_username']) + self.assertEqual(INFO_DICT['ucs_password'], info['ucs_password']) + self.assertEqual(INFO_DICT['ucs_service_profile'], + info['ucs_service_profile']) def test_parse_driver_info_missing_address(self): From 41a489d674e62a0dbb2aac76e17d6f70078ad1ce Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Mon, 27 Jun 2016 16:24:47 -0700 Subject: [PATCH 029/166] Fix releasenotes formatting error Commit b12d184b73f6decb20f733ae642a54ae49e90b88 has release notes that are badly formatted. Fix the issue. Change-Id: If82d73bea1f45843dfdfd7304a18d2dc7c9044bd --- releasenotes/notes/bug-1570283-6cdc62e4ef43cb02.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/releasenotes/notes/bug-1570283-6cdc62e4ef43cb02.yaml b/releasenotes/notes/bug-1570283-6cdc62e4ef43cb02.yaml index 7d6ec791a..372d7b04a 100644 --- a/releasenotes/notes/bug-1570283-6cdc62e4ef43cb02.yaml +++ b/releasenotes/notes/bug-1570283-6cdc62e4ef43cb02.yaml @@ -1,4 +1,4 @@ --- fixes: - -Fixes the issue of not attaching virtual media during cleaning operation - for vmedia based drivers. + - Fixes the issue of not attaching virtual media during cleaning operation + for vmedia based drivers. 
From e75ff03b6e826e3811dbdb3648a7bd2ac75c8b9d Mon Sep 17 00:00:00 2001 From: Yuiko Takada Mori Date: Tue, 28 Jun 2016 11:14:21 +0900 Subject: [PATCH 030/166] Follow-up patch of 9a1aeb76da2ed53e042a94ead8640af9374a10bf This patch set removes unused option. Change-Id: I2fba6fd7800cab98e783fc29f2d60d5ecfdde458 Partial-Bug: #1589370 --- devstack/tools/ironic/scripts/create-node.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/tools/ironic/scripts/create-node.sh b/devstack/tools/ironic/scripts/create-node.sh index 14f53245b..172a22ee3 100755 --- a/devstack/tools/ironic/scripts/create-node.sh +++ b/devstack/tools/ironic/scripts/create-node.sh @@ -9,7 +9,7 @@ set -ex # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) -while getopts "n:c:m:d:a:b:e:p:v:f:l:" arg; do +while getopts "n:c:m:d:a:b:e:p:f:l:" arg; do case $arg in n) NAME=$OPTARG;; c) CPU=$OPTARG;; From 957e8d16b18cb07aa7cada4079cd97ec3a72ab84 Mon Sep 17 00:00:00 2001 From: Lin Tan Date: Wed, 1 Jun 2016 14:40:52 +0800 Subject: [PATCH 031/166] Throwing an exception when creating a node with tags Currently, creating node with tags is not supported. If tags values were passed over, an exception should be thrown. A follow-up patch of 19ef56cf00a78871e0d30a160b07f280488ddd0c Change-Id: I83e7db6a8da014962454e2b1248fe061985d98d8 --- ironic/db/api.py | 1 + ironic/db/sqlalchemy/api.py | 7 ++----- ironic/tests/unit/db/test_nodes.py | 9 ++++----- ironic/tests/unit/db/utils.py | 4 ++++ 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/ironic/db/api.py b/ironic/db/api.py index f8ab5a686..dbbbb3494 100644 --- a/ironic/db/api.py +++ b/ironic/db/api.py @@ -143,6 +143,7 @@ class Connection(object): 'properties': { ... }, 'extra': { ... }, } + :raises: InvalidParameterValue if create a node with tags. :returns: A node. 
""" diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py index 02bf4782c..faad75f47 100644 --- a/ironic/db/sqlalchemy/api.py +++ b/ironic/db/sqlalchemy/api.py @@ -298,11 +298,8 @@ class Connection(api.Connection): # TODO(zhenguo): Support creating node with tags if 'tags' in values: - LOG.warning( - _LW('Ignore the specified tags %(tags)s when creating node: ' - '%(node)s.'), {'tags': values['tags'], - 'node': values['uuid']}) - del values['tags'] + msg = _("Cannot create node with tags.") + raise exception.InvalidParameterValue(err=msg) node = models.Node() node.update(values) diff --git a/ironic/tests/unit/db/test_nodes.py b/ironic/tests/unit/db/test_nodes.py index 33522a1e4..da5744a7f 100644 --- a/ironic/tests/unit/db/test_nodes.py +++ b/ironic/tests/unit/db/test_nodes.py @@ -24,7 +24,6 @@ import six from ironic.common import exception from ironic.common import states -from ironic.db.sqlalchemy import api from ironic.tests.unit.db import base from ironic.tests.unit.db import utils @@ -34,10 +33,10 @@ class DbNodeTestCase(base.DbTestCase): def test_create_node(self): utils.create_test_node() - @mock.patch.object(api.LOG, 'warning', autospec=True) - def test_create_node_with_tags(self, mock_log): - utils.create_test_node(tags=['tag1', 'tag2']) - self.assertTrue(mock_log.called) + def test_create_node_with_tags(self): + self.assertRaises(exception.InvalidParameterValue, + utils.create_test_node, + tags=['tag1', 'tag2']) def test_create_node_already_exists(self): utils.create_test_node() diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py index e33696703..676158ef6 100644 --- a/ironic/tests/unit/db/utils.py +++ b/ironic/tests/unit/db/utils.py @@ -241,6 +241,10 @@ def create_test_node(**kw): # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del node['id'] + # Create node with tags will raise an exception. If tags are not + # specified explicitly just delete it. 
+ if 'tags' not in kw: + del node['tags'] dbapi = db_api.get_instance() return dbapi.create_node(node) From e5a13e3dd8cf3bb7a5e74c62b8e1774f7da2d297 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Tue, 28 Jun 2016 19:58:43 +0800 Subject: [PATCH 032/166] Add missing translation marker to clear_node_target_power_state This is a follow up of d52077f4fe8c668b258702e8298a4beaa19476d8 Change-Id: Ifc397efa118f3cb7ec9146e513fd6c71f7e1924e --- ironic/db/sqlalchemy/api.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py index c89d17d7a..0a5f42439 100644 --- a/ironic/db/sqlalchemy/api.py +++ b/ironic/db/sqlalchemy/api.py @@ -763,8 +763,9 @@ class Connection(api.Connection): query = query.filter(models.Node.target_power_state != sql.null()) nodes = [node['uuid'] for node in query] query.update({'target_power_state': None, - 'last_error': "Pending power operation was aborted " - "due to conductor restart"}) + 'last_error': _("Pending power operation was " + "aborted due to conductor " + "restart")}) if nodes: nodes = ', '.join(nodes) From 1e2e4136f20f18f2d8b9205949de498900d34745 Mon Sep 17 00:00:00 2001 From: pradeepcsekar Date: Tue, 28 Jun 2016 13:44:38 -0700 Subject: [PATCH 033/166] Fix typo in inspection.rst Change-Id: Ibca40613d3b5c7701484249e78c612ed24178ff9 --- doc/source/deploy/inspection.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/deploy/inspection.rst b/doc/source/deploy/inspection.rst index 04526dd5d..f517b3476 100644 --- a/doc/source/deploy/inspection.rst +++ b/doc/source/deploy/inspection.rst @@ -116,7 +116,7 @@ configuration file must be set:: keep_ports = present .. note:: - During Kilo cycle we used on older verions of Inspector called + During Kilo cycle we used an older version of Inspector called ironic-discoverd_. Inspector is expected to be a mostly drop-in replacement, and the same client library should be used to connect to both. 
From b186297f324231db43f77999359714c67aa45973 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 17 May 2016 13:59:24 +0300 Subject: [PATCH 034/166] Add network_interface node field to DB and object This patch adds a new field 'network_interface' to node object. Its default value is None meaning 'use the default network interface' to comply with driver composition reform. The default setting logic will be added to the driver factory in the next patch. Partial-bug: #1526403 Co-Authored-By: Vladyslav Drok Change-Id: Id9b9f4199c2640ca30b2d87c3a517c65f9cb5527 --- ...e294876e8028_add_node_network_interface.py | 31 +++++++++++++++++++ ironic/db/sqlalchemy/models.py | 2 ++ ironic/objects/node.py | 5 ++- ironic/tests/unit/api/utils.py | 3 ++ .../unit/db/sqlalchemy/test_migrations.py | 7 +++++ ironic/tests/unit/db/utils.py | 1 + ironic/tests/unit/objects/test_objects.py | 2 +- 7 files changed, 49 insertions(+), 2 deletions(-) create mode 100644 ironic/db/sqlalchemy/alembic/versions/e294876e8028_add_node_network_interface.py diff --git a/ironic/db/sqlalchemy/alembic/versions/e294876e8028_add_node_network_interface.py b/ironic/db/sqlalchemy/alembic/versions/e294876e8028_add_node_network_interface.py new file mode 100644 index 000000000..8bc6aeedb --- /dev/null +++ b/ironic/db/sqlalchemy/alembic/versions/e294876e8028_add_node_network_interface.py @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""add-node-network-interface + +Revision ID: e294876e8028 +Revises: f6fdb920c182 +Create Date: 2016-03-02 14:30:54.402864 + +""" + +# revision identifiers, used by Alembic. +revision = 'e294876e8028' +down_revision = 'f6fdb920c182' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.add_column('nodes', sa.Column('network_interface', sa.String(255), + nullable=True)) diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py index 5fbe4655f..dddf23bea 100644 --- a/ironic/db/sqlalchemy/models.py +++ b/ironic/db/sqlalchemy/models.py @@ -143,6 +143,8 @@ class Node(Base): inspection_started_at = Column(DateTime, nullable=True) extra = Column(db_types.JsonEncodedDict) + network_interface = Column(String(255), nullable=True) + class Port(Base): """Represents a network port of a bare metal node.""" diff --git a/ironic/objects/node.py b/ironic/objects/node.py index aa4b851ae..6077dd2fe 100644 --- a/ironic/objects/node.py +++ b/ironic/objects/node.py @@ -46,7 +46,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat): # Version 1.14: Add _validate_property_values() and make create() # and save() validate the input of property values. 
# Version 1.15: Add get_by_port_addresses - VERSION = '1.15' + # Version 1.16: Add network_interface field + VERSION = '1.16' dbapi = db_api.get_instance() @@ -102,6 +103,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat): 'inspection_started_at': object_fields.DateTimeField(nullable=True), 'extra': object_fields.FlexibleDictField(nullable=True), + + 'network_interface': object_fields.StringField(nullable=True), } def _validate_property_values(self, properties): diff --git a/ironic/tests/unit/api/utils.py b/ironic/tests/unit/api/utils.py index 41978e256..024fd07e7 100644 --- a/ironic/tests/unit/api/utils.py +++ b/ironic/tests/unit/api/utils.py @@ -94,6 +94,9 @@ def node_post_data(**kw): node.pop('conductor_affinity') node.pop('chassis_id') node.pop('tags') + # TODO(vdrok): Remove popping network_interface when it's exposed in API + if 'network_interface' not in kw: + node.pop('network_interface') internal = node_controller.NodePatchType.internal_attrs() return remove_internal(node, internal) diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py index 9d89b232e..fbaec663b 100644 --- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py +++ b/ironic/tests/unit/db/sqlalchemy/test_migrations.py @@ -414,6 +414,13 @@ class MigrationCheckersMixin(object): if _was_inserted(row['uuid']): self.assertTrue(row['pxe_enabled']) + def _check_e294876e8028(self, engine, data): + nodes = db_utils.get_table(engine, 'nodes') + col_names = [column.name for column in nodes.c] + self.assertIn('network_interface', col_names) + self.assertIsInstance(nodes.c.network_interface.type, + sqlalchemy.types.String) + def test_upgrade_and_version(self): with patch_with_engine(self.engine): self.migration_api.upgrade('head') diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py index 5d57bf2c0..358b05d22 100644 --- a/ironic/tests/unit/db/utils.py +++ b/ironic/tests/unit/db/utils.py @@ -226,6 
+226,7 @@ def get_test_node(**kw): 'raid_config': kw.get('raid_config'), 'target_raid_config': kw.get('target_raid_config'), 'tags': kw.get('tags', []), + 'network_interface': kw.get('network_interface'), } diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py index ae67537bd..5affc4b60 100644 --- a/ironic/tests/unit/objects/test_objects.py +++ b/ironic/tests/unit/objects/test_objects.py @@ -404,7 +404,7 @@ class TestObject(_LocalTest, _TestObject): # version bump. It is md5 hash of object fields and remotable methods. # The fingerprint values should only be changed if there is a version bump. expected_object_fingerprints = { - 'Node': '1.15-9ee8ab283b06398545880dfdedb49891', + 'Node': '1.16-2a6646627cb937f083f428f5d54e6458', 'MyObj': '1.5-4f5efe8f0fcaf182bbe1c7fe3ba858db', 'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905', 'Port': '1.5-a224755c3da5bc5cf1a14a11c0d00f3f', From b203d984bd3b4a591163f252ba156a51ea56789c Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Tue, 14 Jun 2016 16:51:59 -0700 Subject: [PATCH 035/166] Add Ironic specs process to the code contribution guide Currently, Ironic Specs Process is available only as a wiki page. This patch adds it to the main documentation, specifically to the code contribution guide at: http://docs.openstack.org/developer/ironic/dev/code-contribution-guide.html Change-Id: I36f6950cf99f8f334a3883a0d8cc2eafe5a56e20 Closes-Bug: #1590759 --- doc/source/dev/code-contribution-guide.rst | 54 ++++++++++++++++++++-- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/doc/source/dev/code-contribution-guide.rst b/doc/source/dev/code-contribution-guide.rst index 581deb0be..43017916e 100644 --- a/doc/source/dev/code-contribution-guide.rst +++ b/doc/source/dev/code-contribution-guide.rst @@ -8,7 +8,7 @@ This document provides some necessary points for developers to consider when writing and reviewing Ironic code. The checklist will help developers get things right. 
-Adding new features +Adding New Features =================== Starting with the Mitaka development cycle, Ironic tracks new features using @@ -49,9 +49,7 @@ Ironic: #. The ironic-drivers team will evaluate the RFE and may advise the submitter to file a spec in ironic-specs to elaborate on the feature request, in case the RFE requires extra scrutiny, more design discussion, etc. For the spec - submission process, please see the - `specs process `_ - wiki page. + submission process, please see the `Ironic Specs Process`_. #. If a spec is not required, once the discussion has happened and there is positive consensus among the ironic-drivers team on the RFE, the RFE is @@ -163,3 +161,51 @@ Agent driver attributes: variable names with a specific interface name(e.g., iboot_bar, amt_xyz), so as to minimize or avoid any conflicts between interfaces. + +Ironic Specs Process +==================== + +Specifications must follow the template which can be found at +`specs/template.rst `_, which is quite self-documenting. Specifications are +proposed by adding them to the `specs/approved` directory, adding a soft link +to it from the `specs/not-implemented` directory, and posting it for +review to Gerrit. For more information, please see the `README `_. + +The same `Gerrit process +`_ as with source code, +using the repository `ironic-specs `_, is used to add new specifications. + +All approved specifications are available at: +http://specs.openstack.org/openstack/ironic-specs. If a specification has +been approved but not completed within one or more releases since the +approval, it may be re-reviewed to make sure it still makes sense as written. + +Ironic specifications are part of the `RFE (Requests for Feature Enhancements) +process <#adding-new-features>`_. +You are welcome to submit patches associated with an RFE, but they will have +a -2 ("do not merge") until the specification has been approved. This is to +ensure that the patches don't get accidentally merged beforehand. 
You will +still be able to get reviewer feedback and push new patch sets, even with a -2. +The `list of core reviewers `_ for the specifications is small but mighty. (This is not +necessarily the same list of core reviewers for code patches.) + +Changes to existing specs +------------------------- + +For approved but not-completed specs: + - cosmetic cleanup, fixing errors, and changing the definition of a feature + can be done to the spec. + +For approved and completed specs: + - changing a previously approved and completed spec should only be done + for cosmetic cleanup or fixing errors. + - changing the definition of the feature should be done in a new spec. + + +Please see the `Ironic specs process wiki page `_ for further reference. + From c3d8918f975f5d24284350e5d663bb9c2810708f Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Fri, 8 Apr 2016 15:27:59 -0700 Subject: [PATCH 036/166] Centralize config options - [snmp] Nova style refactor of config options in Ironic. Change-Id: I2a803ced4080c5f3358b9ac939d4864151cca5b6 Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 ++ ironic/conf/opts.py | 3 +-- ironic/conf/snmp.py | 41 ++++++++++++++++++++++++++++++++++ ironic/drivers/modules/snmp.py | 18 +-------------- 4 files changed, 45 insertions(+), 19 deletions(-) create mode 100644 ironic/conf/snmp.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 7e5703b91..8f95572d5 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -29,6 +29,7 @@ from ironic.conf import irmc from ironic.conf import keystone from ironic.conf import oneview from ironic.conf import seamicro +from ironic.conf import snmp CONF = cfg.CONF @@ -46,3 +47,4 @@ irmc.register_opts(CONF) keystone.register_opts(CONF) oneview.register_opts(CONF) seamicro.register_opts(CONF) +snmp.register_opts(CONF) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 07853b871..8d08968c6 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -34,7 +34,6 @@ 
import ironic.drivers.modules.deploy_utils import ironic.drivers.modules.image_cache import ironic.drivers.modules.iscsi_deploy import ironic.drivers.modules.pxe -import ironic.drivers.modules.snmp import ironic.drivers.modules.ssh import ironic.drivers.modules.virtualbox import ironic.netconf @@ -85,7 +84,7 @@ _opts = [ ironic.drivers.modules.iscsi_deploy.pxe_opts, ironic.drivers.modules.pxe.pxe_opts)), ('seamicro', ironic.conf.seamicro.opts), - ('snmp', ironic.drivers.modules.snmp.opts), + ('snmp', ironic.conf.snmp.opts), ('ssh', ironic.drivers.modules.ssh.libvirt_opts), ('swift', ironic.common.swift.swift_opts), ('virtualbox', ironic.drivers.modules.virtualbox.opts), diff --git a/ironic/conf/snmp.py b/ironic/conf/snmp.py new file mode 100644 index 000000000..acaae9ad0 --- /dev/null +++ b/ironic/conf/snmp.py @@ -0,0 +1,41 @@ +# Copyright 2016 Intel Corporation +# Copyright 2013,2014 Cray Inc +# +# Authors: David Hewson +# Stig Telfer +# Mark Goddard +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.IntOpt('power_timeout', + default=10, + help=_('Seconds to wait for power action to be completed')), + # NOTE(yuriyz): some of SNMP-enabled hardware have own options for pause + # between off and on. This option guarantees minimal value. 
+ cfg.IntOpt('reboot_delay', + default=0, + min=0, + help=_('Time (in seconds) to sleep between when rebooting ' + '(powering off and on again)')) +] + + +def register_opts(conf): + conf.register_opts(opts, group='snmp') diff --git a/ironic/drivers/modules/snmp.py b/ironic/drivers/modules/snmp.py index 4787b5636..af5d7b72b 100644 --- a/ironic/drivers/modules/snmp.py +++ b/ironic/drivers/modules/snmp.py @@ -30,7 +30,6 @@ models. import abc import time -from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import importutils @@ -42,6 +41,7 @@ from ironic.common.i18n import _LW from ironic.common import states from ironic.common import utils from ironic.conductor import task_manager +from ironic.conf import CONF from ironic.drivers import base pysnmp = importutils.try_import('pysnmp') @@ -54,24 +54,8 @@ else: snmp_error = None rfc1902 = None -opts = [ - cfg.IntOpt('power_timeout', - default=10, - help=_('Seconds to wait for power action to be completed')), - # NOTE(yuriyz): some of SNMP-enabled hardware have own options for pause - # between off and on. This option guarantees minimal value. - cfg.IntOpt('reboot_delay', - default=0, - min=0, - help=_('Time (in seconds) to sleep between when rebooting ' - '(powering off and on again)')) -] - LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CONF.register_opts(opts, group='snmp') - SNMP_V1 = '1' SNMP_V2C = '2c' From b05491433fbd9d6bfde9817ce7d3b60f773f4d87 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Fri, 8 Apr 2016 15:39:54 -0700 Subject: [PATCH 037/166] Centralize config options - [ssh] Nova style refactor of config options in Ironic. 
Change-Id: I1b3e1f6d05d9fbea125933f6a410e4907fb0fadb Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 ++ ironic/conf/opts.py | 3 +-- ironic/conf/ssh.py | 38 +++++++++++++++++++++++++++++++++++ ironic/drivers/modules/ssh.py | 20 +----------------- 4 files changed, 42 insertions(+), 21 deletions(-) create mode 100644 ironic/conf/ssh.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 8f95572d5..929b9ec17 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -30,6 +30,7 @@ from ironic.conf import keystone from ironic.conf import oneview from ironic.conf import seamicro from ironic.conf import snmp +from ironic.conf import ssh CONF = cfg.CONF @@ -48,3 +49,4 @@ keystone.register_opts(CONF) oneview.register_opts(CONF) seamicro.register_opts(CONF) snmp.register_opts(CONF) +ssh.register_opts(CONF) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 8d08968c6..63c65202e 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -34,7 +34,6 @@ import ironic.drivers.modules.deploy_utils import ironic.drivers.modules.image_cache import ironic.drivers.modules.iscsi_deploy import ironic.drivers.modules.pxe -import ironic.drivers.modules.ssh import ironic.drivers.modules.virtualbox import ironic.netconf @@ -85,7 +84,7 @@ _opts = [ ironic.drivers.modules.pxe.pxe_opts)), ('seamicro', ironic.conf.seamicro.opts), ('snmp', ironic.conf.snmp.opts), - ('ssh', ironic.drivers.modules.ssh.libvirt_opts), + ('ssh', ironic.conf.ssh.opts), ('swift', ironic.common.swift.swift_opts), ('virtualbox', ironic.drivers.modules.virtualbox.opts), ] diff --git a/ironic/conf/ssh.py b/ironic/conf/ssh.py new file mode 100644 index 000000000..4ae4aabc1 --- /dev/null +++ b/ironic/conf/ssh.py @@ -0,0 +1,38 @@ +# Copyright 2016 Intel Corporation +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.StrOpt('libvirt_uri', + default='qemu:///system', + help=_('libvirt URI.')), + cfg.IntOpt('get_vm_name_attempts', + default=3, + help=_("Number of attempts to try to get VM name used by the " + "host that corresponds to a node's MAC address.")), + cfg.IntOpt('get_vm_name_retry_interval', + default=3, + help=_("Number of seconds to wait between attempts to get " + "VM name used by the host that corresponds to a " + "node's MAC address.")), +] + + +def register_opts(conf): + conf.register_opts(opts, group='ssh') diff --git a/ironic/drivers/modules/ssh.py b/ironic/drivers/modules/ssh.py index 38da701ed..9002c7acb 100644 --- a/ironic/drivers/modules/ssh.py +++ b/ironic/drivers/modules/ssh.py @@ -31,7 +31,6 @@ Currently supported environments are: import os from oslo_concurrency import processutils -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils @@ -46,28 +45,11 @@ from ironic.common.i18n import _LW from ironic.common import states from ironic.common import utils from ironic.conductor import task_manager +from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules import console_utils from ironic.drivers import utils as driver_utils -libvirt_opts = [ - cfg.StrOpt('libvirt_uri', - default='qemu:///system', - help=_('libvirt URI.')), - 
cfg.IntOpt('get_vm_name_attempts', - default=3, - help=_("Number of attempts to try to get VM name used by the " - "host that corresponds to a node's MAC address.")), - cfg.IntOpt('get_vm_name_retry_interval', - default=3, - help=_("Number of seconds to wait between attempts to get " - "VM name used by the host that corresponds to a " - "node's MAC address.")), -] - -CONF = cfg.CONF -CONF.register_opts(libvirt_opts, group='ssh') - LOG = logging.getLogger(__name__) REQUIRED_PROPERTIES = { From 4938c8ec63c155ff91c414966d522703c233f2bb Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Fri, 8 Apr 2016 15:53:07 -0700 Subject: [PATCH 038/166] Centralize config options - [swift] Nova style refactor of config options in Ironic. Change-Id: If2539aa019e9f2e03f74c9e32da50b9fad6f611d Partial-Bug: #1561100 --- ironic/common/swift.py | 13 +------------ ironic/conf/__init__.py | 2 ++ ironic/conf/opts.py | 3 +-- ironic/conf/swift.py | 30 ++++++++++++++++++++++++++++++ 4 files changed, 34 insertions(+), 14 deletions(-) create mode 100644 ironic/conf/swift.py diff --git a/ironic/common/swift.py b/ironic/common/swift.py index b5c66c662..5362571a9 100644 --- a/ironic/common/swift.py +++ b/ironic/common/swift.py @@ -14,7 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from oslo_config import cfg from six.moves import http_client from six.moves.urllib import parse from swiftclient import client as swift_client @@ -24,17 +23,7 @@ from swiftclient import utils as swift_utils from ironic.common import exception from ironic.common.i18n import _ from ironic.common import keystone - -swift_opts = [ - cfg.IntOpt('swift_max_retries', - default=2, - help=_('Maximum number of times to retry a Swift request, ' - 'before failing.')) -] - - -CONF = cfg.CONF -CONF.register_opts(swift_opts, group='swift') +from ironic.conf import CONF CONF.import_opt('admin_user', 'keystonemiddleware.auth_token', group='keystone_authtoken') diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 929b9ec17..c2192ecfd 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -31,6 +31,7 @@ from ironic.conf import oneview from ironic.conf import seamicro from ironic.conf import snmp from ironic.conf import ssh +from ironic.conf import swift CONF = cfg.CONF @@ -50,3 +51,4 @@ oneview.register_opts(CONF) seamicro.register_opts(CONF) snmp.register_opts(CONF) ssh.register_opts(CONF) +swift.register_opts(CONF) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 63c65202e..23c715691 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -23,7 +23,6 @@ import ironic.common.images import ironic.common.neutron import ironic.common.paths import ironic.common.service -import ironic.common.swift import ironic.common.utils import ironic.drivers.modules.agent import ironic.drivers.modules.agent_base_vendor @@ -85,7 +84,7 @@ _opts = [ ('seamicro', ironic.conf.seamicro.opts), ('snmp', ironic.conf.snmp.opts), ('ssh', ironic.conf.ssh.opts), - ('swift', ironic.common.swift.swift_opts), + ('swift', ironic.conf.swift.opts), ('virtualbox', ironic.drivers.modules.virtualbox.opts), ] diff --git a/ironic/conf/swift.py b/ironic/conf/swift.py new file mode 100644 index 000000000..66ba9daf3 --- /dev/null +++ b/ironic/conf/swift.py @@ -0,0 +1,30 @@ +# 
Copyright 2016 Intel Corporation +# Copyright 2014 OpenStack Foundation +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.IntOpt('swift_max_retries', + default=2, + help=_('Maximum number of times to retry a Swift request, ' + 'before failing.')) +] + + +def register_opts(conf): + conf.register_opts(opts, group='swift') From 28f74a8c9c0f954851ce71bcde9605850c17dbba Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Fri, 8 Apr 2016 16:05:09 -0700 Subject: [PATCH 039/166] Centralize config options - [virtualbox] Nova style refactor of config options in Ironic. 
Change-Id: I9c7b1ea0c4c35eaacc4655d605993297242f0c0a Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 ++ ironic/conf/opts.py | 3 +-- ironic/conf/virtualbox.py | 27 +++++++++++++++++++++++++++ ironic/drivers/modules/virtualbox.py | 10 +--------- 4 files changed, 31 insertions(+), 11 deletions(-) create mode 100644 ironic/conf/virtualbox.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index c2192ecfd..a8a57d11e 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -32,6 +32,7 @@ from ironic.conf import seamicro from ironic.conf import snmp from ironic.conf import ssh from ironic.conf import swift +from ironic.conf import virtualbox CONF = cfg.CONF @@ -52,3 +53,4 @@ seamicro.register_opts(CONF) snmp.register_opts(CONF) ssh.register_opts(CONF) swift.register_opts(CONF) +virtualbox.register_opts(CONF) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 23c715691..10755b30a 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -33,7 +33,6 @@ import ironic.drivers.modules.deploy_utils import ironic.drivers.modules.image_cache import ironic.drivers.modules.iscsi_deploy import ironic.drivers.modules.pxe -import ironic.drivers.modules.virtualbox import ironic.netconf _default_opt_lists = [ @@ -85,7 +84,7 @@ _opts = [ ('snmp', ironic.conf.snmp.opts), ('ssh', ironic.conf.ssh.opts), ('swift', ironic.conf.swift.opts), - ('virtualbox', ironic.drivers.modules.virtualbox.opts), + ('virtualbox', ironic.conf.virtualbox.opts), ] diff --git a/ironic/conf/virtualbox.py b/ironic/conf/virtualbox.py new file mode 100644 index 000000000..24a619338 --- /dev/null +++ b/ironic/conf/virtualbox.py @@ -0,0 +1,27 @@ +# Copyright 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.PortOpt('port', + default=18083, + help=_('Port on which VirtualBox web service is listening.')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='virtualbox') diff --git a/ironic/drivers/modules/virtualbox.py b/ironic/drivers/modules/virtualbox.py index b58319d3c..e8095232e 100644 --- a/ironic/drivers/modules/virtualbox.py +++ b/ironic/drivers/modules/virtualbox.py @@ -14,7 +14,6 @@ VirtualBox Driver Modules """ -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils @@ -26,6 +25,7 @@ from ironic.common.i18n import _LW from ironic.common import states from ironic.common import utils from ironic.conductor import task_manager +from ironic.conf import CONF from ironic.drivers import base pyremotevbox = importutils.try_import('pyremotevbox') @@ -47,14 +47,6 @@ VIRTUALBOX_TO_IRONIC_POWER_MAPPING = { 'Error': states.ERROR } -opts = [ - cfg.PortOpt('port', - default=18083, - help=_('Port on which VirtualBox web service is listening.')), -] -CONF = cfg.CONF -CONF.register_opts(opts, group='virtualbox') - LOG = logging.getLogger(__name__) REQUIRED_PROPERTIES = { From 648f00a115ebc8bbb0f74484878580fffb41f231 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 1 Jul 2016 04:18:45 +0000 Subject: [PATCH 040/166] Updated from global requirements Change-Id: Ic4ce5d8e0d16fe5b36b090f93c894564964ec129 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt 
b/requirements.txt index 7eeadb951..dd6e6adff 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ oslo.middleware>=3.0.0 # Apache-2.0 oslo.policy>=1.9.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.11.0 # Apache-2.0 +oslo.utils>=3.14.0 # Apache-2.0 pecan>=1.0.0 # BSD requests>=2.10.0 # Apache-2.0 six>=1.9.0 # MIT From 58fbfabedaa18418415957b9f754c204dcceb3f1 Mon Sep 17 00:00:00 2001 From: vsaienko Date: Wed, 16 Mar 2016 15:16:28 +0200 Subject: [PATCH 041/166] Allow to use network interfaces in devstack This patch allow to set enabled_network_interfaces configuration option via IRONIC_ENABLED_NETWORK_INTERFACES and registered ironic node with IRONIC_NETWORK_INTERFACE. Change-Id: Ie01ca4be3a085bc283879b5528c5c174a35a843b Depends-On: I0c26582b6b6e9d32650ff3e2b9a3269c3c2d5454 Partial-bug: #1526403 --- devstack/lib/ironic | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 080672b7b..be36df84e 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -212,6 +212,12 @@ IRONIC_TERMINAL_CERT_DIR=${IRONIC_TERMINAL_CERT_DIR:-$IRONIC_DATA_DIR/terminal_c # IRONIC_{VM,HW}_NODES_FILE IRONIC_USE_LINK_LOCAL=$(trueorfalse False IRONIC_USE_LINK_LOCAL) +# This flag is used to specify enabled network drivers +IRONIC_ENABLED_NETWORK_INTERFACES=${IRONIC_ENABLED_NETWORK_INTERFACES:-} + +# This is the network interface to use for a node +IRONIC_NETWORK_INTERFACE=${IRONIC_NETWORK_INTERFACE:-} + # get_pxe_boot_file() - Get the PXE/iPXE boot file path function get_pxe_boot_file { local relpath=syslinux/pxelinux.0 @@ -549,6 +555,10 @@ function configure_ironic_conductor { if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then iniset $IRONIC_CONF_FILE neutron port_setup_delay 15 fi + + if [[ -n "$IRONIC_ENABLED_NETWORK_INTERFACES" ]]; then + iniset $IRONIC_CONF_FILE DEFAULT enabled_network_interfaces $IRONIC_ENABLED_NETWORK_INTERFACES + fi } # 
create_ironic_cache_dir() - Part of the init_ironic() process @@ -911,6 +921,17 @@ function enroll_nodes { # Othervise API will return 406 ERROR ironic $ironic_api_version port-create --address $mac_address --node $node_id $llc_opts + # NOTE(vsaienko) use node-update instead of specifying network_interface + # during node creation. If node is added with latest version of API it + # will NOT go to available state automatically. + if [[ -n "${IRONIC_NETWORK_INTERFACE}" ]]; then + local n_id + ironic node-set-maintenance $node_id true + n_id=$(ironic $ironic_api_version node-update $node_id add network_interface=$IRONIC_NETWORK_INTERFACE) + die_if_not_set $LINENO n_id "Failed to update network interface for node" + ironic node-set-maintenance $node_id false + fi + total_nodes=$((total_nodes+1)) total_cpus=$((total_cpus+$ironic_node_cpu)) done < $ironic_hwinfo_file From f30f48e74321c179b0b26e9af9db57381b2bbf39 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 5 Jul 2016 09:52:29 +0300 Subject: [PATCH 042/166] Drop IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA from documentation Support of old ramdisk has been deprecated at [0]. This patch removes IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA variable from documentation. Reference: [0] https://review.openstack.org/327083/ Change-Id: I37448131b71d8e1580a765096e9d3698bd4b1c6c --- doc/source/dev/dev-quickstart.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/dev/dev-quickstart.rst b/doc/source/dev/dev-quickstart.rst index cb0352fa8..df4e78f9d 100644 --- a/doc/source/dev/dev-quickstart.rst +++ b/doc/source/dev/dev-quickstart.rst @@ -429,7 +429,6 @@ and uses the ``pxe_ssh`` driver by default:: IRONIC_VM_SSH_PORT=22 IRONIC_BAREMETAL_BASIC_OPS=True DEFAULT_INSTANCE_TYPE=baremetal - IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True # Enable Ironic drivers. 
IRONIC_ENABLED_DRIVERS=fake,agent_ssh,agent_ipmitool,pxe_ssh,pxe_ipmitool From 1bdf197366882f5a37e1f6e6c9465f5792111199 Mon Sep 17 00:00:00 2001 From: Lin Tan Date: Wed, 1 Jun 2016 14:12:51 +0800 Subject: [PATCH 043/166] Improve the readability of configuration drive doc part This corrects the presentation of section ``Enabling the configuration drive (configdrive)`` to improve the readability. Change-Id: Iff480d8a45da76f4ec96835b02ebd5cb61774b80 Co-Authored-By: Devananda van der Veen --- doc/source/deploy/install-guide.rst | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/doc/source/deploy/install-guide.rst b/doc/source/deploy/install-guide.rst index 4c62a3983..6ded72df1 100644 --- a/doc/source/deploy/install-guide.rst +++ b/doc/source/deploy/install-guide.rst @@ -2191,10 +2191,11 @@ Enabling the configuration drive (configdrive) Starting with the Kilo release, the Bare Metal service supports exposing a configuration drive image to the instances. -Configuration drive can store metadata and attaches to the instance when it -boots. One use case for using the configuration drive is to expose a -networking configuration when you do not use DHCP to assign IP addresses to -instances. +The configuration drive is used to store instance-specific metadata and is present to +the instance as a disk partition labeled ``config-2``. The configuration drive has +a maximum size of 64MB. One use case for using the configuration drive is to +expose a networking configuration when you do not use DHCP to assign IP +addresses to instances. The configuration drive is usually used in conjunction with the Compute service, but the Bare Metal service also offers a standalone way of using it. @@ -2204,14 +2205,10 @@ The following sections will describe both methods. 
When used with Compute service ------------------------------ -To enable the configuration drive and passes user customized script when deploying an -instance, pass ``--config-drive true`` parameter and ``--user-data`` to the -``nova boot`` command, for example:: +To enable the configuration drive for a specific request, pass +``--config-drive true`` parameter to the ``nova boot`` command, for example:: - nova boot --config-drive true --flavor baremetal --image test-image --user-data ./my-script instance-1 - -Then ``my-script`` is accessible from the configuration drive and could be -performed automatically by cloud-init if it is integrated with the instance image. + nova boot --config-drive true --flavor baremetal --image test-image instance-1 It's also possible to enable the configuration drive automatically on all instances by configuring the ``OpenStack Compute service`` to always @@ -2223,6 +2220,10 @@ create a configuration drive by setting the following option in the force_config_drive=True +In some cases, you may wish to pass a user customized script when deploying an instance. +To do this, pass ``--user-data /path/to/file`` to the ``nova boot`` command. +More information can be found at `Provide user data to instances `_ + When used standalone -------------------- From e65fb13da8477c4e959aaf501af7d195391d13d7 Mon Sep 17 00:00:00 2001 From: Yuiko Takada Mori Date: Wed, 6 Jul 2016 15:08:01 +0900 Subject: [PATCH 044/166] Remove iterated form of side effects Currently, mock side_effect is iterated form because of bug #1473381, but this bug has been fixed already. This patch set reverts it to previous form. 
Change-Id: Iff81effb75179be20c3f87b718a1509af51435bf Closes-Bug: #1473383 --- ironic/tests/unit/api/v1/test_drivers.py | 5 +- ironic/tests/unit/api/v1/test_nodes.py | 11 ++- .../tests/unit/common/test_image_service.py | 4 +- ironic/tests/unit/common/test_images.py | 23 +++--- ironic/tests/unit/conductor/test_manager.py | 11 ++- .../drivers/modules/amt/test_management.py | 3 +- .../unit/drivers/modules/amt/test_power.py | 19 +++-- .../unit/drivers/modules/ilo/test_boot.py | 5 +- .../unit/drivers/modules/ilo/test_deploy.py | 5 +- .../unit/drivers/modules/ilo/test_power.py | 2 +- .../unit/drivers/modules/irmc/test_boot.py | 6 +- .../drivers/modules/irmc/test_management.py | 2 +- .../unit/drivers/modules/irmc/test_power.py | 2 +- .../modules/msftocs/test_management.py | 3 +- .../modules/msftocs/test_msftocsclient.py | 2 +- .../drivers/modules/msftocs/test_power.py | 3 +- .../drivers/modules/oneview/test_vendor.py | 6 +- .../tests/unit/drivers/modules/test_agent.py | 5 +- .../drivers/modules/test_agent_base_vendor.py | 12 +-- .../drivers/modules/test_console_utils.py | 10 +-- .../unit/drivers/modules/test_deploy_utils.py | 15 ++-- .../tests/unit/drivers/modules/test_iboot.py | 2 +- .../unit/drivers/modules/test_inspector.py | 4 +- .../unit/drivers/modules/test_ipminative.py | 6 +- .../unit/drivers/modules/test_ipmitool.py | 78 +++++++++---------- .../unit/drivers/modules/test_iscsi_deploy.py | 4 +- ironic/tests/unit/drivers/modules/test_pxe.py | 4 +- .../unit/drivers/modules/test_seamicro.py | 12 +-- ironic/tests/unit/drivers/modules/test_ssh.py | 50 ++++++------ .../unit/drivers/modules/ucs/test_power.py | 2 +- ironic/tests/unit/drivers/test_irmc.py | 8 +- ironic/tests/unit/objects/test_node.py | 6 +- 32 files changed, 150 insertions(+), 180 deletions(-) diff --git a/ironic/tests/unit/api/v1/test_drivers.py b/ironic/tests/unit/api/v1/test_drivers.py index decd66086..40850c9b9 100644 --- a/ironic/tests/unit/api/v1/test_drivers.py +++ 
b/ironic/tests/unit/api/v1/test_drivers.py @@ -259,9 +259,8 @@ class TestListDrivers(base.BaseApiTest): self, disk_prop_mock): driver._RAID_PROPERTIES = {} self.register_fake_conductors() - disk_prop_mock.side_effect = iter( - [exception.UnsupportedDriverExtension( - extension='raid', driver='fake')]) + disk_prop_mock.side_effect = exception.UnsupportedDriverExtension( + extension='raid', driver='fake') path = '/drivers/%s/raid/logical_disk_properties' % self.d1 ret = self.get_json(path, headers={api_base.Version.string: "1.12"}, diff --git a/ironic/tests/unit/api/v1/test_nodes.py b/ironic/tests/unit/api/v1/test_nodes.py index 940f0233b..483cd069a 100644 --- a/ironic/tests/unit/api/v1/test_nodes.py +++ b/ironic/tests/unit/api/v1/test_nodes.py @@ -1981,8 +1981,7 @@ class TestPut(test_api_base.BaseApiTest): node.target_provision_state = states.NOSTATE node.reservation = 'fake-host' node.save() - self.mock_dnd.side_effect = iter([exception.NodeLocked(node='', - host='')]) + self.mock_dnd.side_effect = exception.NodeLocked(node='', host='') ret = self.put_json('/nodes/%s/states/provision' % node.uuid, {'target': states.ACTIVE}, expect_errors=True) @@ -2378,9 +2377,9 @@ class TestPut(test_api_base.BaseApiTest): autospec=True) def test_put_raid_iface_not_supported(self, set_raid_config_mock): raid_config = {'logical_disks': [{'size_gb': 100, 'raid_level': 1}]} - set_raid_config_mock.side_effect = iter([ + set_raid_config_mock.side_effect = ( exception.UnsupportedDriverExtension(extension='raid', - driver='fake')]) + driver='fake')) ret = self.put_json( '/nodes/%s/states/raid' % self.node.uuid, raid_config, headers={api_base.Version.string: "1.12"}, @@ -2394,8 +2393,8 @@ class TestPut(test_api_base.BaseApiTest): autospec=True) def test_put_raid_invalid_parameter_value(self, set_raid_config_mock): raid_config = {'logical_disks': [{'size_gb': 100, 'raid_level': 1}]} - set_raid_config_mock.side_effect = iter([ - exception.InvalidParameterValue('foo')]) + 
set_raid_config_mock.side_effect = exception.InvalidParameterValue( + 'foo') ret = self.put_json( '/nodes/%s/states/raid' % self.node.uuid, raid_config, headers={api_base.Version.string: "1.12"}, diff --git a/ironic/tests/unit/common/test_image_service.py b/ironic/tests/unit/common/test_image_service.py index 4eb2c8506..079f07afd 100644 --- a/ironic/tests/unit/common/test_image_service.py +++ b/ironic/tests/unit/common/test_image_service.py @@ -63,7 +63,7 @@ class HttpImageServiceTestCase(base.TestCase): @mock.patch.object(requests, 'head', autospec=True) def test_validate_href_error(self, head_mock): - head_mock.side_effect = iter([requests.ConnectionError()]) + head_mock.side_effect = requests.ConnectionError() self.assertRaises(exception.ImageRefValidationFailed, self.service.validate_href, self.href) head_mock.assert_called_once_with(self.href) @@ -116,7 +116,7 @@ class HttpImageServiceTestCase(base.TestCase): @mock.patch.object(requests, 'get', autospec=True) def test_download_fail_connerror(self, req_get_mock): - req_get_mock.side_effect = iter([requests.ConnectionError()]) + req_get_mock.side_effect = requests.ConnectionError() file_mock = mock.Mock(spec=file) self.assertRaises(exception.ImageDownloadFailed, self.service.download, self.href, file_mock) diff --git a/ironic/tests/unit/common/test_images.py b/ironic/tests/unit/common/test_images.py index bdeec8f45..863cb16a4 100644 --- a/ironic/tests/unit/common/test_images.py +++ b/ironic/tests/unit/common/test_images.py @@ -269,8 +269,8 @@ class FsImageTestCase(base.TestCase): 'a3': 'sub_dir/b3'} path_exists_mock.side_effect = path_exists_mock_func - dirname_mock.side_effect = iter( - ['root_dir', 'root_dir', 'root_dir/sub_dir', 'root_dir/sub_dir']) + dirname_mock.side_effect = ['root_dir', 'root_dir', 'root_dir/sub_dir', + 'root_dir/sub_dir'] images._create_root_fs('root_dir', files_info) cp_mock.assert_any_call('a1', 'root_dir/b1') cp_mock.assert_any_call('a2', 'root_dir/b2') @@ -425,8 +425,8 @@ class 
FsImageTestCase(base.TestCase): ('/tmpdir1/isolinux', [], ['efiboot.img', 'isolinux.bin', 'isolinux.cfg'])] - relpath_mock.side_effect = iter( - ['EFI/ubuntu/grub.cfg', 'isolinux/efiboot.img']) + relpath_mock.side_effect = ['EFI/ubuntu/grub.cfg', + 'isolinux/efiboot.img'] images._mount_deploy_iso('path/to/deployiso', 'tmpdir1') mount_mock.assert_called_once_with('path/to/deployiso', @@ -443,7 +443,7 @@ class FsImageTestCase(base.TestCase): walk_mock.return_value = [('/tmpdir1/EFI/ubuntu', [], ['grub.cfg']), ('/tmpdir1/isolinux', [], ['isolinux.bin', 'isolinux.cfg'])] - relpath_mock.side_effect = iter(['EFI/ubuntu/grub.cfg']) + relpath_mock.side_effect = 'EFI/ubuntu/grub.cfg' self.assertRaises(exception.ImageCreationFailed, images._mount_deploy_iso, @@ -464,7 +464,7 @@ class FsImageTestCase(base.TestCase): ('/tmpdir1/isolinux', '', ['efiboot.img', 'isolinux.bin', 'isolinux.cfg'])] - relpath_mock.side_effect = iter(['isolinux/efiboot.img']) + relpath_mock.side_effect = 'isolinux/efiboot.img' self.assertRaises(exception.ImageCreationFailed, images._mount_deploy_iso, @@ -503,7 +503,7 @@ class FsImageTestCase(base.TestCase): cfg_file = 'tmpdir/isolinux/isolinux.cfg' grubcfg = "grubcfg" grub_file = 'tmpdir/relpath/to/grub.cfg' - gen_cfg_mock.side_effect = iter([cfg, grubcfg]) + gen_cfg_mock.side_effect = cfg, grubcfg params = ['a=b', 'c'] isolinux_options = {'kernel': '/vmlinuz', @@ -520,8 +520,7 @@ class FsImageTestCase(base.TestCase): mock_file_handle.__enter__.return_value = 'tmpdir' mock_file_handle1 = mock.MagicMock(spec=file) mock_file_handle1.__enter__.return_value = 'mountdir' - tempdir_mock.side_effect = iter( - [mock_file_handle, mock_file_handle1]) + tempdir_mock.side_effect = mock_file_handle, mock_file_handle1 mount_mock.return_value = (uefi_path_info, e_img_rel_path, grub_rel_path) @@ -603,8 +602,7 @@ class FsImageTestCase(base.TestCase): mock_file_handle.__enter__.return_value = 'tmpdir' mock_file_handle1 = mock.MagicMock(spec=file) 
mock_file_handle1.__enter__.return_value = 'mountdir' - tempdir_mock.side_effect = iter( - [mock_file_handle, mock_file_handle1]) + tempdir_mock.side_effect = mock_file_handle, mock_file_handle1 create_root_fs_mock.side_effect = IOError self.assertRaises(exception.ImageCreationFailed, @@ -648,8 +646,7 @@ class FsImageTestCase(base.TestCase): mock_file_handle.__enter__.return_value = 'tmpdir' mock_file_handle1 = mock.MagicMock(spec=file) mock_file_handle1.__enter__.return_value = 'mountdir' - tempdir_mock.side_effect = iter( - [mock_file_handle, mock_file_handle1]) + tempdir_mock.side_effect = mock_file_handle, mock_file_handle1 mount_mock.return_value = ({'a': 'a'}, 'b', 'c') utils_mock.side_effect = processutils.ProcessExecutionError diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py index b5266d982..1c3545c76 100644 --- a/ironic/tests/unit/conductor/test_manager.py +++ b/ironic/tests/unit/conductor/test_manager.py @@ -2270,7 +2270,7 @@ class DoNodeVerifyTestCase(mgr_utils.ServiceSetUpMixin, last_error=None, power_state=states.NOSTATE) - mock_validate.side_effect = iter([RuntimeError("boom")]) + mock_validate.side_effect = RuntimeError("boom") self._start_service() with task_manager.acquire( @@ -2298,7 +2298,7 @@ class DoNodeVerifyTestCase(mgr_utils.ServiceSetUpMixin, last_error=None, power_state=states.NOSTATE) - mock_get_power_state.side_effect = iter([RuntimeError("boom")]) + mock_get_power_state.side_effect = RuntimeError("boom") self._start_service() with task_manager.acquire( @@ -4676,8 +4676,8 @@ class ManagerCheckDeployingStatusTestCase(mgr_utils.ServiceSetUpMixin, reservation='fake-conductor') mock_mapped.return_value = True - mock_release.side_effect = iter([exception.NodeNotFound('not found'), - exception.NodeLocked('locked')]) + mock_release.side_effect = [exception.NodeNotFound('not found'), + exception.NodeLocked('locked')] self.service._check_deploying_status(self.context) self.node.refresh() @@ 
-4693,8 +4693,7 @@ class ManagerCheckDeployingStatusTestCase(mgr_utils.ServiceSetUpMixin, self, mock_release, mock_off_cond, mock_mapped, mock_fail_if): mock_off_cond.return_value = ['fake-conductor'] mock_mapped.return_value = True - mock_release.side_effect = iter([ - exception.NodeNotLocked('not locked')]) + mock_release.side_effect = exception.NodeNotLocked('not locked') self.service._check_deploying_status(self.context) self.node.refresh() diff --git a/ironic/tests/unit/drivers/modules/amt/test_management.py b/ironic/tests/unit/drivers/modules/amt/test_management.py index 297fa0f2b..18703f48a 100644 --- a/ironic/tests/unit/drivers/modules/amt/test_management.py +++ b/ironic/tests/unit/drivers/modules/amt/test_management.py @@ -152,8 +152,7 @@ class AMTManagementTestCase(db_base.DbTestCase): def test_validate_fail(self, mock_drvinfo): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - mock_drvinfo.side_effect = iter( - [exception.InvalidParameterValue('x')]) + mock_drvinfo.side_effect = exception.InvalidParameterValue('x') self.assertRaises(exception.InvalidParameterValue, task.driver.management.validate, task) diff --git a/ironic/tests/unit/drivers/modules/amt/test_power.py b/ironic/tests/unit/drivers/modules/amt/test_power.py index 041ad2887..d2e9d4bba 100644 --- a/ironic/tests/unit/drivers/modules/amt/test_power.py +++ b/ironic/tests/unit/drivers/modules/amt/test_power.py @@ -128,7 +128,7 @@ class AMTPowerInteralMethodsTestCase(db_base.DbTestCase): mock_ps, mock_enbd): target_state = states.POWER_ON boot_device = boot_devices.PXE - mock_ps.side_effect = iter([states.POWER_OFF, states.POWER_ON]) + mock_ps.side_effect = [states.POWER_OFF, states.POWER_ON] mock_enbd.return_value = None with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: @@ -147,7 +147,7 @@ class AMTPowerInteralMethodsTestCase(db_base.DbTestCase): def test__set_and_wait_power_on_without_boot_device(self, mock_sps, mock_ps): target_state = 
states.POWER_ON - mock_ps.side_effect = iter([states.POWER_OFF, states.POWER_ON]) + mock_ps.side_effect = [states.POWER_OFF, states.POWER_ON] with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: self.assertEqual(states.POWER_ON, @@ -157,7 +157,7 @@ class AMTPowerInteralMethodsTestCase(db_base.DbTestCase): boot_device = boot_devices.DISK self.node.driver_internal_info['amt_boot_device'] = boot_device - mock_ps.side_effect = iter([states.POWER_OFF, states.POWER_ON]) + mock_ps.side_effect = [states.POWER_OFF, states.POWER_ON] with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: self.assertEqual(states.POWER_ON, @@ -179,8 +179,8 @@ class AMTPowerInteralMethodsTestCase(db_base.DbTestCase): def test__set_and_wait_exceed_iterations(self, mock_sps, mock_ps): target_state = states.POWER_ON - mock_ps.side_effect = iter([states.POWER_OFF, states.POWER_OFF, - states.POWER_OFF]) + mock_ps.side_effect = [states.POWER_OFF, states.POWER_OFF, + states.POWER_OFF] mock_sps.return_value = exception.AMTFailure('x') with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: @@ -194,7 +194,7 @@ class AMTPowerInteralMethodsTestCase(db_base.DbTestCase): autospec=True) def test__set_and_wait_already_target_state(self, mock_ps): target_state = states.POWER_ON - mock_ps.side_effect = iter([states.POWER_ON]) + mock_ps.side_effect = [states.POWER_ON] with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: self.assertEqual(states.POWER_ON, @@ -207,7 +207,7 @@ class AMTPowerInteralMethodsTestCase(db_base.DbTestCase): autospec=True) def test__set_and_wait_power_off(self, mock_sps, mock_ps): target_state = states.POWER_OFF - mock_ps.side_effect = iter([states.POWER_ON, states.POWER_OFF]) + mock_ps.side_effect = [states.POWER_ON, states.POWER_OFF] with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: self.assertEqual(states.POWER_OFF, @@ -245,8 +245,7 @@ class 
AMTPowerTestCase(db_base.DbTestCase): def test_validate_fail(self, mock_drvinfo): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - mock_drvinfo.side_effect = iter( - [exception.InvalidParameterValue('x')]) + mock_drvinfo.side_effect = exception.InvalidParameterValue('x') self.assertRaises(exception.InvalidParameterValue, task.driver.power.validate, task) @@ -277,7 +276,7 @@ class AMTPowerTestCase(db_base.DbTestCase): with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: pstate = states.POWER_ON - mock_saw.side_effect = iter([exception.PowerStateFailure('x')]) + mock_saw.side_effect = exception.PowerStateFailure('x') self.assertRaises(exception.PowerStateFailure, task.driver.power.set_power_state, task, pstate) diff --git a/ironic/tests/unit/drivers/modules/ilo/test_boot.py b/ironic/tests/unit/drivers/modules/ilo/test_boot.py index 1f4e31fda..4d3964c61 100644 --- a/ironic/tests/unit/drivers/modules/ilo/test_boot.py +++ b/ironic/tests/unit/drivers/modules/ilo/test_boot.py @@ -100,9 +100,8 @@ class IloBootPrivateMethodsTestCase(db_base.DbTestCase): @mock.patch.object(image_service.HttpImageService, 'validate_href', spec_set=True, autospec=True) def test__get_boot_iso_unsupported_url(self, validate_href_mock): - validate_href_mock.side_effect = iter( - [exception.ImageRefValidationFailed( - image_href='file://img.qcow2', reason='fail')]) + validate_href_mock.side_effect = exception.ImageRefValidationFailed( + image_href='file://img.qcow2', reason='fail') url = 'file://img.qcow2' i_info = self.node.instance_info i_info['ilo_boot_iso'] = url diff --git a/ironic/tests/unit/drivers/modules/ilo/test_deploy.py b/ironic/tests/unit/drivers/modules/ilo/test_deploy.py index 44451005c..877b1ccfd 100644 --- a/ironic/tests/unit/drivers/modules/ilo/test_deploy.py +++ b/ironic/tests/unit/drivers/modules/ilo/test_deploy.py @@ -225,9 +225,8 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase): mock_is_glance_image, 
mock_validate_href): deploy_iso = 'http://abc.org/image/qcow2' - mock_validate_href.side_effect = iter( - [exception.ImageRefValidationFailed( - image_href='http://abc.org/image/qcow2', reason='fail')]) + mock_validate_href.side_effect = exception.ImageRefValidationFailed( + image_href='http://abc.org/image/qcow2', reason='fail') mock_is_glance_image.return_value = False with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: diff --git a/ironic/tests/unit/drivers/modules/ilo/test_power.py b/ironic/tests/unit/drivers/modules/ilo/test_power.py index 4df4a8ab8..392a30cea 100644 --- a/ironic/tests/unit/drivers/modules/ilo/test_power.py +++ b/ironic/tests/unit/drivers/modules/ilo/test_power.py @@ -191,7 +191,7 @@ class IloPowerTestCase(db_base.DbTestCase): @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True, autospec=True) def test_validate_fail(self, mock_drvinfo): - side_effect = iter([exception.InvalidParameterValue("Invalid Input")]) + side_effect = exception.InvalidParameterValue("Invalid Input") mock_drvinfo.side_effect = side_effect with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: diff --git a/ironic/tests/unit/drivers/modules/irmc/test_boot.py b/ironic/tests/unit/drivers/modules/irmc/test_boot.py index a06ca4e7a..1e6dbe65a 100644 --- a/ironic/tests/unit/drivers/modules/irmc/test_boot.py +++ b/ironic/tests/unit/drivers/modules/irmc/test_boot.py @@ -448,7 +448,7 @@ class IRMCDeployPrivateMethodsTestCase(db_base.DbTestCase): mock_image_file_obj = mock.MagicMock() mock_image_file_obj.name = 'image-tmp-file' mock_image_file_handle.__enter__.return_value = mock_image_file_obj - tempfile_mock.side_effect = iter([mock_image_file_handle]) + tempfile_mock.side_effect = [mock_image_file_handle] deploy_args = {'arg1': 'val1', 'arg2': 'val2'} CONF.irmc.remote_image_share_name = '/remote_image_share_root' @@ -476,11 +476,11 @@ class IRMCDeployPrivateMethodsTestCase(db_base.DbTestCase): mock_image_file_obj = 
mock.MagicMock() mock_image_file_obj.name = 'image-tmp-file' mock_image_file_handle.__enter__.return_value = mock_image_file_obj - tempfile_mock.side_effect = iter([mock_image_file_handle]) + tempfile_mock.side_effect = [mock_image_file_handle] deploy_args = {'arg1': 'val1', 'arg2': 'val2'} CONF.irmc.remote_image_share_name = '/remote_image_share_root' - copyfile_mock.side_effect = iter([IOError("fake error")]) + copyfile_mock.side_effect = IOError("fake error") with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: diff --git a/ironic/tests/unit/drivers/modules/irmc/test_management.py b/ironic/tests/unit/drivers/modules/irmc/test_management.py index fb75b394f..f197692a4 100644 --- a/ironic/tests/unit/drivers/modules/irmc/test_management.py +++ b/ironic/tests/unit/drivers/modules/irmc/test_management.py @@ -70,7 +70,7 @@ class IRMCManagementTestCase(db_base.DbTestCase): @mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True, autospec=True) def test_validate_fail(self, mock_drvinfo): - side_effect = iter([exception.InvalidParameterValue("Invalid Input")]) + side_effect = exception.InvalidParameterValue("Invalid Input") mock_drvinfo.side_effect = side_effect with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: diff --git a/ironic/tests/unit/drivers/modules/irmc/test_power.py b/ironic/tests/unit/drivers/modules/irmc/test_power.py index 3bd82ff12..c1f22bffe 100644 --- a/ironic/tests/unit/drivers/modules/irmc/test_power.py +++ b/ironic/tests/unit/drivers/modules/irmc/test_power.py @@ -132,7 +132,7 @@ class IRMCPowerTestCase(db_base.DbTestCase): @mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True, autospec=True) def test_validate_fail(self, mock_drvinfo): - side_effect = iter([exception.InvalidParameterValue("Invalid Input")]) + side_effect = exception.InvalidParameterValue("Invalid Input") mock_drvinfo.side_effect = side_effect with task_manager.acquire(self.context, self.node.uuid, shared=True) 
as task: diff --git a/ironic/tests/unit/drivers/modules/msftocs/test_management.py b/ironic/tests/unit/drivers/modules/msftocs/test_management.py index f0ab604f6..aaa54a879 100644 --- a/ironic/tests/unit/drivers/modules/msftocs/test_management.py +++ b/ironic/tests/unit/drivers/modules/msftocs/test_management.py @@ -59,8 +59,7 @@ class MSFTOCSManagementTestCase(db_base.DbTestCase): def test_validate_fail(self, mock_drvinfo): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - mock_drvinfo.side_effect = iter( - [exception.InvalidParameterValue('x')]) + mock_drvinfo.side_effect = exception.InvalidParameterValue('x') self.assertRaises(exception.InvalidParameterValue, task.driver.power.validate, task) diff --git a/ironic/tests/unit/drivers/modules/msftocs/test_msftocsclient.py b/ironic/tests/unit/drivers/modules/msftocs/test_msftocsclient.py index 517429fc6..66097d05e 100644 --- a/ironic/tests/unit/drivers/modules/msftocs/test_msftocsclient.py +++ b/ironic/tests/unit/drivers/modules/msftocs/test_msftocsclient.py @@ -92,7 +92,7 @@ class MSFTOCSClientApiTestCase(base.TestCase): @mock.patch.object(requests, 'get', autospec=True) def test__exec_cmd_http_get_fail(self, mock_get): fake_rel_url = 'fake_rel_url' - mock_get.side_effect = iter([requests_exceptions.ConnectionError('x')]) + mock_get.side_effect = requests_exceptions.ConnectionError('x') self.assertRaises(exception.MSFTOCSClientApiException, self._client._exec_cmd, diff --git a/ironic/tests/unit/drivers/modules/msftocs/test_power.py b/ironic/tests/unit/drivers/modules/msftocs/test_power.py index e8f3c705f..38f07a4b9 100644 --- a/ironic/tests/unit/drivers/modules/msftocs/test_power.py +++ b/ironic/tests/unit/drivers/modules/msftocs/test_power.py @@ -58,8 +58,7 @@ class MSFTOCSPowerTestCase(db_base.DbTestCase): def test_validate_fail(self, mock_drvinfo): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - mock_drvinfo.side_effect = iter( - 
[exception.InvalidParameterValue('x')]) + mock_drvinfo.side_effect = exception.InvalidParameterValue('x') self.assertRaises(exception.InvalidParameterValue, task.driver.power.validate, task) diff --git a/ironic/tests/unit/drivers/modules/oneview/test_vendor.py b/ironic/tests/unit/drivers/modules/oneview/test_vendor.py index e379477e0..8c2da9353 100644 --- a/ironic/tests/unit/drivers/modules/oneview/test_vendor.py +++ b/ironic/tests/unit/drivers/modules/oneview/test_vendor.py @@ -111,7 +111,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): spec=types.FunctionType) def test_reboot_and_finish_deploy_soft_poweroff_fails( self, power_off_mock, node_power_action_mock): - power_off_mock.side_effect = iter([RuntimeError("boom")]) + power_off_mock.side_effect = RuntimeError("boom") self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() @@ -140,7 +140,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: - get_power_state_mock.side_effect = iter([RuntimeError("boom")]) + get_power_state_mock.side_effect = RuntimeError("boom") self.passthru.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(GET_POWER_STATE_RETRIES + 1, @@ -167,7 +167,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: get_power_state_mock.return_value = states.POWER_ON - node_power_action_mock.side_effect = iter([RuntimeError("boom")]) + node_power_action_mock.side_effect = RuntimeError("boom") self.assertRaises(exception.InstanceDeployFailure, self.passthru.reboot_and_finish_deploy, task) diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py index ea16bcdaa..514d98f7b 100644 --- a/ironic/tests/unit/drivers/modules/test_agent.py +++ 
b/ironic/tests/unit/drivers/modules/test_agent.py @@ -217,9 +217,8 @@ class TestAgentMethods(db_base.DbTestCase): autospec=True) def test_build_instance_info_for_deploy_nonsupported_image( self, validate_href_mock): - validate_href_mock.side_effect = iter( - [exception.ImageRefValidationFailed( - image_href='file://img.qcow2', reason='fail')]) + validate_href_mock.side_effect = exception.ImageRefValidationFailed( + image_href='file://img.qcow2', reason='fail') i_info = self.node.instance_info i_info['image_source'] = 'file://img.qcow2' i_info['image_checksum'] = 'aa' diff --git a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py index a1a18743d..9176aca74 100644 --- a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py +++ b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py @@ -324,7 +324,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): kwargs = { 'agent_url': 'http://127.0.0.1:9999/bar' } - done_mock.side_effect = iter([Exception('LlamaException')]) + done_mock.side_effect = Exception('LlamaException') with task_manager.acquire( self.context, self.node['uuid'], shared=False) as task: task.node.provision_state = states.DEPLOYWAIT @@ -617,7 +617,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): spec=types.FunctionType) def test_reboot_and_finish_deploy_soft_poweroff_fails( self, power_off_mock, node_power_action_mock): - power_off_mock.side_effect = iter([RuntimeError("boom")]) + power_off_mock.side_effect = RuntimeError("boom") self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() @@ -644,7 +644,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - get_power_state_mock.side_effect = iter([RuntimeError("boom")]) + get_power_state_mock.side_effect = RuntimeError("boom") self.passthru.reboot_and_finish_deploy(task) 
power_off_mock.assert_called_once_with(task.node) self.assertEqual(7, get_power_state_mock.call_count) @@ -668,7 +668,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: get_power_state_mock.return_value = states.POWER_ON - node_power_action_mock.side_effect = iter([RuntimeError("boom")]) + node_power_action_mock.side_effect = RuntimeError("boom") self.assertRaises(exception.InstanceDeployFailure, self.passthru.reboot_and_finish_deploy, task) @@ -824,7 +824,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): self, install_bootloader_mock, try_set_boot_device_mock): install_bootloader_mock.return_value = { 'command_status': 'SUCCESS', 'command_error': None} - try_set_boot_device_mock.side_effect = iter([RuntimeError('error')]) + try_set_boot_device_mock.side_effect = RuntimeError('error') self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() @@ -965,7 +965,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) def test__cleaning_reboot_fail(self, mock_reboot, mock_handler): - mock_reboot.side_effect = iter([RuntimeError("broken")]) + mock_reboot.side_effect = RuntimeError("broken") with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: diff --git a/ironic/tests/unit/drivers/modules/test_console_utils.py b/ironic/tests/unit/drivers/modules/test_console_utils.py index d23a95d36..66b79a52c 100644 --- a/ironic/tests/unit/drivers/modules/test_console_utils.py +++ b/ironic/tests/unit/drivers/modules/test_console_utils.py @@ -156,8 +156,7 @@ class ConsoleUtilsTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, '_get_console_pid', autospec=True) def test__stop_console_nopid(self, mock_pid, mock_kill, mock_unlink): pid_file = 
console_utils._get_console_pid_file(self.info['uuid']) - mock_pid.side_effect = iter( - [exception.NoConsolePid(pid_path="/tmp/blah")]) + mock_pid.side_effect = exception.NoConsolePid(pid_path="/tmp/blah") self.assertRaises(exception.NoConsolePid, console_utils._stop_console, @@ -291,7 +290,7 @@ class ConsoleUtilsTestCase(db_base.DbTestCase): mock_pid, mock_popen): # no existing PID file before starting - mock_stop.side_effect = iter([exception.NoConsolePid('/tmp/blah')]) + mock_stop.side_effect = exception.NoConsolePid('/tmp/blah') mock_popen.return_value.poll.return_value = 0 mock_pid.return_value = 12345 mock_pid_exists.return_value = True @@ -381,8 +380,7 @@ class ConsoleUtilsTestCase(db_base.DbTestCase): def test_start_shellinabox_console_fail_nopiddir(self, mock_stop, mock_dir_exists, mock_popen): - mock_dir_exists.side_effect = iter( - [exception.ConsoleError(message='fail')]) + mock_dir_exists.side_effect = exception.ConsoleError(message='fail') mock_popen.return_value.poll.return_value = 0 self.assertRaises(exception.ConsoleError, @@ -404,7 +402,7 @@ class ConsoleUtilsTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, '_stop_console', autospec=True) def test_stop_shellinabox_console_fail_nopid(self, mock_stop): - mock_stop.side_effect = iter([exception.NoConsolePid('/tmp/blah')]) + mock_stop.side_effect = exception.NoConsolePid('/tmp/blah') console_utils.stop_shellinabox_console(self.info['uuid']) diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index c44ad90ac..b64d2d9c8 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -1334,7 +1334,7 @@ class OtherFunctionTestCase(db_base.DbTestCase): mock_cache = mock.MagicMock( spec_set=['master_dir'], master_dir='master_dir') - mock_clean_up_caches.side_effect = iter([exc]) + mock_clean_up_caches.side_effect = [exc] 
self.assertRaises(exception.InstanceDeployFailure, utils.fetch_images, None, @@ -1549,8 +1549,7 @@ class TrySetBootDeviceTestCase(db_base.DbTestCase): self, node_set_boot_device_mock, log_mock): self.node.properties = {'capabilities': 'boot_mode:uefi'} self.node.save() - node_set_boot_device_mock.side_effect = iter( - [exception.IPMIFailure(cmd='a')]) + node_set_boot_device_mock.side_effect = exception.IPMIFailure(cmd='a') with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: utils.try_set_boot_device(task, boot_devices.DISK, @@ -1562,8 +1561,7 @@ class TrySetBootDeviceTestCase(db_base.DbTestCase): @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) def test_try_set_boot_device_ipmifailure_bios( self, node_set_boot_device_mock): - node_set_boot_device_mock.side_effect = iter( - [exception.IPMIFailure(cmd='a')]) + node_set_boot_device_mock.side_effect = exception.IPMIFailure(cmd='a') with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.IPMIFailure, @@ -1576,7 +1574,7 @@ class TrySetBootDeviceTestCase(db_base.DbTestCase): def test_try_set_boot_device_some_other_exception( self, node_set_boot_device_mock): exc = exception.IloOperationError(operation="qwe", error="error") - node_set_boot_device_mock.side_effect = iter([exc]) + node_set_boot_device_mock.side_effect = exc with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.IloOperationError, @@ -1987,9 +1985,8 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase): 'ramdisk': 'file://initrd', 'root_gb': 100, } - img_service_show_mock.side_effect = iter( - [exception.ImageRefValidationFailed( - image_href='http://ubuntu', reason='HTTPError')]) + img_service_show_mock.side_effect = exception.ImageRefValidationFailed( + image_href='http://ubuntu', reason='HTTPError') node = obj_utils.create_test_node( self.context, driver='fake_pxe', instance_info=instance_info, 
diff --git a/ironic/tests/unit/drivers/modules/test_iboot.py b/ironic/tests/unit/drivers/modules/test_iboot.py index 529dc9ba9..bcdb3286f 100644 --- a/ironic/tests/unit/drivers/modules/test_iboot.py +++ b/ironic/tests/unit/drivers/modules/test_iboot.py @@ -402,7 +402,7 @@ class IBootDriverTestCase(db_base.DbTestCase): @mock.patch.object(iboot, '_parse_driver_info', autospec=True) def test_validate_fails(self, parse_drv_info_mock): - side_effect = iter([exception.InvalidParameterValue("Bad input")]) + side_effect = exception.InvalidParameterValue("Bad input") parse_drv_info_mock.side_effect = side_effect with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: diff --git a/ironic/tests/unit/drivers/modules/test_inspector.py b/ironic/tests/unit/drivers/modules/test_inspector.py index 8504fa73f..132a2e4a1 100644 --- a/ironic/tests/unit/drivers/modules/test_inspector.py +++ b/ironic/tests/unit/drivers/modules/test_inspector.py @@ -229,8 +229,8 @@ class PeriodicTaskTestCase(BaseTestCase): def test_node_locked(self, mock_check, mock_acquire): iter_nodes_ret = [('1', 'd1'), ('2', 'd2')] - mock_acquire.side_effect = iter([exception.NodeLocked("boom")] * - len(iter_nodes_ret)) + mock_acquire.side_effect = ([exception.NodeLocked("boom")] * + len(iter_nodes_ret)) mgr = mock.MagicMock(spec=['iter_nodes']) mgr.iter_nodes.return_value = iter_nodes_ret inspector.Inspector()._periodic_check_result( diff --git a/ironic/tests/unit/drivers/modules/test_ipminative.py b/ironic/tests/unit/drivers/modules/test_ipminative.py index ca0e3e6b1..9ec8ed6e8 100644 --- a/ironic/tests/unit/drivers/modules/test_ipminative.py +++ b/ironic/tests/unit/drivers/modules/test_ipminative.py @@ -510,8 +510,8 @@ class IPMINativeDriverTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, 'start_shellinabox_console', autospec=True) def test_start_console_fail(self, mock_exec): - mock_exec.side_effect = iter( - [exception.ConsoleSubprocessFailed(error='error')]) + 
mock_exec.side_effect = exception.ConsoleSubprocessFailed( + error='error') with task_manager.acquire(self.context, self.node.uuid) as task: @@ -534,7 +534,7 @@ class IPMINativeDriverTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, 'stop_shellinabox_console', autospec=True) def test_stop_console_fail(self, mock_stop): - mock_stop.side_effect = iter([exception.ConsoleError()]) + mock_stop.side_effect = exception.ConsoleError() with task_manager.acquire(self.context, self.node.uuid) as task: diff --git a/ironic/tests/unit/drivers/modules/test_ipmitool.py b/ironic/tests/unit/drivers/modules/test_ipmitool.py index fb6d89874..aacc986f4 100644 --- a/ironic/tests/unit/drivers/modules/test_ipmitool.py +++ b/ironic/tests/unit/drivers/modules/test_ipmitool.py @@ -78,8 +78,7 @@ class IPMIToolCheckInitTestCase(base.TestCase): def test_power_init_calls_raises_1(self, mock_check_dir, mock_support): mock_support.return_value = True ipmi.TMP_DIR_CHECKED = None - mock_check_dir.side_effect = iter( - [exception.PathNotFound(dir="foo_dir")]) + mock_check_dir.side_effect = exception.PathNotFound(dir="foo_dir") self.assertRaises(exception.PathNotFound, ipmi.IPMIPower) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @@ -87,8 +86,8 @@ class IPMIToolCheckInitTestCase(base.TestCase): def test_power_init_calls_raises_2(self, mock_check_dir, mock_support): mock_support.return_value = True ipmi.TMP_DIR_CHECKED = None - mock_check_dir.side_effect = iter( - [exception.DirectoryNotWritable(dir="foo_dir")]) + mock_check_dir.side_effect = exception.DirectoryNotWritable( + dir="foo_dir") self.assertRaises(exception.DirectoryNotWritable, ipmi.IPMIPower) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @@ -96,8 +95,8 @@ class IPMIToolCheckInitTestCase(base.TestCase): def test_power_init_calls_raises_3(self, mock_check_dir, mock_support): mock_support.return_value = True ipmi.TMP_DIR_CHECKED = None - mock_check_dir.side_effect = 
iter([exception.InsufficientDiskSpace( - path="foo_dir", required=1, actual=0)]) + mock_check_dir.side_effect = exception.InsufficientDiskSpace( + path="foo_dir", required=1, actual=0) self.assertRaises(exception.InsufficientDiskSpace, ipmi.IPMIPower) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @@ -189,8 +188,7 @@ class IPMIToolCheckOptionSupportedTestCase(base.TestCase): self.assertEqual(expected, mock_support.call_args_list) def test_check_timing_fail(self, mock_chkcall, mock_support): - mock_chkcall.side_effect = iter( - [subprocess.CalledProcessError(1, 'ipmitool')]) + mock_chkcall.side_effect = subprocess.CalledProcessError(1, 'ipmitool') mock_support.return_value = None expected = [mock.call('timing'), mock.call('timing', False)] @@ -200,7 +198,7 @@ class IPMIToolCheckOptionSupportedTestCase(base.TestCase): self.assertEqual(expected, mock_support.call_args_list) def test_check_timing_no_ipmitool(self, mock_chkcall, mock_support): - mock_chkcall.side_effect = iter([OSError()]) + mock_chkcall.side_effect = OSError() mock_support.return_value = None expected = [mock.call('timing')] @@ -219,8 +217,7 @@ class IPMIToolCheckOptionSupportedTestCase(base.TestCase): self.assertEqual(expected, mock_support.call_args_list) def test_check_single_bridge_fail(self, mock_chkcall, mock_support): - mock_chkcall.side_effect = iter( - [subprocess.CalledProcessError(1, 'ipmitool')]) + mock_chkcall.side_effect = subprocess.CalledProcessError(1, 'ipmitool') mock_support.return_value = None expected = [mock.call('single_bridge'), mock.call('single_bridge', False)] @@ -231,7 +228,7 @@ class IPMIToolCheckOptionSupportedTestCase(base.TestCase): def test_check_single_bridge_no_ipmitool(self, mock_chkcall, mock_support): - mock_chkcall.side_effect = iter([OSError()]) + mock_chkcall.side_effect = OSError() mock_support.return_value = None expected = [mock.call('single_bridge')] @@ -251,8 +248,7 @@ class IPMIToolCheckOptionSupportedTestCase(base.TestCase): 
self.assertEqual(expected, mock_support.call_args_list) def test_check_dual_bridge_fail(self, mock_chkcall, mock_support): - mock_chkcall.side_effect = iter( - [subprocess.CalledProcessError(1, 'ipmitool')]) + mock_chkcall.side_effect = subprocess.CalledProcessError(1, 'ipmitool') mock_support.return_value = None expected = [mock.call('dual_bridge'), mock.call('dual_bridge', False)] @@ -262,7 +258,7 @@ class IPMIToolCheckOptionSupportedTestCase(base.TestCase): self.assertEqual(expected, mock_support.call_args_list) def test_check_dual_bridge_no_ipmitool(self, mock_chkcall, mock_support): - mock_chkcall.side_effect = iter([OSError()]) + mock_chkcall.side_effect = OSError() mock_support.return_value = None expected = [mock.call('dual_bridge')] @@ -286,8 +282,8 @@ class IPMIToolCheckOptionSupportedTestCase(base.TestCase): def test_check_all_options_fail(self, mock_chkcall, mock_support): options = ['timing', 'single_bridge', 'dual_bridge'] - mock_chkcall.side_effect = iter( - [subprocess.CalledProcessError(1, 'ipmitool')] * len(options)) + mock_chkcall.side_effect = [subprocess.CalledProcessError( + 1, 'ipmitool')] * len(options) mock_support.return_value = None expected = [ mock.call('timing'), mock.call('timing', False), @@ -301,7 +297,7 @@ class IPMIToolCheckOptionSupportedTestCase(base.TestCase): self.assertEqual(expected, mock_support.call_args_list) def test_check_all_options_no_ipmitool(self, mock_chkcall, mock_support): - mock_chkcall.side_effect = iter([OSError()]) + mock_chkcall.side_effect = OSError() mock_support.return_value = None # exception is raised once ipmitool was not found for an command expected = [mock.call('timing')] @@ -705,7 +701,7 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): expected = [mock.call('timing'), mock.call('timing')] mock_support.return_value = False - mock_exec.side_effect = iter([(None, None), (None, None)]) + mock_exec.side_effect = [(None, None), (None, None)] ipmi._exec_ipmitool(self.info, 'A B C') 
mock_exec.assert_called_with(*args[0]) @@ -742,7 +738,7 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): expected = [mock.call('timing'), mock.call('timing')] mock_support.return_value = False - mock_exec.side_effect = iter([(None, None), (None, None)]) + mock_exec.side_effect = [(None, None), (None, None)] ipmi._exec_ipmitool(self.info, 'A B C') mock_exec.assert_called_with(*args[0]) @@ -781,7 +777,7 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): expected = [mock.call('timing'), mock.call('timing')] mock_support.return_value = False - mock_exec.side_effect = iter([(None, None), (None, None)]) + mock_exec.side_effect = [(None, None), (None, None)] ipmi._exec_ipmitool(self.info, 'A B C') mock_exec.assert_called_with(*args[0]) @@ -1045,12 +1041,12 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): ipmi.LAST_CMD_TIME = {} mock_support.return_value = False - mock_exec.side_effect = iter([ + mock_exec.side_effect = [ processutils.ProcessExecutionError( stderr="insufficient resources for session" ), (None, None) - ]) + ] # Directly set the configuration values such that # the logic will cause _exec_ipmitool to retry twice. @@ -1070,9 +1066,9 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): ipmi.LAST_CMD_TIME = {} mock_support.return_value = False - mock_exec.side_effect = iter([processutils.ProcessExecutionError( + mock_exec.side_effect = [processutils.ProcessExecutionError( stderr="insufficient resources for session" - )]) + )] # Directly set the configuration values such that # the logic will cause _exec_ipmitool to timeout. @@ -1096,14 +1092,14 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): # Return a retryable error, then an error that cannot # be retried thus resulting in a single retry # attempt by _exec_ipmitool. 
- mock_exec.side_effect = iter([ + mock_exec.side_effect = [ processutils.ProcessExecutionError( stderr="insufficient resources for session" ), processutils.ProcessExecutionError( stderr="Unknown" ), - ]) + ] # Directly set the configuration values such that # the logic will cause _exec_ipmitool to retry up @@ -1196,8 +1192,7 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test__power_status_exception(self, mock_exec, mock_sleep): - mock_exec.side_effect = iter( - [processutils.ProcessExecutionError("error")]) + mock_exec.side_effect = processutils.ProcessExecutionError("error") self.assertRaises(exception.IPMIFailure, ipmi._power_status, self.info) @@ -1281,8 +1276,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test_get_power_state_exception(self, mock_exec): - mock_exec.side_effect = iter( - [processutils.ProcessExecutionError("error")]) + mock_exec.side_effect = processutils.ProcessExecutionError("error") with task_manager.acquire(self.context, self.node.uuid) as task: self.assertRaises(exception.IPMIFailure, self.driver.power.get_power_state, @@ -1371,8 +1365,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test_send_raw_bytes_fail(self, mock_exec): - mock_exec.side_effect = iter( - [exception.PasswordFileFailedToCreate('error')]) + mock_exec.side_effect = exception.PasswordFileFailedToCreate('error') with task_manager.acquire(self.context, self.node['uuid']) as task: @@ -1404,7 +1397,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test__bmc_reset_fail(self, mock_exec): - mock_exec.side_effect = iter([processutils.ProcessExecutionError()]) + mock_exec.side_effect = processutils.ProcessExecutionError() with task_manager.acquire(self.context, self.node['uuid']) as task: @@ -1452,7 
+1445,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(ipmi, '_parse_driver_info', autospec=True) def test_vendor_passthru_validate__parse_driver_info_fail(self, info_mock): - info_mock.side_effect = iter([exception.InvalidParameterValue("bad")]) + info_mock.side_effect = exception.InvalidParameterValue("bad") with task_manager.acquire(self.context, self.node['uuid']) as task: self.assertRaises(exception.InvalidParameterValue, self.driver.vendor.validate, @@ -1577,8 +1570,8 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, 'start_shellinabox_console', autospec=True) def test_start_console_fail(self, mock_exec): - mock_exec.side_effect = iter( - [exception.ConsoleSubprocessFailed(error='error')]) + mock_exec.side_effect = exception.ConsoleSubprocessFailed( + error='error') with task_manager.acquire(self.context, self.node['uuid']) as task: @@ -1589,7 +1582,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, 'start_shellinabox_console', autospec=True) def test_start_console_fail_nodir(self, mock_exec): - mock_exec.side_effect = iter([exception.ConsoleError()]) + mock_exec.side_effect = exception.ConsoleError() with task_manager.acquire(self.context, self.node.uuid) as task: @@ -1632,7 +1625,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, 'stop_shellinabox_console', autospec=True) def test_stop_console_fail(self, mock_stop): - mock_stop.side_effect = iter([exception.ConsoleError()]) + mock_stop.side_effect = exception.ConsoleError() with task_manager.acquire(self.context, self.node.uuid) as task: @@ -1712,7 +1705,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test_management_interface_set_boot_device_exec_failed(self, mock_exec): - mock_exec.side_effect = iter([processutils.ProcessExecutionError()]) + mock_exec.side_effect = processutils.ProcessExecutionError() 
with task_manager.acquire(self.context, self.node.uuid) as task: self.assertRaises(exception.IPMIFailure, self.driver.management.set_boot_device, @@ -1725,7 +1718,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): class FakeException(Exception): pass - mock_exec.side_effect = iter([FakeException('boom')]) + mock_exec.side_effect = FakeException('boom') with task_manager.acquire(self.context, self.node.uuid) as task: self.assertRaises(FakeException, self.driver.management.set_boot_device, @@ -1778,8 +1771,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test_management_interface_get_boot_device_fail(self, mock_exec): with task_manager.acquire(self.context, self.node.uuid) as task: - mock_exec.side_effect = iter( - [processutils.ProcessExecutionError()]) + mock_exec.side_effect = processutils.ProcessExecutionError() self.assertRaises(exception.IPMIFailure, task.driver.management.get_boot_device, task) mock_exec.assert_called_with(mock.ANY, "chassis bootparam get 5") diff --git a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py index 2a6e77146..b8d3ca06f 100644 --- a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py +++ b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py @@ -165,8 +165,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase): def test_continue_deploy_fail(self, deploy_mock, power_mock, mock_image_cache, mock_disk_layout): kwargs = {'address': '123456', 'iqn': 'aaa-bbb'} - deploy_mock.side_effect = iter([ - exception.InstanceDeployFailure("test deploy error")]) + deploy_mock.side_effect = exception.InstanceDeployFailure( + "test deploy error") self.node.provision_state = states.DEPLOYWAIT self.node.target_provision_state = states.ACTIVE self.node.save() diff --git a/ironic/tests/unit/drivers/modules/test_pxe.py b/ironic/tests/unit/drivers/modules/test_pxe.py index 873aca35f..97785b418 100644 --- 
a/ironic/tests/unit/drivers/modules/test_pxe.py +++ b/ironic/tests/unit/drivers/modules/test_pxe.py @@ -626,7 +626,7 @@ class PXEBootTestCase(db_base.DbTestCase): @mock.patch.object(base_image_service.BaseImageService, '_show', autospec=True) def test_validate_fail_glance_image_doesnt_exists(self, mock_glance): - mock_glance.side_effect = iter([exception.ImageNotFound('not found')]) + mock_glance.side_effect = exception.ImageNotFound('not found') with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: self.assertRaises(exception.InvalidParameterValue, @@ -638,7 +638,7 @@ class PXEBootTestCase(db_base.DbTestCase): exceptions = (exception.GlanceConnectionFailed('connection fail'), exception.ImageNotAuthorized('not authorized'), exception.Invalid('invalid')) - mock_glance.side_effect = iter(exceptions) + mock_glance.side_effect = exceptions for exc in exceptions: with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: diff --git a/ironic/tests/unit/drivers/modules/test_seamicro.py b/ironic/tests/unit/drivers/modules/test_seamicro.py index 707f74c29..f2b2489cb 100644 --- a/ironic/tests/unit/drivers/modules/test_seamicro.py +++ b/ironic/tests/unit/drivers/modules/test_seamicro.py @@ -331,7 +331,7 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase): @mock.patch.object(seamicro, '_parse_driver_info', autospec=True) def test_power_interface_validate_fails(self, parse_drv_info_mock): - side_effect = iter([exception.InvalidParameterValue("Bad input")]) + side_effect = exception.InvalidParameterValue("Bad input") parse_drv_info_mock.side_effect = side_effect with task_manager.acquire(self.context, self.node['uuid'], shared=True) as task: @@ -415,7 +415,7 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase): @mock.patch.object(seamicro, '_parse_driver_info', autospec=True) def test_vendor_passthru_validate_parse_driver_info_fail(self, mock_info): - mock_info.side_effect = iter([exception.InvalidParameterValue("bad")]) + 
mock_info.side_effect = exception.InvalidParameterValue("bad") with task_manager.acquire(self.context, self.node['uuid'], shared=True) as task: method = list(task.driver.vendor.vendor_routes)[0] @@ -630,8 +630,8 @@ class SeaMicroDriverTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, 'start_shellinabox_console', autospec=True) def test_start_console_fail(self, mock_exec): - mock_exec.side_effect = iter( - [exception.ConsoleSubprocessFailed(error='error')]) + mock_exec.side_effect = exception.ConsoleSubprocessFailed( + error='error') with task_manager.acquire(self.context, self.node.uuid) as task: @@ -652,7 +652,7 @@ class SeaMicroDriverTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, 'stop_shellinabox_console', autospec=True) def test_stop_console_fail(self, mock_stop): - mock_stop.side_effect = iter([exception.ConsoleError()]) + mock_stop.side_effect = exception.ConsoleError() with task_manager.acquire(self.context, self.node.uuid) as task: @@ -665,7 +665,7 @@ class SeaMicroDriverTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, 'start_shellinabox_console', autospec=True) def test_start_console_fail_nodir(self, mock_exec): - mock_exec.side_effect = iter([exception.ConsoleError()]) + mock_exec.side_effect = exception.ConsoleError() with task_manager.acquire(self.context, self.node.uuid) as task: diff --git a/ironic/tests/unit/drivers/modules/test_ssh.py b/ironic/tests/unit/drivers/modules/test_ssh.py index 49786942a..f4e302db7 100644 --- a/ironic/tests/unit/drivers/modules/test_ssh.py +++ b/ironic/tests/unit/drivers/modules/test_ssh.py @@ -209,8 +209,7 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): @mock.patch.object(utils, 'ssh_connect', autospec=True) def test__get_connection_exception(self, ssh_connect_mock): - ssh_connect_mock.side_effect = iter( - [exception.SSHConnectFailed(host='fake')]) + ssh_connect_mock.side_effect = exception.SSHConnectFailed(host='fake') self.assertRaises(exception.SSHConnectFailed, 
ssh._get_connection, self.node) @@ -325,8 +324,8 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'], info['cmd_set']['get_node_macs']) cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName') - exec_ssh_mock.side_effect = iter([('NodeName', ''), - ('52:54:00:cf:2d:31', '')]) + exec_ssh_mock.side_effect = [('NodeName', ''), + ('52:54:00:cf:2d:31', '')] expected = [mock.call(self.sshclient, ssh_cmd), mock.call(self.sshclient, cmd_to_exec)] @@ -341,8 +340,8 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): self.config(group='ssh', get_vm_name_retry_interval=0) info = ssh._parse_driver_info(self.node) info['macs'] = ["11:11:11:11:11:11", "22:22:22:22:22:22"] - exec_ssh_mock.side_effect = iter([('NodeName', ''), - ('52:54:00:cf:2d:31', '')] * 2) + exec_ssh_mock.side_effect = ([('NodeName', ''), + ('52:54:00:cf:2d:31', '')] * 2) ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'], info['cmd_set']['list_all']) @@ -364,10 +363,10 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): self.config(group='ssh', get_vm_name_retry_interval=0) info = ssh._parse_driver_info(self.node) info['macs'] = ["11:11:11:11:11:11", "22:22:22:22:22:22"] - exec_ssh_mock.side_effect = iter([('NodeName', ''), - ('', ''), - ('NodeName', ''), - ('11:11:11:11:11:11', '')]) + exec_ssh_mock.side_effect = [('NodeName', ''), + ('', ''), + ('NodeName', ''), + ('11:11:11:11:11:11', '')] ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'], info['cmd_set']['list_all']) @@ -395,8 +394,8 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): info['cmd_set']['get_node_macs']) cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName') - exec_ssh_mock.side_effect = iter( - [('NodeName', ''), processutils.ProcessExecutionError]) + exec_ssh_mock.side_effect = [('NodeName', ''), + processutils.ProcessExecutionError] expected = [mock.call(self.sshclient, ssh_cmd), mock.call(self.sshclient, cmd_to_exec)] @@ -414,8 +413,8 @@ class 
SSHPrivateMethodsTestCase(db_base.DbTestCase): info = ssh._parse_driver_info(self.node) info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"] - get_power_status_mock.side_effect = iter([states.POWER_OFF, - states.POWER_ON]) + get_power_status_mock.side_effect = [states.POWER_OFF, + states.POWER_ON] get_hosts_name_mock.return_value = "NodeName" expected = [mock.call(self.sshclient, info), mock.call(self.sshclient, info)] @@ -437,8 +436,8 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): exec_ssh_mock): info = ssh._parse_driver_info(self.node) info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"] - get_power_status_mock.side_effect = iter([states.POWER_OFF, - states.POWER_OFF]) + get_power_status_mock.side_effect = ([states.POWER_OFF, + states.POWER_OFF]) get_hosts_name_mock.return_value = "NodeName" expected = [mock.call(self.sshclient, info), mock.call(self.sshclient, info)] @@ -462,8 +461,8 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"] exec_ssh_mock.side_effect = processutils.ProcessExecutionError - get_power_status_mock.side_effect = iter([states.POWER_OFF, - states.POWER_ON]) + get_power_status_mock.side_effect = ([states.POWER_OFF, + states.POWER_ON]) get_hosts_name_mock.return_value = "NodeName" cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'], @@ -485,8 +484,8 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): get_power_status_mock, exec_ssh_mock): info = ssh._parse_driver_info(self.node) info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"] - get_power_status_mock.side_effect = iter([states.POWER_ON, - states.POWER_OFF]) + get_power_status_mock.side_effect = [states.POWER_ON, + states.POWER_OFF] get_hosts_name_mock.return_value = "NodeName" expected = [mock.call(self.sshclient, info), mock.call(self.sshclient, info)] @@ -508,8 +507,8 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): get_power_status_mock, exec_ssh_mock): info = 
ssh._parse_driver_info(self.node) info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"] - get_power_status_mock.side_effect = iter([states.POWER_ON, - states.POWER_ON]) + get_power_status_mock.side_effect = [states.POWER_ON, + states.POWER_ON] get_hosts_name_mock.return_value = "NodeName" expected = [mock.call(self.sshclient, info), mock.call(self.sshclient, info)] @@ -532,8 +531,8 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase): info = ssh._parse_driver_info(self.node) info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"] exec_ssh_mock.side_effect = processutils.ProcessExecutionError - get_power_status_mock.side_effect = iter([states.POWER_ON, - states.POWER_OFF]) + get_power_status_mock.side_effect = [states.POWER_ON, + states.POWER_OFF] get_hosts_name_mock.return_value = "NodeName" cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'], @@ -562,8 +561,7 @@ class SSHDriverTestCase(db_base.DbTestCase): @mock.patch.object(utils, 'ssh_connect', autospec=True) def test__validate_info_ssh_connect_failed(self, ssh_connect_mock): - ssh_connect_mock.side_effect = iter( - [exception.SSHConnectFailed(host='fake')]) + ssh_connect_mock.side_effect = exception.SSHConnectFailed(host='fake') with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.InvalidParameterValue, diff --git a/ironic/tests/unit/drivers/modules/ucs/test_power.py b/ironic/tests/unit/drivers/modules/ucs/test_power.py index 98a68b7ca..3c16847f4 100644 --- a/ironic/tests/unit/drivers/modules/ucs/test_power.py +++ b/ironic/tests/unit/drivers/modules/ucs/test_power.py @@ -65,7 +65,7 @@ class UcsPowerTestCase(db_base.DbTestCase): @mock.patch.object(ucs_helper, 'parse_driver_info', spec_set=True, autospec=True) def test_validate_fail(self, mock_parse_driver_info): - side_effect = iter([exception.InvalidParameterValue('Invalid Input')]) + side_effect = exception.InvalidParameterValue('Invalid Input') mock_parse_driver_info.side_effect = side_effect with 
task_manager.acquire(self.context, self.node.uuid, shared=True) as task: diff --git a/ironic/tests/unit/drivers/test_irmc.py b/ironic/tests/unit/drivers/test_irmc.py index c4549d56c..cd9fbfdf8 100644 --- a/ironic/tests/unit/drivers/test_irmc.py +++ b/ironic/tests/unit/drivers/test_irmc.py @@ -61,8 +61,8 @@ class IRMCVirtualMediaIscsiTestCase(testtools.TestCase): @mock.patch.object(irmc.boot.IRMCVirtualMediaBoot, '__init__', spec_set=True, autospec=True) def test___init___share_fs_not_mounted_exception(self, __init___mock): - __init___mock.side_effect = iter( - [exception.IRMCSharedFileSystemNotMounted(share='/share')]) + __init___mock.side_effect = exception.IRMCSharedFileSystemNotMounted( + share='/share') self.assertRaises(exception.IRMCSharedFileSystemNotMounted, irmc.IRMCVirtualMediaIscsiDriver) @@ -104,8 +104,8 @@ class IRMCVirtualMediaAgentTestCase(testtools.TestCase): @mock.patch.object(irmc.boot.IRMCVirtualMediaBoot, '__init__', spec_set=True, autospec=True) def test___init___share_fs_not_mounted_exception(self, __init___mock): - __init___mock.side_effect = iter([ - exception.IRMCSharedFileSystemNotMounted(share='/share')]) + __init___mock.side_effect = exception.IRMCSharedFileSystemNotMounted( + share='/share') self.assertRaises(exception.IRMCSharedFileSystemNotMounted, irmc.IRMCVirtualMediaAgentDriver) diff --git a/ironic/tests/unit/objects/test_node.py b/ironic/tests/unit/objects/test_node.py index 8d972d180..34690e8a6 100644 --- a/ironic/tests/unit/objects/test_node.py +++ b/ironic/tests/unit/objects/test_node.py @@ -127,8 +127,7 @@ class TestNodeObject(base.DbTestCase): with mock.patch.object(self.dbapi, 'reserve_node', autospec=True) as mock_reserve: node_id = 'non-existent' - mock_reserve.side_effect = iter( - [exception.NodeNotFound(node=node_id)]) + mock_reserve.side_effect = exception.NodeNotFound(node=node_id) self.assertRaises(exception.NodeNotFound, objects.Node.reserve, self.context, 'fake-tag', node_id) @@ -145,8 +144,7 @@ class 
TestNodeObject(base.DbTestCase): with mock.patch.object(self.dbapi, 'release_node', autospec=True) as mock_release: node_id = 'non-existent' - mock_release.side_effect = iter( - [exception.NodeNotFound(node=node_id)]) + mock_release.side_effect = exception.NodeNotFound(node=node_id) self.assertRaises(exception.NodeNotFound, objects.Node.release, self.context, 'fake-tag', node_id) From 790fcef4d131866cfa4f05600d538392ee7d2b1e Mon Sep 17 00:00:00 2001 From: zhufl Date: Thu, 7 Jul 2016 14:11:01 +0800 Subject: [PATCH 045/166] Remove unused LOG This is to remove unused LOG to keep code clean. Change-Id: I1532f36edd97a2c01004b17f878f292d851469a9 --- ironic/common/neutron.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py index dcd75e6e0..99d73043e 100644 --- a/ironic/common/neutron.py +++ b/ironic/common/neutron.py @@ -12,14 +12,10 @@ from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg -from oslo_log import log from ironic.common.i18n import _ from ironic.common import keystone - -LOG = log.getLogger(__name__) - CONF = cfg.CONF CONF.import_opt('my_ip', 'ironic.netconf') From e5c23a2b9c6e026c318a249880b92629f6cf13da Mon Sep 17 00:00:00 2001 From: Miles Gould Date: Thu, 7 Jul 2016 13:02:09 +0100 Subject: [PATCH 046/166] Fix two types in ironic.conf.sample I ran `tox -e genconfig` with a re-built virtualenv, and this was the result. 
Change-Id: I54b2376411524f5b9758a771b76fd735013f69f1 --- etc/ironic/ironic.conf.sample | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 7f62ff60b..1ff5b0974 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -1410,12 +1410,12 @@ # (list value) #hash_algorithms = md5 -# Authentication type to load (unknown value) +# Authentication type to load (string value) # Deprecated group/name - [keystone_authtoken]/auth_plugin #auth_type = # Config Section from which to load plugin specific options -# (unknown value) +# (string value) #auth_section = From 8458f9bccffd6ae33693986e28afa5c5dc5f977b Mon Sep 17 00:00:00 2001 From: Mario Villaplana Date: Tue, 5 Jul 2016 21:09:03 +0000 Subject: [PATCH 047/166] Document API max_limit configuration option The max_limit option in the [api] section of ironic.conf is not obviously pointed out in API documentation. This may mislead a user into believing that using the "limit" parameter when listing resources from the API may return more resources than the API configuration actually allows. This change adds documentation regarding how the max_limit option affects API usage. Change-Id: Ic55b31758b144aa8ef8b048f3dd90a4ffeee6fa4 Closes-Bug: 1597834 --- api-ref/source/parameters.yaml | 12 +++++++----- ironic/api/controllers/v1/chassis.py | 6 ++++++ ironic/api/controllers/v1/node.py | 6 ++++++ ironic/api/controllers/v1/port.py | 6 ++++++ 4 files changed, 25 insertions(+), 5 deletions(-) diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml index 20c9192ac..dde007698 100644 --- a/api-ref/source/parameters.yaml +++ b/api-ref/source/parameters.yaml @@ -86,11 +86,13 @@ fields: type: array limit: description: | - Requests a page size of items. Returns a number - of items up to a limit value. 
Use the ``limit`` parameter to make - an initial limited request and use the ID of the last-seen item - from the response as the ``marker`` parameter value in a - subsequent limited request. + Requests a page size of items. Returns a number of items up to a limit + value. Use the ``limit`` parameter to make an initial limited request and + use the ID of the last-seen item from the response as the ``marker`` + parameter value in a subsequent limited request. This value cannot be + larger than the ``max_limit`` option in the ``[api]`` section of the + configuration. If it is higher than ``max_limit``, only ``max-limit`` + resources will be returned. in: query required: false type: integer diff --git a/ironic/api/controllers/v1/chassis.py b/ironic/api/controllers/v1/chassis.py index 4c5ffe5e3..090f684ff 100644 --- a/ironic/api/controllers/v1/chassis.py +++ b/ironic/api/controllers/v1/chassis.py @@ -198,6 +198,9 @@ class ChassisController(rest.RestController): :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. + This value cannot be larger than the value of max_limit + in the [api] section of the ironic configuration, or only + max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields @@ -216,6 +219,9 @@ class ChassisController(rest.RestController): :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. + This value cannot be larger than the value of max_limit + in the [api] section of the ironic configuration, or only + max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
""" diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py index 0be3bb624..dcb4846f1 100644 --- a/ironic/api/controllers/v1/node.py +++ b/ironic/api/controllers/v1/node.py @@ -1119,6 +1119,9 @@ class NodesController(rest.RestController): that provision state. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. + This value cannot be larger than the value of max_limit + in the [api] section of the ironic configuration, or only + max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param driver: Optional string value to get only nodes using that @@ -1159,6 +1162,9 @@ class NodesController(rest.RestController): that provision state. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. + This value cannot be larger than the value of max_limit + in the [api] section of the ironic configuration, or only + max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param driver: Optional string value to get only nodes using that diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py index e8f3abc14..baedd156e 100644 --- a/ironic/api/controllers/v1/port.py +++ b/ironic/api/controllers/v1/port.py @@ -264,6 +264,9 @@ class PortsController(rest.RestController): this MAC address. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. + This value cannot be larger than the value of max_limit + in the [api] section of the ironic configuration, or only + max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". 
Default: asc. :param fields: Optional, a list with a specified set of fields @@ -303,6 +306,9 @@ class PortsController(rest.RestController): this MAC address. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. + This value cannot be larger than the value of max_limit + in the [api] section of the ironic configuration, or only + max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ From dbdd01bda219285fd5a9a564eb7e136157413188 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Tue, 12 Apr 2016 12:25:50 -0700 Subject: [PATCH 048/166] Centralize config options - [glance] Nova style refactor of config options in Ironic. Change-Id: If91ab834f32f571e9c669ec5888bfc8de03bb2e0 Partial-Bug: #1561100 --- .../common/glance_service/v2/image_service.py | 97 +----------- ironic/common/image_service.py | 46 +----- ironic/conf/__init__.py | 2 + ironic/conf/glance.py | 147 ++++++++++++++++++ ironic/conf/opts.py | 6 +- 5 files changed, 154 insertions(+), 144 deletions(-) create mode 100644 ironic/conf/glance.py diff --git a/ironic/common/glance_service/v2/image_service.py b/ironic/common/glance_service/v2/image_service.py index 95c0f2af7..02c41de87 100644 --- a/ironic/common/glance_service/v2/image_service.py +++ b/ironic/common/glance_service/v2/image_service.py @@ -16,7 +16,6 @@ import collections import time -from oslo_config import cfg from oslo_utils import uuidutils import six from six.moves.urllib import parse as urlparse @@ -27,101 +26,7 @@ from ironic.common.glance_service import base_image_service from ironic.common.glance_service import service from ironic.common.glance_service import service_utils from ironic.common.i18n import _ - - -glance_opts = [ - cfg.ListOpt('allowed_direct_url_schemes', - default=[], - help=_('A list of URL schemes that can be downloaded directly ' - 'via 
the direct_url. Currently supported schemes: ' - '[file].')), - # To upload this key to Swift: - # swift post -m Temp-Url-Key:secretkey - # When using radosgw, temp url key could be uploaded via the above swift - # command, or with: - # radosgw-admin user modify --uid=user --temp-url-key=secretkey - cfg.StrOpt('swift_temp_url_key', - help=_('The secret token given to Swift to allow temporary URL ' - 'downloads. Required for temporary URLs.'), - secret=True), - cfg.IntOpt('swift_temp_url_duration', - default=1200, - help=_('The length of time in seconds that the temporary URL ' - 'will be valid for. Defaults to 20 minutes. If some ' - 'deploys get a 401 response code when trying to ' - 'download from the temporary URL, try raising this ' - 'duration. This value must be greater than or equal to ' - 'the value for ' - 'swift_temp_url_expected_download_start_delay')), - cfg.BoolOpt('swift_temp_url_cache_enabled', - default=False, - help=_('Whether to cache generated Swift temporary URLs. ' - 'Setting it to true is only useful when an image ' - 'caching proxy is used. Defaults to False.')), - cfg.IntOpt('swift_temp_url_expected_download_start_delay', - default=0, min=0, - help=_('This is the delay (in seconds) from the time of the ' - 'deploy request (when the Swift temporary URL is ' - 'generated) to when the IPA ramdisk starts up and URL ' - 'is used for the image download. This value is used to ' - 'check if the Swift temporary URL duration is large ' - 'enough to let the image download begin. Also if ' - 'temporary URL caching is enabled this will determine ' - 'if a cached entry will still be valid when the ' - 'download starts. swift_temp_url_duration value must be ' - 'greater than or equal to this option\'s value. ' - 'Defaults to 0.')), - cfg.StrOpt( - 'swift_endpoint_url', - help=_('The "endpoint" (scheme, hostname, optional port) for ' - 'the Swift URL of the form ' - '"endpoint_url/api_version/[account/]container/object_id". ' - 'Do not include trailing "/". 
' - 'For example, use "https://swift.example.com". If using RADOS ' - 'Gateway, endpoint may also contain /swift path; if it does ' - 'not, it will be appended. Required for temporary URLs.')), - cfg.StrOpt( - 'swift_api_version', - default='v1', - help=_('The Swift API version to create a temporary URL for. ' - 'Defaults to "v1". Swift temporary URL format: ' - '"endpoint_url/api_version/[account/]container/object_id"')), - cfg.StrOpt( - 'swift_account', - help=_('The account that Glance uses to communicate with ' - 'Swift. The format is "AUTH_uuid". "uuid" is the ' - 'UUID for the account configured in the glance-api.conf. ' - 'Required for temporary URLs when Glance backend is Swift. ' - 'For example: "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". ' - 'Swift temporary URL format: ' - '"endpoint_url/api_version/[account/]container/object_id"')), - cfg.StrOpt( - 'swift_container', - default='glance', - help=_('The Swift container Glance is configured to store its ' - 'images in. Defaults to "glance", which is the default ' - 'in glance-api.conf. ' - 'Swift temporary URL format: ' - '"endpoint_url/api_version/[account/]container/object_id"')), - cfg.IntOpt('swift_store_multiple_containers_seed', - default=0, - help=_('This should match a config by the same name in the ' - 'Glance configuration file. When set to 0, a ' - 'single-tenant store will only use one ' - 'container to store all images. When set to an integer ' - 'value between 1 and 32, a single-tenant store will use ' - 'multiple containers to store images, and this value ' - 'will determine how many containers are created.')), - cfg.StrOpt('temp_url_endpoint_type', - default='swift', - choices=['swift', 'radosgw'], - help=_('Type of endpoint to use for temporary URLs. 
If the ' - 'Glance backend is Swift, use "swift"; if it is CEPH ' - 'with RADOS gateway, use "radosgw".')), -] - -CONF = cfg.CONF -CONF.register_opts(glance_opts, group='glance') +from ironic.conf import CONF TempUrlCacheElement = collections.namedtuple('TempUrlCacheElement', ['url', 'url_expires_at']) diff --git a/ironic/common/image_service.py b/ironic/common/image_service.py index 434e24fe0..4e219715e 100644 --- a/ironic/common/image_service.py +++ b/ironic/common/image_service.py @@ -20,7 +20,6 @@ import datetime import os import shutil -from oslo_config import cfg from oslo_utils import importutils import requests import sendfile @@ -32,53 +31,14 @@ from ironic.common import exception from ironic.common.i18n import _ from ironic.common import keystone from ironic.common import utils - +from ironic.conf import CONF IMAGE_CHUNK_SIZE = 1024 * 1024 # 1mb - -CONF = cfg.CONF -# Import this opt early so that it is available when registering -# glance_opts below. +# TODO(rama_y): This import should be removed, +# once https://review.openstack.org/#/c/309070 is merged. CONF.import_opt('my_ip', 'ironic.netconf') -glance_opts = [ - cfg.StrOpt('glance_host', - default='$my_ip', - help=_('Default glance hostname or IP address.')), - cfg.PortOpt('glance_port', - default=9292, - help=_('Default glance port.')), - cfg.StrOpt('glance_protocol', - default='http', - choices=['http', 'https'], - help=_('Default protocol to use when connecting to glance. ' - 'Set to https for SSL.')), - cfg.ListOpt('glance_api_servers', - help=_('A list of the glance api servers available to ironic. ' - 'Prefix with https:// for SSL-based glance API ' - 'servers. 
Format is [hostname|IP]:port.')), - cfg.BoolOpt('glance_api_insecure', - default=False, - help=_('Allow to perform insecure SSL (https) requests to ' - 'glance.')), - cfg.IntOpt('glance_num_retries', - default=0, - help=_('Number of retries when downloading an image from ' - 'glance.')), - cfg.StrOpt('auth_strategy', - default='keystone', - choices=['keystone', 'noauth'], - help=_('Authentication strategy to use when connecting to ' - 'glance.')), - cfg.StrOpt('glance_cafile', - help=_('Optional path to a CA certificate bundle to be used to ' - 'validate the SSL certificate served by glance. It is ' - 'used when glance_api_insecure is set to False.')), -] - -CONF.register_opts(glance_opts, group='glance') - def import_versioned_module(version, submodule=None): module = 'ironic.common.glance_service.v%s' % version diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index a8a57d11e..baff22891 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -21,6 +21,7 @@ from ironic.conf import conductor from ironic.conf import console from ironic.conf import database from ironic.conf import dhcp +from ironic.conf import glance from ironic.conf import iboot from ironic.conf import ilo from ironic.conf import inspector @@ -42,6 +43,7 @@ conductor.register_opts(CONF) console.register_opts(CONF) database.register_opts(CONF) dhcp.register_opts(CONF) +glance.register_opts(CONF) iboot.register_opts(CONF) ilo.register_opts(CONF) inspector.register_opts(CONF) diff --git a/ironic/conf/glance.py b/ironic/conf/glance.py new file mode 100644 index 000000000..a6312de4a --- /dev/null +++ b/ironic/conf/glance.py @@ -0,0 +1,147 @@ +# Copyright 2016 Intel Corporation +# Copyright 2010 OpenStack Foundation +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.ListOpt('allowed_direct_url_schemes', + default=[], + help=_('A list of URL schemes that can be downloaded directly ' + 'via the direct_url. Currently supported schemes: ' + '[file].')), + # To upload this key to Swift: + # swift post -m Temp-Url-Key:secretkey + # When using radosgw, temp url key could be uploaded via the above swift + # command, or with: + # radosgw-admin user modify --uid=user --temp-url-key=secretkey + cfg.StrOpt('swift_temp_url_key', + help=_('The secret token given to Swift to allow temporary URL ' + 'downloads. Required for temporary URLs.'), + secret=True), + cfg.IntOpt('swift_temp_url_duration', + default=1200, + help=_('The length of time in seconds that the temporary URL ' + 'will be valid for. Defaults to 20 minutes. If some ' + 'deploys get a 401 response code when trying to ' + 'download from the temporary URL, try raising this ' + 'duration. This value must be greater than or equal to ' + 'the value for ' + 'swift_temp_url_expected_download_start_delay')), + cfg.BoolOpt('swift_temp_url_cache_enabled', + default=False, + help=_('Whether to cache generated Swift temporary URLs. ' + 'Setting it to true is only useful when an image ' + 'caching proxy is used. 
Defaults to False.')), + cfg.IntOpt('swift_temp_url_expected_download_start_delay', + default=0, min=0, + help=_('This is the delay (in seconds) from the time of the ' + 'deploy request (when the Swift temporary URL is ' + 'generated) to when the IPA ramdisk starts up and URL ' + 'is used for the image download. This value is used to ' + 'check if the Swift temporary URL duration is large ' + 'enough to let the image download begin. Also if ' + 'temporary URL caching is enabled this will determine ' + 'if a cached entry will still be valid when the ' + 'download starts. swift_temp_url_duration value must be ' + 'greater than or equal to this option\'s value. ' + 'Defaults to 0.')), + cfg.StrOpt( + 'swift_endpoint_url', + help=_('The "endpoint" (scheme, hostname, optional port) for ' + 'the Swift URL of the form ' + '"endpoint_url/api_version/[account/]container/object_id". ' + 'Do not include trailing "/". ' + 'For example, use "https://swift.example.com". If using RADOS ' + 'Gateway, endpoint may also contain /swift path; if it does ' + 'not, it will be appended. Required for temporary URLs.')), + cfg.StrOpt( + 'swift_api_version', + default='v1', + help=_('The Swift API version to create a temporary URL for. ' + 'Defaults to "v1". Swift temporary URL format: ' + '"endpoint_url/api_version/[account/]container/object_id"')), + cfg.StrOpt( + 'swift_account', + help=_('The account that Glance uses to communicate with ' + 'Swift. The format is "AUTH_uuid". "uuid" is the ' + 'UUID for the account configured in the glance-api.conf. ' + 'Required for temporary URLs when Glance backend is Swift. ' + 'For example: "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". ' + 'Swift temporary URL format: ' + '"endpoint_url/api_version/[account/]container/object_id"')), + cfg.StrOpt( + 'swift_container', + default='glance', + help=_('The Swift container Glance is configured to store its ' + 'images in. Defaults to "glance", which is the default ' + 'in glance-api.conf. 
' + 'Swift temporary URL format: ' + '"endpoint_url/api_version/[account/]container/object_id"')), + cfg.IntOpt('swift_store_multiple_containers_seed', + default=0, + help=_('This should match a config by the same name in the ' + 'Glance configuration file. When set to 0, a ' + 'single-tenant store will only use one ' + 'container to store all images. When set to an integer ' + 'value between 1 and 32, a single-tenant store will use ' + 'multiple containers to store images, and this value ' + 'will determine how many containers are created.')), + cfg.StrOpt('temp_url_endpoint_type', + default='swift', + choices=['swift', 'radosgw'], + help=_('Type of endpoint to use for temporary URLs. If the ' + 'Glance backend is Swift, use "swift"; if it is CEPH ' + 'with RADOS gateway, use "radosgw".')), + cfg.StrOpt('glance_host', + default='$my_ip', + help=_('Default glance hostname or IP address.')), + cfg.PortOpt('glance_port', + default=9292, + help=_('Default glance port.')), + cfg.StrOpt('glance_protocol', + default='http', + choices=['http', 'https'], + help=_('Default protocol to use when connecting to glance. ' + 'Set to https for SSL.')), + cfg.ListOpt('glance_api_servers', + help=_('A list of the glance api servers available to ironic. ' + 'Prefix with https:// for SSL-based glance API ' + 'servers. Format is [hostname|IP]:port.')), + cfg.BoolOpt('glance_api_insecure', + default=False, + help=_('Allow to perform insecure SSL (https) requests to ' + 'glance.')), + cfg.IntOpt('glance_num_retries', + default=0, + help=_('Number of retries when downloading an image from ' + 'glance.')), + cfg.StrOpt('auth_strategy', + default='keystone', + choices=['keystone', 'noauth'], + help=_('Authentication strategy to use when connecting to ' + 'glance.')), + cfg.StrOpt('glance_cafile', + help=_('Optional path to a CA certificate bundle to be used to ' + 'validate the SSL certificate served by glance. 
It is ' + 'used when glance_api_insecure is set to False.')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='glance') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 10755b30a..604db652b 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -16,9 +16,7 @@ import ironic.api import ironic.api.app import ironic.common.driver_factory import ironic.common.exception -import ironic.common.glance_service.v2.image_service import ironic.common.hash_ring -import ironic.common.image_service import ironic.common.images import ironic.common.neutron import ironic.common.paths @@ -65,9 +63,7 @@ _opts = [ ('database', ironic.conf.database.opts), ('deploy', ironic.drivers.modules.deploy_utils.deploy_opts), ('dhcp', ironic.conf.dhcp.opts), - ('glance', itertools.chain( - ironic.common.glance_service.v2.image_service.glance_opts, - ironic.common.image_service.glance_opts)), + ('glance', ironic.conf.glance.opts), ('iboot', ironic.conf.iboot.opts), ('ilo', ironic.conf.ilo.opts), ('inspector', ironic.conf.inspector.opts), From 0a5bb693efe6a5b417c5ba9a912292b498438f84 Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Wed, 6 Jul 2016 20:32:15 +0300 Subject: [PATCH 049/166] Add internal_info field to ports and portgroups In case of ports, it is also added to the API, as a readonly field. It will be used for any port-specific internal information ironic needs to store inside the port object. In this change we start using it to store UUIDs of the cleaning ports that ironic creates, instead of fiddling with port.extra['vif_port_id'], as extra is intended for operator use only. 
Partial-bug: #1526403 Change-Id: Ib62c3e32305619d0c55f8ec7e45b067f0f0b32d4 --- doc/source/webapi/v1.rst | 5 +++ ironic/api/controllers/v1/port.py | 19 +++++++++- ironic/api/controllers/v1/utils.py | 9 +++++ ironic/api/controllers/v1/versions.py | 4 ++- ironic/common/network.py | 8 +++-- ...d4481e_add_port_portgroup_internal_info.py | 35 ++++++++++++++++++ ironic/db/sqlalchemy/models.py | 2 ++ ironic/dhcp/neutron.py | 5 ++- ironic/drivers/modules/deploy_utils.py | 26 +++++++++----- ironic/objects/port.py | 6 ++-- ironic/objects/portgroup.py | 4 ++- ironic/tests/unit/api/v1/test_ports.py | 36 ++++++++++++++++--- ironic/tests/unit/api/v1/test_utils.py | 7 ++++ ironic/tests/unit/common/test_network.py | 13 +++++++ .../unit/db/sqlalchemy/test_migrations.py | 12 +++++++ ironic/tests/unit/db/utils.py | 2 ++ ironic/tests/unit/dhcp/test_neutron.py | 16 +++++++++ .../unit/drivers/modules/test_deploy_utils.py | 21 ++++++++--- ironic/tests/unit/objects/test_objects.py | 4 +-- ...d-port-internal-info-b7e02889416570f7.yaml | 6 ++++ 20 files changed, 213 insertions(+), 27 deletions(-) create mode 100644 ironic/db/sqlalchemy/alembic/versions/10b163d4481e_add_port_portgroup_internal_info.py create mode 100644 releasenotes/notes/add-port-internal-info-b7e02889416570f7.yaml diff --git a/doc/source/webapi/v1.rst b/doc/source/webapi/v1.rst index bf17d4508..df79bb1da 100644 --- a/doc/source/webapi/v1.rst +++ b/doc/source/webapi/v1.rst @@ -32,6 +32,11 @@ always requests the newest supported API version. API Versions History -------------------- +**1.18** + + Add ``internal_info`` readonly field to the port object, that will be used + by ironic to store internal port-related information. 
+ **1.17** Addition of provision_state verb ``adopt`` which allows an operator diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py index e8f3abc14..ab0166b77 100644 --- a/ironic/api/controllers/v1/port.py +++ b/ironic/api/controllers/v1/port.py @@ -36,6 +36,12 @@ from ironic import objects _DEFAULT_RETURN_FIELDS = ('uuid', 'address') +def hide_fields_in_newer_versions(obj): + # if requested version is < 1.18, hide internal_info field + if not api_utils.allow_port_internal_info(): + obj.internal_info = wsme.Unset + + class Port(base.APIBase): """API representation of a port. @@ -77,6 +83,9 @@ class Port(base.APIBase): extra = {wtypes.text: types.jsontype} """This port's meta data""" + internal_info = wsme.wsattr({wtypes.text: types.jsontype}, readonly=True) + """This port's internal information maintained by ironic""" + node_uuid = wsme.wsproperty(types.uuid, _get_node_uuid, _set_node_uuid, mandatory=True) """The UUID of the node this port belongs to""" @@ -130,6 +139,8 @@ class Port(base.APIBase): if fields is not None: api_utils.check_for_invalid_fields(fields, port.as_dict()) + hide_fields_in_newer_versions(port) + return cls._convert_with_links(port, pecan.request.public_url, fields=fields) @@ -138,6 +149,7 @@ class Port(base.APIBase): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', address='fe:54:00:77:07:d9', extra={'foo': 'bar'}, + internal_info={}, created_at=datetime.datetime.utcnow(), updated_at=datetime.datetime.utcnow()) # NOTE(lucasagomes): node_uuid getter() method look at the @@ -151,6 +163,11 @@ class Port(base.APIBase): class PortPatchType(types.JsonPatchType): _api_base = Port + @staticmethod + def internal_attrs(): + defaults = types.JsonPatchType.internal_attrs() + return defaults + ['/internal_info'] + class PortCollection(collection.Collection): """API representation of a collection of ports.""" @@ -187,7 +204,7 @@ class PortsController(rest.RestController): 'detail': ['GET'], } - invalid_sort_key_list = 
['extra'] + invalid_sort_key_list = ['extra', 'internal_info'] def _get_ports_collection(self, node_ident, address, marker, limit, sort_key, sort_dir, resource_url=None, diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py index 027fd69e4..d85baa7fb 100644 --- a/ironic/api/controllers/v1/utils.py +++ b/ironic/api/controllers/v1/utils.py @@ -290,6 +290,15 @@ def allow_links_node_states_and_driver_properties(): versions.MINOR_14_LINKS_NODESTATES_DRIVERPROPERTIES) +def allow_port_internal_info(): + """Check if accessing internal_info is allowed for the port. + + Version 1.18 of the API exposes internal_info readonly field for the port. + """ + return (pecan.request.version.minor >= + versions.MINOR_18_PORT_INTERNAL_INFO) + + def get_controller_reserved_names(cls): """Get reserved names for a given controller. diff --git a/ironic/api/controllers/v1/versions.py b/ironic/api/controllers/v1/versions.py index 16d985946..8f86def24 100644 --- a/ironic/api/controllers/v1/versions.py +++ b/ironic/api/controllers/v1/versions.py @@ -47,6 +47,7 @@ BASE_VERSION = 1 # v1.15: Add ability to do manual cleaning of nodes # v1.16: Add ability to filter nodes by driver. # v1.17: Add 'adopt' verb for ADOPTING active nodes. +# v1.18: Add port.internal_info. MINOR_0_JUNO = 0 MINOR_1_INITIAL_VERSION = 1 @@ -66,11 +67,12 @@ MINOR_14_LINKS_NODESTATES_DRIVERPROPERTIES = 14 MINOR_15_MANUAL_CLEAN = 15 MINOR_16_DRIVER_FILTER = 16 MINOR_17_ADOPT_VERB = 17 +MINOR_18_PORT_INTERNAL_INFO = 18 # When adding another version, update MINOR_MAX_VERSION and also update # doc/source/webapi/v1.rst with a detailed explanation of what the version has # changed. 
-MINOR_MAX_VERSION = MINOR_17_ADOPT_VERB +MINOR_MAX_VERSION = MINOR_18_PORT_INTERNAL_INFO # String representations of the minor and maximum versions MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION) diff --git a/ironic/common/network.py b/ironic/common/network.py index 7d8b3eb56..301384bf9 100644 --- a/ironic/common/network.py +++ b/ironic/common/network.py @@ -32,12 +32,16 @@ def get_node_vif_ids(task): portgroup_vifs = {} port_vifs = {} for portgroup in task.portgroups: - vif = portgroup.extra.get('vif_port_id') + # NOTE(vdrok): This works because cleaning_vif_port_id doesn't exist + # when we're in deployment/tenant network + vif = (portgroup.internal_info.get('cleaning_vif_port_id') or + portgroup.extra.get('vif_port_id')) if vif: portgroup_vifs[portgroup.uuid] = vif vifs['portgroups'] = portgroup_vifs for port in task.ports: - vif = port.extra.get('vif_port_id') + vif = (port.internal_info.get('cleaning_vif_port_id') or + port.extra.get('vif_port_id')) if vif: port_vifs[port.uuid] = vif vifs['ports'] = port_vifs diff --git a/ironic/db/sqlalchemy/alembic/versions/10b163d4481e_add_port_portgroup_internal_info.py b/ironic/db/sqlalchemy/alembic/versions/10b163d4481e_add_port_portgroup_internal_info.py new file mode 100644 index 000000000..9dd7bfa2e --- /dev/null +++ b/ironic/db/sqlalchemy/alembic/versions/10b163d4481e_add_port_portgroup_internal_info.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""add port portgroup internal info + +Revision ID: 10b163d4481e +Revises: e294876e8028 +Create Date: 2016-07-06 17:43:55.846837 + +""" + +# revision identifiers, used by Alembic. +revision = '10b163d4481e' +down_revision = 'e294876e8028' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.add_column('ports', sa.Column('internal_info', + sa.Text(), + nullable=True)) + op.add_column('portgroups', sa.Column('internal_info', + sa.Text(), + nullable=True)) diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py index dddf23bea..911c01f9c 100644 --- a/ironic/db/sqlalchemy/models.py +++ b/ironic/db/sqlalchemy/models.py @@ -162,6 +162,7 @@ class Port(Base): local_link_connection = Column(db_types.JsonEncodedDict) portgroup_id = Column(Integer, ForeignKey('portgroups.id'), nullable=True) pxe_enabled = Column(Boolean, default=True) + internal_info = Column(db_types.JsonEncodedDict) class Portgroup(Base): @@ -179,6 +180,7 @@ class Portgroup(Base): node_id = Column(Integer, ForeignKey('nodes.id'), nullable=True) address = Column(String(18)) extra = Column(db_types.JsonEncodedDict) + internal_info = Column(db_types.JsonEncodedDict) class NodeTag(Base): diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py index 25f833ff0..15935a491 100644 --- a/ironic/dhcp/neutron.py +++ b/ironic/dhcp/neutron.py @@ -205,7 +205,10 @@ class NeutronDHCPApi(base.BaseDHCP): :raises: InvalidIPv4Address """ - vif = p_obj.extra.get('vif_port_id') + # NOTE(vdrok): This works because cleaning_vif_port_id doesn't exist + # when we're in deployment/tenant network + vif = (p_obj.internal_info.get('cleaning_vif_port_id') or + p_obj.extra.get('vif_port_id')) if not vif: obj_name = 'portgroup' if isinstance(p_obj, objects.Port): diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py index 38a16c2f8..b37bec11a 100644 --- a/ironic/drivers/modules/deploy_utils.py +++ b/ironic/drivers/modules/deploy_utils.py @@ -519,7 
+519,8 @@ def get_single_nic_with_vif_port_id(task): None if it cannot find any port with vif id. """ for port in task.ports: - if port.extra.get('vif_port_id'): + if (port.internal_info.get('cleaning_vif_port_id') or + port.extra.get('vif_port_id')): return port.address @@ -914,7 +915,7 @@ def prepare_cleaning_ports(task): This method deletes the cleaning ports currently existing for all the ports of the node and then creates a new one - for each one of them. It also adds 'vif_port_id' to port.extra + for each one of them. It also adds 'cleaning_vif_port_id' to internal_info of each Ironic port, after creating the cleaning ports. :param task: a TaskManager object containing the node @@ -932,12 +933,12 @@ def prepare_cleaning_ports(task): # Allow to raise if it fails, is caught and handled in conductor ports = provider.provider.create_cleaning_ports(task) - # Add vif_port_id for each of the ports because some boot + # Add cleaning_vif_port_id for each of the ports because some boot # interfaces expects these to prepare for booting ramdisk. for port in task.ports: - extra_dict = port.extra + internal_info = port.internal_info try: - extra_dict['vif_port_id'] = ports[port.uuid] + internal_info['cleaning_vif_port_id'] = ports[port.uuid] except KeyError: # This is an internal error in Ironic. 
All DHCP providers # implementing create_cleaning_ports are supposed to @@ -948,7 +949,7 @@ def prepare_cleaning_ports(task): raise exception.NodeCleaningFailure( node=task.node.uuid, reason=error) else: - port.extra = extra_dict + port.internal_info = internal_info port.save() @@ -969,9 +970,18 @@ def tear_down_cleaning_ports(task): provider.provider.delete_cleaning_ports(task) for port in task.ports: - if 'vif_port_id' in port.extra: + if 'cleaning_vif_port_id' in port.internal_info: + internal_info = port.internal_info + del internal_info['cleaning_vif_port_id'] + port.internal_info = internal_info + port.save() + elif 'vif_port_id' in port.extra: + # TODO(vdrok): This piece is left for backwards compatibility, + # if ironic was upgraded during cleaning, vif_port_id + # containing cleaning neutron port UUID should be cleared, + # remove in Ocata extra_dict = port.extra - extra_dict.pop('vif_port_id', None) + del extra_dict['vif_port_id'] port.extra = extra_dict port.save() diff --git a/ironic/objects/port.py b/ironic/objects/port.py index 7a0ede808..f8b49684c 100644 --- a/ironic/objects/port.py +++ b/ironic/objects/port.py @@ -34,7 +34,8 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat): # Version 1.4: Add list_by_node_id() # Version 1.5: Add list_by_portgroup_id() and new fields # local_link_connection, portgroup_id and pxe_enabled - VERSION = '1.5' + # Version 1.6: Add internal_info field + VERSION = '1.6' dbapi = dbapi.get_instance() @@ -47,7 +48,8 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat): 'local_link_connection': object_fields.FlexibleDictField( nullable=True), 'portgroup_id': object_fields.IntegerField(nullable=True), - 'pxe_enabled': object_fields.BooleanField() + 'pxe_enabled': object_fields.BooleanField(), + 'internal_info': object_fields.FlexibleDictField(nullable=True), } @staticmethod diff --git a/ironic/objects/portgroup.py b/ironic/objects/portgroup.py index 388ae2973..a3b5c6b90 100644 --- 
a/ironic/objects/portgroup.py +++ b/ironic/objects/portgroup.py @@ -27,7 +27,8 @@ from ironic.objects import fields as object_fields @base.IronicObjectRegistry.register class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat): # Version 1.0: Initial version - VERSION = '1.0' + # Version 1.1: Add internal_info field + VERSION = '1.1' dbapi = dbapi.get_instance() @@ -38,6 +39,7 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat): 'node_id': object_fields.IntegerField(nullable=True), 'address': object_fields.MACAddressField(nullable=True), 'extra': object_fields.FlexibleDictField(nullable=True), + 'internal_info': object_fields.FlexibleDictField(nullable=True), } @staticmethod diff --git a/ironic/tests/unit/api/v1/test_ports.py b/ironic/tests/unit/api/v1/test_ports.py index 2427a16ec..e3ab27a77 100644 --- a/ironic/tests/unit/api/v1/test_ports.py +++ b/ironic/tests/unit/api/v1/test_ports.py @@ -95,6 +95,18 @@ class TestListPorts(test_api_base.BaseApiTest): # We always append "links" self.assertItemsEqual(['address', 'extra', 'links'], data) + def test_hide_fields_in_newer_versions_internal_info(self): + port = obj_utils.create_test_port(self.context, node_id=self.node.id, + internal_info={"foo": "bar"}) + data = self.get_json( + '/ports/%s' % port.uuid, + headers={api_base.Version.string: str(api_v1.MIN_VER)}) + self.assertNotIn('internal_info', data) + + data = self.get_json('/ports/%s' % port.uuid, + headers={api_base.Version.string: "1.18"}) + self.assertEqual({"foo": "bar"}, data['internal_info']) + def test_get_collection_custom_fields(self): fields = 'uuid,extra' for i in range(3): @@ -133,10 +145,14 @@ class TestListPorts(test_api_base.BaseApiTest): self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) def test_detail(self): - port = obj_utils.create_test_port(self.context, node_id=self.node.id) - data = self.get_json('/ports/detail') + port = obj_utils.create_test_port(self.context, node_id=self.node.id,) + 
data = self.get_json( + '/ports/detail', + headers={api_base.Version.string: str(api_v1.MAX_VER)} + ) self.assertEqual(port.uuid, data['ports'][0]["uuid"]) self.assertIn('extra', data['ports'][0]) + self.assertIn('internal_info', data['ports'][0]) self.assertIn('node_uuid', data['ports'][0]) # never expose the node_id self.assertNotIn('node_id', data['ports'][0]) @@ -261,10 +277,12 @@ class TestListPorts(test_api_base.BaseApiTest): self.assertEqual(sorted(ports), uuids) def test_sort_key_invalid(self): - invalid_keys_list = ['foo', 'extra'] + invalid_keys_list = ['foo', 'extra', 'internal_info'] for invalid_key in invalid_keys_list: - response = self.get_json('/ports?sort_key=%s' % invalid_key, - expect_errors=True) + response = self.get_json( + '/ports?sort_key=%s' % invalid_key, expect_errors=True, + headers={api_base.Version.string: str(api_v1.MAX_VER)} + ) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn(invalid_key, response.json['error_message']) @@ -771,6 +789,14 @@ class TestPost(test_api_base.BaseApiTest): self.assertTrue(error_msg) self.assertIn(address, error_msg.upper()) + def test_create_port_with_internal_field(self): + pdict = post_get_test_port() + pdict['internal_info'] = {'a': 'b'} + response = self.post_json('/ports', pdict, expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertTrue(response.json['error_message']) + @mock.patch.object(rpcapi.ConductorAPI, 'destroy_port') class TestDelete(test_api_base.BaseApiTest): diff --git a/ironic/tests/unit/api/v1/test_utils.py b/ironic/tests/unit/api/v1/test_utils.py index 7c01dba22..1a897717b 100644 --- a/ironic/tests/unit/api/v1/test_utils.py +++ b/ironic/tests/unit/api/v1/test_utils.py @@ -193,6 +193,13 @@ class TestApiUtils(base.TestCase): mock_request.version.minor = 17 
utils.check_allow_management_verbs('adopt') + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_allow_port_internal_info(self, mock_request): + mock_request.version.minor = 18 + self.assertTrue(utils.allow_port_internal_info()) + mock_request.version.minor = 17 + self.assertFalse(utils.allow_port_internal_info()) + class TestNodeIdent(base.TestCase): diff --git a/ironic/tests/unit/common/test_network.py b/ironic/tests/unit/common/test_network.py index 8a318f2b8..e2240a993 100644 --- a/ironic/tests/unit/common/test_network.py +++ b/ironic/tests/unit/common/test_network.py @@ -94,3 +94,16 @@ class TestNetwork(db_base.DbTestCase): with task_manager.acquire(self.context, self.node.uuid) as task: result = network.get_node_vif_ids(task) self.assertEqual(expected, result) + + def test_get_node_vif_ids_during_cleaning(self): + port = db_utils.create_test_port( + node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', + internal_info={'cleaning_vif_port_id': 'test-vif-A'}) + portgroup = db_utils.create_test_portgroup( + node_id=self.node.id, address='dd:ee:ff:aa:bb:cc', + internal_info={'cleaning_vif_port_id': 'test-vif-B'}) + expected = {'portgroups': {portgroup.uuid: 'test-vif-B'}, + 'ports': {port.uuid: 'test-vif-A'}} + with task_manager.acquire(self.context, self.node.uuid) as task: + result = network.get_node_vif_ids(task) + self.assertEqual(expected, result) diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py index fbaec663b..2785d771e 100644 --- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py +++ b/ironic/tests/unit/db/sqlalchemy/test_migrations.py @@ -421,6 +421,18 @@ class MigrationCheckersMixin(object): self.assertIsInstance(nodes.c.network_interface.type, sqlalchemy.types.String) + def _check_10b163d4481e(self, engine, data): + ports = db_utils.get_table(engine, 'ports') + portgroups = db_utils.get_table(engine, 'portgroups') + port_col_names = [column.name for column in ports.c] 
+ portgroup_col_names = [column.name for column in portgroups.c] + self.assertIn('internal_info', port_col_names) + self.assertIn('internal_info', portgroup_col_names) + self.assertIsInstance(ports.c.internal_info.type, + sqlalchemy.types.TEXT) + self.assertIsInstance(portgroups.c.internal_info.type, + sqlalchemy.types.TEXT) + def test_upgrade_and_version(self): with patch_with_engine(self.engine): self.migration_api.upgrade('head') diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py index 5736f2e59..2e2285180 100644 --- a/ironic/tests/unit/db/utils.py +++ b/ironic/tests/unit/db/utils.py @@ -266,6 +266,7 @@ def get_test_port(**kw): 'switch_info': 'switch1'}), 'portgroup_id': kw.get('portgroup_id'), 'pxe_enabled': kw.get('pxe_enabled', True), + 'internal_info': kw.get('internal_info', {"bar": "buzz"}), } @@ -369,6 +370,7 @@ def get_test_portgroup(**kw): 'extra': kw.get('extra', {}), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), + 'internal_info': kw.get('internal_info', {"bar": "buzz"}), } diff --git a/ironic/tests/unit/dhcp/test_neutron.py b/ironic/tests/unit/dhcp/test_neutron.py index 658d33497..df576022f 100644 --- a/ironic/tests/unit/dhcp/test_neutron.py +++ b/ironic/tests/unit/dhcp/test_neutron.py @@ -341,6 +341,22 @@ class TestNeutron(db_base.DbTestCase): self.assertEqual(expected, result) mock_gfia.assert_called_once_with('test-vif-A', mock.sentinel.client) + @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address') + def test__get_port_ip_address_cleaning(self, mock_gfia): + expected = "192.168.1.3" + port = object_utils.create_test_port( + self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', + uuid=uuidutils.generate_uuid(), + internal_info={'cleaning_vif_port_id': 'test-vif-A'}) + mock_gfia.return_value = expected + with task_manager.acquire(self.context, + self.node.uuid) as task: + api = dhcp_factory.DHCPFactory().provider + result = api._get_port_ip_address(task, port, + 
mock.sentinel.client) + self.assertEqual(expected, result) + mock_gfia.assert_called_once_with('test-vif-A', mock.sentinel.client) + @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address') def test__get_port_ip_address_for_portgroup(self, mock_gfia): expected = "192.168.1.3" diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index b64d2d9c8..bb362b7c4 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -1394,6 +1394,17 @@ class VirtualMediaDeployUtilsTestCase(db_base.DbTestCase): address = utils.get_single_nic_with_vif_port_id(task) self.assertEqual('aa:bb:cc:dd:ee:ff', address) + def test_get_single_nic_with_cleaning_vif_port_id(self): + obj_utils.create_test_port( + self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', + uuid=uuidutils.generate_uuid(), + internal_info={'cleaning_vif_port_id': 'test-vif-A'}, + driver='iscsi_ilo') + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + address = utils.get_single_nic_with_vif_port_id(task) + self.assertEqual('aa:bb:cc:dd:ee:ff', address) + class ParseInstanceInfoCapabilitiesTestCase(tests_base.TestCase): @@ -1741,7 +1752,8 @@ class AgentMethodsTestCase(db_base.DbTestCase): delete_mock.assert_called_once_with(mock.ANY, task) self.ports[0].refresh() - self.assertEqual('vif-port-id', self.ports[0].extra['vif_port_id']) + self.assertEqual('vif-port-id', + self.ports[0].internal_info['cleaning_vif_port_id']) def test_prepare_inband_cleaning_ports(self): self._test_prepare_inband_cleaning_ports() @@ -1755,9 +1767,9 @@ class AgentMethodsTestCase(db_base.DbTestCase): @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports', autospec=True) def test_tear_down_inband_cleaning_ports(self, neutron_mock): - extra_dict = self.ports[0].extra - extra_dict['vif_port_id'] = 'vif-port-id' - self.ports[0].extra = 
extra_dict + internal_info = self.ports[0].internal_info + internal_info['cleaning_vif_port_id'] = 'vif-port-id-1' + self.ports[0].internal_info = internal_info self.ports[0].save() with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: @@ -1765,6 +1777,7 @@ class AgentMethodsTestCase(db_base.DbTestCase): neutron_mock.assert_called_once_with(mock.ANY, task) self.ports[0].refresh() + self.assertNotIn('cleaning_vif_port_id', self.ports[0].internal_info) self.assertNotIn('vif_port_id', self.ports[0].extra) @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True) diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py index 5affc4b60..2a99424a9 100644 --- a/ironic/tests/unit/objects/test_objects.py +++ b/ironic/tests/unit/objects/test_objects.py @@ -407,8 +407,8 @@ expected_object_fingerprints = { 'Node': '1.16-2a6646627cb937f083f428f5d54e6458', 'MyObj': '1.5-4f5efe8f0fcaf182bbe1c7fe3ba858db', 'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905', - 'Port': '1.5-a224755c3da5bc5cf1a14a11c0d00f3f', - 'Portgroup': '1.0-1ac4db8fa31edd9e1637248ada4c25a1', + 'Port': '1.6-609504503d68982a10f495659990084b', + 'Portgroup': '1.1-e57da9ca808d3696c34dad8125564696', 'Conductor': '1.1-5091f249719d4a465062a1b3dc7f860d' } diff --git a/releasenotes/notes/add-port-internal-info-b7e02889416570f7.yaml b/releasenotes/notes/add-port-internal-info-b7e02889416570f7.yaml new file mode 100644 index 000000000..d934bf5ef --- /dev/null +++ b/releasenotes/notes/add-port-internal-info-b7e02889416570f7.yaml @@ -0,0 +1,6 @@ +--- +features: + - A new dictionary field ``internal_info`` is added to the port API object. + It is readonly from the API side, and can contain any internal information + ironic needs to store for the port. ``cleaning_vif_port_id`` is being + stored inside this dictionary. 
From 219a6903298909e30fa9df6cf73c7cdf1287c11f Mon Sep 17 00:00:00 2001 From: Thiago Paiva Date: Fri, 10 Jun 2016 15:35:18 -0300 Subject: [PATCH 050/166] Allow to enroll nodes with oneview driver This patch allows importing nodes with *_oneview drivers from a space-separated nodes file. Change-Id: I6d8447fd0404f9746bbfb0e38bf3c6c66b7ab1f8 --- devstack/lib/ironic | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index f0555188c..cf1094c8a 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -82,6 +82,9 @@ IRONIC_HW_ARCH=${IRONIC_HW_ARCH:-x86_64} # *_ucs: # # +# *_oneview: +# +# # IRONIC_IPMIINFO_FILE is deprecated, please use IRONIC_HWINFO_FILE. IRONIC_IPMIINFO_FILE will be removed in Ocata. IRONIC_IPMIINFO_FILE=${IRONIC_IPMIINFO_FILE:-""} if [ ! -z "$IRONIC_IPMIINFO_FILE" ]; then @@ -182,7 +185,7 @@ IRONIC_DIB_RAMDISK_OPTIONS=${IRONIC_DIB_RAMDISK_OPTIONS:-'ubuntu'} # are ``pxe_ssh``, ``pxe_ipmitool``, ``agent_ssh`` and ``agent_ipmitool``. # # Additional valid choices if IRONIC_IS_HARDWARE == true are: -# ``pxe_iscsi_cimc``, ``pxe_agent_cimc``, ``pxe_ucs`` and ``pxe_cimc``. 
+# ``pxe_iscsi_cimc``, ``pxe_agent_cimc``, ``pxe_ucs``, ``pxe_cimc`` and ``*_pxe_oneview`` IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-pxe_ssh} # Support entry points installation of console scripts @@ -279,6 +282,11 @@ function is_deployed_by_ucs { return 1 } +function is_deployed_by_oneview { + [[ -z "${IRONIC_DEPLOY_DRIVER##*_oneview}" ]] && return 0 + return 1 +} + function setup_virtualbmc { # Install pyghmi from source, if requested, otherwise it will be # downloaded as part of the virtualbmc installation @@ -892,6 +900,22 @@ function enroll_nodes { ucs_service_profile=$(echo $hardware_info |awk '{print $5}') node_options+=" -i ucs_address=$bmc_address -i ucs_password=$bmc_passwd\ -i ucs_username=$bmc_username -i ucs_service_profile=$ucs_service_profile" + elif is_deployed_by_oneview; then + local server_hardware_uri + server_hardware_uri=$(echo $hardware_info |awk '{print $1}') + local server_hardware_type_uri + server_hardware_type_uri=$(echo $hardware_info |awk '{print $2}') + local enclosure_group_uri + enclosure_group_uri=$(echo $hardware_info |awk '{print $3}') + local server_profile_template_uri + server_profile_template_uri=$(echo $hardware_info |awk '{print $4}') + mac_address=$(echo $hardware_info |awk '{print $5}') + + node_options+=" -i server_hardware_uri=$server_hardware_uri" + node_options+=" -p capabilities=" + node_options+="server_hardware_type_uri:$server_hardware_type_uri," + node_options+="enclosure_group_uri:$enclosure_group_uri," + node_options+="server_profile_template_uri:$server_profile_template_uri" fi fi From 435997941eec065c7921fa03620e060cff136aa0 Mon Sep 17 00:00:00 2001 From: yuyafei Date: Fri, 8 Jul 2016 18:57:49 +0800 Subject: [PATCH 051/166] Correct reraising of exception When an exception was caught and rethrown, it should call 'raise' without any arguments because it shows the place where an exception occurred initially instead of the place where the exception was re-raised. 
Change-Id: I98c11a0dd394c122cc3f3ade0ba651f99aec1530 --- ironic/api/controllers/v1/node.py | 12 ++++++------ ironic/api/controllers/v1/port.py | 2 +- ironic/db/sqlalchemy/api.py | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py index 0be3bb624..303bbd3ca 100644 --- a/ironic/api/controllers/v1/node.py +++ b/ironic/api/controllers/v1/node.py @@ -388,7 +388,7 @@ class NodeStatesController(rest.RestController): # Change error code as 404 seems appropriate because RAID is a # standard interface and all drivers might not have it. e.code = http_client.NOT_FOUND - raise e + raise @expose.expose(None, types.uuid_or_name, wtypes.text, status_code=http_client.ACCEPTED) @@ -599,7 +599,7 @@ class Node(base.APIBase): # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Port e.code = http_client.BAD_REQUEST - raise e + raise elif value == wtypes.Unset: self._chassis_uuid = wtypes.Unset @@ -907,7 +907,7 @@ class NodeMaintenanceController(rest.RestController): topic = pecan.request.rpcapi.get_topic_for(rpc_node) except exception.NoValidHost as e: e.code = http_client.BAD_REQUEST - raise e + raise pecan.request.rpcapi.update_node(pecan.request.context, rpc_node, topic=topic) @@ -1240,7 +1240,7 @@ class NodesController(rest.RestController): # list of available drivers and shouldn't request # one that doesn't exist. e.code = http_client.BAD_REQUEST - raise e + raise if node.name != wtypes.Unset and node.name is not None: error_msg = _("Cannot create node with invalid name '%(name)s'") @@ -1309,7 +1309,7 @@ class NodesController(rest.RestController): # list of available drivers and shouldn't request # one that doesn't exist. 
e.code = http_client.BAD_REQUEST - raise e + raise self._check_driver_changed_and_console_enabled(rpc_node, node_ident) new_node = pecan.request.rpcapi.update_node( pecan.request.context, rpc_node, topic) @@ -1332,7 +1332,7 @@ class NodesController(rest.RestController): topic = pecan.request.rpcapi.get_topic_for(rpc_node) except exception.NoValidHost as e: e.code = http_client.BAD_REQUEST - raise e + raise pecan.request.rpcapi.destroy_node(pecan.request.context, rpc_node.uuid, topic) diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py index e8f3abc14..e572d0d6f 100644 --- a/ironic/api/controllers/v1/port.py +++ b/ironic/api/controllers/v1/port.py @@ -64,7 +64,7 @@ class Port(base.APIBase): # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Port e.code = http_client.BAD_REQUEST # BadRequest - raise e + raise elif value == wtypes.Unset: self._node_uuid = wtypes.Unset diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py index 8f8d9aa6c..79b7aa8f2 100644 --- a/ironic/db/sqlalchemy/api.py +++ b/ironic/db/sqlalchemy/api.py @@ -405,7 +405,7 @@ class Connection(api.Connection): instance_uuid=values['instance_uuid'], node=node_id) else: - raise e + raise def _do_update_node(self, node_id, values): with _session_for_write(): @@ -600,7 +600,7 @@ class Connection(api.Connection): raise exception.PortgroupMACAlreadyExists( mac=values['address']) else: - raise exc + raise return ref def destroy_portgroup(self, portgroup_id): From 068c4be4784af9bc5839b2728f9b833342899f5e Mon Sep 17 00:00:00 2001 From: Imre Farkas Date: Fri, 8 Jul 2016 13:02:37 +0200 Subject: [PATCH 052/166] Delete bios_wsman_mock.py from DRAC driver It's left over from the dracclient refactor patches. Should have been removed in 51a73e11c2f6bad0302cefcd8380f6ccbc757907. 
Change-Id: I52a520971dfc3dfd2ba20c9620c303697adc02dd --- .../drivers/modules/drac/bios_wsman_mock.py | 273 ------------------ 1 file changed, 273 deletions(-) delete mode 100644 ironic/tests/unit/drivers/modules/drac/bios_wsman_mock.py diff --git a/ironic/tests/unit/drivers/modules/drac/bios_wsman_mock.py b/ironic/tests/unit/drivers/modules/drac/bios_wsman_mock.py deleted file mode 100644 index 245d27c01..000000000 --- a/ironic/tests/unit/drivers/modules/drac/bios_wsman_mock.py +++ /dev/null @@ -1,273 +0,0 @@ -# -# Copyright 2015 Dell, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Test class for DRAC BIOS interface -""" - -from ironic.drivers.modules.drac import resource_uris - -Enumerations = { - resource_uris.DCIM_BIOSEnumeration: { - 'XML': """ - - http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous - - -http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse - uuid:1f5cd907-0e6f-1e6f-8002-4f266e3acab8 - uuid:219ca357-0e6f-1e6f-a828-f0e4fb722ab8 - - - - - - MemTest - Disabled - - 310 - BIOS.Setup.1-1 - Memory Settings - MemSettings - BIOS.Setup.1-1:MemTest - false - - Enabled - Disabled - - - C States - ProcCStates - Disabled - 1706 - BIOS.Setup.1-1 - System Profile Settings - SysProfileSettings - BIOS.Setup.1-1:ProcCStates - true - - Enabled - Disabled - - - - - """, - 'Dict': { - 'MemTest': { - 'name': 'MemTest', - 'current_value': 'Disabled', - 'pending_value': None, - 'read_only': False, - 'possible_values': ['Disabled', 'Enabled']}, - 'ProcCStates': { - 'name': 'ProcCStates', - 'current_value': 'Disabled', - 'pending_value': None, - 'read_only': True, - 'possible_values': ['Disabled', 'Enabled']}}}, - resource_uris.DCIM_BIOSString: { - 'XML': """ - - http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous - - -http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse - - uuid:1f877bcb-0e6f-1e6f-8004-4f266e3acab8 - uuid:21bea321-0e6f-1e6f-a82b-f0e4fb722ab8 - - - - - - SystemModelName - PowerEdge R630 - - 201 - BIOS.Setup.1-1 - System Information - SysInformation - BIOS.Setup.1-1:SystemModelName - true - 40 - 0 - - - - - SystemModelName2 - PowerEdge R630 - - 201 - BIOS.Setup.1-1 - System Information - SysInformation - BIOS.Setup.1-1:SystemModelName2 - true - 40 - 0 - - - - Asset Tag - AssetTag - - - 1903 - BIOS.Setup.1-1 - Miscellaneous Settings - MiscSettings - BIOS.Setup.1-1:AssetTag - false - 63 - 0 - - ^[ -~]{0,63}$ - - - - - - - """, - 'Dict': { - 'SystemModelName': { - 'name': 'SystemModelName', - 'current_value': 'PowerEdge R630', - 'pending_value': None, - 'read_only': True, - 
'min_length': 0, - 'max_length': 40, - 'pcre_regex': None}, - 'SystemModelName2': { - 'name': 'SystemModelName2', - 'current_value': 'PowerEdge R630', - 'pending_value': None, - 'read_only': True, - 'min_length': 0, - 'max_length': 40, - 'pcre_regex': None}, - 'AssetTag': { - 'name': 'AssetTag', - 'current_value': None, - 'pending_value': None, - 'read_only': False, - 'min_length': 0, - 'max_length': 63, - 'pcre_regex': '^[ -~]{0,63}$'}}}, - resource_uris.DCIM_BIOSInteger: { - 'XML': """ - - -http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous - -http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse - uuid:1fa60792-0e6f-1e6f-8005-4f266e3acab8 - uuid:21ccf01d-0e6f-1e6f-a82d-f0e4fb722ab8 - - - - - - Proc1NumCores - 8 - - 439 - BIOS.Setup.1-1 - Processor Settings - ProcSettings - BIOS.Setup.1-1:Proc1NumCores - true - 0 - - 65535 - - - AcPwrRcvryUserDelay - 60 - 1825 - BIOS.Setup.1-1 - System Security - SysSecurity - BIOS.Setup.1-1:AcPwrRcvryUserDelay - false - 60 - - 240 - - - - - - - """, - 'Dict': { - 'Proc1NumCores': { - 'name': 'Proc1NumCores', - 'current_value': 8, - 'pending_value': None, - 'read_only': True, - 'lower_bound': 0, - 'upper_bound': 65535}, - 'AcPwrRcvryUserDelay': { - 'name': 'AcPwrRcvryUserDelay', - 'current_value': 60, - 'pending_value': None, - 'read_only': False, - 'lower_bound': 60, - 'upper_bound': 240}}}} - -Invoke_Commit = """ - - -http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous - -http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_BIOSService/SetAttributesResponse - uuid:42baa476-0ee9-1ee9-8020-4f266e3acab8 - uuid:fadae2f8-0eea-1eea-9626-76a8f1d9bed4 - - - - The command was successful. - BIOS001 - Yes - 0 - Set PendingValue - - -""" From 3ca8ab094e3f7115262a81b7a7aea8b2fb9fe568 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Thu, 7 Jul 2016 17:44:59 +0100 Subject: [PATCH 053/166] Remove IBootOperationError exception This exception is not raised anywhere. 
Change-Id: I1a93b6bf9b231042446ed77969f11ea25ca5013e --- ironic/common/exception.py | 4 ---- ironic/drivers/modules/iboot.py | 3 --- 2 files changed, 7 deletions(-) diff --git a/ironic/common/exception.py b/ironic/common/exception.py index ccbee40a9..139bbf997 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -483,10 +483,6 @@ class PasswordFileFailedToCreate(IronicException): _msg_fmt = _("Failed to create the password file. %(error)s") -class IBootOperationError(IronicException): - pass - - class IloOperationError(IronicException): _msg_fmt = _("%(operation)s failed, error: %(error)s") diff --git a/ironic/drivers/modules/iboot.py b/ironic/drivers/modules/iboot.py index 5b62d5237..3b9c1cbe4 100644 --- a/ironic/drivers/modules/iboot.py +++ b/ironic/drivers/modules/iboot.py @@ -205,7 +205,6 @@ class IBootPower(base.PowerInterface): :param task: a TaskManager instance containing the node to act on. :returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR. - :raises: IBootOperationError on an error from iBoot. :raises: InvalidParameterValue if iboot parameters are invalid. :raises: MissingParameterValue if required iboot parameters are missing. @@ -221,7 +220,6 @@ class IBootPower(base.PowerInterface): :param task: a TaskManager instance containing the node to act on. :param pstate: The desired power state, one of ironic.common.states POWER_ON, POWER_OFF. - :raises: IBootOperationError on an error from iBoot. :raises: InvalidParameterValue if iboot parameters are invalid or if an invalid power state was specified. :raises: MissingParameterValue if required iboot parameters are @@ -246,7 +244,6 @@ class IBootPower(base.PowerInterface): """Cycles the power to the task's node. :param task: a TaskManager instance containing the node to act on. - :raises: IBootOperationError on an error from iBoot. :raises: InvalidParameterValue if iboot parameters are invalid. :raises: MissingParameterValue if required iboot parameters are missing. 
From 32ac108e48be569fc8153c7263b2571be966221a Mon Sep 17 00:00:00 2001 From: yuyafei Date: Fri, 8 Jul 2016 18:51:57 +0800 Subject: [PATCH 054/166] Remove white space between print and () TrivialFix Change-Id: I0f691bbd30329266bc90bb64881a122f0384636b --- devstack/tools/ironic/scripts/configure-vm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/tools/ironic/scripts/configure-vm.py b/devstack/tools/ironic/scripts/configure-vm.py index acb915a69..24cb04b0f 100755 --- a/devstack/tools/ironic/scripts/configure-vm.py +++ b/devstack/tools/ironic/scripts/configure-vm.py @@ -111,7 +111,7 @@ def main(): conn = libvirt.open("qemu:///system") a = conn.defineXML(libvirt_template) - print ("Created machine %s with UUID %s" % (args.name, a.UUIDString())) + print("Created machine %s with UUID %s" % (args.name, a.UUIDString())) if __name__ == '__main__': main() From 9e9ceece99bbb46062658fe977b6d620383910f5 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 9 Jul 2016 19:20:52 +0000 Subject: [PATCH 055/166] Updated from global requirements Change-Id: I6f624dd8f1cff06bb1f41c87937234e216cd9377 --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index dd6e6adff..3264502f8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,7 +20,7 @@ stevedore>=1.10.0 # Apache-2.0 pysendfile>=2.0.0 # MIT websockify>=0.8.0 # LGPLv3 oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config>=3.10.0 # Apache-2.0 +oslo.config>=3.12.0 # Apache-2.0 oslo.context>=2.4.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 @@ -30,7 +30,7 @@ oslo.middleware>=3.0.0 # Apache-2.0 oslo.policy>=1.9.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.14.0 # Apache-2.0 +oslo.utils>=3.15.0 # Apache-2.0 pecan>=1.0.0 # BSD requests>=2.10.0 # Apache-2.0 six>=1.9.0 # MIT From b56f16868331280a23d2d10f1b9930829f67918c Mon Sep 17 00:00:00 
2001 From: Lin Tan Date: Mon, 11 Jul 2016 15:40:26 +0800 Subject: [PATCH 056/166] Remove unused expected_filter in the unit test The expected_filter was not used, so remove it from test_manager. Change-Id: I08b7f36116eb198ff4cf89ef16e8f03828588fa5 --- ironic/tests/unit/conductor/test_manager.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py index b5266d982..92992e4fe 100644 --- a/ironic/tests/unit/conductor/test_manager.py +++ b/ironic/tests/unit/conductor/test_manager.py @@ -4630,10 +4630,6 @@ class ManagerCheckDeployingStatusTestCase(mgr_utils.ServiceSetUpMixin, driver='fake', provision_state=states.AVAILABLE, target_provision_state=states.NOSTATE) - self.expected_filter = { - 'provision_state': 'deploying', 'reserved': False, - 'maintenance': False} - def test__check_deploying_status(self, mock_off_cond, mock_mapped, mock_fail_if): mock_off_cond.return_value = ['fake-conductor'] From b287c6a491b141386a8d1c98668ef00edb3d708c Mon Sep 17 00:00:00 2001 From: Miles Gould Date: Mon, 11 Jul 2016 16:13:44 +0100 Subject: [PATCH 057/166] Trivial grammar fixes to the upgrade guide Change-Id: I5e5df031755c0fa5586b6b8930f6a0d340f089d3 --- doc/source/deploy/upgrade-guide.rst | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/doc/source/deploy/upgrade-guide.rst b/doc/source/deploy/upgrade-guide.rst index 493dcda16..ac2b853a5 100644 --- a/doc/source/deploy/upgrade-guide.rst +++ b/doc/source/deploy/upgrade-guide.rst @@ -19,18 +19,19 @@ In-band Inspection If you used in-band inspection with **ironic-discoverd**, you have to install **python-ironic-inspector-client** during the upgrade. This package contains a -client module for in-band inspection service, which was previously part of -**ironic-discoverd** package. Ironic Liberty supports **ironic-discoverd** -service, but does not support its in-tree client module. 
Please refer to +client module for the in-band inspection service, which was previously part of +the **ironic-discoverd** package. Ironic Liberty supports the +**ironic-discoverd** service, but does not support its in-tree client module. +Please refer to `ironic-inspector version support matrix `_ -for details on which Ironic version can work with which -**ironic-inspector**/**ironic-discoverd** version. +for details on which Ironic versions can work with which +**ironic-inspector**/**ironic-discoverd** versions. It's also highly recommended that you switch to using **ironic-inspector**, which is a newer (and compatible on API level) version of the same service. -The discoverd to inspector upgrade procedure: +The discoverd to inspector upgrade procedure is as follows: #. Install **ironic-inspector** on the machine where you have **ironic-discoverd** (usually the same as conductor). @@ -40,13 +41,13 @@ The discoverd to inspector upgrade procedure: `example.conf `_. - The file name is provided on command line when starting + The file name is provided on the command line when starting **ironic-discoverd**, and the previously recommended default was ``/etc/ironic-discoverd/discoverd.conf``. In this case, for the sake of consistency it's recommended you move the configuration file to ``/etc/ironic-inspector/inspector.conf``. -#. Shutdown **ironic-discoverd**, start **ironic-inspector**. +#. Shutdown **ironic-discoverd**, and start **ironic-inspector**. #. During upgrade of each conductor instance: @@ -78,8 +79,8 @@ your Nova and Ironic services are as follows: nova-compute if necessary. Note that during the period between Nova's upgrade and Ironic's upgrades, -instances can still be provisioned to nodes, however, any attempt by users -to specify a config drive for an instance will cause error until Ironic's +instances can still be provisioned to nodes. 
However, any attempt by users to +specify a config drive for an instance will cause an error until Ironic's upgrade has completed. Cleaning From 3bc5ebf008936d3bfe1d69dee0398d8dfe16b347 Mon Sep 17 00:00:00 2001 From: Lin Tan Date: Tue, 3 May 2016 17:19:59 +0800 Subject: [PATCH 058/166] Add a doc about appending kernel parameters to boot instances Close-Bug: #1569189 Change-Id: I3395e635a5c4f45e00ee809dceb63c1755b9e66a --- doc/source/deploy/install-guide.rst | 66 +++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/doc/source/deploy/install-guide.rst b/doc/source/deploy/install-guide.rst index 4c62a3983..b4d0595b0 100644 --- a/doc/source/deploy/install-guide.rst +++ b/doc/source/deploy/install-guide.rst @@ -2298,6 +2298,72 @@ but in order to use it we should follow some rules: .. _`expected format`: http://docs.openstack.org/user-guide/cli_config_drive.html#openstack-metadata-format + +Appending kernel parameters to boot instances +============================================= + +The Bare Metal service supports passing custom kernel parameters to boot instances to fit +users' requirements. The way to append the kernel parameters is depending on how to boot instances. + +Network boot +------------ +Currently, the Bare Metal service supports assigning unified kernel parameters to PXE +booted instances by: + +* Modifying the ``[pxe]/pxe_append_params`` configuration option, for example:: + + [pxe] + + pxe_append_params = quiet splash + +* Copying a template from shipped templates to another place, for example:: + + https://git.openstack.org/cgit/openstack/ironic/tree/ironic/drivers/modules/pxe_config.template + + Making the modifications and pointing to the custom template via the configuration + options: ``[pxe]/pxe_config_template`` and ``[pxe]/uefi_pxe_config_template``. 
+ +Local boot +---------- +For local boot instances, users can make use of configuration drive +(see `Enabling the configuration drive (configdrive)`_) to pass a custom +script to append kernel parameters when creating an instance. This is more +flexible and can vary per instance. +Here is an example for grub2 with ubuntu, users can customize it +to fit their use case: + + .. code:: python + + #!/usr/bin/env python + import os + + # Default grub2 config file in Ubuntu + grub_file = '/etc/default/grub' + # Add parameters here to pass to instance. + kernel_parameters = ['quiet', 'splash'] + grub_cmd = 'GRUB_CMDLINE_LINUX' + old_grub_file = grub_file+'~' + os.rename(grub_file, old_grub_file) + cmdline_existed = False + with open(grub_file, 'w') as writer, \ + open(old_grub_file, 'r') as reader: + for line in reader: + key = line.split('=')[0] + if key == grub_cmd: + #If there is already some value: + if line.strip()[-1] == '"': + line = line.strip()[:-1] + ' ' + ' '.join(kernel_parameters) + '"' + cmdline_existed = True + writer.write(line) + if not cmdline_existed: + line = grub_cmd + '=' + '"' + ' '.join(kernel_parameters) + '"' + writer.write(line) + + os.remove(old_grub_file) + os.system('update-grub') + os.system('reboot') + + .. _BuildingDeployRamdisk: Building or downloading a deploy ramdisk image From 6edfdfac311e55c9dfce77a6f25b900c8777b79c Mon Sep 17 00:00:00 2001 From: R-Vaishnavi Date: Thu, 7 Jan 2016 06:10:38 +0000 Subject: [PATCH 059/166] Add support for building ISO for deploy ramdisk This commit adds support for building and uploading an ISO image for deploy ramdisk. This is used by some drivers in Ironic today. A new variable IRONIC_DEPLOY_ISO_REQUIRED=true can be set in localrc to create and upload ISO image for the deploy ramdisk. This patch is required for iLO ThirdParty CI. 
Closes-Bug: 1510914 Change-Id: I03a31490323b4bff9700146844a7009212840c60 --- devstack/lib/ironic | 70 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 63 insertions(+), 7 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index cf1094c8a..4b57fd416 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -146,6 +146,7 @@ fi # (The value must be an absolute path) IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-} IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-} +IRONIC_DEPLOY_ISO=${IRONIC_DEPLOY_ISO:-} # NOTE(jroll) this needs to be updated when stable branches are cut IPA_DOWNLOAD_BRANCH=${IPA_DOWNLOAD_BRANCH:-master} @@ -181,6 +182,15 @@ fi # "ubuntu" is set as the default value. IRONIC_DIB_RAMDISK_OPTIONS=${IRONIC_DIB_RAMDISK_OPTIONS:-'ubuntu'} +# Some drivers in Ironic require deploy ramdisk in bootable ISO format. +# Set this variable to "true" to build an ISO for deploy ramdisk and +# upload to Glance. +IRONIC_DEPLOY_ISO_REQUIRED=$(trueorfalse False IRONIC_DEPLOY_ISO_REQUIRED) +if $IRONIC_DEPLOY_ISO_REQUIRED = 'True' && $IRONIC_BUILD_DEPLOY_RAMDISK = 'False'\ + && [ -n $IRONIC_DEPLOY_ISO ]; then + die "Prebuilt ISOs are not available, provide an ISO via IRONIC_DEPLOY_ISO \ + or set IRONIC_BUILD_DEPLOY_RAMDISK=True to use ISOs" +fi # Which deploy driver to use - valid choices right now # are ``pxe_ssh``, ``pxe_ipmitool``, ``agent_ssh`` and ``agent_ipmitool``. 
# @@ -284,6 +294,15 @@ function is_deployed_by_ucs { function is_deployed_by_oneview { [[ -z "${IRONIC_DEPLOY_DRIVER##*_oneview}" ]] && return 0 +} + +function is_deployed_by_ilo { + [[ -z "${IRONIC_DEPLOY_DRIVER##*_ilo}" ]] && return 0 + return 1 +} + +function is_deploy_iso_required { + [[ "$IRONIC_IS_HARDWARE" == "True" && "$IRONIC_DEPLOY_ISO_REQUIRED" == "True" ]] && return 0 return 1 } @@ -916,6 +935,12 @@ function enroll_nodes { node_options+="server_hardware_type_uri:$server_hardware_type_uri," node_options+="enclosure_group_uri:$enclosure_group_uri," node_options+="server_profile_template_uri:$server_profile_template_uri" + elif is_deployed_by_ilo; then + node_options+=" -i ilo_address=$bmc_address -i ilo_password=$bmc_passwd\ + -i ilo_username=$bmc_username" + if [[ $IRONIC_DEPLOY_DRIVER -ne "pxe_ilo" ]]; then + node_options+=" -i ilo_deploy_iso=$IRONIC_DEPLOY_ISO_ID" + fi fi fi @@ -1054,15 +1079,16 @@ function configure_ironic_auxiliary { function build_ipa_ramdisk { local kernel_path=$1 local ramdisk_path=$2 + local iso_path=$3 case $IRONIC_RAMDISK_TYPE in 'coreos') - build_ipa_coreos_ramdisk $kernel_path $ramdisk_path + build_ipa_coreos_ramdisk $kernel_path $ramdisk_path $iso_path ;; 'tinyipa') - build_tinyipa_ramdisk $kernel_path $ramdisk_path + build_tinyipa_ramdisk $kernel_path $ramdisk_path $iso_path ;; 'dib') - build_ipa_dib_ramdisk $kernel_path $ramdisk_path + build_ipa_dib_ramdisk $kernel_path $ramdisk_path $iso_path ;; *) die $LINENO "Unrecognised IRONIC_RAMDISK_TYPE: $IRONIC_RAMDISK_TYPE. Expected either of 'dib', 'coreos', or 'tinyipa'." 
@@ -1074,6 +1100,7 @@ function build_ipa_coreos_ramdisk { echo "Building coreos ironic-python-agent deploy ramdisk" local kernel_path=$1 local ramdisk_path=$2 + local iso_path=$3 # on fedora services do not start by default restart_service docker git_clone $IRONIC_PYTHON_AGENT_REPO $IRONIC_PYTHON_AGENT_DIR $IRONIC_PYTHON_AGENT_BRANCH @@ -1081,6 +1108,9 @@ function build_ipa_coreos_ramdisk { imagebuild/coreos/build_coreos_image.sh cp imagebuild/coreos/UPLOAD/coreos_production_pxe_image-oem.cpio.gz $ramdisk_path cp imagebuild/coreos/UPLOAD/coreos_production_pxe.vmlinuz $kernel_path + if is_deploy_iso_required; then + imagebuild/coreos/iso-image-create -k $kernel_path -i $ramdisk_path -o $iso_path + fi sudo rm -rf UPLOAD cd - } @@ -1089,12 +1119,17 @@ function build_tinyipa_ramdisk { echo "Building ironic-python-agent deploy ramdisk" local kernel_path=$1 local ramdisk_path=$2 + local iso_path=$3 git_clone $IRONIC_PYTHON_AGENT_REPO $IRONIC_PYTHON_AGENT_DIR $IRONIC_PYTHON_AGENT_BRANCH cd $IRONIC_PYTHON_AGENT_DIR/imagebuild/tinyipa export BUILD_AND_INSTALL_TINYIPA=true make cp tinyipa.gz $ramdisk_path cp tinyipa.vmlinuz $kernel_path + if is_deploy_iso_required; then + make iso + cp tinyipa.iso $iso_path + fi make clean cd - } @@ -1112,6 +1147,7 @@ function install_diskimage_builder { function build_ipa_dib_ramdisk { local kernel_path=$1 local ramdisk_path=$2 + local iso_path=$3 local tempdir tempdir=$(mktemp -d --tmpdir=${DEST}) @@ -1121,12 +1157,18 @@ function build_ipa_dib_ramdisk { fi echo "Building IPA ramdisk with DIB options: $IRONIC_DIB_RAMDISK_OPTIONS" + if is_deploy_iso_required; then + IRONIC_DIB_RAMDISK_OPTIONS+=" iso" + fi disk-image-create "$IRONIC_DIB_RAMDISK_OPTIONS" \ -o "$tempdir/ironic-agent" \ ironic-agent chmod -R +r $tempdir mv "$tempdir/ironic-agent.kernel" "$kernel_path" mv "$tempdir/ironic-agent.initramfs" "$ramdisk_path" + if is_deploy_iso_required; then + mv "$tempdir/ironic-agent.iso" "$iso_path" + fi rm -rf $tempdir } @@ -1136,22 +1178,26 @@ 
function upload_baremetal_ironic_deploy { declare -g IRONIC_DEPLOY_KERNEL_ID IRONIC_DEPLOY_RAMDISK_ID echo_summary "Creating and uploading baremetal images for ironic" - if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" ]; then + if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" -o -z "$IRONIC_DEPLOY_ISO" ]; then local IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.kernel local IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.initramfs + local IRONIC_DEPLOY_ISO_PATH=$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.iso else local IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL local IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK + local IRONIC_DEPLOY_ISO_PATH=$IRONIC_DEPLOY_ISO fi - if [ ! -e "$IRONIC_DEPLOY_RAMDISK_PATH" -o ! -e "$IRONIC_DEPLOY_KERNEL_PATH" ]; then + if [ ! -e "$IRONIC_DEPLOY_RAMDISK_PATH" ] || \ + [ ! -e "$IRONIC_DEPLOY_KERNEL_PATH" ] || \ + ( is_deploy_iso_required && [ ! -e "$IRONIC_DEPLOY_ISO_PATH" ] ); then # files don't exist, need to build them if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then # we can build them only if we're not offline if [ "$OFFLINE" != "True" ]; then - build_ipa_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH + build_ipa_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH $IRONIC_DEPLOY_ISO_PATH else - die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be built in OFFLINE mode" + die $LINENO "Deploy kernel+ramdisk or iso files don't exist and cannot be built in OFFLINE mode" fi else # download the agent image tarball @@ -1176,6 +1222,16 @@ function upload_baremetal_ironic_deploy { --container-format=ari \ < $IRONIC_DEPLOY_RAMDISK_PATH | grep ' id ' | get_field 2) die_if_not_set $LINENO IRONIC_DEPLOY_RAMDISK_ID "Failed to load ramdisk image into glance" + + if is_deploy_iso_required; then + IRONIC_DEPLOY_ISO_ID=$(openstack \ + image create \ + $(basename $IRONIC_DEPLOY_ISO_PATH) \ + --public 
--disk-format=iso \ + --container-format=bare \ + < $IRONIC_DEPLOY_ISO_PATH -f value -c id) + die_if_not_set $LINENO IRONIC_DEPLOY_ISO_ID "Failed to load deploy iso into glance" + fi } function prepare_baremetal_basic_ops { From 8abcf2ebfebec4e3c26fc659978a0227d6f75ef0 Mon Sep 17 00:00:00 2001 From: R-Vaishnavi Date: Wed, 13 Jan 2016 09:09:08 +0000 Subject: [PATCH 060/166] Config variable to configure [glance] section Some drivers require glance with swift backend to be able to store some deployment artifacts, e.g. in case of iscsi_ilo driver it may be boot_iso that is constructed from user image kernel and ramdisk. This requires some configuration and this change is intended for such drivers. This commit adds a new config variable IRONIC_CONFIGURE_GLANCE_WITH_SWIFT, which can be set to True to configure the Swift related config parameters of [glance] section in ironic.conf. This patch is required for iLO ThirdParty CI. Change-Id: I1bad2b06b268825e181b7b3125d08c977f54f7b8 --- devstack/lib/ironic | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 4b57fd416..d519a0f3f 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -223,6 +223,11 @@ IRONIC_VBMC_PORT_RANGE_START=${IRONIC_VBMC_PORT_RANGE_START:-6230} IRONIC_VBMC_CONFIG_FILE=${IRONIC_VBMC_CONFIG_FILE:-$HOME/.vbmc/virtualbmc.conf} IRONIC_VBMC_LOGFILE=${IRONIC_VBMC_LOGFILE:-$IRONIC_VM_LOG_DIR/virtualbmc.log} +# To explicitly enable configuration of Glance with Swift +# (which is required by some vendor drivers), set this +# variable to true. 
+IRONIC_CONFIGURE_GLANCE_WITH_SWIFT=$(trueorfalse False IRONIC_CONFIGURE_GLANCE_WITH_SWIFT) + # The path to the libvirt hooks directory, used if IRONIC_VM_LOG_ROTATE is True IRONIC_LIBVIRT_HOOKS_PATH=${IRONIC_LIBVIRT_HOOKS_PATH:-/etc/libvirt/hooks/} @@ -301,6 +306,11 @@ function is_deployed_by_ilo { return 1 } +function is_glance_configuration_required { + is_deployed_by_agent || [[ "$IRONIC_CONFIGURE_GLANCE_WITH_SWIFT" == "True" ]] && return 0 + return 1 +} + function is_deploy_iso_required { [[ "$IRONIC_IS_HARDWARE" == "True" && "$IRONIC_DEPLOY_ISO_REQUIRED" == "True" ]] && return 0 return 1 @@ -556,11 +566,15 @@ function configure_ironic_conductor { # Set these options for scenarios in which the agent fetches the image # directly from glance, and don't set them where the image is pushed # over iSCSI. - if is_deployed_by_agent; then + if is_glance_configuration_required; then if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then iniset $IRONIC_CONF_FILE glance swift_temp_url_key $SWIFT_TEMPURL_KEY else - die $LINENO "SWIFT_ENABLE_TEMPURLS must be True to use agent_* driver in Ironic." + die $LINENO "SWIFT_ENABLE_TEMPURLS must be True. 
This is " \ + "required either because IRONIC_DEPLOY_DRIVER was " \ + "set to some agent_* driver OR configuration of " \ + "Glance with Swift was explicitly requested with " \ + "IRONIC_CONFIGURE_GLANCE_WITH_SWIFT=True" fi iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080} iniset $IRONIC_CONF_FILE glance swift_api_version v1 @@ -569,6 +583,9 @@ function configure_ironic_conductor { iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id} iniset $IRONIC_CONF_FILE glance swift_container glance iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600 + fi + + if is_deployed_by_agent; then iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30 fi From 1805765fef1f9267fa37bb25cade7a36099aad03 Mon Sep 17 00:00:00 2001 From: Carol Bouchard Date: Tue, 12 Jul 2016 10:34:15 -0400 Subject: [PATCH 061/166] Increase devstack BM VM RAM for coreos to boot This change set increases baremetal VM RAM from 1024 to 1280 in devstack script to support Coreos ramdisk version. 
Change-Id: Ic93807e20d6fe3ba3a7a9f78c0a8887d1c54aa19 Closes-bug: #1602277 --- devstack/lib/ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index cf1094c8a..fe76c8a06 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -111,7 +111,7 @@ IRONIC_VM_SSH_PORT=${IRONIC_VM_SSH_PORT:-22} IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP} IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1} IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1} -IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-1024} +IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-1280} IRONIC_VM_SPECS_CPU_ARCH=${IRONIC_VM_SPECS_CPU_ARCH:-'x86_64'} IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10} IRONIC_VM_SPECS_DISK_FORMAT=${IRONIC_VM_SPECS_DISK_FORMAT:-qcow2} From cde11611d93946a1c79e406dc429aa5e742da729 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 17 May 2016 13:59:39 +0300 Subject: [PATCH 062/166] Add network interface to base driver class This change also introduces two network interfaces: * flat: Copies current neutron DHCP provider logic to work with cleaning ports; * noop: noop interface. The default value of the network_interface is None, meaning that the node will be using the default network interface. The default network interface is determined the following way: * if [DEFAULT]default_network_interface configuration option is set (the default for it is None), the specified interface becomes the default for all nodes; * if it is not set, 'flat' interface will be used if the deployment currently uses 'neutron' DHCP provider, otherwise 'noop' interface will be used. create_cleaning_ports and delete_cleaning_ports methods of the DHCP providers are still being called in case of out-of-tree DHCP providers, but this possibility will be removed completely in the next release. If the DHCP provider logic is rewritten into a custom network interface, please remove those methods from the provider, so that network interface is called instead. 
Partial-bug: #1526403 Co-Authored-By: Om Kumar Co-Authored-By: Vasyl Saienko Co-Authored-By: Sivaramakrishna Garimella Co-Authored-By: Vladyslav Drok Co-Authored-By: Zhenguo Niu Change-Id: I0c26582b6b6e9d32650ff3e2b9a3269c3c2d5454 --- etc/ironic/ironic.conf.sample | 29 +- ironic/common/driver_factory.py | 92 +++++-- ironic/common/exception.py | 9 + ironic/common/neutron.py | 203 +++++++++++++- ironic/conductor/base_manager.py | 6 +- ironic/dhcp/neutron.py | 103 ++----- ironic/drivers/base.py | 76 +++++- ironic/drivers/modules/agent.py | 10 +- ironic/drivers/modules/deploy_utils.py | 64 ++++- ironic/drivers/modules/ilo/deploy.py | 4 +- ironic/drivers/modules/network/__init__.py | 0 ironic/drivers/modules/network/flat.py | 121 +++++++++ ironic/drivers/modules/network/noop.py | 59 ++++ ironic/tests/base.py | 5 + .../tests/unit/common/test_driver_factory.py | 76 ++++++ ironic/tests/unit/common/test_neutron.py | 253 ++++++++++++++++++ .../tests/unit/conductor/test_base_manager.py | 4 +- ironic/tests/unit/conductor/test_manager.py | 3 +- ironic/tests/unit/dhcp/test_neutron.py | 140 ++-------- .../unit/drivers/modules/network/__init__.py | 0 .../unit/drivers/modules/network/test_flat.py | 85 ++++++ .../unit/drivers/modules/test_deploy_utils.py | 106 ++++++-- ...d-network-interfaces-0a13c4aba252573e.yaml | 29 ++ setup.cfg | 4 + 24 files changed, 1219 insertions(+), 262 deletions(-) create mode 100644 ironic/drivers/modules/network/__init__.py create mode 100644 ironic/drivers/modules/network/flat.py create mode 100644 ironic/drivers/modules/network/noop.py create mode 100644 ironic/tests/unit/drivers/modules/network/__init__.py create mode 100644 ironic/tests/unit/drivers/modules/network/test_flat.py create mode 100644 releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 1ff5b0974..0f092b718 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -30,6 
+30,24 @@ # developer documentation online. (list value) #enabled_drivers = pxe_ipmitool +# Specify the list of network interfaces to load during +# service initialization. Missing network interfaces, or +# network interfaces which fail to initialize, will prevent +# the conductor service from starting. The option default is a +# recommended set of production-oriented network interfaces. A +# complete list of network interfaces present on your system +# may be found by enumerating the +# "ironic.hardware.interfaces.network" entrypoint. (list +# value) +#enabled_network_interfaces = flat,noop + +# Default network interface to be used for nodes that do not +# have network_interface field set. A complete list of network +# interfaces present on your system may be found by +# enumerating the "ironic.hardware.interfaces.network" +# entrypoint. (string value) +#default_network_interface = + # Used if there is a formatting error when generating an # exception message (a programming error). If True, raise an # exception; if False, use the unformatted message. (boolean @@ -1410,12 +1428,12 @@ # (list value) #hash_algorithms = md5 -# Authentication type to load (string value) +# Authentication type to load (unknown value) # Deprecated group/name - [keystone_authtoken]/auth_plugin #auth_type = # Config Section from which to load plugin specific options -# (string value) +# (unknown value) #auth_section = @@ -1499,8 +1517,11 @@ # Allowed values: keystone, noauth #auth_strategy = keystone -# UUID of the network to create Neutron ports on, when booting -# to a ramdisk for cleaning using Neutron DHCP. (string value) +# Neutron network UUID for the ramdisk to be booted into for +# cleaning nodes. Required if cleaning (either automatic or +# manual) is run for flat network interface, and, if DHCP +# providers are still being used, for neutron DHCP provider. 
+# (string value) #cleaning_network_uuid = diff --git a/ironic/common/driver_factory.py b/ironic/common/driver_factory.py index 6e834d4f7..bd4439f9e 100644 --- a/ironic/common/driver_factory.py +++ b/ironic/common/driver_factory.py @@ -41,6 +41,23 @@ driver_opts = [ 'be found by enumerating the "ironic.drivers" ' 'entrypoint. An example may be found in the ' 'developer documentation online.')), + cfg.ListOpt('enabled_network_interfaces', + default=['flat', 'noop'], + help=_('Specify the list of network interfaces to load during ' + 'service initialization. Missing network interfaces, ' + 'or network interfaces which fail to initialize, will ' + 'prevent the conductor service from starting. The ' + 'option default is a recommended set of ' + 'production-oriented network interfaces. A complete ' + 'list of network interfaces present on your system may ' + 'be found by enumerating the ' + '"ironic.hardware.interfaces.network" entrypoint.')), + cfg.StrOpt('default_network_interface', + help=_('Default network interface to be used for nodes that ' + 'do not have network_interface field set. A complete ' + 'list of network interfaces present on your system may ' + 'be found by enumerating the ' + '"ironic.hardware.interfaces.network" entrypoint.')) ] CONF = cfg.CONF @@ -76,6 +93,20 @@ def _attach_interfaces_to_driver(driver, node, driver_name=None): impl = getattr(driver_singleton, iface, None) setattr(driver, iface, impl) + network_iface = node.network_interface + if network_iface is None: + network_iface = (CONF.default_network_interface or + ('flat' if CONF.dhcp.dhcp_provider == 'neutron' + else 'noop')) + network_factory = NetworkInterfaceFactory() + try: + net_driver = network_factory.get_driver(network_iface) + except KeyError: + raise exception.DriverNotFoundInEntrypoint( + driver_name=network_iface, + entrypoint=network_factory._entrypoint_name) + driver.network = net_driver + def get_driver(driver_name): """Simple method to get a ref to an instance of a driver. 
@@ -93,7 +124,7 @@ def get_driver(driver_name): try: factory = DriverFactory() - return factory[driver_name].obj + return factory.get_driver(driver_name) except KeyError: raise exception.DriverNotFound(driver_name=driver_name) @@ -109,8 +140,11 @@ def drivers(): for name in factory.names) -class DriverFactory(object): - """Discover, load and manage the drivers available.""" +class BaseDriverFactory(object): + """Discover, load and manage the drivers available. + + This is subclassed to load both main drivers and extra interfaces. + """ # NOTE(deva): loading the _extension_manager as a class member will break # stevedore when it loads a driver, because the driver will @@ -119,13 +153,25 @@ class DriverFactory(object): # once, the first time DriverFactory.__init__ is called. _extension_manager = None + # Entrypoint name containing the list of all available drivers/interfaces + _entrypoint_name = None + # Name of the [DEFAULT] section config option containing a list of enabled + # drivers/interfaces + _enabled_driver_list_config_option = '' + # This field will contain the list of the enabled drivers/interfaces names + # without duplicates + _enabled_driver_list = None + def __init__(self): - if not DriverFactory._extension_manager: - DriverFactory._init_extension_manager() + if not self.__class__._extension_manager: + self.__class__._init_extension_manager() def __getitem__(self, name): return self._extension_manager[name] + def get_driver(self, name): + return self[name].obj + # NOTE(deva): Use lockutils to avoid a potential race in eventlet # that might try to create two driver factories. @classmethod @@ -136,19 +182,24 @@ class DriverFactory(object): # creation of multiple NameDispatchExtensionManagers. 
if cls._extension_manager: return + enabled_drivers = getattr(CONF, cls._enabled_driver_list_config_option, + []) # Check for duplicated driver entries and warn the operator # about them - counter = collections.Counter(CONF.enabled_drivers).items() - duplicated_drivers = list(dup for (dup, i) in counter if i > 1) + counter = collections.Counter(enabled_drivers).items() + duplicated_drivers = [] + cls._enabled_driver_list = [] + for item, cnt in counter: + if cnt > 1: + duplicated_drivers.append(item) + cls._enabled_driver_list.append(item) if duplicated_drivers: LOG.warning(_LW('The driver(s) "%s" is/are duplicated in the ' 'list of enabled_drivers. Please check your ' 'configuration file.'), ', '.join(duplicated_drivers)) - enabled_drivers = set(CONF.enabled_drivers) - # NOTE(deva): Drivers raise "DriverLoadError" if they are unable to be # loaded, eg. due to missing external dependencies. # We capture that exception, and, only if it is for an @@ -160,30 +211,31 @@ class DriverFactory(object): def _catch_driver_not_found(mgr, ep, exc): # NOTE(deva): stevedore loads plugins *before* evaluating # _check_func, so we need to check here, too. - if ep.name in enabled_drivers: + if ep.name in cls._enabled_driver_list: if not isinstance(exc, exception.DriverLoadError): raise exception.DriverLoadError(driver=ep.name, reason=exc) raise exc def _check_func(ext): - return ext.name in enabled_drivers + return ext.name in cls._enabled_driver_list cls._extension_manager = ( dispatch.NameDispatchExtensionManager( - 'ironic.drivers', + cls._entrypoint_name, _check_func, invoke_on_load=True, on_load_failure_callback=_catch_driver_not_found)) # NOTE(deva): if we were unable to load any configured driver, perhaps # because it is not present on the system, raise an error. 
- if (sorted(enabled_drivers) != + if (sorted(cls._enabled_driver_list) != sorted(cls._extension_manager.names())): found = cls._extension_manager.names() - names = [n for n in enabled_drivers if n not in found] + names = [n for n in cls._enabled_driver_list if n not in found] # just in case more than one could not be found ... names = ', '.join(names) - raise exception.DriverNotFound(driver_name=names) + raise exception.DriverNotFoundInEntrypoint( + driver_name=names, entrypoint=cls._entrypoint_name) LOG.info(_LI("Loaded the following drivers: %s"), cls._extension_manager.names()) @@ -192,3 +244,13 @@ class DriverFactory(object): def names(self): """The list of driver names available.""" return self._extension_manager.names() + + +class DriverFactory(BaseDriverFactory): + _entrypoint_name = 'ironic.drivers' + _enabled_driver_list_config_option = 'enabled_drivers' + + +class NetworkInterfaceFactory(BaseDriverFactory): + _entrypoint_name = 'ironic.hardware.interfaces.network' + _enabled_driver_list_config_option = 'enabled_network_interfaces' diff --git a/ironic/common/exception.py b/ironic/common/exception.py index 139bbf997..5622203b2 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -241,6 +241,11 @@ class DriverNotFound(NotFound): _msg_fmt = _("Could not find the following driver(s): %(driver_name)s.") +class DriverNotFoundInEntrypoint(DriverNotFound): + _msg_fmt = _("Could not find the following driver(s) in the " + "'%(entrypoint)s' entrypoint: %(driver_name)s.") + + class ImageNotFound(NotFound): _msg_fmt = _("Image %(image_id)s could not be found.") @@ -591,3 +596,7 @@ class OneViewError(IronicException): class NodeTagNotFound(IronicException): _msg_fmt = _("Node %(node_id)s doesn't have a tag '%(tag)s'") + + +class NetworkError(IronicException): + _msg_fmt = _("Network operation failure.") diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py index 99d73043e..fc200207b 100644 --- a/ironic/common/neutron.py +++ 
b/ironic/common/neutron.py @@ -10,12 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. +from neutronclient.common import exceptions as neutron_exceptions from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg +from oslo_log import log +from ironic.common import exception from ironic.common.i18n import _ +from ironic.common.i18n import _LE +from ironic.common.i18n import _LI +from ironic.common.i18n import _LW from ironic.common import keystone +LOG = log.getLogger(__name__) + CONF = cfg.CONF CONF.import_opt('my_ip', 'ironic.netconf') @@ -42,8 +50,11 @@ neutron_opts = [ 'but not affected by this setting) is insecure and ' 'should only be used for testing.')), cfg.StrOpt('cleaning_network_uuid', - help=_('UUID of the network to create Neutron ports on, when ' - 'booting to a ramdisk for cleaning using Neutron DHCP.')) + help=_('Neutron network UUID for the ramdisk to be booted ' + 'into for cleaning nodes. Required if cleaning (either ' + 'automatic or manual) is run for flat network interface,' + ' and, if DHCP providers are still being used, for ' + 'neutron DHCP provider.')) ] CONF.register_opts(neutron_opts, group='neutron') @@ -73,3 +84,191 @@ def get_client(token=None): params['token'] = token return clientv20.Client(**params) + + +def add_ports_to_network(task, network_uuid, is_flat=False): + """Create neutron ports to boot the ramdisk. + + Create neutron ports for each pxe_enabled port on task.node to boot + the ramdisk. + + :param task: a TaskManager instance. + :param network_uuid: UUID of a neutron network where ports will be + created. + :param is_flat: Indicates whether it is a flat network or not. 
+ :raises: NetworkError + :returns: a dictionary in the form {port.uuid: neutron_port['id']} + """ + client = get_client(task.context.auth_token) + node = task.node + + LOG.debug('For node %(node)s, creating neutron ports on network ' + '%(network_uuid)s using %(net_iface)s network interface.', + {'net_iface': task.driver.network.__class__.__name__, + 'node': node.uuid, 'network_uuid': network_uuid}) + body = { + 'port': { + 'network_id': network_uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'device_owner': 'baremetal:none', + } + } + + if not is_flat: + # NOTE(vdrok): It seems that change + # I437290affd8eb87177d0626bf7935a165859cbdd to neutron broke the + # possibility to always bind port. Set binding:host_id only in + # case of non flat network. + body['port']['binding:host_id'] = node.uuid + + # Since instance_uuid will not be available during cleaning + # operations, we need to check that and populate them only when + # available + body['port']['device_id'] = node.instance_uuid or node.uuid + + ports = {} + failures = [] + portmap = get_node_portmap(task) + pxe_enabled_ports = [p for p in task.ports if p.pxe_enabled] + for ironic_port in pxe_enabled_ports: + body['port']['mac_address'] = ironic_port.address + binding_profile = {'local_link_information': + [portmap[ironic_port.uuid]]} + body['port']['binding:profile'] = binding_profile + try: + port = client.create_port(body) + except neutron_exceptions.NeutronClientException as e: + rollback_ports(task, network_uuid) + msg = (_('Could not create neutron port for ironic port ' + '%(ir-port)s on given network %(net)s from node ' + '%(node)s. 
%(exc)s') % + {'net': network_uuid, 'node': node.uuid, + 'ir-port': ironic_port.uuid, 'exc': e}) + LOG.exception(msg) + raise exception.NetworkError(msg) + + try: + ports[ironic_port.uuid] = port['port']['id'] + except KeyError: + failures.append(ironic_port.uuid) + + if failures: + if len(failures) == len(pxe_enabled_ports): + raise exception.NetworkError(_( + "Failed to update vif_port_id for any PXE enabled port " + "on node %s.") % node.uuid) + else: + LOG.warning(_LW("Some errors were encountered when updating " + "vif_port_id for node %(node)s on " + "the following ports: %(ports)s."), + {'node': node.uuid, 'ports': failures}) + else: + LOG.info(_LI('Successfully created ports for node %(node_uuid)s in ' + 'network %(net)s.'), + {'node_uuid': node.uuid, 'net': network_uuid}) + + return ports + + +def remove_ports_from_network(task, network_uuid): + """Deletes the neutron ports created for booting the ramdisk. + + :param task: a TaskManager instance. + :param network_uuid: UUID of a neutron network ports will be deleted from. + :raises: NetworkError + """ + macs = [p.address for p in task.ports if p.pxe_enabled] + if macs: + params = { + 'network_id': network_uuid, + 'mac_address': macs, + } + LOG.debug("Removing ports on network %(net)s on node %(node)s.", + {'net': network_uuid, 'node': task.node.uuid}) + + remove_neutron_ports(task, params) + + +def remove_neutron_ports(task, params): + """Deletes the neutron ports matched by params. + + :param task: a TaskManager instance. + :param params: Dict of params to filter ports. + :raises: NetworkError + """ + client = get_client(task.context.auth_token) + node_uuid = task.node.uuid + + try: + response = client.list_ports(**params) + except neutron_exceptions.NeutronClientException as e: + msg = (_('Could not get given network VIF for %(node)s ' + 'from neutron, possible network issue. 
%(exc)s') % + {'node': node_uuid, 'exc': e}) + LOG.exception(msg) + raise exception.NetworkError(msg) + + ports = response.get('ports', []) + if not ports: + LOG.debug('No ports to remove for node %s', node_uuid) + return + + for port in ports: + if not port['id']: + # TODO(morgabra) client.list_ports() sometimes returns + # port objects with null ids. It's unclear why this happens. + LOG.warning(_LW("Deleting neutron port failed, missing 'id'. " + "Node: %(node)s, neutron port: %(port)s."), + {'node': node_uuid, 'port': port}) + continue + + LOG.debug('Deleting neutron port %(vif_port_id)s of node ' + '%(node_id)s.', + {'vif_port_id': port['id'], 'node_id': node_uuid}) + + try: + client.delete_port(port['id']) + except neutron_exceptions.NeutronClientException as e: + msg = (_('Could not remove VIF %(vif)s of node %(node)s, possibly ' + 'a network issue: %(exc)s') % + {'vif': port['id'], 'node': node_uuid, 'exc': e}) + LOG.exception(msg) + raise exception.NetworkError(msg) + + LOG.info(_LI('Successfully removed node %(node_uuid)s neutron ports.'), + {'node_uuid': node_uuid}) + + +def get_node_portmap(task): + """Extract the switch port information for the node. + + :param task: a task containing the Node object. + :returns: a dictionary in the form {port.uuid: port.local_link_connection} + """ + + portmap = {} + for port in task.ports: + portmap[port.uuid] = port.local_link_connection + return portmap + # TODO(jroll) raise InvalidParameterValue if a port doesn't have the + # necessary info? (probably) + + +def rollback_ports(task, network_uuid): + """Attempts to delete any ports created by cleaning/provisioning + + Purposefully will not raise any exceptions so error handling can + continue. + + :param task: a TaskManager instance. + :param network_uuid: UUID of a neutron network. 
+ """ + try: + remove_ports_from_network(task, network_uuid) + except exception.NetworkError: + # Only log the error + LOG.exception(_LE( + 'Failed to rollback port changes for node %(node)s ' + 'on network %(network)s'), {'node': task.node.uuid, + 'network': network_uuid}) diff --git a/ironic/conductor/base_manager.py b/ironic/conductor/base_manager.py index da5db5a1f..d1c2b1261 100644 --- a/ironic/conductor/base_manager.py +++ b/ironic/conductor/base_manager.py @@ -83,8 +83,12 @@ class BaseConductorManager(object): self.ring_manager = hash.HashRingManager() """Consistent hash ring which maps drivers to conductors.""" - # NOTE(deva): this call may raise DriverLoadError or DriverNotFound + # NOTE(deva): these calls may raise DriverLoadError or DriverNotFound + # NOTE(vdrok): instantiate network interface factory on startup so that + # all the network interfaces are loaded at the very beginning, and + # failures prevent the conductor from starting. drivers = driver_factory.drivers() + driver_factory.NetworkInterfaceFactory() if not drivers: msg = _LE("Conductor %s cannot be started because no drivers " "were loaded. This could be because no drivers were " diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py index 15935a491..084300d1a 100644 --- a/ironic/dhcp/neutron.py +++ b/ironic/dhcp/neutron.py @@ -34,6 +34,9 @@ from ironic import objects CONF = cfg.CONF LOG = logging.getLogger(__name__) +create_cleaning_ports_deprecation = False +delete_cleaning_ports_deprecation = False + class NeutronDHCPApi(base.BaseDHCP): """API for communicating to neutron 2.x API.""" @@ -271,95 +274,37 @@ class NeutronDHCPApi(base.BaseDHCP): return port_ip_addresses + portgroup_ip_addresses + # TODO(vsaienko) Remove this method when deprecation period is passed + # in Ocata. def create_cleaning_ports(self, task): """Create neutron ports for each port on task.node to boot the ramdisk. :param task: a TaskManager instance. 
- :raises: InvalidParameterValue if the cleaning network is None + :raises: NetworkError, InvalidParameterValue :returns: a dictionary in the form {port.uuid: neutron_port['id']} """ - if not CONF.neutron.cleaning_network_uuid: - raise exception.InvalidParameterValue(_('Valid cleaning network ' - 'UUID not provided')) - neutron_client = neutron.get_client(task.context.auth_token) - body = { - 'port': { - 'network_id': CONF.neutron.cleaning_network_uuid, - 'admin_state_up': True, - } - } - ports = {} - for ironic_port in task.ports: - body['port']['mac_address'] = ironic_port.address - try: - port = neutron_client.create_port(body) - except neutron_client_exc.ConnectionFailed as e: - self._rollback_cleaning_ports(task) - msg = (_('Could not create cleaning port on network %(net)s ' - 'from %(node)s. %(exc)s') % - {'net': CONF.neutron.cleaning_network_uuid, - 'node': task.node.uuid, - 'exc': e}) - LOG.exception(msg) - raise exception.NodeCleaningFailure(msg) - if not port.get('port') or not port['port'].get('id'): - self._rollback_cleaning_ports(task) - msg = (_('Failed to create cleaning ports for node ' - '%(node)s') % {'node': task.node.uuid}) - LOG.error(msg) - raise exception.NodeCleaningFailure(msg) - # Match return value of get_node_vif_ids() - ports[ironic_port.uuid] = port['port']['id'] - return ports + global create_cleaning_ports_deprecation + if not create_cleaning_ports_deprecation: + LOG.warning(_LW('create_cleaning_ports via dhcp provider is ' + 'deprecated. The node.network_interface setting ' + 'should be used instead.')) + create_cleaning_ports_deprecation = True + return task.driver.network.add_cleaning_network(task) + + # TODO(vsaienko) Remove this method when deprecation period is passed + # in Ocata. def delete_cleaning_ports(self, task): """Deletes the neutron port created for booting the ramdisk. :param task: a TaskManager instance. 
+ :raises: NetworkError, InvalidParameterValue """ - neutron_client = neutron.get_client(task.context.auth_token) - macs = [p.address for p in task.ports] - params = { - 'network_id': CONF.neutron.cleaning_network_uuid - } - try: - ports = neutron_client.list_ports(**params) - except neutron_client_exc.ConnectionFailed as e: - msg = (_('Could not get cleaning network vif for %(node)s ' - 'from Neutron, possible network issue. %(exc)s') % - {'node': task.node.uuid, - 'exc': e}) - LOG.exception(msg) - raise exception.NodeCleaningFailure(msg) + global delete_cleaning_ports_deprecation + if not delete_cleaning_ports_deprecation: + LOG.warning(_LW('delete_cleaning_ports via dhcp provider is ' + 'deprecated. The node.network_interface setting ' + 'should be used instead.')) + delete_cleaning_ports_deprecation = True - # Iterate the list of Neutron port dicts, remove the ones we added - for neutron_port in ports.get('ports', []): - # Only delete ports using the node's mac addresses - if neutron_port.get('mac_address') in macs: - try: - neutron_client.delete_port(neutron_port.get('id')) - except neutron_client_exc.ConnectionFailed as e: - msg = (_('Could not remove cleaning ports on network ' - '%(net)s from %(node)s, possible network issue. ' - '%(exc)s') % - {'net': CONF.neutron.cleaning_network_uuid, - 'node': task.node.uuid, - 'exc': e}) - LOG.exception(msg) - raise exception.NodeCleaningFailure(msg) - - def _rollback_cleaning_ports(self, task): - """Attempts to delete any ports created by cleaning - - Purposefully will not raise any exceptions so error handling can - continue. - - :param task: a TaskManager instance. - """ - try: - self.delete_cleaning_ports(task) - except Exception: - # Log the error, but let the caller invoke the - # manager.cleaning_error_handler(). 
- LOG.exception(_LE('Failed to rollback cleaning port ' - 'changes for node %s') % task.node.uuid) + task.driver.network.remove_cleaning_network(task) diff --git a/ironic/drivers/base.py b/ironic/drivers/base.py index 962048c23..742aa5a12 100644 --- a/ironic/drivers/base.py +++ b/ironic/drivers/base.py @@ -158,8 +158,14 @@ class BareDriver(BaseDriver): Any composable interfaces should be added as class attributes of this class, as well as appended to core_interfaces or standard_interfaces here. """ + def __init__(self): - pass + self.network = None + """`Core` attribute for network connectivity. + + A reference to an instance of :class:NetworkInterface. + """ + self.core_interfaces.append('network') class BaseInterface(object): @@ -1020,6 +1026,74 @@ class RAIDInterface(BaseInterface): return raid.get_logical_disk_properties(self.raid_schema) +@six.add_metaclass(abc.ABCMeta) +class NetworkInterface(object): + """Base class for network interfaces.""" + + def get_properties(self): + """Return the properties of the interface. + + :returns: dictionary of <property name>:<property description> entries. + """ + return {} + + def validate(self, task): + """Validates the network interface. + + :param task: a TaskManager instance. + :raises: InvalidParameterValue, if the network interface configuration + is invalid. + :raises: MissingParameterValue, if some parameters are missing. + """ + + @abc.abstractmethod + def add_provisioning_network(self, task): + """Add the provisioning network to a node. + + :param task: A TaskManager instance. + :raises: NetworkError + """ + + @abc.abstractmethod + def remove_provisioning_network(self, task): + """Remove the provisioning network from a node. + + :param task: A TaskManager instance. + """ + + @abc.abstractmethod + def configure_tenant_networks(self, task): + """Configure tenant networks for a node. + + :param task: A TaskManager instance. 
+ :raises: NetworkError + """ + + @abc.abstractmethod + def unconfigure_tenant_networks(self, task): + """Unconfigure tenant networks for a node. + + :param task: A TaskManager instance. + """ + + @abc.abstractmethod + def add_cleaning_network(self, task): + """Add the cleaning network to a node. + + :param task: A TaskManager instance. + :returns: a dictionary in the form {port.uuid: neutron_port['id']} + :raises: NetworkError + """ + + @abc.abstractmethod + def remove_cleaning_network(self, task): + """Remove the cleaning network from a node. + + :param task: A TaskManager instance. + :raises: NetworkError + """ + + def _validate_argsinfo(argsinfo): """Validate args info. diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py index 971bd63ed..79b19d5bd 100644 --- a/ironic/drivers/modules/agent.py +++ b/ironic/drivers/modules/agent.py @@ -380,8 +380,10 @@ class AgentDeploy(base.DeployInterface): """Boot into the agent to prepare for cleaning. :param task: a TaskManager object containing the node - :raises NodeCleaningFailure: if the previous cleaning ports cannot - be removed or if new cleaning ports cannot be created + :raises: NodeCleaningFailure, NetworkError if the previous cleaning + ports cannot be removed or if new cleaning ports cannot be created. + :raises: InvalidParameterValue if cleaning network UUID config option + has an invalid value. :returns: states.CLEANWAIT to signify an asynchronous prepare """ return deploy_utils.prepare_inband_cleaning( @@ -391,8 +393,8 @@ class AgentDeploy(base.DeployInterface): """Clean up the PXE and DHCP files after cleaning. 
:param task: a TaskManager object containing the node - :raises NodeCleaningFailure: if the cleaning ports cannot be - removed + :raises: NodeCleaningFailure, NetworkError if the cleaning ports cannot + be removed """ deploy_utils.tear_down_inband_cleaning( task, manage_boot=CONF.agent.manage_agent_boot) diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py index b37bec11a..2a23b39d4 100644 --- a/ironic/drivers/modules/deploy_utils.py +++ b/ironic/drivers/modules/deploy_utils.py @@ -910,6 +910,8 @@ def get_boot_option(node): return capabilities.get('boot_option', 'netboot').lower() +# TODO(vdrok): This method is left here for backwards compatibility with out of +# tree DHCP providers implementing cleaning methods. Remove it in Ocata def prepare_cleaning_ports(task): """Prepare the Ironic ports of the node for cleaning. @@ -919,17 +921,39 @@ def prepare_cleaning_ports(task): of each Ironic port, after creating the cleaning ports. :param task: a TaskManager object containing the node - :raises NodeCleaningFailure: if the previous cleaning ports cannot - be removed or if new cleaning ports cannot be created + :raises: NodeCleaningFailure, NetworkError if the previous cleaning ports + cannot be removed or if new cleaning ports cannot be created. + :raises: InvalidParameterValue if cleaning network UUID config option has + an invalid value. 
""" provider = dhcp_factory.DHCPFactory() + provider_manages_delete_cleaning = hasattr(provider.provider, + 'delete_cleaning_ports') + provider_manages_create_cleaning = hasattr(provider.provider, + 'create_cleaning_ports') + # NOTE(vdrok): The neutron DHCP provider was changed to call network + # interface's add_cleaning_network anyway, so call it directly to avoid + # duplication of some actions + if (CONF.dhcp.dhcp_provider == 'neutron' or + (not provider_manages_delete_cleaning and + not provider_manages_create_cleaning)): + task.driver.network.add_cleaning_network(task) + return + + LOG.warning(_LW("delete_cleaning_ports and create_cleaning_ports " + "functions in DHCP providers are deprecated, please move " + "this logic to the network interface's " + "remove_cleaning_network or add_cleaning_network methods " + "respectively and remove the old DHCP provider methods. " + "Possibility to do the cleaning via DHCP providers will " + "be removed in Ocata release.")) # If we have left over ports from a previous cleaning, remove them - if getattr(provider.provider, 'delete_cleaning_ports', None): + if provider_manages_delete_cleaning: # Allow to raise if it fails, is caught and handled in conductor provider.provider.delete_cleaning_ports(task) # Create cleaning ports if necessary - if getattr(provider.provider, 'create_cleaning_ports', None): + if provider_manages_create_cleaning: # Allow to raise if it fails, is caught and handled in conductor ports = provider.provider.create_cleaning_ports(task) @@ -953,6 +977,8 @@ def prepare_cleaning_ports(task): port.save() +# TODO(vdrok): This method is left here for backwards compatibility with out of +# tree DHCP providers implementing cleaning methods. Remove it in Ocata def tear_down_cleaning_ports(task): """Deletes the cleaning ports created for each of the Ironic ports. @@ -960,22 +986,36 @@ def tear_down_cleaning_ports(task): was started. 
:param task: a TaskManager object containing the node - :raises NodeCleaningFailure: if the cleaning ports cannot be + :raises: NodeCleaningFailure, NetworkError if the cleaning ports cannot be removed. """ # If we created cleaning ports, delete them provider = dhcp_factory.DHCPFactory() - if getattr(provider.provider, 'delete_cleaning_ports', None): + provider_manages_delete_cleaning = hasattr(provider.provider, + 'delete_cleaning_ports') + try: + # NOTE(vdrok): The neutron DHCP provider was changed to call network + # interface's remove_cleaning_network anyway, so call it directly to + # avoid duplication of some actions + if (CONF.dhcp.dhcp_provider == 'neutron' or + not provider_manages_delete_cleaning): + task.driver.network.remove_cleaning_network(task) + return + + # NOTE(vdrok): No need for another deprecation warning here, if + # delete_cleaning_ports is in the DHCP provider the warning was + # printed in prepare_cleaning_ports # Allow to raise if it fails, is caught and handled in conductor provider.provider.delete_cleaning_ports(task) - for port in task.ports: if 'cleaning_vif_port_id' in port.internal_info: internal_info = port.internal_info del internal_info['cleaning_vif_port_id'] port.internal_info = internal_info port.save() - elif 'vif_port_id' in port.extra: + finally: + for port in task.ports: + if 'vif_port_id' in port.extra: # TODO(vdrok): This piece is left for backwards compatibility, # if ironic was upgraded during cleaning, vif_port_id # containing cleaning neutron port UUID should be cleared, @@ -1028,8 +1068,10 @@ def prepare_inband_cleaning(task, manage_boot=True): automatically boot agent ramdisk every time bare metal node is rebooted. :returns: states.CLEANWAIT to signify an asynchronous prepare. 
- :raises NodeCleaningFailure: if the previous cleaning ports cannot - be removed or if new cleaning ports cannot be created + :raises: NetworkError, NodeCleaningFailure if the previous cleaning ports + cannot be removed or if new cleaning ports cannot be created. + :raises: InvalidParameterValue if cleaning network UUID config option has + an invalid value. """ prepare_cleaning_ports(task) @@ -1062,7 +1104,7 @@ def tear_down_inband_cleaning(task, manage_boot=True): :param manage_boot: If this is set to True, this method calls the 'clean_up_ramdisk' method of boot interface to boot the agent ramdisk. If False, it skips this step. - :raises NodeCleaningFailure: if the cleaning ports cannot be + :raises: NetworkError, NodeCleaningFailure if the cleaning ports cannot be removed. """ manager_utils.node_power_action(task, states.POWER_OFF) diff --git a/ironic/drivers/modules/ilo/deploy.py b/ironic/drivers/modules/ilo/deploy.py index a285ed4e1..9dc57f262 100644 --- a/ironic/drivers/modules/ilo/deploy.py +++ b/ironic/drivers/modules/ilo/deploy.py @@ -274,8 +274,8 @@ class IloVirtualMediaAgentDeploy(agent.AgentDeploy): :param task: a TaskManager object containing the node :returns: states.CLEANWAIT to signify an asynchronous prepare. - :raises NodeCleaningFailure: if the previous cleaning ports cannot - be removed or if new cleaning ports cannot be created + :raises: NodeCleaningFailure, NetworkError if the previous cleaning + ports cannot be removed or if new cleaning ports cannot be created :raises: IloOperationError, if some operation on iLO failed. """ # Powering off the Node before initiating boot for node cleaning. 
diff --git a/ironic/drivers/modules/network/__init__.py b/ironic/drivers/modules/network/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ironic/drivers/modules/network/flat.py b/ironic/drivers/modules/network/flat.py new file mode 100644 index 000000000..591c4e8df --- /dev/null +++ b/ironic/drivers/modules/network/flat.py @@ -0,0 +1,121 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Flat network interface. Useful for shared, flat networks. +""" + +from oslo_config import cfg +from oslo_log import log +from oslo_utils import uuidutils + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common.i18n import _LI +from ironic.common.i18n import _LW +from ironic.common import neutron +from ironic.drivers import base + + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + + +class FlatNetwork(base.NetworkInterface): + """Flat network interface.""" + + def __init__(self): + cleaning_net = CONF.neutron.cleaning_network_uuid + # TODO(vdrok): Switch to DriverLoadError in Ocata + if not uuidutils.is_uuid_like(cleaning_net): + LOG.warning(_LW( + 'Please specify a valid UUID for ' + '[neutron]/cleaning_network_uuid configuration option so that ' + 'this interface is able to perform cleaning. 
It will be ' + 'required starting with the Ocata release, and if not ' + 'specified then, the conductor service will fail to start if ' + '"flat" is in the list of values for ' + '[DEFAULT]enabled_network_interfaces configuration option.')) + + def add_provisioning_network(self, task): + """Add the provisioning network to a node. + + :param task: A TaskManager instance. + """ + pass + + def remove_provisioning_network(self, task): + """Remove the provisioning network from a node. + + :param task: A TaskManager instance. + """ + pass + + def configure_tenant_networks(self, task): + """Configure tenant networks for a node. + + :param task: A TaskManager instance. + """ + pass + + def unconfigure_tenant_networks(self, task): + """Unconfigure tenant networks for a node. + + :param task: A TaskManager instance. + """ + for port in task.ports: + extra_dict = port.extra + extra_dict.pop('vif_port_id', None) + port.extra = extra_dict + port.save() + + def add_cleaning_network(self, task): + """Add the cleaning network to a node. + + :param task: A TaskManager instance. 
+ :returns: a dictionary in the form {port.uuid: neutron_port['id']} + :raises: NetworkError, InvalidParameterValue + """ + if not uuidutils.is_uuid_like(CONF.neutron.cleaning_network_uuid): + raise exception.InvalidParameterValue(_( + 'You must provide a valid cleaning network UUID in ' + '[neutron]cleaning_network_uuid configuration option.')) + # If we have left over ports from a previous cleaning, remove them + neutron.rollback_ports(task, CONF.neutron.cleaning_network_uuid) + LOG.info(_LI('Adding cleaning network to node %s'), task.node.uuid) + vifs = neutron.add_ports_to_network( + task, CONF.neutron.cleaning_network_uuid, is_flat=True) + for port in task.ports: + if port.uuid in vifs: + internal_info = port.internal_info + internal_info['cleaning_vif_port_id'] = vifs[port.uuid] + port.internal_info = internal_info + port.save() + return vifs + + def remove_cleaning_network(self, task): + """Remove the cleaning network from a node. + + :param task: A TaskManager instance. + :raises: NetworkError + """ + LOG.info(_LI('Removing ports from cleaning network for node %s'), + task.node.uuid) + neutron.remove_ports_from_network( + task, CONF.neutron.cleaning_network_uuid) + for port in task.ports: + if 'cleaning_vif_port_id' in port.internal_info: + internal_info = port.internal_info + del internal_info['cleaning_vif_port_id'] + port.internal_info = internal_info + port.save() diff --git a/ironic/drivers/modules/network/noop.py b/ironic/drivers/modules/network/noop.py new file mode 100644 index 000000000..f4c530022 --- /dev/null +++ b/ironic/drivers/modules/network/noop.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from ironic.drivers import base + + +class NoopNetwork(base.NetworkInterface): + """Noop network interface.""" + + def add_provisioning_network(self, task): + """Add the provisioning network to a node. + + :param task: A TaskManager instance. + """ + pass + + def remove_provisioning_network(self, task): + """Remove the provisioning network from a node. + + :param task: A TaskManager instance. + """ + pass + + def configure_tenant_networks(self, task): + """Configure tenant networks for a node. + + :param task: A TaskManager instance. + """ + pass + + def unconfigure_tenant_networks(self, task): + """Unconfigure tenant networks for a node. + + :param task: A TaskManager instance. + """ + pass + + def add_cleaning_network(self, task): + """Add the cleaning network to a node. + + :param task: A TaskManager instance. + """ + pass + + def remove_cleaning_network(self, task): + """Remove the cleaning network from a node. + + :param task: A TaskManager instance. 
+ """ + pass diff --git a/ironic/tests/base.py b/ironic/tests/base.py index dd5834939..b4b7b7175 100644 --- a/ironic/tests/base.py +++ b/ironic/tests/base.py @@ -32,6 +32,7 @@ import fixtures from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_log import log as logging +from oslo_utils import uuidutils import testtools from ironic.common import config as ironic_config @@ -43,6 +44,7 @@ from ironic.tests.unit import policy_fixture CONF = cfg.CONF CONF.import_opt('host', 'ironic.common.service') +CONF.import_opt('cleaning_network_uuid', 'ironic.common.neutron', 'neutron') logging.register_options(CONF) logging.setup(CONF, 'ironic') @@ -115,6 +117,9 @@ class TestCase(testtools.TestCase): self.config(use_stderr=False, fatal_exception_format_errors=True, tempdir=tempfile.tempdir) + self.config(cleaning_network_uuid=uuidutils.generate_uuid(), + group='neutron') + self.config(enabled_network_interfaces=['flat', 'noop']) self.set_defaults(host='fake-mini', debug=True) self.set_defaults(connection="sqlite://", diff --git a/ironic/tests/unit/common/test_driver_factory.py b/ironic/tests/unit/common/test_driver_factory.py index e42fe1541..a8b286140 100644 --- a/ironic/tests/unit/common/test_driver_factory.py +++ b/ironic/tests/unit/common/test_driver_factory.py @@ -17,8 +17,11 @@ from stevedore import dispatch from ironic.common import driver_factory from ironic.common import exception +from ironic.conductor import task_manager from ironic.drivers import base as drivers_base from ironic.tests import base +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.objects import utils as obj_utils class FakeEp(object): @@ -86,3 +89,76 @@ class GetDriverTestCase(base.TestCase): def test_get_driver_unknown(self): self.assertRaises(exception.DriverNotFound, driver_factory.get_driver, 'unknown_driver') + + +class NetworkInterfaceFactoryTestCase(db_base.DbTestCase): + def setUp(self): + super(NetworkInterfaceFactoryTestCase, 
self).setUp() + driver_factory.DriverFactory._extension_manager = None + driver_factory.NetworkInterfaceFactory._extension_manager = None + self.config(enabled_drivers=['fake']) + + def test_build_driver_for_task(self): + # flat and noop network interfaces are enabled in base test case + factory = driver_factory.NetworkInterfaceFactory + node = obj_utils.create_test_node(self.context, driver='fake', + network_interface='flat') + with task_manager.acquire(self.context, node.id) as task: + extension_mgr = factory._extension_manager + self.assertIn('flat', extension_mgr) + self.assertIn('noop', extension_mgr) + self.assertEqual(extension_mgr['flat'].obj, task.driver.network) + self.assertEqual('ironic.hardware.interfaces.network', + factory._entrypoint_name) + self.assertEqual(['flat', 'noop'], + sorted(factory._enabled_driver_list)) + + def test_build_driver_for_task_default_is_none(self): + # flat and noop network interfaces are enabled in base test case + factory = driver_factory.NetworkInterfaceFactory + self.config(dhcp_provider='none', group='dhcp') + node = obj_utils.create_test_node(self.context, driver='fake') + with task_manager.acquire(self.context, node.id) as task: + extension_mgr = factory._extension_manager + self.assertIn('flat', extension_mgr) + self.assertIn('noop', extension_mgr) + self.assertEqual(extension_mgr['noop'].obj, task.driver.network) + + def test_build_driver_for_task_default_network_interface_is_set(self): + # flat and noop network interfaces are enabled in base test case + factory = driver_factory.NetworkInterfaceFactory + self.config(dhcp_provider='none', group='dhcp') + self.config(default_network_interface='flat') + node = obj_utils.create_test_node(self.context, driver='fake') + with task_manager.acquire(self.context, node.id) as task: + extension_mgr = factory._extension_manager + self.assertIn('flat', extension_mgr) + self.assertIn('noop', extension_mgr) + self.assertEqual(extension_mgr['flat'].obj, task.driver.network) + + def 
test_build_driver_for_task_default_is_flat(self): + # flat and noop network interfaces are enabled in base test case + factory = driver_factory.NetworkInterfaceFactory + node = obj_utils.create_test_node(self.context, driver='fake') + with task_manager.acquire(self.context, node.id) as task: + extension_mgr = factory._extension_manager + self.assertIn('flat', extension_mgr) + self.assertIn('noop', extension_mgr) + self.assertEqual(extension_mgr['flat'].obj, task.driver.network) + + def test_build_driver_for_task_unknown_network_interface(self): + node = obj_utils.create_test_node(self.context, driver='fake', + network_interface='meow') + self.assertRaises(exception.DriverNotFoundInEntrypoint, + task_manager.acquire, self.context, node.id) + + +class NewDriverFactory(driver_factory.BaseDriverFactory): + _entrypoint_name = 'woof' + + +class NewFactoryTestCase(db_base.DbTestCase): + def test_new_driver_factory_unknown_entrypoint(self): + factory = NewDriverFactory() + self.assertEqual('woof', factory._entrypoint_name) + self.assertEqual([], factory._enabled_driver_list) diff --git a/ironic/tests/unit/common/test_neutron.py b/ironic/tests/unit/common/test_neutron.py index c55c115a3..0076d741a 100644 --- a/ironic/tests/unit/common/test_neutron.py +++ b/ironic/tests/unit/common/test_neutron.py @@ -11,11 +11,18 @@ # under the License. 
import mock +from neutronclient.common import exceptions as neutron_client_exc from neutronclient.v2_0 import client from oslo_config import cfg +from oslo_utils import uuidutils +from ironic.common import exception from ironic.common import neutron +from ironic.conductor import task_manager from ironic.tests import base +from ironic.tests.unit.conductor import mgr_utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.objects import utils as object_utils class TestNeutronClient(base.TestCase): @@ -107,3 +114,249 @@ class TestNeutronClient(base.TestCase): self.assertRaises(ValueError, cfg.CONF.set_override, 'auth_strategy', 'fake', 'neutron', enforce_type=True) + + +class TestNeutronNetworkActions(db_base.DbTestCase): + + def setUp(self): + super(TestNeutronNetworkActions, self).setUp() + mgr_utils.mock_the_extension_manager(driver='fake') + self.config(enabled_drivers=['fake']) + self.node = object_utils.create_test_node(self.context) + self.ports = [object_utils.create_test_port( + self.context, node_id=self.node.id, + uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c782', + address='52:54:00:cf:2d:32', + extra={'vif_port_id': uuidutils.generate_uuid()} + )] + # Very simple neutron port representation + self.neutron_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f00', + 'mac_address': '52:54:00:cf:2d:32'} + self.network_uuid = uuidutils.generate_uuid() + + @mock.patch.object(client.Client, 'create_port') + def test_add_ports_to_vlan_network(self, create_mock): + # Ports will be created only if pxe_enabled is True + object_utils.create_test_port( + self.context, node_id=self.node.id, + uuid=uuidutils.generate_uuid(), + address='52:54:00:cf:2d:22', + pxe_enabled=False + ) + port = self.ports[0] + expected_body = { + 'port': { + 'network_id': self.network_uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'device_owner': 'baremetal:none', + 'binding:host_id': self.node.uuid, + 'device_id': self.node.uuid, + 'mac_address': 
port.address, + 'binding:profile': { + 'local_link_information': [port.local_link_connection] + } + } + } + # Ensure we can create ports + create_mock.return_value = {'port': self.neutron_port} + expected = {port.uuid: self.neutron_port['id']} + with task_manager.acquire(self.context, self.node.uuid) as task: + ports = neutron.add_ports_to_network(task, self.network_uuid) + self.assertEqual(expected, ports) + create_mock.assert_called_once_with(expected_body) + + @mock.patch.object(client.Client, 'create_port') + def test_add_ports_to_flat_network(self, create_mock): + port = self.ports[0] + expected_body = { + 'port': { + 'network_id': self.network_uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'device_owner': 'baremetal:none', + 'device_id': self.node.uuid, + 'mac_address': port.address, + 'binding:profile': { + 'local_link_information': [port.local_link_connection] + } + } + } + # Ensure we can create ports + create_mock.return_value = {'port': self.neutron_port} + expected = {port.uuid: self.neutron_port['id']} + with task_manager.acquire(self.context, self.node.uuid) as task: + ports = neutron.add_ports_to_network(task, self.network_uuid, + is_flat=True) + self.assertEqual(expected, ports) + create_mock.assert_called_once_with(expected_body) + + @mock.patch.object(client.Client, 'create_port') + def test_add_ports_to_flat_network_no_neutron_port_id(self, create_mock): + port = self.ports[0] + expected_body = { + 'port': { + 'network_id': self.network_uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'device_owner': 'baremetal:none', + 'device_id': self.node.uuid, + 'mac_address': port.address, + 'binding:profile': { + 'local_link_information': [port.local_link_connection] + } + } + } + del self.neutron_port['id'] + create_mock.return_value = {'port': self.neutron_port} + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertRaises(exception.NetworkError, + neutron.add_ports_to_network, + task, 
self.network_uuid, is_flat=True) + create_mock.assert_called_once_with(expected_body) + + @mock.patch.object(client.Client, 'create_port') + def test_add_ports_to_vlan_network_instance_uuid(self, create_mock): + self.node.instance_uuid = uuidutils.generate_uuid() + self.node.save() + port = self.ports[0] + expected_body = { + 'port': { + 'network_id': self.network_uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'device_owner': 'baremetal:none', + 'binding:host_id': self.node.uuid, + 'device_id': self.node.instance_uuid, + 'mac_address': port.address, + 'binding:profile': { + 'local_link_information': [port.local_link_connection] + } + } + } + # Ensure we can create ports + create_mock.return_value = {'port': self.neutron_port} + expected = {port.uuid: self.neutron_port['id']} + with task_manager.acquire(self.context, self.node.uuid) as task: + ports = neutron.add_ports_to_network(task, self.network_uuid) + self.assertEqual(expected, ports) + create_mock.assert_called_once_with(expected_body) + + @mock.patch.object(neutron, 'rollback_ports') + @mock.patch.object(client.Client, 'create_port') + def test_add_network_fail(self, create_mock, rollback_mock): + # Check that if creating a port fails, the ports are cleaned up + create_mock.side_effect = neutron_client_exc.ConnectionFailed + + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertRaisesRegex( + exception.NetworkError, 'Could not create neutron port', + neutron.add_ports_to_network, task, self.network_uuid) + rollback_mock.assert_called_once_with(task, self.network_uuid) + + @mock.patch.object(neutron, 'rollback_ports') + @mock.patch.object(client.Client, 'create_port', return_value={}) + def test_add_network_fail_create_any_port_empty(self, create_mock, + rollback_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertRaisesRegex( + exception.NetworkError, 'any PXE enabled port', + neutron.add_ports_to_network, task, 
self.network_uuid) + self.assertFalse(rollback_mock.called) + + @mock.patch.object(neutron, 'LOG') + @mock.patch.object(neutron, 'rollback_ports') + @mock.patch.object(client.Client, 'create_port') + def test_add_network_fail_create_some_ports_empty(self, create_mock, + rollback_mock, log_mock): + port2 = object_utils.create_test_port( + self.context, node_id=self.node.id, + uuid=uuidutils.generate_uuid(), + address='52:54:55:cf:2d:32', + extra={'vif_port_id': uuidutils.generate_uuid()} + ) + create_mock.side_effect = [{'port': self.neutron_port}, {}] + with task_manager.acquire(self.context, self.node.uuid) as task: + neutron.add_ports_to_network(task, self.network_uuid) + self.assertIn(str(port2.uuid), + # Call #0, argument #1 + log_mock.warning.call_args[0][1]['ports']) + self.assertFalse(rollback_mock.called) + + @mock.patch.object(neutron, 'remove_neutron_ports') + def test_remove_ports_from_network(self, remove_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + neutron.remove_ports_from_network(task, self.network_uuid) + remove_mock.assert_called_once_with( + task, + {'network_id': self.network_uuid, + 'mac_address': [self.ports[0].address]} + ) + + @mock.patch.object(neutron, 'remove_neutron_ports') + def test_remove_ports_from_network_not_all_pxe_enabled(self, remove_mock): + object_utils.create_test_port( + self.context, node_id=self.node.id, + uuid=uuidutils.generate_uuid(), + address='52:54:55:cf:2d:32', + pxe_enabled=False + ) + with task_manager.acquire(self.context, self.node.uuid) as task: + neutron.remove_ports_from_network(task, self.network_uuid) + remove_mock.assert_called_once_with( + task, + {'network_id': self.network_uuid, + 'mac_address': [self.ports[0].address]} + ) + + @mock.patch.object(client.Client, 'delete_port') + @mock.patch.object(client.Client, 'list_ports') + def test_remove_neutron_ports(self, list_mock, delete_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + 
list_mock.return_value = {'ports': [self.neutron_port]} + neutron.remove_neutron_ports(task, {'param': 'value'}) + list_mock.assert_called_once_with(**{'param': 'value'}) + delete_mock.assert_called_once_with(self.neutron_port['id']) + + @mock.patch.object(client.Client, 'list_ports') + def test_remove_neutron_ports_list_fail(self, list_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + list_mock.side_effect = neutron_client_exc.ConnectionFailed + self.assertRaisesRegex( + exception.NetworkError, 'Could not get given network VIF', + neutron.remove_neutron_ports, task, {'param': 'value'}) + list_mock.assert_called_once_with(**{'param': 'value'}) + + @mock.patch.object(client.Client, 'delete_port') + @mock.patch.object(client.Client, 'list_ports') + def test_remove_neutron_ports_delete_fail(self, list_mock, delete_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + delete_mock.side_effect = neutron_client_exc.ConnectionFailed + list_mock.return_value = {'ports': [self.neutron_port]} + self.assertRaisesRegex( + exception.NetworkError, 'Could not remove VIF', + neutron.remove_neutron_ports, task, {'param': 'value'}) + list_mock.assert_called_once_with(**{'param': 'value'}) + delete_mock.assert_called_once_with(self.neutron_port['id']) + + def test_get_node_portmap(self): + with task_manager.acquire(self.context, self.node.uuid) as task: + portmap = neutron.get_node_portmap(task) + self.assertEqual( + {self.ports[0].uuid: self.ports[0].local_link_connection}, + portmap + ) + + @mock.patch.object(neutron, 'remove_ports_from_network') + def test_rollback_ports(self, remove_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + neutron.rollback_ports(task, self.network_uuid) + remove_mock.assert_called_once_with(task, self.network_uuid) + + @mock.patch.object(neutron, 'LOG') + @mock.patch.object(neutron, 'remove_ports_from_network') + def test_rollback_ports_exception(self, remove_mock, log_mock): + 
remove_mock.side_effect = exception.NetworkError('boom') + with task_manager.acquire(self.context, self.node.uuid) as task: + neutron.rollback_ports(task, self.network_uuid) + self.assertTrue(log_mock.exception.called) diff --git a/ironic/tests/unit/conductor/test_base_manager.py b/ironic/tests/unit/conductor/test_base_manager.py index 4416f86f5..215228ed6 100644 --- a/ironic/tests/unit/conductor/test_base_manager.py +++ b/ironic/tests/unit/conductor/test_base_manager.py @@ -77,7 +77,8 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, tests_db_base.DbTestCase): @mock.patch.object(driver_factory.DriverFactory, '__getitem__', lambda *args: mock.MagicMock()) - def test_start_registers_driver_names(self): + @mock.patch.object(driver_factory, 'NetworkInterfaceFactory') + def test_start_registers_driver_names(self, net_factory): init_names = ['fake1', 'fake2'] restart_names = ['fake3', 'fake4'] @@ -99,6 +100,7 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, tests_db_base.DbTestCase): res = objects.Conductor.get_by_hostname(self.context, self.hostname) self.assertEqual(restart_names, res['drivers']) + self.assertEqual(2, net_factory.call_count) @mock.patch.object(driver_factory.DriverFactory, '__getitem__') def test_start_registers_driver_specific_tasks(self, get_mock): diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py index b7ef21c6b..74483b815 100644 --- a/ironic/tests/unit/conductor/test_manager.py +++ b/ironic/tests/unit/conductor/test_manager.py @@ -2341,7 +2341,8 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn, 'management': {'result': True}, 'boot': {'result': True}, 'raid': {'result': True}, - 'deploy': {'result': True}} + 'deploy': {'result': True}, + 'network': {'result': True}} self.assertEqual(expected, ret) mock_iwdi.assert_called_once_with(self.context, node.instance_info) diff --git a/ironic/tests/unit/dhcp/test_neutron.py b/ironic/tests/unit/dhcp/test_neutron.py 
index df576022f..f93476813 100644 --- a/ironic/tests/unit/dhcp/test_neutron.py +++ b/ironic/tests/unit/dhcp/test_neutron.py @@ -18,7 +18,6 @@ import mock from neutronclient.common import exceptions as neutron_client_exc from neutronclient.v2_0 import client -from oslo_config import cfg from oslo_utils import uuidutils from ironic.common import dhcp_factory @@ -474,127 +473,34 @@ class TestNeutron(db_base.DbTestCase): [mock.call(task, task.ports[0], mock.ANY), mock.call(task, task.portgroups[0], mock.ANY)]) - @mock.patch.object(client.Client, 'create_port') - def test_create_cleaning_ports(self, create_mock): - # Ensure we can create cleaning ports for in band cleaning - create_mock.return_value = {'port': self.neutron_port} - expected = {self.ports[0].uuid: self.neutron_port['id']} + @mock.patch.object(neutron, 'create_cleaning_ports_deprecation', False) + @mock.patch.object(neutron, 'LOG', autospec=True) + def test_create_cleaning_ports(self, log_mock): + self.config(cleaning_network_uuid=uuidutils.generate_uuid(), + group='neutron') api = dhcp_factory.DHCPFactory().provider with task_manager.acquire(self.context, self.node.uuid) as task: - ports = api.create_cleaning_ports(task) - self.assertEqual(expected, ports) - create_mock.assert_called_once_with({'port': { - 'network_id': '00000000-0000-0000-0000-000000000000', - 'admin_state_up': True, 'mac_address': self.ports[0].address}}) + with mock.patch.object( + task.driver.network, 'add_cleaning_network', + autospec=True) as add_net_mock: + api.create_cleaning_ports(task) + add_net_mock.assert_called_once_with(task) - @mock.patch.object(neutron.NeutronDHCPApi, '_rollback_cleaning_ports') - @mock.patch.object(client.Client, 'create_port') - def test_create_cleaning_ports_fail(self, create_mock, rollback_mock): - # Check that if creating a port fails, the ports are cleaned up - create_mock.side_effect = neutron_client_exc.ConnectionFailed + api.create_cleaning_ports(task) + self.assertEqual(1, 
log_mock.warning.call_count) + + @mock.patch.object(neutron, 'delete_cleaning_ports_deprecation', False) + @mock.patch.object(neutron, 'LOG', autospec=True) + def test_delete_cleaning_ports(self, log_mock): api = dhcp_factory.DHCPFactory().provider with task_manager.acquire(self.context, self.node.uuid) as task: - self.assertRaises(exception.NodeCleaningFailure, - api.create_cleaning_ports, - task) - create_mock.assert_called_once_with({'port': { - 'network_id': '00000000-0000-0000-0000-000000000000', - 'admin_state_up': True, 'mac_address': self.ports[0].address}}) - rollback_mock.assert_called_once_with(task) + with mock.patch.object( + task.driver.network, 'remove_cleaning_network', + autospec=True) as rm_net_mock: + api.delete_cleaning_ports(task) + rm_net_mock.assert_called_once_with(task) - @mock.patch.object(neutron.NeutronDHCPApi, '_rollback_cleaning_ports') - @mock.patch.object(client.Client, 'create_port') - def test_create_cleaning_ports_fail_delayed(self, create_mock, - rollback_mock): - """Check ports are cleaned up on failure to create them - - This test checks that the port clean-up occurs - when the port create call was successful, - but the port in fact was not created. 
- - """ - # NOTE(pas-ha) this is trying to emulate the complex port object - # with both methods and dictionary access with methods on elements - mockport = mock.MagicMock() - create_mock.return_value = mockport - # fail only on second 'or' branch to fool lazy eval - # and actually execute both expressions to assert on both mocks - mockport.get.return_value = True - mockitem = mock.Mock() - mockport.__getitem__.return_value = mockitem - mockitem.get.return_value = None - api = dhcp_factory.DHCPFactory().provider - - with task_manager.acquire(self.context, self.node.uuid) as task: - self.assertRaises(exception.NodeCleaningFailure, - api.create_cleaning_ports, - task) - create_mock.assert_called_once_with({'port': { - 'network_id': '00000000-0000-0000-0000-000000000000', - 'admin_state_up': True, 'mac_address': self.ports[0].address}}) - rollback_mock.assert_called_once_with(task) - mockport.get.assert_called_once_with('port') - mockitem.get.assert_called_once_with('id') - mockport.__getitem__.assert_called_once_with('port') - - @mock.patch.object(client.Client, 'create_port') - def test_create_cleaning_ports_bad_config(self, create_mock): - # Check an error is raised if the cleaning network is not set - self.config(cleaning_network_uuid=None, group='neutron') - api = dhcp_factory.DHCPFactory().provider - - with task_manager.acquire(self.context, self.node.uuid) as task: - self.assertRaises(exception.InvalidParameterValue, - api.create_cleaning_ports, task) - - @mock.patch.object(client.Client, 'delete_port') - @mock.patch.object(client.Client, 'list_ports') - def test_delete_cleaning_ports(self, list_mock, delete_mock): - # Ensure that we can delete cleaning ports, and that ports with - # different macs don't get deleted - other_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f01', - 'mac_address': 'aa:bb:cc:dd:ee:ff'} - list_mock.return_value = {'ports': [self.neutron_port, other_port]} - api = dhcp_factory.DHCPFactory().provider - - with 
task_manager.acquire(self.context, self.node.uuid) as task: - api.delete_cleaning_ports(task) - list_mock.assert_called_once_with( - network_id='00000000-0000-0000-0000-000000000000') - delete_mock.assert_called_once_with(self.neutron_port['id']) - - @mock.patch.object(client.Client, 'list_ports') - def test_delete_cleaning_ports_list_fail(self, list_mock): - # Check that if listing ports fails, the node goes to cleanfail - list_mock.side_effect = neutron_client_exc.ConnectionFailed - api = dhcp_factory.DHCPFactory().provider - - with task_manager.acquire(self.context, self.node.uuid) as task: - self.assertRaises(exception.NodeCleaningFailure, - api.delete_cleaning_ports, - task) - list_mock.assert_called_once_with( - network_id='00000000-0000-0000-0000-000000000000') - - @mock.patch.object(client.Client, 'delete_port') - @mock.patch.object(client.Client, 'list_ports') - def test_delete_cleaning_ports_delete_fail(self, list_mock, delete_mock): - # Check that if deleting ports fails, the node goes to cleanfail - list_mock.return_value = {'ports': [self.neutron_port]} - delete_mock.side_effect = neutron_client_exc.ConnectionFailed - api = dhcp_factory.DHCPFactory().provider - - with task_manager.acquire(self.context, self.node.uuid) as task: - self.assertRaises(exception.NodeCleaningFailure, - api.delete_cleaning_ports, - task) - list_mock.assert_called_once_with( - network_id='00000000-0000-0000-0000-000000000000') - delete_mock.assert_called_once_with(self.neutron_port['id']) - - def test_out_range_auth_strategy(self): - self.assertRaises(ValueError, cfg.CONF.set_override, - 'auth_strategy', 'fake', 'neutron', - enforce_type=True) + api.delete_cleaning_ports(task) + self.assertEqual(1, log_mock.warning.call_count) diff --git a/ironic/tests/unit/drivers/modules/network/__init__.py b/ironic/tests/unit/drivers/modules/network/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ironic/tests/unit/drivers/modules/network/test_flat.py 
b/ironic/tests/unit/drivers/modules/network/test_flat.py new file mode 100644 index 000000000..bf4b36b35 --- /dev/null +++ b/ironic/tests/unit/drivers/modules/network/test_flat.py @@ -0,0 +1,85 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg +from oslo_utils import uuidutils + +from ironic.common import exception +from ironic.common import neutron +from ironic.conductor import task_manager +from ironic.drivers.modules.network import flat as flat_interface +from ironic.tests.unit.conductor import mgr_utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.objects import utils + +CONF = cfg.CONF + + +class TestFlatInterface(db_base.DbTestCase): + + def setUp(self): + super(TestFlatInterface, self).setUp() + self.config(enabled_drivers=['fake']) + mgr_utils.mock_the_extension_manager() + self.interface = flat_interface.FlatNetwork() + self.node = utils.create_test_node(self.context) + self.port = utils.create_test_port( + self.context, node_id=self.node.id, + internal_info={ + 'cleaning_vif_port_id': uuidutils.generate_uuid()}) + + @mock.patch.object(flat_interface, 'LOG') + def test_init_incorrect_cleaning_net(self, mock_log): + self.config(cleaning_network_uuid=None, group='neutron') + flat_interface.FlatNetwork() + self.assertTrue(mock_log.warning.called) + + @mock.patch.object(neutron, 'add_ports_to_network') + @mock.patch.object(neutron, 'rollback_ports') + def test_add_cleaning_network(self, 
rollback_mock, add_mock): + add_mock.return_value = {self.port.uuid: 'vif-port-id'} + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.add_cleaning_network(task) + rollback_mock.assert_called_once_with( + task, CONF.neutron.cleaning_network_uuid) + add_mock.assert_called_once_with( + task, CONF.neutron.cleaning_network_uuid, is_flat=True) + self.port.refresh() + self.assertEqual('vif-port-id', + self.port.internal_info['cleaning_vif_port_id']) + + @mock.patch.object(neutron, 'add_ports_to_network') + @mock.patch.object(neutron, 'rollback_ports') + def test_add_cleaning_network_no_cleaning_net_uuid(self, rollback_mock, + add_mock): + self.config(cleaning_network_uuid='abc', group='neutron') + with task_manager.acquire(self.context, self.node.id) as task: + self.assertRaises(exception.InvalidParameterValue, + self.interface.add_cleaning_network, task) + self.assertFalse(rollback_mock.called) + self.assertFalse(add_mock.called) + + @mock.patch.object(neutron, 'remove_ports_from_network') + def test_remove_cleaning_network(self, remove_mock): + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.remove_cleaning_network(task) + remove_mock.assert_called_once_with( + task, CONF.neutron.cleaning_network_uuid) + self.port.refresh() + self.assertNotIn('cleaning_vif_port_id', self.port.internal_info) + + def test_unconfigure_tenant_networks(self): + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.unconfigure_tenant_networks(task) + self.port.refresh() + self.assertNotIn('vif_port_id', self.port.extra) diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index bb362b7c4..78412e71f 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -28,6 +28,7 @@ import testtools from testtools import matchers from ironic.common import boot_devices +from 
ironic.common import dhcp_factory from ironic.common import exception from ironic.common import image_service from ironic.common import keystone @@ -1735,38 +1736,66 @@ class AgentMethodsTestCase(db_base.DbTestCase): self.assertEqual(True, task.node.driver_internal_info[ 'agent_continue_if_ata_erase_failed']) - @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports', - autospec=True) - @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.create_cleaning_ports', - autospec=True) - def _test_prepare_inband_cleaning_ports( - self, create_mock, delete_mock, return_vif_port_id=True): + @mock.patch.object(utils.LOG, 'warning', autospec=True) + @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True) + def _test_prepare_inband_cleaning_ports_out_of_tree( + self, dhcp_factory_mock, log_mock, return_vif_port_id=True): + self.config(group='dhcp', dhcp_provider='my_shiny_dhcp_provider') + dhcp_provider = dhcp_factory_mock.return_value.provider + create = dhcp_provider.create_cleaning_ports + delete = dhcp_provider.delete_cleaning_ports if return_vif_port_id: - create_mock.return_value = {self.ports[0].uuid: 'vif-port-id'} + create.return_value = {self.ports[0].uuid: 'vif-port-id'} else: - create_mock.return_value = {} + create.return_value = {} with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: utils.prepare_cleaning_ports(task) - create_mock.assert_called_once_with(mock.ANY, task) - delete_mock.assert_called_once_with(mock.ANY, task) + create.assert_called_once_with(task) + delete.assert_called_once_with(task) + self.assertTrue(log_mock.called) self.ports[0].refresh() self.assertEqual('vif-port-id', self.ports[0].internal_info['cleaning_vif_port_id']) - def test_prepare_inband_cleaning_ports(self): - self._test_prepare_inband_cleaning_ports() + def test_prepare_inband_cleaning_ports_out_of_tree(self): + self._test_prepare_inband_cleaning_ports_out_of_tree() - def test_prepare_inband_cleaning_ports_no_vif_port_id(self): + def 
test_prepare_inband_cleaning_ports_out_of_tree_no_vif_port_id(self): self.assertRaises( exception.NodeCleaningFailure, - self._test_prepare_inband_cleaning_ports, + self._test_prepare_inband_cleaning_ports_out_of_tree, return_vif_port_id=False) - @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports', - autospec=True) - def test_tear_down_inband_cleaning_ports(self, neutron_mock): + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' + 'add_cleaning_network') + def test_prepare_inband_cleaning_ports_neutron(self, add_clean_net_mock): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + utils.prepare_cleaning_ports(task) + add_clean_net_mock.assert_called_once_with(task) + + @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.' + 'add_cleaning_network') + @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True) + def test_prepare_inband_cleaning_ports_provider_does_not_create( + self, dhcp_factory_mock, add_clean_net_mock): + self.config(group='dhcp', dhcp_provider='my_shiny_dhcp_provider') + dhcp_provider = dhcp_factory_mock.return_value.provider + del dhcp_provider.delete_cleaning_ports + del dhcp_provider.create_cleaning_ports + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + utils.prepare_cleaning_ports(task) + add_clean_net_mock.assert_called_once_with(task) + + @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True) + def test_tear_down_inband_cleaning_ports_out_of_tree(self, + dhcp_factory_mock): + self.config(group='dhcp', dhcp_provider='my_shiny_dhcp_provider') + dhcp_provider = dhcp_factory_mock.return_value.provider + delete = dhcp_provider.delete_cleaning_ports internal_info = self.ports[0].internal_info internal_info['cleaning_vif_port_id'] = 'vif-port-id-1' self.ports[0].internal_info = internal_info @@ -1774,18 +1803,46 @@ class AgentMethodsTestCase(db_base.DbTestCase): with task_manager.acquire( self.context, self.node.uuid, 
shared=False) as task: utils.tear_down_cleaning_ports(task) - neutron_mock.assert_called_once_with(mock.ANY, task) + delete.assert_called_once_with(task) self.ports[0].refresh() self.assertNotIn('cleaning_vif_port_id', self.ports[0].internal_info) self.assertNotIn('vif_port_id', self.ports[0].extra) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' + 'remove_cleaning_network') + def test_tear_down_inband_cleaning_ports_neutron(self, rm_clean_net_mock): + extra_port = obj_utils.create_test_port( + self.context, node_id=self.node.id, address='10:00:00:00:00:01', + extra={'vif_port_id': 'vif-port'}, uuid=uuidutils.generate_uuid() + ) + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + utils.tear_down_cleaning_ports(task) + rm_clean_net_mock.assert_called_once_with(task) + extra_port.refresh() + self.assertNotIn('vif_port_id', extra_port.extra) + + @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.' + 'remove_cleaning_network') + @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True) + def test_tear_down_inband_cleaning_ports_provider_does_not_delete( + self, dhcp_factory_mock, rm_clean_net_mock): + self.config(group='dhcp', dhcp_provider='my_shiny_dhcp_provider') + dhcp_provider = dhcp_factory_mock.return_value.provider + del dhcp_provider.delete_cleaning_ports + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + utils.tear_down_cleaning_ports(task) + rm_clean_net_mock.assert_called_once_with(task) + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True) @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) @mock.patch.object(utils, 'build_agent_options', autospec=True) - @mock.patch.object(utils, 'prepare_cleaning_ports', autospec=True) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'add_cleaning_network') def _test_prepare_inband_cleaning( - self, prepare_cleaning_ports_mock, + self, add_cleaning_network_mock, build_options_mock, power_mock, prepare_ramdisk_mock, manage_boot=True): build_options_mock.return_value = {'a': 'b'} @@ -1794,7 +1851,7 @@ class AgentMethodsTestCase(db_base.DbTestCase): self.assertEqual( states.CLEANWAIT, utils.prepare_inband_cleaning(task, manage_boot=manage_boot)) - prepare_cleaning_ports_mock.assert_called_once_with(task) + add_cleaning_network_mock.assert_called_once_with(task) power_mock.assert_called_once_with(task, states.REBOOT) self.assertEqual(1, task.node.driver_internal_info[ 'agent_erase_devices_iterations']) @@ -1815,16 +1872,17 @@ class AgentMethodsTestCase(db_base.DbTestCase): self._test_prepare_inband_cleaning(manage_boot=False) @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True) - @mock.patch.object(utils, 'tear_down_cleaning_ports', autospec=True) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'remove_cleaning_network') @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) def _test_tear_down_inband_cleaning( - self, power_mock, tear_down_ports_mock, + self, power_mock, remove_cleaning_network_mock, clean_up_ramdisk_mock, manage_boot=True): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: utils.tear_down_inband_cleaning(task, manage_boot=manage_boot) power_mock.assert_called_once_with(task, states.POWER_OFF) - tear_down_ports_mock.assert_called_once_with(task) + remove_cleaning_network_mock.assert_called_once_with(task) if manage_boot: clean_up_ramdisk_mock.assert_called_once_with( task.driver.boot, task) diff --git a/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml b/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml new file mode 100644 index 000000000..d77c5ade4 --- /dev/null +++ b/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml @@ -0,0 +1,29 @@ +--- +features: + - | + Added network interface. Introduced two network interface implementations: + ``flat``, which replicates the flat network behavior present previously and + ``noop`` when neutron is not used, which is basically a noop interface. + The network interface is used to switch network for node during + provisioning/cleaning. Added ``enabled_network_interfaces`` option in + DEFAULT config section. This option defines a list of enabled network + interfaces on the conductor. +deprecations: + - | + ``create_cleaning_ports`` and ``delete_cleaning_ports`` methods in DHCP + providers are deprecated and will be removed completely in the Ocata + release. The logic they are implementing should be moved to a custom + network interface's ``add_cleaning_network`` and + ``remove_cleaning_network`` methods respectively. After that, the methods + themselves should be removed from DHCP provider so that network interface + is used instead. 
``flat`` network interface does not require + ``[neutron]cleaning_network_uuid`` for now so as not to break standalone + deployments, but it will be required in the Ocata release. +upgrade: + - | + ``[DEFAULT]default_network_interface`` configuration option is introduced, + with empty default value. If set, the specified interface will be used as + the network interface for nodes that don't have ``network_interface`` field + set. If it is not set, the network interface is determined by looking at + the ``[dhcp]dhcp_provider`` value. If it is ``neutron`` - ``flat`` network + interface is the default, ``noop`` otherwise. diff --git a/setup.cfg b/setup.cfg index d7e5196aa..4d1f319fd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -87,6 +87,10 @@ ironic.drivers = pxe_iscsi_cimc = ironic.drivers.pxe:PXEAndCIMCDriver pxe_agent_cimc = ironic.drivers.agent:AgentAndCIMCDriver +ironic.hardware.interfaces.network = + flat = ironic.drivers.modules.network.flat:FlatNetwork + noop = ironic.drivers.modules.network.noop:NoopNetwork + ironic.database.migration_backend = sqlalchemy = ironic.db.sqlalchemy.migration From 170ba4f29520b338f8b54b1225541db974b40a86 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 6 Jul 2016 14:42:31 +0100 Subject: [PATCH 063/166] Add and document the "rotational" root device hint This patch is adding a new root device hint called "rotational". This hint is used to identify whether a device is rotational or not making it easy to distinguish HDDs and SSDs when choosing which disk Ironic should deploy the image onto. 
Closes-Bug: #1599517 Depends-On: I270fe57df825929bdef7911b3a6757cf7163a5f1 Change-Id: Id630a0b9d02ed8e1bd674c32bef0d489849c3f29 --- doc/source/deploy/install-guide.rst | 3 +++ ironic/drivers/modules/deploy_utils.py | 9 ++++++++- .../unit/drivers/modules/test_deploy_utils.py | 16 +++++++++++----- ...device-hints-rotational-c21f02130394e1d4.yaml | 4 ++++ 4 files changed, 26 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/root-device-hints-rotational-c21f02130394e1d4.yaml diff --git a/doc/source/deploy/install-guide.rst b/doc/source/deploy/install-guide.rst index 6ded72df1..8cfdd85f8 100644 --- a/doc/source/deploy/install-guide.rst +++ b/doc/source/deploy/install-guide.rst @@ -1905,6 +1905,9 @@ deployment. The list of support hints is: * wwn (STRING): unique storage identifier * wwn_with_extension (STRING): unique storage identifier with the vendor extension appended * wwn_vendor_extension (STRING): unique vendor storage identifier +* rotational (BOOLEAN): whether it's a rotational device or not. This + hint makes it easier to distinguish HDDs (rotational) and SSDs (not + rotational) when choosing which disk Ironic should deploy the image onto. 
* name (STRING): the device name, e.g /dev/md0 diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py index b37bec11a..2914463c5 100644 --- a/ironic/drivers/modules/deploy_utils.py +++ b/ironic/drivers/modules/deploy_utils.py @@ -104,7 +104,7 @@ LOG = logging.getLogger(__name__) VALID_ROOT_DEVICE_HINTS = set(('size', 'model', 'wwn', 'serial', 'vendor', 'wwn_with_extension', 'wwn_vendor_extension', - 'name')) + 'name', 'rotational')) SUPPORTED_CAPABILITIES = { 'boot_option': ('local', 'netboot'), @@ -710,6 +710,13 @@ def parse_root_device_hints(node): raise exception.InvalidParameterValue( _('Root device hint "size" is not an integer value.')) + if 'rotational' in root_device: + try: + strutils.bool_from_string(root_device['rotational'], strict=True) + except ValueError: + raise exception.InvalidParameterValue( + _('Root device hint "rotational" is not a boolean value.')) + hints = [] for key, value in sorted(root_device.items()): # NOTE(lucasagomes): We can't have spaces in the PXE config diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index bb362b7c4..ef09b252c 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -1214,13 +1214,14 @@ class OtherFunctionTestCase(db_base.DbTestCase): def test_parse_root_device_hints(self): self.node.properties['root_device'] = { - 'wwn': 123456, 'model': 'foo-model', 'size': 123, + 'wwn': '123456', 'model': 'foo-model', 'size': 123, 'serial': 'foo-serial', 'vendor': 'foo-vendor', 'name': '/dev/sda', - 'wwn_with_extension': 123456111, 'wwn_vendor_extension': 111, + 'wwn_with_extension': '123456111', 'wwn_vendor_extension': '111', + 'rotational': True, } - expected = ('model=foo-model,name=/dev/sda,serial=foo-serial,size=123,' - 'vendor=foo-vendor,wwn=123456,wwn_vendor_extension=111,' - 'wwn_with_extension=123456111') + expected = 
('model=foo-model,name=/dev/sda,rotational=True,' + 'serial=foo-serial,size=123,vendor=foo-vendor,wwn=123456,' + 'wwn_vendor_extension=111,wwn_with_extension=123456111') result = utils.parse_root_device_hints(self.node) self.assertEqual(expected, result) @@ -1245,6 +1246,11 @@ class OtherFunctionTestCase(db_base.DbTestCase): self.assertRaises(exception.InvalidParameterValue, utils.parse_root_device_hints, self.node) + def test_parse_root_device_hints_invalid_rotational(self): + self.node.properties['root_device'] = {'rotational': 'not-boolean'} + self.assertRaises(exception.InvalidParameterValue, + utils.parse_root_device_hints, self.node) + @mock.patch.object(utils, 'LOG', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(task_manager.TaskManager, 'process_event', diff --git a/releasenotes/notes/root-device-hints-rotational-c21f02130394e1d4.yaml b/releasenotes/notes/root-device-hints-rotational-c21f02130394e1d4.yaml new file mode 100644 index 000000000..000fce109 --- /dev/null +++ b/releasenotes/notes/root-device-hints-rotational-c21f02130394e1d4.yaml @@ -0,0 +1,4 @@ +--- +features: + - Extend the root device hints to identify whether a disk is rotational + or not. From f14661001c2de77d3a9789b687143849f8033196 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Fri, 8 Jul 2016 15:34:05 -0700 Subject: [PATCH 064/166] Fix docstring warnings Fix existing docstring warnings in Ironic doc builds. This is to make sure upcoming pbr release to fix the 'warnerrors' issue[0] doesn't break our doc builds. 
[0] http://lists.openstack.org/pipermail/openstack-dev/2016-June/097849.html Change-Id: Idd8db05bacb26833ff3ec3a2d8bf48d1fccee788 --- ironic/common/fsm.py | 2 +- ironic/conductor/manager.py | 2 +- ironic/dhcp/neutron.py | 33 ++++++++++---------- ironic/drivers/modules/agent_client.py | 4 +-- ironic/drivers/modules/drac/management.py | 7 ++--- ironic/drivers/modules/irmc/inspect.py | 38 ++++++++++++----------- ironic/drivers/modules/oneview/power.py | 10 +++--- setup.cfg | 1 + 8 files changed, 50 insertions(+), 47 deletions(-) diff --git a/ironic/common/fsm.py b/ironic/common/fsm.py index 52b9c4d92..9bd103a26 100644 --- a/ironic/common/fsm.py +++ b/ironic/common/fsm.py @@ -70,7 +70,7 @@ class FSM(machines.FiniteMachine): :param state: the state of interest :raises: InvalidState if the state is invalid - :returns True if it is a stable state; False otherwise + :returns: True if it is a stable state; False otherwise """ try: return self._states[state]['stable'] diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py index 5b277b887..4f85fa879 100644 --- a/ironic/conductor/manager.py +++ b/ironic/conductor/manager.py @@ -2146,7 +2146,7 @@ class ConductorManager(base_manager.BaseConductorManager): :param args: The positional arguments to the action method :param kwargs: The keyword arguments to the action method :returns: The result of the action method, which may (or may not) - be an instance of the implementing VersionedObject class. + be an instance of the implementing VersionedObject class. """ objclass = objects_base.IronicObject.obj_class_from_name( objname, object_versions[objname]) diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py index 25f833ff0..b7972538b 100644 --- a/ironic/dhcp/neutron.py +++ b/ironic/dhcp/neutron.py @@ -90,25 +90,26 @@ class NeutronDHCPApi(base.BaseDHCP): :param task: A TaskManager instance. :param options: this will be a list of dicts, e.g. 
- :: + :: - [{'opt_name': 'bootfile-name', - 'opt_value': 'pxelinux.0'}, - {'opt_name': 'server-ip-address', - 'opt_value': '123.123.123.456'}, - {'opt_name': 'tftp-server', - 'opt_value': '123.123.123.123'}] + [{'opt_name': 'bootfile-name', + 'opt_value': 'pxelinux.0'}, + {'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}] :param vifs: a dict of Neutron port/portgroup dicts - to update DHCP options on. The port/portgroup dict key - should be Ironic port UUIDs, and the values should be - Neutron port UUIDs, e.g. + to update DHCP options on. The port/portgroup dict + key should be Ironic port UUIDs, and the values + should be Neutron port UUIDs, e.g. - :: + :: - {'ports': {'port.uuid': vif.id}, - 'portgroups': {'portgroup.uuid': vif.id}} - If the value is None, will get the list of ports/portgroups - from the Ironic port/portgroup objects. + {'ports': {'port.uuid': vif.id}, + 'portgroups': {'portgroup.uuid': vif.id}} + If the value is None, will get the list of + ports/portgroups from the Ironic port/portgroup + objects. """ if vifs is None: vifs = network.get_node_vif_ids(task) @@ -200,7 +201,7 @@ class NeutronDHCPApi(base.BaseDHCP): :param p_obj: Ironic port or portgroup object. :param client: Neutron client instance. :returns: List of Neutron vif ip address associated with - Node's port/portgroup. + Node's port/portgroup. :raises: FailedToGetIPAddressOnPort :raises: InvalidIPv4Address """ diff --git a/ironic/drivers/modules/agent_client.py b/ironic/drivers/modules/agent_client.py index ae8da4c9a..be31dc45b 100644 --- a/ironic/drivers/modules/agent_client.py +++ b/ironic/drivers/modules/agent_client.py @@ -139,8 +139,8 @@ class AgentClient(object): :param iqn: iSCSI target IQN :param portal_port: iSCSI portal port :param wipe_disk_metadata: True if the agent should wipe first the - disk magic strings like the partition table, RAID or filesystem - signature. 
+ disk magic strings like the partition + table, RAID or filesystem signature. """ params = {'iqn': iqn} # This is to workaround passing default values to an old ramdisk diff --git a/ironic/drivers/modules/drac/management.py b/ironic/drivers/modules/drac/management.py index ae0ea6c22..1799f1a07 100644 --- a/ironic/drivers/modules/drac/management.py +++ b/ironic/drivers/modules/drac/management.py @@ -156,11 +156,11 @@ class DracManagement(base.ManagementInterface): :param task: a TaskManager instance containing the node to act on. :raises: DracOperationError on an error from python-dracclient. :returns: a dictionary containing: + :boot_device: the boot device, one of :mod:`ironic.common.boot_devices` or None if it is unknown. - :persistent: whether the boot device will persist to all - future boots or not, None if it is unknown. - + :persistent: whether the boot device will persist to all future + boots or not, None if it is unknown. """ node = task.node @@ -214,6 +214,5 @@ class DracManagement(base.ManagementInterface): :raises: FailedToParseSensorData when parsing sensor data fails. :returns: returns a consistent format dict of sensor data grouped by sensor type, which can be processed by Ceilometer. - """ raise NotImplementedError() diff --git a/ironic/drivers/modules/irmc/inspect.py b/ironic/drivers/modules/irmc/inspect.py index cc192fd97..a700e38ae 100644 --- a/ironic/drivers/modules/irmc/inspect.py +++ b/ironic/drivers/modules/irmc/inspect.py @@ -38,27 +38,28 @@ SC2.mib: sc2UnitNodeClass returns NIC type. 
sc2UnitNodeClass OBJECT-TYPE SYNTAX INTEGER { - unknown(1), - primary(2), - secondary(3), - management-blade(4), - secondary-remote(5), - secondary-remote-backup(6), - baseboard-controller(7) + unknown(1), + primary(2), + secondary(3), + management-blade(4), + secondary-remote(5), + secondary-remote-backup(6), + baseboard-controller(7) } ACCESS read-only STATUS mandatory DESCRIPTION "Management node class: - primary: local operating system interface - secondary: local management controller LAN interface - management-blade: management blade interface (in a blade server - chassis) - secondary-remote: remote management controller (in an RSB - concentrator environment) - secondary-remote-backup: backup remote management controller - baseboard-controller: local baseboard management controller (BMC)" + primary: local operating system interface + secondary: local management controller LAN interface + management-blade: management blade interface (in a blade server + chassis) + secondary-remote: remote management controller (in an RSB + concentrator environment) + secondary-remote-backup: backup remote management controller + baseboard-controller: local baseboard management controller (BMC)" ::= { sc2ManagementNodes 8 } """ + NODE_CLASS_OID_VALUE = { 'unknown': 1, 'primary': 2, @@ -81,6 +82,7 @@ sc2UnitNodeMacAddress OBJECT-TYPE DESCRIPTION "Management node hardware (MAC) address" ::= { sc2ManagementNodes 9 } """ + MAC_ADDRESS_OID = '1.3.6.1.4.1.231.2.10.2.2.10.3.1.1.9.1' @@ -111,10 +113,10 @@ def _inspect_hardware(node): :param node: node object. :raises: HardwareInspectionFailure, if unable to get essential - hardware properties. + hardware properties. :returns: a pair of dictionary and list, the dictionary contains - keys as in IRMCInspect.ESSENTIAL_PROPERTIES and its inspected - values, the list contains mac addresses. + keys as in IRMCInspect.ESSENTIAL_PROPERTIES and its inspected + values, the list contains mac addresses. 
""" try: report = irmc_common.get_irmc_report(node) diff --git a/ironic/drivers/modules/oneview/power.py b/ironic/drivers/modules/oneview/power.py index 917c75206..f53fbfdbf 100644 --- a/ironic/drivers/modules/oneview/power.py +++ b/ironic/drivers/modules/oneview/power.py @@ -50,7 +50,7 @@ class OneViewPower(base.PowerInterface): :param task: a task from TaskManager. :raises: MissingParameterValue if a required parameter is missing. :raises: InvalidParameterValue if parameters set are inconsistent with - resources in OneView + resources in OneView """ common.verify_node_info(task.node) @@ -65,9 +65,9 @@ class OneViewPower(base.PowerInterface): :param task: a TaskManager instance. :param node: The Node. :returns: one of :mod:`ironic.common.states` POWER_OFF, - POWER_ON or ERROR. + POWER_ON or ERROR. :raises: OneViewError if fails to retrieve power state of OneView - resource + resource """ oneview_info = common.get_oneview_info(task.node) @@ -90,7 +90,7 @@ class OneViewPower(base.PowerInterface): :param task: a TaskManager instance. :param node: The Node. :param power_state: The desired power state POWER_ON, POWER_OFF or - REBOOT from :mod:`ironic.common.states`. + REBOOT from :mod:`ironic.common.states`. :raises: InvalidParameterValue if an invalid power state was specified. :raises: PowerStateFailure if the power couldn't be set to power_state. :raises: OneViewError if OneView fails setting the power state. @@ -127,7 +127,7 @@ class OneViewPower(base.PowerInterface): :param task: a TaskManager instance. :param node: The Node. :raises: PowerStateFailure if the final state of the node is not - POWER_ON. + POWER_ON. 
""" self.set_power_state(task, states.REBOOT) diff --git a/setup.cfg b/setup.cfg index d7e5196aa..3406b8d5a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -98,6 +98,7 @@ autodoc_index_modules = True autodoc_exclude_modules = ironic.db.sqlalchemy.alembic.env ironic.db.sqlalchemy.alembic.versions.* +warnerrors = True [build_sphinx] all_files = 1 From ab97fa0f1f9ed6acb78c1ceadec8b333eac73254 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 17 May 2016 14:00:00 +0300 Subject: [PATCH 065/166] Add 'neutron' network interface This patch adds a 'neutron' network interface. This interface supports separate networks for provisioning and for cleaning of nodes. Partial-bug: #1526403 Co-Authored-By: Vladyslav Drok Change-Id: Ia3442ab3536a1a8d8839b24dbfc640b818450350 --- etc/ironic/ironic.conf.sample | 12 +- ironic/common/network.py | 8 +- ironic/common/neutron.py | 12 +- ironic/dhcp/neutron.py | 7 +- ironic/drivers/modules/deploy_utils.py | 4 + ironic/drivers/modules/network/neutron.py | 212 ++++++++++++++++ ironic/tests/base.py | 4 +- .../tests/unit/common/test_driver_factory.py | 2 +- ironic/tests/unit/common/test_network.py | 16 +- ironic/tests/unit/dhcp/test_neutron.py | 43 ++-- .../drivers/modules/network/test_neutron.py | 231 ++++++++++++++++++ .../unit/drivers/modules/test_deploy_utils.py | 11 + ...on-network-interface-aa9e7e65011ab8cd.yaml | 14 ++ setup.cfg | 1 + 14 files changed, 536 insertions(+), 41 deletions(-) create mode 100644 ironic/drivers/modules/network/neutron.py create mode 100644 ironic/tests/unit/drivers/modules/network/test_neutron.py create mode 100644 releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 0f092b718..796358e9f 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -1518,12 +1518,16 @@ #auth_strategy = keystone # Neutron network UUID for the ramdisk to be booted into for -# cleaning nodes. 
Required if cleaning (either automatic or -# manual) is run for flat network interface, and, if DHCP -# providers are still being used, for neutron DHCP provider. -# (string value) +# cleaning nodes. Required for "neutron" network interface. It +# is also required if cleaning nodes when using "flat" network +# interface or "neutron" DHCP provider. (string value) #cleaning_network_uuid = +# Neutron network UUID for the ramdisk to be booted into for +# provisioning nodes. Required for "neutron" network +# interface. (string value) +#provisioning_network_uuid = + [oneview] diff --git a/ironic/common/network.py b/ironic/common/network.py index 301384bf9..78aabd58e 100644 --- a/ironic/common/network.py +++ b/ironic/common/network.py @@ -32,15 +32,19 @@ def get_node_vif_ids(task): portgroup_vifs = {} port_vifs = {} for portgroup in task.portgroups: - # NOTE(vdrok): This works because cleaning_vif_port_id doesn't exist - # when we're in deployment/tenant network + # NOTE(vdrok): We are booting the node only in one network at a time, + # and presence of cleaning_vif_port_id means we're doing cleaning, of + # provisioning_vif_port_id - provisioning. Otherwise it's a tenant + # network vif = (portgroup.internal_info.get('cleaning_vif_port_id') or + portgroup.internal_info.get('provisioning_vif_port_id') or portgroup.extra.get('vif_port_id')) if vif: portgroup_vifs[portgroup.uuid] = vif vifs['portgroups'] = portgroup_vifs for port in task.ports: vif = (port.internal_info.get('cleaning_vif_port_id') or + port.internal_info.get('provisioning_vif_port_id') or port.extra.get('vif_port_id')) if vif: port_vifs[port.uuid] = vif diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py index fc200207b..780d4d111 100644 --- a/ironic/common/neutron.py +++ b/ironic/common/neutron.py @@ -51,10 +51,14 @@ neutron_opts = [ 'should only be used for testing.')), cfg.StrOpt('cleaning_network_uuid', help=_('Neutron network UUID for the ramdisk to be booted ' - 'into for cleaning nodes. 
Required if cleaning (either ' - 'automatic or manual) is run for flat network interface,' - ' and, if DHCP providers are still being used, for ' - 'neutron DHCP provider.')) + 'into for cleaning nodes. Required for "neutron" ' + 'network interface. It is also required if cleaning ' + 'nodes when using "flat" network interface or "neutron" ' + 'DHCP provider.')), + cfg.StrOpt('provisioning_network_uuid', + help=_('Neutron network UUID for the ramdisk to be booted ' + 'into for provisioning nodes. Required for "neutron" ' + 'network interface.')), ] CONF.register_opts(neutron_opts, group='neutron') diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py index 084300d1a..a5027e05f 100644 --- a/ironic/dhcp/neutron.py +++ b/ironic/dhcp/neutron.py @@ -208,9 +208,12 @@ class NeutronDHCPApi(base.BaseDHCP): :raises: InvalidIPv4Address """ - # NOTE(vdrok): This works because cleaning_vif_port_id doesn't exist - # when we're in deployment/tenant network + # NOTE(vdrok): We are booting the node only in one network at a time, + # and presence of cleaning_vif_port_id means we're doing cleaning, of + # provisioning_vif_port_id - provisioning. Otherwise it's a tenant + # network vif = (p_obj.internal_info.get('cleaning_vif_port_id') or + p_obj.internal_info.get('provisioning_vif_port_id') or p_obj.extra.get('vif_port_id')) if not vif: obj_name = 'portgroup' diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py index 2a23b39d4..87aaf65de 100644 --- a/ironic/drivers/modules/deploy_utils.py +++ b/ironic/drivers/modules/deploy_utils.py @@ -518,8 +518,12 @@ def get_single_nic_with_vif_port_id(task): :returns: MAC address of the port connected to deployment network. None if it cannot find any port with vif id. """ + # NOTE(vdrok): We are booting the node only in one network at a time, + # and presence of cleaning_vif_port_id means we're doing cleaning, of + # provisioning_vif_port_id - provisioning. 
Otherwise it's a tenant network for port in task.ports: if (port.internal_info.get('cleaning_vif_port_id') or + port.internal_info.get('provisioning_vif_port_id') or port.extra.get('vif_port_id')): return port.address diff --git a/ironic/drivers/modules/network/neutron.py b/ironic/drivers/modules/network/neutron.py new file mode 100644 index 000000000..5b8daaf5d --- /dev/null +++ b/ironic/drivers/modules/network/neutron.py @@ -0,0 +1,212 @@ +# Copyright 2015 Rackspace, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from neutronclient.common import exceptions as neutron_exceptions +from oslo_config import cfg +from oslo_log import log +from oslo_utils import uuidutils + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common.i18n import _LI +from ironic.common.i18n import _LW +from ironic.common import neutron +from ironic.drivers import base +from ironic import objects + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + + +class NeutronNetwork(base.NetworkInterface): + """Neutron v2 network interface""" + + def __init__(self): + failures = [] + cleaning_net = CONF.neutron.cleaning_network_uuid + if not uuidutils.is_uuid_like(cleaning_net): + failures.append('cleaning_network_uuid=%s' % cleaning_net) + + provisioning_net = CONF.neutron.provisioning_network_uuid + if not uuidutils.is_uuid_like(provisioning_net): + failures.append('provisioning_network_uuid=%s' % provisioning_net) + + if failures: + raise exception.DriverLoadError( + driver=self.__class__.__name__, + reason=(_('The following [neutron] group configuration ' + 'options are incorrect, they must be valid UUIDs: ' + '%s') % ', '.join(failures))) + + def add_provisioning_network(self, task): + """Add the provisioning network to a node. + + :param task: A TaskManager instance. + :raises: NetworkError + """ + LOG.info(_LI('Adding provisioning network to node %s'), + task.node.uuid) + vifs = neutron.add_ports_to_network( + task, CONF.neutron.provisioning_network_uuid) + for port in task.ports: + if port.uuid in vifs: + internal_info = port.internal_info + internal_info['provisioning_vif_port_id'] = vifs[port.uuid] + port.internal_info = internal_info + port.save() + + def remove_provisioning_network(self, task): + """Remove the provisioning network from a node. + + :param task: A TaskManager instance. 
+ :raises: NetworkError + """ + LOG.info(_LI('Removing provisioning network from node %s'), + task.node.uuid) + neutron.remove_ports_from_network( + task, CONF.neutron.provisioning_network_uuid) + for port in task.ports: + if 'provisioning_vif_port_id' in port.internal_info: + internal_info = port.internal_info + del internal_info['provisioning_vif_port_id'] + port.internal_info = internal_info + port.save() + + def add_cleaning_network(self, task): + """Create neutron ports for each port on task.node to boot the ramdisk. + + :param task: a TaskManager instance. + :raises: NetworkError + :returns: a dictionary in the form {port.uuid: neutron_port['id']} + """ + # If we have left over ports from a previous cleaning, remove them + neutron.rollback_ports(task, CONF.neutron.cleaning_network_uuid) + LOG.info(_LI('Adding cleaning network to node %s'), task.node.uuid) + vifs = neutron.add_ports_to_network(task, + CONF.neutron.cleaning_network_uuid) + for port in task.ports: + if port.uuid in vifs: + internal_info = port.internal_info + internal_info['cleaning_vif_port_id'] = vifs[port.uuid] + port.internal_info = internal_info + port.save() + return vifs + + def remove_cleaning_network(self, task): + """Deletes the neutron port created for booting the ramdisk. + + :param task: a TaskManager instance. + :raises: NetworkError + """ + LOG.info(_LI('Removing cleaning network from node %s'), + task.node.uuid) + neutron.remove_ports_from_network( + task, CONF.neutron.cleaning_network_uuid) + for port in task.ports: + if 'cleaning_vif_port_id' in port.internal_info: + internal_info = port.internal_info + del internal_info['cleaning_vif_port_id'] + port.internal_info = internal_info + port.save() + + def configure_tenant_networks(self, task): + """Configure tenant networks for a node. + + :param task: A TaskManager instance. 
+ :raises: NetworkError + """ + node = task.node + ports = task.ports + LOG.info(_LI('Mapping instance ports to %s'), node.uuid) + + # TODO(russell_h): this is based on the broken assumption that the + # number of Neutron ports will match the number of physical ports. + # Instead, we should probably list ports for this instance in + # Neutron and update all of those with the appropriate portmap. + if not ports: + msg = _("No ports are associated with node %s") % node.uuid + LOG.error(msg) + raise exception.NetworkError(msg) + ports = [p for p in ports if not p.portgroup_id] + portgroups = task.portgroups + + portmap = neutron.get_node_portmap(task) + + client = neutron.get_client(task.context.auth_token) + for port_like_obj in ports + portgroups: + vif_port_id = port_like_obj.extra.get('vif_port_id') + + if not vif_port_id: + LOG.warning( + _LW('%(port_like_object)s %(pobj_uuid)s in node %(node)s ' + 'has no vif_port_id value in extra field.'), + {'port_like_object': port_like_obj.__class__.__name__, + 'pobj_uuid': port_like_obj.uuid, 'node': node.uuid}) + continue + + LOG.debug('Mapping tenant port %(vif_port_id)s to node ' + '%(node_id)s', + {'vif_port_id': vif_port_id, 'node_id': node.uuid}) + local_link_info = [] + if isinstance(port_like_obj, objects.Portgroup): + pg_ports = [p for p in task.ports + if p.portgroup_id == port_like_obj.id] + for port in pg_ports: + local_link_info.append(portmap[port.uuid]) + else: + # We iterate only on ports or portgroups, no need to check + # that it is a port + local_link_info.append(portmap[port_like_obj.uuid]) + body = { + 'port': { + 'device_owner': 'baremetal:none', + 'device_id': node.instance_uuid or node.uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'binding:host_id': node.uuid, + 'binding:profile': { + 'local_link_information': local_link_info, + }, + } + } + + try: + client.update_port(vif_port_id, body) + except neutron_exceptions.ConnectionFailed as e: + msg = (_('Could not add public 
network VIF %(vif)s ' + 'to node %(node)s, possible network issue. %(exc)s') % + {'vif': vif_port_id, + 'node': node.uuid, + 'exc': e}) + LOG.error(msg) + raise exception.NetworkError(msg) + + def unconfigure_tenant_networks(self, task): + """Unconfigure tenant networks for a node. + + Even though nova takes care of port removal from tenant network, we + remove it here/now to avoid the possibility of the ironic port being + bound to the tenant and cleaning networks at the same time. + + :param task: A TaskManager instance. + :raises: NetworkError + """ + node = task.node + LOG.info(_LI('Unmapping instance ports from node %s'), node.uuid) + params = {'device_id': node.instance_uuid or node.uuid} + + neutron.remove_neutron_ports(task, params) diff --git a/ironic/tests/base.py b/ironic/tests/base.py index b4b7b7175..8a205ba63 100644 --- a/ironic/tests/base.py +++ b/ironic/tests/base.py @@ -119,7 +119,9 @@ class TestCase(testtools.TestCase): tempdir=tempfile.tempdir) self.config(cleaning_network_uuid=uuidutils.generate_uuid(), group='neutron') - self.config(enabled_network_interfaces=['flat', 'noop']) + self.config(provisioning_network_uuid=uuidutils.generate_uuid(), + group='neutron') + self.config(enabled_network_interfaces=['flat', 'noop', 'neutron']) self.set_defaults(host='fake-mini', debug=True) self.set_defaults(connection="sqlite://", diff --git a/ironic/tests/unit/common/test_driver_factory.py b/ironic/tests/unit/common/test_driver_factory.py index a8b286140..e83d81555 100644 --- a/ironic/tests/unit/common/test_driver_factory.py +++ b/ironic/tests/unit/common/test_driver_factory.py @@ -110,7 +110,7 @@ class NetworkInterfaceFactoryTestCase(db_base.DbTestCase): self.assertEqual(extension_mgr['flat'].obj, task.driver.network) self.assertEqual('ironic.hardware.interfaces.network', factory._entrypoint_name) - self.assertEqual(['flat', 'noop'], + self.assertEqual(['flat', 'neutron', 'noop'], sorted(factory._enabled_driver_list)) def 
test_build_driver_for_task_default_is_none(self): diff --git a/ironic/tests/unit/common/test_network.py b/ironic/tests/unit/common/test_network.py index e2240a993..a37267a06 100644 --- a/ironic/tests/unit/common/test_network.py +++ b/ironic/tests/unit/common/test_network.py @@ -95,15 +95,21 @@ class TestNetwork(db_base.DbTestCase): result = network.get_node_vif_ids(task) self.assertEqual(expected, result) - def test_get_node_vif_ids_during_cleaning(self): + def _test_get_node_vif_ids_multitenancy(self, int_info_key): port = db_utils.create_test_port( node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', - internal_info={'cleaning_vif_port_id': 'test-vif-A'}) + internal_info={int_info_key: 'test-vif-A'}) portgroup = db_utils.create_test_portgroup( node_id=self.node.id, address='dd:ee:ff:aa:bb:cc', - internal_info={'cleaning_vif_port_id': 'test-vif-B'}) - expected = {'portgroups': {portgroup.uuid: 'test-vif-B'}, - 'ports': {port.uuid: 'test-vif-A'}} + internal_info={int_info_key: 'test-vif-B'}) + expected = {'ports': {port.uuid: 'test-vif-A'}, + 'portgroups': {portgroup.uuid: 'test-vif-B'}} with task_manager.acquire(self.context, self.node.uuid) as task: result = network.get_node_vif_ids(task) self.assertEqual(expected, result) + + def test_get_node_vif_ids_during_cleaning(self): + self._test_get_node_vif_ids_multitenancy('cleaning_vif_port_id') + + def test_get_node_vif_ids_during_provisioning(self): + self._test_get_node_vif_ids_multitenancy('provisioning_vif_port_id') diff --git a/ironic/tests/unit/dhcp/test_neutron.py b/ironic/tests/unit/dhcp/test_neutron.py index f93476813..ca93feb66 100644 --- a/ironic/tests/unit/dhcp/test_neutron.py +++ b/ironic/tests/unit/dhcp/test_neutron.py @@ -322,31 +322,21 @@ class TestNeutron(db_base.DbTestCase): fake_client.show_port.assert_called_once_with(port_id) @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address') - def test__get_port_ip_address(self, mock_gfia): - expected = "192.168.1.3" - port = 
object_utils.create_test_port(self.context, - node_id=self.node.id, - address='aa:bb:cc:dd:ee:ff', - uuid=uuidutils.generate_uuid(), - extra={'vif_port_id': - 'test-vif-A'}, - driver='fake') - mock_gfia.return_value = expected - with task_manager.acquire(self.context, - self.node.uuid) as task: - api = dhcp_factory.DHCPFactory().provider - result = api._get_port_ip_address(task, port, - mock.sentinel.client) - self.assertEqual(expected, result) - mock_gfia.assert_called_once_with('test-vif-A', mock.sentinel.client) - - @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address') - def test__get_port_ip_address_cleaning(self, mock_gfia): + def _test__get_port_ip_address(self, mock_gfia, network): expected = "192.168.1.3" + fake_vif = 'test-vif-%s' % network port = object_utils.create_test_port( self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(), - internal_info={'cleaning_vif_port_id': 'test-vif-A'}) + extra={'vif_port_id': fake_vif} if network == 'tenant' else {}, + internal_info={ + 'cleaning_vif_port_id': (fake_vif if network == 'cleaning' + else None), + 'provisioning_vif_port_id': (fake_vif + if network == 'provisioning' + else None), + } + ) mock_gfia.return_value = expected with task_manager.acquire(self.context, self.node.uuid) as task: @@ -354,7 +344,16 @@ class TestNeutron(db_base.DbTestCase): result = api._get_port_ip_address(task, port, mock.sentinel.client) self.assertEqual(expected, result) - mock_gfia.assert_called_once_with('test-vif-A', mock.sentinel.client) + mock_gfia.assert_called_once_with(fake_vif, mock.sentinel.client) + + def test__get_port_ip_address_tenant(self): + self._test__get_port_ip_address(network='tenant') + + def test__get_port_ip_address_cleaning(self): + self._test__get_port_ip_address(network='cleaning') + + def test__get_port_ip_address_provisioning(self): + self._test__get_port_ip_address(network='provisioning') 
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address') def test__get_port_ip_address_for_portgroup(self, mock_gfia): diff --git a/ironic/tests/unit/drivers/modules/network/test_neutron.py b/ironic/tests/unit/drivers/modules/network/test_neutron.py new file mode 100644 index 000000000..08d89c24d --- /dev/null +++ b/ironic/tests/unit/drivers/modules/network/test_neutron.py @@ -0,0 +1,231 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import mock +from neutronclient.common import exceptions as neutron_exceptions +from oslo_config import cfg +from oslo_utils import uuidutils + +from ironic.common import exception +from ironic.common import neutron as neutron_common +from ironic.conductor import task_manager +from ironic.drivers.modules.network import neutron +from ironic.tests.unit.conductor import mgr_utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.objects import utils + +CONF = cfg.CONF + + +class NeutronInterfaceTestCase(db_base.DbTestCase): + + def setUp(self): + super(NeutronInterfaceTestCase, self).setUp() + self.config(enabled_drivers=['fake']) + mgr_utils.mock_the_extension_manager() + self.interface = neutron.NeutronNetwork() + self.node = utils.create_test_node(self.context, + network_interface='neutron') + self.port = utils.create_test_port( + self.context, node_id=self.node.id, + address='52:54:00:cf:2d:32', + extra={'vif_port_id': uuidutils.generate_uuid()}) + self.neutron_port = {'id': 
'132f871f-eaec-4fed-9475-0d54465e0f00', + 'mac_address': '52:54:00:cf:2d:32'} + + def test_init_incorrect_provisioning_net(self): + self.config(provisioning_network_uuid=None, group='neutron') + self.assertRaises(exception.DriverLoadError, neutron.NeutronNetwork) + self.config(provisioning_network_uuid=uuidutils.generate_uuid(), + group='neutron') + self.config(cleaning_network_uuid='asdf', group='neutron') + self.assertRaises(exception.DriverLoadError, neutron.NeutronNetwork) + + @mock.patch.object(neutron_common, 'add_ports_to_network') + def test_add_provisioning_network(self, add_ports_mock): + add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']} + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.add_provisioning_network(task) + add_ports_mock.assert_called_once_with( + task, CONF.neutron.provisioning_network_uuid) + self.port.refresh() + self.assertEqual(self.neutron_port['id'], + self.port.internal_info['provisioning_vif_port_id']) + + @mock.patch.object(neutron_common, 'remove_ports_from_network') + def test_remove_provisioning_network(self, remove_ports_mock): + self.port.internal_info = {'provisioning_vif_port_id': 'vif-port-id'} + self.port.save() + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.remove_provisioning_network(task) + remove_ports_mock.assert_called_once_with( + task, CONF.neutron.provisioning_network_uuid) + self.port.refresh() + self.assertNotIn('provisioning_vif_port_id', self.port.internal_info) + + @mock.patch.object(neutron_common, 'rollback_ports') + @mock.patch.object(neutron_common, 'add_ports_to_network') + def test_add_cleaning_network(self, add_ports_mock, rollback_mock): + add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']} + with task_manager.acquire(self.context, self.node.id) as task: + res = self.interface.add_cleaning_network(task) + rollback_mock.assert_called_once_with( + task, CONF.neutron.cleaning_network_uuid) + 
self.assertEqual(res, add_ports_mock.return_value) + self.port.refresh() + self.assertEqual(self.neutron_port['id'], + self.port.internal_info['cleaning_vif_port_id']) + + @mock.patch.object(neutron_common, 'remove_ports_from_network') + def test_remove_cleaning_network(self, remove_ports_mock): + self.port.internal_info = {'cleaning_vif_port_id': 'vif-port-id'} + self.port.save() + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.remove_cleaning_network(task) + remove_ports_mock.assert_called_once_with( + task, CONF.neutron.cleaning_network_uuid) + self.port.refresh() + self.assertNotIn('cleaning_vif_port_id', self.port.internal_info) + + @mock.patch.object(neutron_common, 'remove_neutron_ports') + def test_unconfigure_tenant_networks(self, remove_ports_mock): + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.unconfigure_tenant_networks(task) + remove_ports_mock.assert_called_once_with( + task, {'device_id': task.node.uuid}) + + def test_configure_tenant_networks_no_ports_for_node(self): + n = utils.create_test_node(self.context, network_interface='neutron', + uuid=uuidutils.generate_uuid()) + with task_manager.acquire(self.context, n.id) as task: + self.assertRaisesRegexp( + exception.NetworkError, 'No ports are associated', + self.interface.configure_tenant_networks, task) + + @mock.patch.object(neutron_common, 'get_client') + @mock.patch.object(neutron, 'LOG') + def test_configure_tenant_networks_no_vif_id(self, log_mock, client_mock): + self.port.extra = {} + self.port.save() + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.configure_tenant_networks(task) + client_mock.assert_called_once_with(task.context.auth_token) + self.assertIn('no vif_port_id value in extra', + log_mock.warning.call_args[0][0]) + + @mock.patch.object(neutron_common, 'get_client') + def test_configure_tenant_networks_update_fail(self, client_mock): + client = client_mock.return_value + 
client.update_port.side_effect = neutron_exceptions.ConnectionFailed( + reason='meow') + with task_manager.acquire(self.context, self.node.id) as task: + self.assertRaisesRegexp( + exception.NetworkError, 'Could not add', + self.interface.configure_tenant_networks, task) + client_mock.assert_called_once_with(task.context.auth_token) + + @mock.patch.object(neutron_common, 'get_client') + def _test_configure_tenant_networks(self, client_mock): + upd_mock = mock.Mock() + client_mock.return_value.update_port = upd_mock + second_port = utils.create_test_port( + self.context, node_id=self.node.id, address='52:54:00:cf:2d:33', + extra={'vif_port_id': uuidutils.generate_uuid()}, + uuid=uuidutils.generate_uuid(), + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:ff', + 'port_id': 'Ethernet1/1', + 'switch_info': 'switch2'} + ) + expected_body = { + 'port': { + 'device_owner': 'baremetal:none', + 'device_id': self.node.instance_uuid or self.node.uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'binding:host_id': self.node.uuid, + } + } + port1_body = copy.deepcopy(expected_body) + port1_body['port']['binding:profile'] = { + 'local_link_information': [self.port.local_link_connection] + } + port2_body = copy.deepcopy(expected_body) + port2_body['port']['binding:profile'] = { + 'local_link_information': [second_port.local_link_connection] + } + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.configure_tenant_networks(task) + client_mock.assert_called_once_with(task.context.auth_token) + upd_mock.assert_has_calls( + [mock.call(self.port.extra['vif_port_id'], port1_body), + mock.call(second_port.extra['vif_port_id'], port2_body)], + any_order=True + ) + + def test_configure_tenant_networks(self): + self.node.instance_uuid = uuidutils.generate_uuid() + self.node.save() + self._test_configure_tenant_networks() + + def test_configure_tenant_networks_no_instance_uuid(self): + self._test_configure_tenant_networks() + + 
@mock.patch.object(neutron_common, 'get_client') + def test_configure_tenant_networks_with_portgroups(self, client_mock): + pg = utils.create_test_portgroup( + self.context, node_id=self.node.id, address='ff:54:00:cf:2d:32', + extra={'vif_port_id': uuidutils.generate_uuid()}) + port1 = utils.create_test_port( + self.context, node_id=self.node.id, address='ff:54:00:cf:2d:33', + uuid=uuidutils.generate_uuid(), + portgroup_id=pg.id, + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:ff', + 'port_id': 'Ethernet1/1', + 'switch_info': 'switch2'} + ) + port2 = utils.create_test_port( + self.context, node_id=self.node.id, address='ff:54:00:cf:2d:34', + uuid=uuidutils.generate_uuid(), + portgroup_id=pg.id, + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:ff', + 'port_id': 'Ethernet1/2', + 'switch_info': 'switch2'} + ) + upd_mock = mock.Mock() + client_mock.return_value.update_port = upd_mock + expected_body = { + 'port': { + 'device_owner': 'baremetal:none', + 'device_id': self.node.uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'binding:host_id': self.node.uuid, + } + } + call1_body = copy.deepcopy(expected_body) + call1_body['port']['binding:profile'] = { + 'local_link_information': [self.port.local_link_connection] + } + call2_body = copy.deepcopy(expected_body) + call2_body['port']['binding:profile'] = { + 'local_link_information': [port1.local_link_connection, + port2.local_link_connection] + } + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.configure_tenant_networks(task) + client_mock.assert_called_once_with(task.context.auth_token) + upd_mock.assert_has_calls( + [mock.call(self.port.extra['vif_port_id'], call1_body), + mock.call(pg.extra['vif_port_id'], call2_body)] + ) diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index 78412e71f..7cbafdbb9 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ 
b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -1406,6 +1406,17 @@ class VirtualMediaDeployUtilsTestCase(db_base.DbTestCase): address = utils.get_single_nic_with_vif_port_id(task) self.assertEqual('aa:bb:cc:dd:ee:ff', address) + def test_get_single_nic_with_provisioning_vif_port_id(self): + obj_utils.create_test_port( + self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', + uuid=uuidutils.generate_uuid(), + internal_info={'provisioning_vif_port_id': 'test-vif-A'}, + driver='iscsi_ilo') + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + address = utils.get_single_nic_with_vif_port_id(task) + self.assertEqual('aa:bb:cc:dd:ee:ff', address) + class ParseInstanceInfoCapabilitiesTestCase(tests_base.TestCase): diff --git a/releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml b/releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml new file mode 100644 index 000000000..d8970fd73 --- /dev/null +++ b/releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml @@ -0,0 +1,14 @@ +--- +features: + - Added ``neutron`` network interface. This interface allows to provision + and/or clean node in separate networks. A new config option + ``[neutron]provisioning_network_uuid`` has been added. This option + specifies provision network UUID. +upgrade: + - | + If ``neutron`` network interface is specified in + ``[DEFAULT]enabled_network_interfaces``, + ``[neutron]provisioning_network_uuid`` and + ``[neutron]cleaning_network_uuid`` configuration options are required. If + any of them is not specified, the ironic-conductor service will fail to + start. 
diff --git a/setup.cfg b/setup.cfg index 4d1f319fd..becd3df3e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -90,6 +90,7 @@ ironic.drivers = ironic.hardware.interfaces.network = flat = ironic.drivers.modules.network.flat:FlatNetwork noop = ironic.drivers.modules.network.noop:NoopNetwork + neutron = ironic.drivers.modules.network.neutron:NeutronNetwork ironic.database.migration_backend = sqlalchemy = ironic.db.sqlalchemy.migration From 5041703401ded33fc0b54409d60667900a319403 Mon Sep 17 00:00:00 2001 From: Om Kumar Date: Fri, 14 Aug 2015 22:56:24 +0530 Subject: [PATCH 066/166] Update the deploy drivers with network flipping logic This patch consumes the plumbing work for network interfaces and thus provides support for tenant network isolation for baremetal. Partial-bug: #1526403 Co-Authored-By: Vasyl Saienko Co-Authored-By: Vladyslav Drok Co-Authored-By: Sivaramakrishna Garimella Change-Id: I2f0a508973ff2f674c16f91d5dc440f9e8e667bf --- ironic/drivers/base.py | 4 +- ironic/drivers/modules/agent.py | 22 +++++++ ironic/drivers/modules/agent_base_vendor.py | 7 ++- ironic/drivers/modules/iscsi_deploy.py | 20 ++++++ .../tests/unit/drivers/modules/test_agent.py | 26 +++++--- .../drivers/modules/test_agent_base_vendor.py | 61 ++++++++++++++----- .../unit/drivers/modules/test_iscsi_deploy.py | 27 +++++--- 7 files changed, 131 insertions(+), 36 deletions(-) diff --git a/ironic/drivers/base.py b/ironic/drivers/base.py index 742aa5a12..b59c82e14 100644 --- a/ironic/drivers/base.py +++ b/ironic/drivers/base.py @@ -299,9 +299,7 @@ class DeployInterface(BaseInterface): this method should be implemented by the driver. If implemented, this method must be idempotent. It may be called - multiple times for the same node on the same conductor, and it may be - called by multiple conductors in parallel. Therefore, it must not - require an exclusive lock. + multiple times for the same node on the same conductor. This method is called before `deploy`. 
diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py index 79b19d5bd..2cf56809f 100644 --- a/ironic/drivers/modules/agent.py +++ b/ironic/drivers/modules/agent.py @@ -295,8 +295,16 @@ class AgentDeploy(base.DeployInterface): :param task: a TaskManager instance. :returns: status of the deploy. One of ironic.common.states. + :raises: NetworkError if the cleaning ports cannot be removed. + :raises: InvalidParameterValue when the wrong power state is specified + or the wrong driver info is specified for power management. + :raises: other exceptions by the node's power driver if something + wrong occurred during the power action. """ manager_utils.node_power_action(task, states.POWER_OFF) + + task.driver.network.unconfigure_tenant_networks(task) + return states.DELETED @task_manager.require_exclusive_lock @@ -304,11 +312,25 @@ class AgentDeploy(base.DeployInterface): """Prepare the deployment environment for this node. :param task: a TaskManager instance. + :raises: NetworkError: if the previous cleaning ports cannot be removed + or if new cleaning ports cannot be created. + :raises: InvalidParameterValue when the wrong power state is specified + or the wrong driver info is specified for power management. + :raises: other exceptions by the node's power driver if something + wrong occurred during the power action. + :raises: exception.ImageRefValidationFailed if image_source is not + Glance href and is not HTTP(S) URL. + :raises: any boot interface's prepare_ramdisk exceptions. """ # Nodes deployed by AgentDeploy always boot from disk now. So there # is nothing to be done in prepare() when it's called during # take over. node = task.node + if node.provision_state == states.DEPLOYING: + # Adding the node to provisioning network so that the dhcp + # options get added for the provisioning port. 
+ manager_utils.node_power_action(task, states.POWER_OFF) + task.driver.network.add_provisioning_network(task) if node.provision_state != states.ACTIVE: node.instance_info = build_instance_info_for_deploy(task) node.save() diff --git a/ironic/drivers/modules/agent_base_vendor.py b/ironic/drivers/modules/agent_base_vendor.py index caf66720e..a5614d609 100644 --- a/ironic/drivers/modules/agent_base_vendor.py +++ b/ironic/drivers/modules/agent_base_vendor.py @@ -768,6 +768,7 @@ class BaseAgentVendor(base.VendorInterface): {'node_uuid': node.uuid, 'timeout': (wait * (attempts - 1)) / 1000, 'error': e}) + manager_utils.node_power_action(task, states.POWER_OFF) else: # Flush the file system prior to hard rebooting the node result = self._client.sync(node) @@ -781,8 +782,12 @@ class BaseAgentVendor(base.VendorInterface): 'Failed to flush the file system prior to hard ' 'rebooting the node %(node)s. Error: %(error)s'), {'node': node.uuid, 'error': error}) + manager_utils.node_power_action(task, states.POWER_OFF) - manager_utils.node_power_action(task, states.REBOOT) + task.driver.network.remove_provisioning_network(task) + task.driver.network.configure_tenant_networks(task) + + manager_utils.node_power_action(task, states.POWER_ON) except Exception as e: msg = (_('Error rebooting node %(node)s after deploy. ' 'Error: %(error)s') % diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py index 2bd7deca2..957368eb3 100644 --- a/ironic/drivers/modules/iscsi_deploy.py +++ b/ironic/drivers/modules/iscsi_deploy.py @@ -456,10 +456,17 @@ class ISCSIDeploy(base.DeployInterface): :param task: a TaskManager instance containing the node to act on. :returns: deploy state DELETED. + :raises: NetworkError if the cleaning ports cannot be removed. + :raises: InvalidParameterValue when the wrong state is specified + or the wrong driver info is specified. 
+ :raises: other exceptions by the node's power driver if something + wrong occurred during the power action. """ manager_utils.node_power_action(task, states.POWER_OFF) + task.driver.network.unconfigure_tenant_networks(task) return states.DELETED + @task_manager.require_exclusive_lock def prepare(self, task): """Prepare the deployment environment for this task's node. @@ -468,11 +475,24 @@ class ISCSIDeploy(base.DeployInterface): local cache. :param task: a TaskManager instance containing the node to act on. + :raises: NetworkError: if the previous cleaning ports cannot be removed + or if new cleaning ports cannot be created. + :raises: InvalidParameterValue when the wrong power state is specified + or the wrong driver info is specified for power management. + :raises: other exceptions by the node's power driver if something + wrong occurred during the power action. + :raises: any boot interface's prepare_ramdisk exceptions. """ node = task.node if node.provision_state == states.ACTIVE: task.driver.boot.prepare_instance(task) else: + if node.provision_state == states.DEPLOYING: + # Adding the node to provisioning network so that the dhcp + # options get added for the provisioning port. + manager_utils.node_power_action(task, states.POWER_OFF) + task.driver.network.add_provisioning_network(task) + deploy_opts = deploy_utils.build_agent_options(node) task.driver.boot.prepare_ramdisk(task, deploy_opts) diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py index 514d98f7b..e2b647b89 100644 --- a/ironic/tests/unit/drivers/modules/test_agent.py +++ b/ironic/tests/unit/drivers/modules/test_agent.py @@ -439,19 +439,25 @@ class TestAgentDeploy(db_base.DbTestCase): self.assertEqual(driver_return, states.DEPLOYWAIT) power_mock.assert_called_once_with(task, states.REBOOT) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'unconfigure_tenant_networks', autospec=True) @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) - def test_tear_down(self, power_mock): + def test_tear_down(self, power_mock, unconfigure_tenant_nets_mock): with task_manager.acquire( self.context, self.node['uuid'], shared=False) as task: driver_return = self.driver.tear_down(task) power_mock.assert_called_once_with(task, states.POWER_OFF) self.assertEqual(driver_return, states.DELETED) + unconfigure_tenant_nets_mock.assert_called_once_with(mock.ANY, + task) @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk') @mock.patch.object(deploy_utils, 'build_agent_options') @mock.patch.object(agent, 'build_instance_info_for_deploy') - def test_prepare(self, build_instance_info_mock, build_options_mock, - pxe_prepare_ramdisk_mock): + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' + 'add_provisioning_network', autospec=True) + def test_prepare(self, add_provisioning_net_mock, build_instance_info_mock, + build_options_mock, pxe_prepare_ramdisk_mock): with task_manager.acquire( self.context, self.node['uuid'], shared=False) as task: task.node.provision_state = states.DEPLOYING @@ -464,6 +470,7 @@ class TestAgentDeploy(db_base.DbTestCase): build_options_mock.assert_called_once_with(task.node) pxe_prepare_ramdisk_mock.assert_called_once_with( task, {'a': 'b'}) + add_provisioning_net_mock.assert_called_once_with(mock.ANY, task) self.node.refresh() self.assertEqual('bar', self.node.instance_info['foo']) @@ -489,12 +496,14 @@ class TestAgentDeploy(db_base.DbTestCase): self.node.refresh() self.assertEqual('bar', self.node.instance_info['foo']) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'add_provisioning_network', autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk') @mock.patch.object(deploy_utils, 'build_agent_options') @mock.patch.object(agent, 'build_instance_info_for_deploy') def test_prepare_active( self, build_instance_info_mock, build_options_mock, - pxe_prepare_ramdisk_mock): + pxe_prepare_ramdisk_mock, add_provisioning_net_mock): with task_manager.acquire( self.context, self.node['uuid'], shared=False) as task: task.node.provision_state = states.ACTIVE @@ -504,6 +513,7 @@ class TestAgentDeploy(db_base.DbTestCase): self.assertFalse(build_instance_info_mock.called) self.assertFalse(build_options_mock.called) self.assertFalse(pxe_prepare_ramdisk_mock.called) + self.assertFalse(add_provisioning_net_mock.called) @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider') @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp') @@ -766,7 +776,7 @@ class TestAgentVendor(db_base.DbTestCase): power_off_mock.assert_called_once_with(task.node) get_power_state_mock.assert_called_once_with(task) node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + task, states.POWER_ON) self.assertFalse(prepare_mock.called) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) @@ -811,7 +821,7 @@ class TestAgentVendor(db_base.DbTestCase): power_off_mock.assert_called_once_with(task.node) get_power_state_mock.assert_called_once_with(task) node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + task, states.POWER_ON) prepare_mock.assert_called_once_with(task.driver.boot, task) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) @@ -860,7 +870,7 @@ class TestAgentVendor(db_base.DbTestCase): get_power_state_mock.assert_called_once_with(task) node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + task, states.POWER_ON) 
self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) self.assertFalse(uuid_mock.called) @@ -945,7 +955,7 @@ class TestAgentVendor(db_base.DbTestCase): power_off_mock.assert_called_once_with(task.node) get_power_state_mock.assert_called_once_with(task) node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + task, states.POWER_ON) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) diff --git a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py index 9176aca74..08f91d447 100644 --- a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py +++ b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py @@ -585,7 +585,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): power_off_mock.assert_called_once_with(task.node) self.assertEqual(2, get_power_state_mock.call_count) node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + task, states.POWER_ON) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) @@ -595,8 +595,13 @@ class TestBaseAgentVendor(db_base.DbTestCase): spec=types.FunctionType) @mock.patch.object(agent_client.AgentClient, 'power_off', spec=types.FunctionType) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' + 'remove_provisioning_network', spec_set=True, autospec=True) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'configure_tenant_networks', spec_set=True, autospec=True) def test_reboot_and_finish_deploy_soft_poweroff_doesnt_complete( - self, power_off_mock, get_power_state_mock, + self, configure_tenant_net_mock, remove_provisioning_net_mock, + power_off_mock, get_power_state_mock, node_power_action_mock): self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE @@ -607,16 +612,25 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.passthru.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(7, get_power_state_mock.call_count) - node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + node_power_action_mock.assert_has_calls([ + mock.call(task, states.POWER_OFF), + mock.call(task, states.POWER_ON)]) + remove_provisioning_net_mock.assert_called_once_with(mock.ANY, + task) + configure_tenant_net_mock.assert_called_once_with(mock.ANY, task) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(agent_client.AgentClient, 'power_off', spec=types.FunctionType) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' + 'remove_provisioning_network', spec_set=True, autospec=True) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'configure_tenant_networks', spec_set=True, autospec=True) def test_reboot_and_finish_deploy_soft_poweroff_fails( - self, power_off_mock, node_power_action_mock): + self, configure_tenant_net_mock, remove_provisioning_net_mock, + power_off_mock, node_power_action_mock): power_off_mock.side_effect = RuntimeError("boom") self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE @@ -625,8 +639,12 @@ class TestBaseAgentVendor(db_base.DbTestCase): shared=True) as task: self.passthru.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) - node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + node_power_action_mock.assert_has_calls([ + mock.call(task, states.POWER_OFF), + mock.call(task, states.POWER_ON)]) + remove_provisioning_net_mock.assert_called_once_with(mock.ANY, + task) + configure_tenant_net_mock.assert_called_once_with(mock.ANY, task) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) @@ -636,9 +654,13 @@ class TestBaseAgentVendor(db_base.DbTestCase): spec=types.FunctionType) @mock.patch.object(agent_client.AgentClient, 'power_off', spec=types.FunctionType) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' + 'remove_provisioning_network', spec_set=True, autospec=True) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'configure_tenant_networks', spec_set=True, autospec=True) def test_reboot_and_finish_deploy_get_power_state_fails( - self, power_off_mock, get_power_state_mock, - node_power_action_mock): + self, configure_tenant_net_mock, remove_provisioning_net_mock, + power_off_mock, get_power_state_mock, node_power_action_mock): self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() @@ -648,8 +670,12 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.passthru.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(7, get_power_state_mock.call_count) - node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + node_power_action_mock.assert_has_calls([ + mock.call(task, states.POWER_OFF), + mock.call(task, states.POWER_ON)]) + remove_provisioning_net_mock.assert_called_once_with(mock.ANY, + task) + configure_tenant_net_mock.assert_called_once_with(mock.ANY, task) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) @@ -675,7 +701,6 @@ class TestBaseAgentVendor(db_base.DbTestCase): power_off_mock.assert_called_once_with(task.node) self.assertEqual(7, get_power_state_mock.call_count) node_power_action_mock.assert_has_calls([ - mock.call(task, states.REBOOT), mock.call(task, states.POWER_OFF)]) self.assertEqual(states.DEPLOYFAIL, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) @@ -698,8 +723,10 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.passthru.reboot_and_finish_deploy(task) sync_mock.assert_called_once_with(task.node) - node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + node_power_action_mock.assert_has_calls([ + mock.call(task, states.POWER_OFF), + mock.call(task, states.POWER_ON), + ]) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) @@ 
-723,8 +750,10 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.passthru.reboot_and_finish_deploy(task) sync_mock.assert_called_once_with(task.node) - node_power_action_mock.assert_called_once_with( - task, states.REBOOT) + node_power_action_mock.assert_has_calls([ + mock.call(task, states.POWER_OFF), + mock.call(task, states.POWER_ON), + ]) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) log_error = ('The version of the IPA ramdisk used in the ' diff --git a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py index b8d3ca06f..a4f6d9991 100644 --- a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py +++ b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py @@ -527,31 +527,37 @@ class ISCSIDeployTestCase(db_base.DbTestCase): validate_capabilities_mock.assert_called_once_with(task.node) validate_mock.assert_called_once_with(task) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' + 'add_provisioning_network', spec_set=True, autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True) - def test_prepare_node_active(self, prepare_instance_mock): - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: + def test_prepare_node_active(self, prepare_instance_mock, + add_provisioning_net_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: task.node.provision_state = states.ACTIVE task.driver.deploy.prepare(task) prepare_instance_mock.assert_called_once_with( task.driver.boot, task) + self.assertEqual(0, add_provisioning_net_mock.call_count) @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True) - def test_prepare_node_deploying(self, mock_prepare_ramdisk, + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'add_provisioning_network', spec_set=True, autospec=True) + def test_prepare_node_deploying(self, add_provisioning_net_mock, + mock_prepare_ramdisk, mock_agent_options): mock_agent_options.return_value = {'c': 'd'} - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - task.node.provision_state = states.DEPLOYWAIT + with task_manager.acquire(self.context, self.node.uuid) as task: + task.node.provision_state = states.DEPLOYING task.driver.deploy.prepare(task) mock_agent_options.assert_called_once_with(task.node) mock_prepare_ramdisk.assert_called_once_with( task.driver.boot, task, {'c': 'd'}) + add_provisioning_net_mock.assert_called_once_with(mock.ANY, task) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True) @@ -567,14 +573,19 @@ class ISCSIDeployTestCase(db_base.DbTestCase): mock_check_image_size.assert_called_once_with(task) mock_node_power_action.assert_called_once_with(task, states.REBOOT) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'unconfigure_tenant_networks', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) - def test_tear_down(self, node_power_action_mock): + def test_tear_down(self, node_power_action_mock, + unconfigure_tenant_nets_mock): with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: state = task.driver.deploy.tear_down(task) self.assertEqual(state, states.DELETED) node_power_action_mock.assert_called_once_with(task, states.POWER_OFF) + unconfigure_tenant_nets_mock.assert_called_once_with(mock.ANY, + task) @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider') @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp') From 0a5549680d491894faad5133db38e1bc2b4c93d6 Mon Sep 17 00:00:00 2001 From: Laura Moore Date: Mon, 27 Jul 2015 17:29:57 -0400 Subject: [PATCH 067/166] Add multitenancy-related fields to port API object This commit adds new fields to port object: - port.pxe_enabled: indicates whether pxe is enabled or disabled for this port - port.local_link_connection: contains the port binding profile. 
Partial-bug: #1526403 Co-Authored-By: Jenny Moorehead Co-Authored-By: Will Stevenson Co-Authored-By: Vasyl Saienko Co-Authored-By: Vladyslav Drok Co-Authored-By: Zhenguo Niu Change-Id: Ie655fd59b06de7b84fba3b438d5e4c2ecd8075c3 --- doc/source/webapi/v1.rst | 5 + ironic/api/controllers/v1/port.py | 44 ++++- ironic/api/controllers/v1/types.py | 72 +++++++ ironic/api/controllers/v1/utils.py | 23 +++ ironic/api/controllers/v1/versions.py | 4 +- ironic/common/exception.py | 10 + ironic/common/utils.py | 33 ++++ ironic/tests/unit/api/utils.py | 3 - ironic/tests/unit/api/v1/test_ports.py | 177 ++++++++++++++++-- ironic/tests/unit/api/v1/test_types.py | 52 +++++ ironic/tests/unit/api/v1/test_utils.py | 32 ++++ ironic/tests/unit/common/test_utils.py | 17 ++ ...-advanced-net-fields-55465091f019d962.yaml | 8 + 13 files changed, 458 insertions(+), 22 deletions(-) create mode 100644 releasenotes/notes/add-port-advanced-net-fields-55465091f019d962.yaml diff --git a/doc/source/webapi/v1.rst b/doc/source/webapi/v1.rst index df79bb1da..d6465e7af 100644 --- a/doc/source/webapi/v1.rst +++ b/doc/source/webapi/v1.rst @@ -32,6 +32,11 @@ always requests the newest supported API version. API Versions History -------------------- +**1.19** + + This API version adds the multitenancy-related ``local_link_connection`` + and ``pxe_enabled`` fields to a port. 
+ **1.18** Add ``internal_info`` readonly field to the port object, that will be used diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py index b5f332c2c..852078b47 100644 --- a/ironic/api/controllers/v1/port.py +++ b/ironic/api/controllers/v1/port.py @@ -40,6 +40,11 @@ def hide_fields_in_newer_versions(obj): # if requested version is < 1.18, hide internal_info field if not api_utils.allow_port_internal_info(): obj.internal_info = wsme.Unset + # if requested version is < 1.19, hide local_link_connection and + # pxe_enabled fields + if not api_utils.allow_port_advanced_net_fields(): + obj.pxe_enabled = wsme.Unset + obj.local_link_connection = wsme.Unset class Port(base.APIBase): @@ -90,6 +95,12 @@ class Port(base.APIBase): mandatory=True) """The UUID of the node this port belongs to""" + pxe_enabled = types.boolean + """Indicates whether pxe is enabled or disabled on the node.""" + + local_link_connection = types.locallinkconnectiontype + """The port binding profile for each port""" + links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated port links""" @@ -151,7 +162,11 @@ class Port(base.APIBase): extra={'foo': 'bar'}, internal_info={}, created_at=datetime.datetime.utcnow(), - updated_at=datetime.datetime.utcnow()) + updated_at=datetime.datetime.utcnow(), + pxe_enabled=True, + local_link_connection={ + 'switch_info': 'host', 'port_id': 'Gig0/1', + 'switch_id': 'aa:bb:cc:dd:ee:ff'}) # NOTE(lucasagomes): node_uuid getter() method look at the # _node_uuid variable sample._node_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' @@ -204,7 +219,9 @@ class PortsController(rest.RestController): 'detail': ['GET'], } - invalid_sort_key_list = ['extra', 'internal_info'] + invalid_sort_key_list = ['extra', 'internal_info', 'local_link_connection'] + + advanced_net_fields = ['pxe_enabled', 'local_link_connection'] def _get_ports_collection(self, node_ident, address, marker, limit, sort_key, sort_dir, 
resource_url=None, @@ -285,8 +302,13 @@ class PortsController(rest.RestController): :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. + :raises: NotAcceptable """ api_utils.check_allow_specify_fields(fields) + if (fields and not api_utils.allow_port_advanced_net_fields() and + set(fields).intersection(self.advanced_net_fields)): + raise exception.NotAcceptable() + if fields is None: fields = _DEFAULT_RETURN_FIELDS @@ -322,6 +344,7 @@ class PortsController(rest.RestController): :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. + :raises: NotAcceptable, HTTPNotFound """ if not node_uuid and node: # We're invoking this interface using positional notation, or @@ -348,6 +371,7 @@ class PortsController(rest.RestController): :param port_uuid: UUID of a port. :param fields: Optional, a list with a specified set of fields of the resource to be returned. + :raises: NotAcceptable """ if self.from_nodes: raise exception.OperationNotPermitted() @@ -362,12 +386,19 @@ class PortsController(rest.RestController): """Create a new port. :param port: a port within the request body. + :raises: NotAcceptable """ if self.from_nodes: raise exception.OperationNotPermitted() + pdict = port.as_dict() + if not api_utils.allow_port_advanced_net_fields(): + if set(pdict).intersection(self.advanced_net_fields): + raise exception.NotAcceptable() + new_port = objects.Port(pecan.request.context, - **port.as_dict()) + **pdict) + new_port.create() # Set the HTTP Location Header pecan.response.location = link.build_url('ports', new_port.uuid) @@ -380,9 +411,16 @@ class PortsController(rest.RestController): :param port_uuid: UUID of a port. :param patch: a json PATCH document to apply to this port. 
+ :raises: NotAcceptable """ if self.from_nodes: raise exception.OperationNotPermitted() + if not api_utils.allow_port_advanced_net_fields(): + for field in self.advanced_net_fields: + field_path = '/%s' % field + if (api_utils.get_patch_values(patch, field_path) or + api_utils.is_path_removed(patch, field_path)): + raise exception.NotAcceptable() rpc_port = objects.Port.get_by_uuid(pecan.request.context, port_uuid) try: diff --git a/ironic/api/controllers/v1/types.py b/ironic/api/controllers/v1/types.py index b5fdd7f21..7976af753 100644 --- a/ironic/api/controllers/v1/types.py +++ b/ironic/api/controllers/v1/types.py @@ -255,3 +255,75 @@ class JsonPatchType(wtypes.Base): if patch.value is not wsme.Unset: ret['value'] = patch.value return ret + + +class LocalLinkConnectionType(wtypes.UserType): + """A type describing local link connection.""" + + basetype = wtypes.DictType + name = 'locallinkconnection' + + mandatory_fields = {'switch_id', + 'port_id'} + valid_fields = mandatory_fields.union({'switch_info'}) + + @staticmethod + def validate(value): + """Validate and convert the input to a LocalLinkConnectionType. + + :param value: A dictionary of values to validate, switch_id is a MAC + address or an OpenFlow based datapath_id, switch_info is an optional + field. + + For example:: + { + 'switch_id': mac_or_datapath_id(), + 'port_id': 'Ethernet3/1', + 'switch_info': 'switch1' + } + + :returns: A dictionary. + :raises: Invalid if some of the keys in the dictionary being validated + are unknown, invalid, or some required ones are missing. 
+ + """ + wtypes.DictType(wtypes.text, wtypes.text).validate(value) + + keys = set(value) + + # This is to workaround an issue when an API object is initialized from + # RPC object, in which dictionary fields that are set to None become + # empty dictionaries + if not keys: + return value + + invalid = keys - LocalLinkConnectionType.valid_fields + if invalid: + raise exception.Invalid(_('%s are invalid keys') % (invalid)) + + # Check all mandatory fields are present + missing = LocalLinkConnectionType.mandatory_fields - keys + if missing: + msg = _('Missing mandatory keys: %s') % missing + raise exception.Invalid(msg) + + # Check switch_id is either a valid mac address or + # OpenFlow datapath_id and normalize it. + if utils.is_valid_mac(value['switch_id']): + value['switch_id'] = utils.validate_and_normalize_mac( + value['switch_id']) + elif utils.is_valid_datapath_id(value['switch_id']): + value['switch_id'] = utils.validate_and_normalize_datapath_id( + value['switch_id']) + else: + raise exception.InvalidSwitchID(switch_id=value['switch_id']) + + return value + + @staticmethod + def frombasetype(value): + if value is None: + return None + return LocalLinkConnectionType.validate(value) + +locallinkconnectiontype = LocalLinkConnectionType() diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py index d85baa7fb..70098f488 100644 --- a/ironic/api/controllers/v1/utils.py +++ b/ironic/api/controllers/v1/utils.py @@ -99,6 +99,20 @@ def get_patch_values(patch, path): if p['path'] == path and p['op'] != 'remove'] +def is_path_removed(patch, path): + """Returns whether the patch includes removal of the path (or subpath of). + + :param patch: HTTP PATCH request body. + :param path: the path to check. + :returns: True if path or subpath being removed, False otherwise. 
+ """ + path = path.rstrip('/') + for p in patch: + if ((p['path'] == path or p['path'].startswith(path + '/')) and + p['op'] == 'remove'): + return True + + def allow_node_logical_names(): # v1.5 added logical name aliases return pecan.request.version.minor >= versions.MINOR_5_NODE_NAME @@ -299,6 +313,15 @@ def allow_port_internal_info(): versions.MINOR_18_PORT_INTERNAL_INFO) +def allow_port_advanced_net_fields(): + """Check if we should return local_link_connection and pxe_enabled fields. + + Version 1.19 of the API added support for these new fields in port object. + """ + return (pecan.request.version.minor >= + versions.MINOR_19_PORT_ADVANCED_NET_FIELDS) + + def get_controller_reserved_names(cls): """Get reserved names for a given controller. diff --git a/ironic/api/controllers/v1/versions.py b/ironic/api/controllers/v1/versions.py index 8f86def24..9bbfd32c7 100644 --- a/ironic/api/controllers/v1/versions.py +++ b/ironic/api/controllers/v1/versions.py @@ -48,6 +48,7 @@ BASE_VERSION = 1 # v1.16: Add ability to filter nodes by driver. # v1.17: Add 'adopt' verb for ADOPTING active nodes. # v1.18: Add port.internal_info. +# v1.19: Add port.local_link_connection and port.pxe_enabled. MINOR_0_JUNO = 0 MINOR_1_INITIAL_VERSION = 1 @@ -68,11 +69,12 @@ MINOR_15_MANUAL_CLEAN = 15 MINOR_16_DRIVER_FILTER = 16 MINOR_17_ADOPT_VERB = 17 MINOR_18_PORT_INTERNAL_INFO = 18 +MINOR_19_PORT_ADVANCED_NET_FIELDS = 19 # When adding another version, update MINOR_MAX_VERSION and also update # doc/source/webapi/v1.rst with a detailed explanation of what the version has # changed. 
-MINOR_MAX_VERSION = MINOR_18_PORT_INTERNAL_INFO +MINOR_MAX_VERSION = MINOR_19_PORT_ADVANCED_NET_FIELDS # String representations of the minor and maximum versions MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION) diff --git a/ironic/common/exception.py b/ironic/common/exception.py index 5622203b2..ca761cd1d 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -192,6 +192,16 @@ class InvalidMAC(Invalid): _msg_fmt = _("Expected a MAC address but received %(mac)s.") +class InvalidSwitchID(Invalid): + _msg_fmt = _("Expected a MAC address or OpenFlow datapath ID but " + "received %(switch_id)s.") + + +class InvalidDatapathId(Invalid): + _msg_fmt = _("Expected an OpenFlow datapath ID but received " + "%(datapath_id)s.") + + class InvalidStateRequested(Invalid): _msg_fmt = _('The requested action "%(action)s" can not be performed ' 'on node "%(node)s" while it is in state "%(state)s".') diff --git a/ironic/common/utils.py b/ironic/common/utils.py index 65acb5dd4..1f76a9a17 100644 --- a/ironic/common/utils.py +++ b/ironic/common/utils.py @@ -185,6 +185,22 @@ def is_valid_mac(address): re.match(m, address.lower())) +def is_valid_datapath_id(datapath_id): + """Verify the format of an OpenFlow datapath_id. + + Check if a datapath_id is valid and contains 16 hexadecimal digits. + Datapath ID format: the lower 48-bits are for a MAC address, + while the upper 16-bits are implementer-defined. + + :param datapath_id: OpenFlow datapath_id to be validated. + :returns: True if valid. False if not. 
+ + """ + m = "^[0-9a-f]{16}$" + return (isinstance(datapath_id, six.string_types) and + re.match(m, datapath_id.lower())) + + _is_valid_logical_name_re = re.compile(r'^[A-Z0-9-._~]+$', re.I) # old is_hostname_safe() regex, retained for backwards compat @@ -284,6 +300,23 @@ def validate_and_normalize_mac(address): return address.lower() +def validate_and_normalize_datapath_id(datapath_id): + """Validate an OpenFlow datapath_id and return normalized form. + + Checks whether the supplied OpenFlow datapath_id is formally correct and + normalize it to all lower case. + + :param datapath_id: OpenFlow datapath_id to be validated and normalized. + :returns: Normalized and validated OpenFlow datapath_id. + :raises: InvalidDatapathId If an OpenFlow datapath_id is not valid. + + """ + + if not is_valid_datapath_id(datapath_id): + raise exception.InvalidDatapathId(datapath_id=datapath_id) + return datapath_id.lower() + + def is_valid_ipv6_cidr(address): try: str(netaddr.IPNetwork(address, version=6).cidr) diff --git a/ironic/tests/unit/api/utils.py b/ironic/tests/unit/api/utils.py index 024fd07e7..2b1035171 100644 --- a/ironic/tests/unit/api/utils.py +++ b/ironic/tests/unit/api/utils.py @@ -105,9 +105,6 @@ def port_post_data(**kw): port = utils.get_test_port(**kw) # node_id is not part of the API object port.pop('node_id') - # TODO(vsaienko): remove when API part is added - port.pop('local_link_connection') - port.pop('pxe_enabled') # portgroup_id is not part of the API object port.pop('portgroup_id') internal = port_controller.PortPatchType.internal_attrs() diff --git a/ironic/tests/unit/api/v1/test_ports.py b/ironic/tests/unit/api/v1/test_ports.py index e3ab27a77..87b3f1ac1 100644 --- a/ironic/tests/unit/api/v1/test_ports.py +++ b/ironic/tests/unit/api/v1/test_ports.py @@ -31,6 +31,7 @@ from ironic.api.controllers import base as api_base from ironic.api.controllers import v1 as api_v1 from ironic.api.controllers.v1 import port as api_port from ironic.api.controllers.v1 
import utils as api_utils +from ironic.api.controllers.v1 import versions from ironic.common import exception from ironic.conductor import rpcapi from ironic.tests import base @@ -63,6 +64,7 @@ class TestListPorts(test_api_base.BaseApiTest): def setUp(self): super(TestListPorts, self).setUp() self.node = obj_utils.create_test_node(self.context) + self.headers = {api_base.Version.string: str(api_v1.MAX_VER)} def test_empty(self): data = self.get_json('/ports') @@ -145,7 +147,7 @@ class TestListPorts(test_api_base.BaseApiTest): self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) def test_detail(self): - port = obj_utils.create_test_port(self.context, node_id=self.node.id,) + port = obj_utils.create_test_port(self.context, node_id=self.node.id) data = self.get_json( '/ports/detail', headers={api_base.Version.string: str(api_v1.MAX_VER)} @@ -154,6 +156,8 @@ class TestListPorts(test_api_base.BaseApiTest): self.assertIn('extra', data['ports'][0]) self.assertIn('internal_info', data['ports'][0]) self.assertIn('node_uuid', data['ports'][0]) + self.assertIn('pxe_enabled', data['ports'][0]) + self.assertIn('local_link_connection', data['ports'][0]) # never expose the node_id self.assertNotIn('node_id', data['ports'][0]) @@ -373,6 +377,8 @@ class TestPatch(test_api_base.BaseApiTest): self.mock_gtf = p.start() self.mock_gtf.return_value = 'test-topic' self.addCleanup(p.stop) + self.headers = {api_base.Version.string: str( + versions.MAX_VERSION_STRING)} def test_update_byid(self, mock_upd): extra = {'foo': 'bar'} @@ -456,6 +462,44 @@ class TestPatch(test_api_base.BaseApiTest): self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) + def test_replace_local_link_connection(self, mock_upd): + switch_id = 'aa:bb:cc:dd:ee:ff' + mock_upd.return_value = self.port + mock_upd.return_value.local_link_connection['switch_id'] = switch_id + response = self.patch_json('/ports/%s' % self.port.uuid, + [{'path': + 
'/local_link_connection/switch_id', + 'value': switch_id, + 'op': 'replace'}], + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + self.assertEqual(switch_id, + response.json['local_link_connection']['switch_id']) + self.assertTrue(mock_upd.called) + + kargs = mock_upd.call_args[0][1] + self.assertEqual(switch_id, kargs.local_link_connection['switch_id']) + + def test_remove_local_link_connection_old_api(self, mock_upd): + response = self.patch_json( + '/ports/%s' % self.port.uuid, + [{'path': '/local_link_connection/switch_id', 'op': 'remove'}], + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + + def test_set_pxe_enabled_false_old_api(self, mock_upd): + response = self.patch_json('/ports/%s' % self.port.uuid, + [{'path': '/pxe_enabled', + 'value': False, + 'op': 'add'}], + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + def test_add_node_uuid(self, mock_upd): mock_upd.return_value = self.port response = self.patch_json('/ports/%s' % self.port.uuid, @@ -661,21 +705,50 @@ class TestPatch(test_api_base.BaseApiTest): kargs = mock_upd.call_args[0][1] self.assertEqual(address.lower(), kargs.address) + def test_update_pxe_enabled_allowed(self, mock_upd): + pxe_enabled = True + mock_upd.return_value = self.port + mock_upd.return_value.pxe_enabled = pxe_enabled + response = self.patch_json('/ports/%s' % self.port.uuid, + [{'path': '/pxe_enabled', + 'value': pxe_enabled, + 'op': 'replace'}], + headers=self.headers) + self.assertEqual(http_client.OK, response.status_code) + self.assertEqual(pxe_enabled, response.json['pxe_enabled']) + + def 
test_update_pxe_enabled_old_api_version(self, mock_upd): + pxe_enabled = True + mock_upd.return_value = self.port + headers = {api_base.Version.string: '1.14'} + response = self.patch_json('/ports/%s' % self.port.uuid, + [{'path': '/pxe_enabled', + 'value': pxe_enabled, + 'op': 'replace'}], + expect_errors=True, + headers=headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + self.assertFalse(mock_upd.called) + class TestPost(test_api_base.BaseApiTest): def setUp(self): super(TestPost, self).setUp() self.node = obj_utils.create_test_node(self.context) + self.headers = {api_base.Version.string: str( + versions.MAX_VERSION_STRING)} @mock.patch.object(timeutils, 'utcnow') def test_create_port(self, mock_utcnow): pdict = post_get_test_port() test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time - response = self.post_json('/ports', pdict) + response = self.post_json('/ports', pdict, headers=self.headers) self.assertEqual(http_client.CREATED, response.status_int) - result = self.get_json('/ports/%s' % pdict['uuid']) + result = self.get_json('/ports/%s' % pdict['uuid'], + headers=self.headers) self.assertEqual(pdict['uuid'], result['uuid']) self.assertFalse(result['updated_at']) return_created_at = timeutils.parse_isotime( @@ -691,8 +764,9 @@ class TestPost(test_api_base.BaseApiTest): with mock.patch.object(self.dbapi, 'create_port', wraps=self.dbapi.create_port) as cp_mock: pdict = post_get_test_port(extra={'foo': 123}) - self.post_json('/ports', pdict) - result = self.get_json('/ports/%s' % pdict['uuid']) + self.post_json('/ports', pdict, headers=self.headers) + result = self.get_json('/ports/%s' % pdict['uuid'], + headers=self.headers) self.assertEqual(pdict['extra'], result['extra']) cp_mock.assert_called_once_with(mock.ANY) # Check that 'id' is not in first arg of positional args @@ -701,8 +775,9 @@ class TestPost(test_api_base.BaseApiTest): def 
test_create_port_generate_uuid(self): pdict = post_get_test_port() del pdict['uuid'] - response = self.post_json('/ports', pdict) - result = self.get_json('/ports/%s' % response.json['uuid']) + response = self.post_json('/ports', pdict, headers=self.headers) + result = self.get_json('/ports/%s' % response.json['uuid'], + headers=self.headers) self.assertEqual(pdict['address'], result['address']) self.assertTrue(uuidutils.is_uuid_like(result['uuid'])) @@ -711,14 +786,16 @@ class TestPost(test_api_base.BaseApiTest): 'float': 0.1, 'bool': True, 'list': [1, 2], 'none': None, 'dict': {'cat': 'meow'}}) - self.post_json('/ports', pdict) - result = self.get_json('/ports/%s' % pdict['uuid']) + self.post_json('/ports', pdict, headers=self.headers) + result = self.get_json('/ports/%s' % pdict['uuid'], + headers=self.headers) self.assertEqual(pdict['extra'], result['extra']) def test_create_port_no_mandatory_field_address(self): pdict = post_get_test_port() del pdict['address'] - response = self.post_json('/ports', pdict, expect_errors=True) + response = self.post_json('/ports', pdict, expect_errors=True, + headers=self.headers) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) @@ -741,8 +818,9 @@ class TestPost(test_api_base.BaseApiTest): def test_create_port_address_normalized(self): address = 'AA:BB:CC:DD:EE:FF' pdict = post_get_test_port(address=address) - self.post_json('/ports', pdict) - result = self.get_json('/ports/%s' % pdict['uuid']) + self.post_json('/ports', pdict, headers=self.headers) + result = self.get_json('/ports/%s' % pdict['uuid'], + headers=self.headers) self.assertEqual(address.lower(), result['address']) def test_create_port_with_hyphens_delimiter(self): @@ -764,7 +842,7 @@ class TestPost(test_api_base.BaseApiTest): def test_node_uuid_to_node_id_mapping(self): pdict = post_get_test_port(node_uuid=self.node['uuid']) - 
self.post_json('/ports', pdict) + self.post_json('/ports', pdict, headers=self.headers) # GET doesn't return the node_id it's an internal value port = self.dbapi.get_port_by_uuid(pdict['uuid']) self.assertEqual(self.node['id'], port.node_id) @@ -780,9 +858,10 @@ class TestPost(test_api_base.BaseApiTest): def test_create_port_address_already_exist(self): address = 'AA:AA:AA:11:22:33' pdict = post_get_test_port(address=address) - self.post_json('/ports', pdict) + self.post_json('/ports', pdict, headers=self.headers) pdict['uuid'] = uuidutils.generate_uuid() - response = self.post_json('/ports', pdict, expect_errors=True) + response = self.post_json('/ports', pdict, expect_errors=True, + headers=self.headers) self.assertEqual(http_client.CONFLICT, response.status_int) self.assertEqual('application/json', response.content_type) error_msg = response.json['error_message'] @@ -797,6 +876,74 @@ class TestPost(test_api_base.BaseApiTest): self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) + def test_create_port_some_invalid_local_link_connection_key(self): + pdict = post_get_test_port( + local_link_connection={'switch_id': 'value1', + 'port_id': 'Ethernet1/15', + 'switch_foo': 'value3'}) + response = self.post_json('/ports', pdict, expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertTrue(response.json['error_message']) + + def test_create_port_local_link_connection_keys(self): + pdict = post_get_test_port( + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f', + 'port_id': 'Ethernet1/15', + 'switch_info': 'value3'}) + response = self.post_json('/ports', pdict, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.CREATED, response.status_int) + + def test_create_port_local_link_connection_switch_id_bad_mac(self): + pdict = 
post_get_test_port( + local_link_connection={'switch_id': 'zz:zz:zz:zz:zz:zz', + 'port_id': 'Ethernet1/15', + 'switch_info': 'value3'}) + response = self.post_json('/ports', pdict, expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertTrue(response.json['error_message']) + + def test_create_port_local_link_connection_missing_mandatory(self): + pdict = post_get_test_port( + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f', + 'switch_info': 'fooswitch'}) + response = self.post_json('/ports', pdict, expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + + def test_create_port_local_link_connection_missing_optional(self): + pdict = post_get_test_port( + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f', + 'port_id': 'Ethernet1/15'}) + response = self.post_json('/ports', pdict, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.CREATED, response.status_int) + + def test_create_port_with_llc_old_api_version(self): + headers = {api_base.Version.string: '1.14'} + pdict = post_get_test_port( + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f', + 'port_id': 'Ethernet1/15'}) + response = self.post_json('/ports', pdict, headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + + def test_create_port_with_pxe_enabled_old_api_version(self): + headers = {api_base.Version.string: '1.14'} + pdict = post_get_test_port( + pxe_enabled=False) + del pdict['local_link_connection'] + response = self.post_json('/ports', pdict, headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + 
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + @mock.patch.object(rpcapi.ConductorAPI, 'destroy_port') class TestDelete(test_api_base.BaseApiTest): diff --git a/ironic/tests/unit/api/v1/test_types.py b/ironic/tests/unit/api/v1/test_types.py index c05562f0d..ba893517f 100644 --- a/ironic/tests/unit/api/v1/test_types.py +++ b/ironic/tests/unit/api/v1/test_types.py @@ -287,3 +287,55 @@ class TestListType(base.TestCase): self.assertItemsEqual(['foo', 'bar'], v.validate("foo,foo,foo,bar")) self.assertIsInstance(v.validate('foo,bar'), list) + + +class TestLocalLinkConnectionType(base.TestCase): + + def test_local_link_connection_type(self): + v = types.locallinkconnectiontype + value = {'switch_id': '0a:1b:2c:3d:4e:5f', + 'port_id': 'value2', + 'switch_info': 'value3'} + self.assertItemsEqual(value, v.validate(value)) + + def test_local_link_connection_type_datapath_id(self): + v = types.locallinkconnectiontype + value = {'switch_id': '0000000000000000', + 'port_id': 'value2', + 'switch_info': 'value3'} + self.assertItemsEqual(value, + v.validate(value)) + + def test_local_link_connection_type_not_mac_or_datapath_id(self): + v = types.locallinkconnectiontype + value = {'switch_id': 'badid', + 'port_id': 'value2', + 'switch_info': 'value3'} + self.assertRaises(exception.InvalidSwitchID, v.validate, value) + + def test_local_link_connection_type_invalid_key(self): + v = types.locallinkconnectiontype + value = {'switch_id': '0a:1b:2c:3d:4e:5f', + 'port_id': 'value2', + 'switch_info': 'value3', + 'invalid_key': 'value'} + self.assertRaisesRegex(exception.Invalid, 'are invalid keys', + v.validate, value) + + def test_local_link_connection_type_missing_mandatory_key(self): + v = types.locallinkconnectiontype + value = {'switch_id': '0a:1b:2c:3d:4e:5f', + 'switch_info': 'value3'} + self.assertRaisesRegex(exception.Invalid, 'Missing mandatory', + v.validate, value) + + def test_local_link_connection_type_withou_optional_key(self): + v = 
types.locallinkconnectiontype + value = {'switch_id': '0a:1b:2c:3d:4e:5f', + 'port_id': 'value2'} + self.assertItemsEqual(value, v.validate(value)) + + def test_local_link_connection_type_empty_value(self): + v = types.locallinkconnectiontype + value = {} + self.assertItemsEqual(value, v.validate(value)) diff --git a/ironic/tests/unit/api/v1/test_utils.py b/ironic/tests/unit/api/v1/test_utils.py index 1a897717b..5d2058d55 100644 --- a/ironic/tests/unit/api/v1/test_utils.py +++ b/ironic/tests/unit/api/v1/test_utils.py @@ -82,6 +82,31 @@ class TestApiUtils(base.TestCase): values = utils.get_patch_values(patch, path) self.assertEqual(['node-x', 'node-y'], values) + def test_is_path_removed_success(self): + patch = [{'path': '/name', 'op': 'remove'}] + path = '/name' + value = utils.is_path_removed(patch, path) + self.assertTrue(value) + + def test_is_path_removed_subpath_success(self): + patch = [{'path': '/local_link_connection/switch_id', 'op': 'remove'}] + path = '/local_link_connection' + value = utils.is_path_removed(patch, path) + self.assertTrue(value) + + def test_is_path_removed_similar_subpath(self): + patch = [{'path': '/local_link_connection_info/switch_id', + 'op': 'remove'}] + path = '/local_link_connection' + value = utils.is_path_removed(patch, path) + self.assertFalse(value) + + def test_is_path_removed_replace(self): + patch = [{'path': '/name', 'op': 'replace', 'value': 'node-x'}] + path = '/name' + value = utils.is_path_removed(patch, path) + self.assertFalse(value) + def test_check_for_invalid_fields(self): requested = ['field_1', 'field_3'] supported = ['field_1', 'field_2', 'field_3'] @@ -200,6 +225,13 @@ class TestApiUtils(base.TestCase): mock_request.version.minor = 17 self.assertFalse(utils.allow_port_internal_info()) + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_allow_multitenancy_fields(self, mock_request): + mock_request.version.minor = 19 + self.assertTrue(utils.allow_port_advanced_net_fields()) + 
mock_request.version.minor = 18 + self.assertFalse(utils.allow_port_advanced_net_fields()) + class TestNodeIdent(base.TestCase): diff --git a/ironic/tests/unit/common/test_utils.py b/ironic/tests/unit/common/test_utils.py index 4b372a08e..ada2cf648 100644 --- a/ironic/tests/unit/common/test_utils.py +++ b/ironic/tests/unit/common/test_utils.py @@ -385,6 +385,14 @@ class GenericUtilsTestCase(base.TestCase): self.assertFalse(utils.is_valid_mac("AA BB CC DD EE FF")) self.assertFalse(utils.is_valid_mac("AA-BB-CC-DD-EE-FF")) + def test_is_valid_datapath_id(self): + self.assertTrue(utils.is_valid_datapath_id("525400cf2d319fdf")) + self.assertTrue(utils.is_valid_datapath_id("525400CF2D319FDF")) + self.assertFalse(utils.is_valid_datapath_id("52")) + self.assertFalse(utils.is_valid_datapath_id("52:54:00:cf:2d:31")) + self.assertFalse(utils.is_valid_datapath_id("notadatapathid00")) + self.assertFalse(utils.is_valid_datapath_id("5525400CF2D319FDF")) + def test_is_hostname_safe(self): self.assertTrue(utils.is_hostname_safe('spam')) self.assertFalse(utils.is_hostname_safe('spAm')) @@ -456,6 +464,15 @@ class GenericUtilsTestCase(base.TestCase): self.assertEqual(mac.lower(), utils.validate_and_normalize_mac(mac)) + def test_validate_and_normalize_datapath_id(self): + datapath_id = 'AA:BB:CC:DD:EE:FF' + with mock.patch.object(utils, 'is_valid_datapath_id', + autospec=True) as m_mock: + m_mock.return_value = True + self.assertEqual(datapath_id.lower(), + utils.validate_and_normalize_datapath_id( + datapath_id)) + def test_validate_and_normalize_mac_invalid_format(self): with mock.patch.object(utils, 'is_valid_mac', autospec=True) as m_mock: m_mock.return_value = False diff --git a/releasenotes/notes/add-port-advanced-net-fields-55465091f019d962.yaml b/releasenotes/notes/add-port-advanced-net-fields-55465091f019d962.yaml new file mode 100644 index 000000000..a857de47b --- /dev/null +++ b/releasenotes/notes/add-port-advanced-net-fields-55465091f019d962.yaml @@ -0,0 +1,8 @@ +--- 
+features: + - | + API version is bumped to 1.19, ``local_link_connection`` and + ``pxe_enabled`` fields were added to a Port: + + * ``pxe_enabled`` indicates whether PXE is enabled for the port. + * ``local_link_connection`` contains the port binding profile. From 787b9deb7357f36223e198de7bc0e39847db99ae Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 13 Jul 2016 23:51:19 +0000 Subject: [PATCH 068/166] Updated from global requirements Change-Id: I04305458d05cf836864d4df3bcc4d55926ec68fa --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3264502f8..61ea5200a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ paramiko>=2.0 # LGPLv2.1+ python-neutronclient>=4.2.0 # Apache-2.0 python-glanceclient>=2.0.0 # Apache-2.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.7.0 # Apache-2.0 -ironic-lib>=1.3.0 # Apache-2.0 +ironic-lib>=2.0.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 pytz>=2013.6 # MIT stevedore>=1.10.0 # Apache-2.0 From 22a80f77c578b4c10298ee16daf351fd5bfc25d4 Mon Sep 17 00:00:00 2001 From: Dao Cong Tien Date: Fri, 10 Jun 2016 15:25:46 +0700 Subject: [PATCH 069/166] Nova-compatible serial console: socat console_utils This adds console_utils functions for 'socat' console. 
Implements: - get_socat_console_url(): returns url for socat console - start_socat_console(): uses (socat + console_cmd execution) - stop_socat_console(): stops socat/console_cmd subprocess Change-Id: I79ddd83d12cc8111e05b5107359d6db8a8881d61 Spec: https://review.openstack.org/#/c/319505/ Related-Bug: #1553083 --- devstack/files/debs/ironic | 1 + devstack/files/rpms/ironic | 1 + etc/ironic/ironic.conf.sample | 6 +- ironic/conf/console.py | 8 +- ironic/drivers/modules/console_utils.py | 124 ++++++++++++++++- .../drivers/modules/test_console_utils.py | 126 ++++++++++++++++++ 6 files changed, 259 insertions(+), 7 deletions(-) diff --git a/devstack/files/debs/ironic b/devstack/files/debs/ironic index ce4ab42b4..4cb130e7d 100644 --- a/devstack/files/debs/ironic +++ b/devstack/files/debs/ironic @@ -21,3 +21,4 @@ tftpd-hpa xinetd squashfs-tools libvirt-dev +socat diff --git a/devstack/files/rpms/ironic b/devstack/files/rpms/ironic index ce90401ae..9bbf30e88 100644 --- a/devstack/files/rpms/ironic +++ b/devstack/files/rpms/ironic @@ -16,3 +16,4 @@ tftp-server xinetd squashfs-tools libvirt-devel +socat diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 796358e9f..f8c02390c 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -632,11 +632,13 @@ # From ironic # -# Path to serial console terminal program (string value) +# Path to serial console terminal program. Used only by +# Shell In A Box console. (string value) #terminal = shellinaboxd # Directory containing the terminal SSL cert(PEM) for serial -# console access (string value) +# console access. Used only by Shell In A Box console. +# (string value) #terminal_cert_dir = # Directory for holding terminal pid files. 
If not specified, diff --git a/ironic/conf/console.py b/ironic/conf/console.py index 692c0df6b..a6df1b561 100644 --- a/ironic/conf/console.py +++ b/ironic/conf/console.py @@ -21,10 +21,12 @@ from ironic.common.i18n import _ opts = [ cfg.StrOpt('terminal', default='shellinaboxd', - help=_('Path to serial console terminal program')), + help=_('Path to serial console terminal program. Used only ' + 'by Shell In A Box console.')), cfg.StrOpt('terminal_cert_dir', - help=_('Directory containing the terminal SSL cert(PEM) for ' - 'serial console access')), + help=_('Directory containing the terminal SSL cert (PEM) for ' + 'serial console access. Used only by Shell In A Box ' + 'console.')), cfg.StrOpt('terminal_pid_dir', help=_('Directory for holding terminal pid files. ' 'If not specified, the temporary directory ' diff --git a/ironic/drivers/modules/console_utils.py b/ironic/drivers/modules/console_utils.py index 6a54d0569..e7dde6709 100644 --- a/ironic/drivers/modules/console_utils.py +++ b/ironic/drivers/modules/console_utils.py @@ -34,6 +34,7 @@ from oslo_utils import netutils from ironic.common import exception from ironic.common.i18n import _ +from ironic.common.i18n import _LE from ironic.common.i18n import _LW from ironic.common import utils from ironic.conf import CONF @@ -110,8 +111,7 @@ def _stop_console(node_uuid): raise exception.ConsoleError(message=msg) else: LOG.warning(_LW("Console process for node %s is not running " - "but pid file exists while trying to stop " - "shellinabox console."), node_uuid) + "but pid file exists."), node_uuid) finally: ironic_utils.unlink_without_raise(_get_console_pid_file(node_uuid)) @@ -250,3 +250,123 @@ def stop_shellinabox_console(node_uuid): except exception.NoConsolePid: LOG.warning(_LW("No console pid found for node %s while trying to " "stop shellinabox console."), node_uuid) + + +def get_socat_console_url(port): + """Get a URL to access the console via socat. 
+ + :param port: the terminal port (integer) for the node + :return: an access URL to the socat console of the node + """ + console_host = CONF.my_ip + if netutils.is_valid_ipv6(console_host): + console_host = '[%s]' % console_host + + return 'tcp://%(host)s:%(port)s' % {'host': console_host, + 'port': port} + + +def start_socat_console(node_uuid, port, console_cmd): + """Open the serial console for a node. + + :param node_uuid: the uuid of the node + :param port: the terminal port for the node + :param console_cmd: the shell command that will be executed by socat to + establish console to the node + :raises ConsoleError: if the directory for the PID file or the PID file + cannot be created + :raises ConsoleSubprocessFailed: when invoking the subprocess failed + """ + # Make sure that the old console for this node is stopped. + # If no console is running, we may get exception NoConsolePid. + try: + _stop_console(node_uuid) + except exception.NoConsolePid: + pass + + _ensure_console_pid_dir_exists() + pid_file = _get_console_pid_file(node_uuid) + + # put together the command and arguments for invoking the console + args = ['socat'] + args.append('-L%s' % pid_file) + + console_host = CONF.my_ip + if netutils.is_valid_ipv6(console_host): + arg = 'TCP6-LISTEN:%(port)s,bind=[%(host)s],reuseaddr,fork' + else: + arg = 'TCP4-LISTEN:%(port)s,bind=%(host)s,reuseaddr,fork' + args.append(arg % {'host': console_host, + 'port': port}) + + args.append('EXEC:"%s",pty,stderr' % console_cmd) + + # run the command as a subprocess + try: + LOG.debug('Running subprocess: %s', ' '.join(args)) + # Use pipe here to catch the error in case socat + # fails to start. Note that socat uses stdout as transferring + # data, so we only capture stderr for checking if it fails. 
+ obj = subprocess.Popen(args, stderr=subprocess.PIPE) + except (OSError, ValueError) as e: + error = _("%(exec_error)s\n" + "Command: %(command)s") % {'exec_error': str(e), + 'command': ' '.join(args)} + LOG.exception(_LE('Unable to start socat console')) + raise exception.ConsoleSubprocessFailed(error=error) + + # NOTE: we need to check if socat fails to start here. + # If it starts successfully, it will run in non-daemon mode and + # will not return until the console session is stopped. + + def _wait(node_uuid, popen_obj): + wait_state['returncode'] = popen_obj.poll() + + # socat runs in non-daemon mode, so it should not return now + if wait_state['returncode'] is None: + # If the pid file is created and the process is running, + # we stop checking it periodically. + if (os.path.exists(pid_file) and + psutil.pid_exists(_get_console_pid(node_uuid))): + raise loopingcall.LoopingCallDone() + else: + # socat returned, it failed to start. + # We get the error (out should be None in this case). + (_out, err) = popen_obj.communicate() + wait_state['errstr'] = _( + "Command: %(command)s.\n" + "Exit code: %(return_code)s.\n" + "Stderr: %(error)r") % { + 'command': ' '.join(args), + 'return_code': wait_state['returncode'], + 'error': err} + LOG.error(wait_state['errstr']) + raise loopingcall.LoopingCallDone() + + if time.time() > expiration: + wait_state['errstr'] = (_("Timeout while waiting for console " + "subprocess to start for node %s.") % + node_uuid) + LOG.error(wait_state['errstr']) + raise loopingcall.LoopingCallDone() + + wait_state = {'returncode': None, 'errstr': ''} + expiration = time.time() + CONF.console.subprocess_timeout + timer = loopingcall.FixedIntervalLoopingCall(_wait, node_uuid, obj) + timer.start(interval=CONF.console.subprocess_checking_interval).wait() + + if wait_state['errstr']: + raise exception.ConsoleSubprocessFailed(error=wait_state['errstr']) + + +def stop_socat_console(node_uuid): + """Close the serial console for a node. 
+ + :param node_uuid: the UUID of the node + :raise ConsoleError: if unable to stop the console process + """ + try: + _stop_console(node_uuid) + except exception.NoConsolePid: + LOG.warning(_LW("No console pid found for node %s while trying to " + "stop socat console."), node_uuid) diff --git a/ironic/tests/unit/drivers/modules/test_console_utils.py b/ironic/tests/unit/drivers/modules/test_console_utils.py index 66b79a52c..d54fb6bf6 100644 --- a/ironic/tests/unit/drivers/modules/test_console_utils.py +++ b/ironic/tests/unit/drivers/modules/test_console_utils.py @@ -407,3 +407,129 @@ class ConsoleUtilsTestCase(db_base.DbTestCase): console_utils.stop_shellinabox_console(self.info['uuid']) mock_stop.assert_called_once_with(self.info['uuid']) + + def test_get_socat_console_url_tcp(self): + self.config(my_ip="10.0.0.1") + url = console_utils.get_socat_console_url(self.info['port']) + self.assertEqual("tcp://10.0.0.1:%s" % self.info['port'], url) + + def test_get_socat_console_url_tcp6(self): + self.config(my_ip='::1') + url = console_utils.get_socat_console_url(self.info['port']) + self.assertEqual("tcp://[::1]:%s" % self.info['port'], url) + + @mock.patch.object(os.path, 'exists', autospec=True) + @mock.patch.object(subprocess, 'Popen', autospec=True) + @mock.patch.object(psutil, 'pid_exists', autospec=True) + @mock.patch.object(console_utils, '_get_console_pid', autospec=True) + @mock.patch.object(console_utils, '_ensure_console_pid_dir_exists', + autospec=True) + @mock.patch.object(console_utils, '_stop_console', autospec=True) + def test_start_socat_console(self, mock_stop, + mock_dir_exists, + mock_get_pid, + mock_pid_exists, + mock_popen, + mock_path_exists): + mock_popen.return_value.pid = 23456 + mock_popen.return_value.poll.return_value = None + mock_popen.return_value.communicate.return_value = (None, None) + + mock_get_pid.return_value = 23456 + mock_path_exists.return_value = True + + console_utils.start_socat_console(self.info['uuid'], + self.info['port'], 
+ 'ls&') + + mock_stop.assert_called_once_with(self.info['uuid']) + mock_dir_exists.assert_called_once_with() + mock_get_pid.assert_called_with(self.info['uuid']) + mock_path_exists.assert_called_with(mock.ANY) + mock_popen.assert_called_once_with(mock.ANY, stderr=subprocess.PIPE) + + @mock.patch.object(os.path, 'exists', autospec=True) + @mock.patch.object(subprocess, 'Popen', autospec=True) + @mock.patch.object(psutil, 'pid_exists', autospec=True) + @mock.patch.object(console_utils, '_get_console_pid', autospec=True) + @mock.patch.object(console_utils, '_ensure_console_pid_dir_exists', + autospec=True) + @mock.patch.object(console_utils, '_stop_console', autospec=True) + def test_start_socat_console_nopid(self, mock_stop, + mock_dir_exists, + mock_get_pid, + mock_pid_exists, + mock_popen, + mock_path_exists): + # no existing PID file before starting + mock_stop.side_effect = exception.NoConsolePid('/tmp/blah') + mock_popen.return_value.pid = 23456 + mock_popen.return_value.poll.return_value = None + mock_popen.return_value.communicate.return_value = (None, None) + + mock_get_pid.return_value = 23456 + mock_path_exists.return_value = True + + console_utils.start_socat_console(self.info['uuid'], + self.info['port'], + 'ls&') + + mock_stop.assert_called_once_with(self.info['uuid']) + mock_dir_exists.assert_called_once_with() + mock_get_pid.assert_called_with(self.info['uuid']) + mock_path_exists.assert_called_with(mock.ANY) + mock_popen.assert_called_once_with(mock.ANY, stderr=subprocess.PIPE) + + @mock.patch.object(subprocess, 'Popen', autospec=True) + @mock.patch.object(console_utils, '_ensure_console_pid_dir_exists', + autospec=True) + @mock.patch.object(console_utils, '_stop_console', autospec=True) + def test_start_socat_console_fail(self, mock_stop, mock_dir_exists, + mock_popen): + mock_popen.side_effect = OSError() + mock_popen.return_value.pid = 23456 + mock_popen.return_value.poll.return_value = 1 + mock_popen.return_value.communicate.return_value = (None, 
'error') + + self.assertRaises(exception.ConsoleSubprocessFailed, + console_utils.start_socat_console, + self.info['uuid'], + self.info['port'], + 'ls&') + + mock_stop.assert_called_once_with(self.info['uuid']) + mock_dir_exists.assert_called_once_with() + mock_popen.assert_called_once_with(mock.ANY, stderr=subprocess.PIPE) + + @mock.patch.object(subprocess, 'Popen', autospec=True) + @mock.patch.object(console_utils, '_ensure_console_pid_dir_exists', + autospec=True) + @mock.patch.object(console_utils, '_stop_console', autospec=True) + def test_start_socat_console_fail_nopiddir(self, mock_stop, + mock_dir_exists, + mock_popen): + mock_dir_exists.side_effect = exception.ConsoleError(message='fail') + + self.assertRaises(exception.ConsoleError, + console_utils.start_socat_console, + self.info['uuid'], + self.info['port'], + 'ls&') + + mock_stop.assert_called_once_with(self.info['uuid']) + mock_dir_exists.assert_called_once_with() + mock_popen.assert_not_called() + + @mock.patch.object(console_utils, '_stop_console', autospec=True) + def test_stop_socat_console(self, mock_stop): + console_utils.stop_socat_console(self.info['uuid']) + mock_stop.assert_called_once_with(self.info['uuid']) + + @mock.patch.object(console_utils.LOG, 'warning', autospec=True) + @mock.patch.object(console_utils, '_stop_console', autospec=True) + def test_stop_socat_console_fail_nopid(self, mock_stop, mock_log_warning): + mock_stop.side_effect = exception.NoConsolePid('/tmp/blah') + console_utils.stop_socat_console(self.info['uuid']) + mock_stop.assert_called_once_with(self.info['uuid']) + # LOG.warning() is called when _stop_console() raises NoConsolePid + self.assertTrue(mock_log_warning.called) From 6d846590bc3d740239b6350f446757f4a94e34f4 Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Wed, 13 Jul 2016 18:12:35 -0700 Subject: [PATCH 070/166] Follow-up fixes to 206244 This commit is a collection of follow-up fixes to the nits/comments on 
https://review.openstack.org/#/c/206244/116 Change-Id: I5631f5df3c0b6a3baa386da8b93b1e888c1f8d3c --- api-ref/source/baremetal-api-v1-ports.inc | 5 +++++ doc/source/webapi/v1.rst | 3 +-- ironic/api/controllers/v1/port.py | 2 +- ironic/api/controllers/v1/types.py | 13 +++++++------ ironic/common/exception.py | 2 +- ironic/common/utils.py | 4 ++-- ironic/tests/unit/api/v1/test_ports.py | 13 +++++-------- ironic/tests/unit/api/v1/test_types.py | 2 +- ironic/tests/unit/api/v1/test_utils.py | 2 +- ...d-port-advanced-net-fields-55465091f019d962.yaml | 10 ++++++---- 10 files changed, 30 insertions(+), 26 deletions(-) diff --git a/api-ref/source/baremetal-api-v1-ports.inc b/api-ref/source/baremetal-api-v1-ports.inc index 27a4d4a83..390bd3bb1 100644 --- a/api-ref/source/baremetal-api-v1-ports.inc +++ b/api-ref/source/baremetal-api-v1-ports.inc @@ -32,6 +32,11 @@ API microversion 1.8 added the ``fields`` Request parameter. When specified, this causes the content of the Response to include only the specified fields, rather than the default set. +API microversion 1.19 added the ``pxe_enabled`` and ``local_link_connection`` +fields. + +.. TODO: add pxe_enabled and local_link_connection to all sample files + Normal response code: 200 Request diff --git a/doc/source/webapi/v1.rst b/doc/source/webapi/v1.rst index d6465e7af..d1c6b536e 100644 --- a/doc/source/webapi/v1.rst +++ b/doc/source/webapi/v1.rst @@ -34,8 +34,7 @@ API Versions History **1.19** - This API version adds the multitenancy-related ``local_link_connection`` - and ``pxe_enabled`` fields to a port. + Add ``local_link_connection`` and ``pxe_enabled`` fields to the port object. 
**1.18** diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py index 852078b47..15ebab67f 100644 --- a/ironic/api/controllers/v1/port.py +++ b/ironic/api/controllers/v1/port.py @@ -99,7 +99,7 @@ class Port(base.APIBase): """Indicates whether pxe is enabled or disabled on the node.""" local_link_connection = types.locallinkconnectiontype - """The port binding profile for each port""" + """The port binding profile for the port""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated port links""" diff --git a/ironic/api/controllers/v1/types.py b/ironic/api/controllers/v1/types.py index 7976af753..9cfe206b7 100644 --- a/ironic/api/controllers/v1/types.py +++ b/ironic/api/controllers/v1/types.py @@ -309,14 +309,15 @@ class LocalLinkConnectionType(wtypes.UserType): # Check switch_id is either a valid mac address or # OpenFlow datapath_id and normalize it. - if utils.is_valid_mac(value['switch_id']): + try: value['switch_id'] = utils.validate_and_normalize_mac( value['switch_id']) - elif utils.is_valid_datapath_id(value['switch_id']): - value['switch_id'] = utils.validate_and_normalize_datapath_id( - value['switch_id']) - else: - raise exception.InvalidSwitchID(switch_id=value['switch_id']) + except exception.InvalidMAC: + try: + value['switch_id'] = utils.validate_and_normalize_datapath_id( + value['switch_id']) + except exception.InvalidDatapathID: + raise exception.InvalidSwitchID(switch_id=value['switch_id']) return value diff --git a/ironic/common/exception.py b/ironic/common/exception.py index ca761cd1d..10d3ac408 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -197,7 +197,7 @@ class InvalidSwitchID(Invalid): "received %(switch_id)s.") -class InvalidDatapathId(Invalid): +class InvalidDatapathID(Invalid): _msg_fmt = _("Expected an OpenFlow datapath ID but received " "%(datapath_id)s.") diff --git a/ironic/common/utils.py b/ironic/common/utils.py index 
1f76a9a17..ac2642e9c 100644 --- a/ironic/common/utils.py +++ b/ironic/common/utils.py @@ -308,12 +308,12 @@ def validate_and_normalize_datapath_id(datapath_id): :param datapath_id: OpenFlow datapath_id to be validated and normalized. :returns: Normalized and validated OpenFlow datapath_id. - :raises: InvalidDatapathId If an OpenFlow datapath_id is not valid. + :raises: InvalidDatapathID If an OpenFlow datapath_id is not valid. """ if not is_valid_datapath_id(datapath_id): - raise exception.InvalidDatapathId(datapath_id=datapath_id) + raise exception.InvalidDatapathID(datapath_id=datapath_id) return datapath_id.lower() diff --git a/ironic/tests/unit/api/v1/test_ports.py b/ironic/tests/unit/api/v1/test_ports.py index 87b3f1ac1..59f6648aa 100644 --- a/ironic/tests/unit/api/v1/test_ports.py +++ b/ironic/tests/unit/api/v1/test_ports.py @@ -64,7 +64,6 @@ class TestListPorts(test_api_base.BaseApiTest): def setUp(self): super(TestListPorts, self).setUp() self.node = obj_utils.create_test_node(self.context) - self.headers = {api_base.Version.string: str(api_v1.MAX_VER)} def test_empty(self): data = self.get_json('/ports') @@ -281,7 +280,8 @@ class TestListPorts(test_api_base.BaseApiTest): self.assertEqual(sorted(ports), uuids) def test_sort_key_invalid(self): - invalid_keys_list = ['foo', 'extra', 'internal_info'] + invalid_keys_list = ['foo', 'extra', 'internal_info', + 'local_link_connection'] for invalid_key in invalid_keys_list: response = self.get_json( '/ports?sort_key=%s' % invalid_key, expect_errors=True, @@ -377,8 +377,6 @@ class TestPatch(test_api_base.BaseApiTest): self.mock_gtf = p.start() self.mock_gtf.return_value = 'test-topic' self.addCleanup(p.stop) - self.headers = {api_base.Version.string: str( - versions.MAX_VERSION_STRING)} def test_update_byid(self, mock_upd): extra = {'foo': 'bar'} @@ -471,7 +469,7 @@ class TestPatch(test_api_base.BaseApiTest): '/local_link_connection/switch_id', 'value': switch_id, 'op': 'replace'}], - headers=self.headers) + 
headers={api_base.Version.string: '1.19'}) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(switch_id, @@ -713,7 +711,7 @@ class TestPatch(test_api_base.BaseApiTest): [{'path': '/pxe_enabled', 'value': pxe_enabled, 'op': 'replace'}], - headers=self.headers) + headers={api_base.Version.string: '1.19'}) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(pxe_enabled, response.json['pxe_enabled']) @@ -936,8 +934,7 @@ class TestPost(test_api_base.BaseApiTest): def test_create_port_with_pxe_enabled_old_api_version(self): headers = {api_base.Version.string: '1.14'} - pdict = post_get_test_port( - pxe_enabled=False) + pdict = post_get_test_port(pxe_enabled=False) del pdict['local_link_connection'] response = self.post_json('/ports', pdict, headers=headers, expect_errors=True) diff --git a/ironic/tests/unit/api/v1/test_types.py b/ironic/tests/unit/api/v1/test_types.py index ba893517f..4c3035d91 100644 --- a/ironic/tests/unit/api/v1/test_types.py +++ b/ironic/tests/unit/api/v1/test_types.py @@ -329,7 +329,7 @@ class TestLocalLinkConnectionType(base.TestCase): self.assertRaisesRegex(exception.Invalid, 'Missing mandatory', v.validate, value) - def test_local_link_connection_type_withou_optional_key(self): + def test_local_link_connection_type_without_optional_key(self): v = types.locallinkconnectiontype value = {'switch_id': '0a:1b:2c:3d:4e:5f', 'port_id': 'value2'} diff --git a/ironic/tests/unit/api/v1/test_utils.py b/ironic/tests/unit/api/v1/test_utils.py index 5d2058d55..0e6998bd3 100644 --- a/ironic/tests/unit/api/v1/test_utils.py +++ b/ironic/tests/unit/api/v1/test_utils.py @@ -226,7 +226,7 @@ class TestApiUtils(base.TestCase): self.assertFalse(utils.allow_port_internal_info()) @mock.patch.object(pecan, 'request', spec_set=['version']) - def test_allow_multitenancy_fields(self, mock_request): + def test_allow_port_advanced_net_fields(self, mock_request): 
mock_request.version.minor = 19 self.assertTrue(utils.allow_port_advanced_net_fields()) mock_request.version.minor = 18 diff --git a/releasenotes/notes/add-port-advanced-net-fields-55465091f019d962.yaml b/releasenotes/notes/add-port-advanced-net-fields-55465091f019d962.yaml index a857de47b..a3ab5f820 100644 --- a/releasenotes/notes/add-port-advanced-net-fields-55465091f019d962.yaml +++ b/releasenotes/notes/add-port-advanced-net-fields-55465091f019d962.yaml @@ -1,8 +1,10 @@ --- features: - | - API version is bumped to 1.19, ``local_link_connection`` and - ``pxe_enabled`` fields were added to a Port: + Exposes the ``local_link_connection`` and ``pxe_enabled`` properties of the + Port resource to the REST API, raising the API maximum version to 1.19. - * ``pxe_enabled`` indicates whether PXE is enabled for the port. - * ``local_link_connection`` contains the port binding profile. + * The ``pxe_enabled`` field indicates whether this Port should be used when + PXE booting this Node. + * The ``local_link_connection`` field may be used to supply the port + binding profile. From af78314694c6d91d0f33c226581d49c2a327abbc Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Tue, 12 Apr 2016 12:54:00 -0700 Subject: [PATCH 071/166] Centralize config options - [neutron] Nova style refactor of config options in Ironic. 
Change-Id: I5963f08dd2d476ebf242851b6b4647916393d648 Partial-Bug: #1561100 --- ironic/common/neutron.py | 41 +---------------------------- ironic/conf/__init__.py | 2 ++ ironic/conf/neutron.py | 57 ++++++++++++++++++++++++++++++++++++++++ ironic/conf/opts.py | 2 +- ironic/dhcp/neutron.py | 3 +-- 5 files changed, 62 insertions(+), 43 deletions(-) create mode 100644 ironic/conf/neutron.py diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py index 780d4d111..b79e96d3f 100644 --- a/ironic/common/neutron.py +++ b/ironic/common/neutron.py @@ -12,7 +12,6 @@ from neutronclient.common import exceptions as neutron_exceptions from neutronclient.v2_0 import client as clientv20 -from oslo_config import cfg from oslo_log import log from ironic.common import exception @@ -21,48 +20,10 @@ from ironic.common.i18n import _LE from ironic.common.i18n import _LI from ironic.common.i18n import _LW from ironic.common import keystone +from ironic.conf import CONF LOG = log.getLogger(__name__) -CONF = cfg.CONF -CONF.import_opt('my_ip', 'ironic.netconf') - -neutron_opts = [ - cfg.StrOpt('url', - default='http://$my_ip:9696', - help=_('URL for connecting to neutron.')), - cfg.IntOpt('url_timeout', - default=30, - help=_('Timeout value for connecting to neutron in seconds.')), - cfg.IntOpt('port_setup_delay', - default=0, - min=0, - help=_('Delay value to wait for Neutron agents to setup ' - 'sufficient DHCP configuration for port.')), - cfg.IntOpt('retries', - default=3, - help=_('Client retries in the case of a failed request.')), - cfg.StrOpt('auth_strategy', - default='keystone', - choices=['keystone', 'noauth'], - help=_('Authentication strategy to use when connecting to ' - 'neutron. Running neutron in noauth mode (related to ' - 'but not affected by this setting) is insecure and ' - 'should only be used for testing.')), - cfg.StrOpt('cleaning_network_uuid', - help=_('Neutron network UUID for the ramdisk to be booted ' - 'into for cleaning nodes. 
Required for "neutron" ' - 'network interface. It is also required if cleaning ' - 'nodes when using "flat" network interface or "neutron" ' - 'DHCP provider.')), - cfg.StrOpt('provisioning_network_uuid', - help=_('Neutron network UUID for the ramdisk to be booted ' - 'into for provisioning nodes. Required for "neutron" ' - 'network interface.')), -] - -CONF.register_opts(neutron_opts, group='neutron') - def get_client(token=None): params = { diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index baff22891..aaccf4933 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -28,6 +28,7 @@ from ironic.conf import inspector from ironic.conf import ipmi from ironic.conf import irmc from ironic.conf import keystone +from ironic.conf import neutron from ironic.conf import oneview from ironic.conf import seamicro from ironic.conf import snmp @@ -50,6 +51,7 @@ inspector.register_opts(CONF) ipmi.register_opts(CONF) irmc.register_opts(CONF) keystone.register_opts(CONF) +neutron.register_opts(CONF) oneview.register_opts(CONF) seamicro.register_opts(CONF) snmp.register_opts(CONF) diff --git a/ironic/conf/neutron.py b/ironic/conf/neutron.py new file mode 100644 index 000000000..03869d594 --- /dev/null +++ b/ironic/conf/neutron.py @@ -0,0 +1,57 @@ +# Copyright 2016 Intel Corporation +# Copyright 2014 OpenStack Foundation +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.StrOpt('url', + default='http://$my_ip:9696', + help=_('URL for connecting to neutron.')), + cfg.IntOpt('url_timeout', + default=30, + help=_('Timeout value for connecting to neutron in seconds.')), + cfg.IntOpt('port_setup_delay', + default=0, + min=0, + help=_('Delay value to wait for Neutron agents to setup ' + 'sufficient DHCP configuration for port.')), + cfg.IntOpt('retries', + default=3, + help=_('Client retries in the case of a failed request.')), + cfg.StrOpt('auth_strategy', + default='keystone', + choices=['keystone', 'noauth'], + help=_('Authentication strategy to use when connecting to ' + 'neutron. Running neutron in noauth mode (related to ' + 'but not affected by this setting) is insecure and ' + 'should only be used for testing.')), + cfg.StrOpt('cleaning_network_uuid', + help=_('Neutron network UUID for the ramdisk to be booted ' + 'into for cleaning nodes. Required for "neutron" ' + 'network interface. It is also required if cleaning ' + 'nodes when using "flat" network interface or "neutron" ' + 'DHCP provider.')), + cfg.StrOpt('provisioning_network_uuid', + help=_('Neutron network UUID for the ramdisk to be booted ' + 'into for provisioning nodes. 
Required for "neutron" ' + 'network interface.')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='neutron') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 604db652b..5043cb007 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -71,7 +71,7 @@ _opts = [ ('irmc', ironic.conf.irmc.opts), ('iscsi', ironic.drivers.modules.iscsi_deploy.iscsi_opts), ('keystone', ironic.conf.keystone.opts), - ('neutron', ironic.common.neutron.neutron_opts), + ('neutron', ironic.conf.neutron.opts), ('oneview', ironic.conf.oneview.opts), ('pxe', itertools.chain( ironic.drivers.modules.iscsi_deploy.pxe_opts, diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py index a5027e05f..b412ae7d9 100644 --- a/ironic/dhcp/neutron.py +++ b/ironic/dhcp/neutron.py @@ -17,7 +17,6 @@ import time from neutronclient.common import exceptions as neutron_client_exc -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils @@ -27,11 +26,11 @@ from ironic.common.i18n import _LE from ironic.common.i18n import _LW from ironic.common import network from ironic.common import neutron +from ironic.conf import CONF from ironic.dhcp import base from ironic.drivers.modules import ssh from ironic import objects -CONF = cfg.CONF LOG = logging.getLogger(__name__) create_cleaning_ports_deprecation = False From 8fe320753c10ede6445781cc884cadc2bc235ea5 Mon Sep 17 00:00:00 2001 From: vsaienko Date: Fri, 11 Dec 2015 12:05:59 +0200 Subject: [PATCH 072/166] Update Ironic VM network connection When VM is dirrectly connected to OVS (current setup) there is no port in OVS when VM is in powered off state. Since Ironic plug port to network when VM is in powered-off state. Port should be present in OVS even when VM is powered-off. This patch adds additional network bridge called br-$VM_NAME for each VM. Adds additional interface ovs-$VM_NAME which is added to both OVS and br-$VM_NAME. 
And configures VM NIC in br-$VM_NAME instead of direct connection to OVS switch. Now connection looks like: Linux Bridge OVS -------- ----------- --------- | vm-1 | <--> | br-vm-1 | <---> | br-bm | -------- ----------- --------- Change-Id: I317dd9429684a14d16ecf2887d45c2b014e36a84 Partial-bug: #1526403 --- devstack/lib/ironic | 6 ++++++ devstack/tools/ironic/scripts/configure-vm.py | 7 ++++--- devstack/tools/ironic/scripts/create-node.sh | 14 +++++++++++++- devstack/tools/ironic/templates/vm.xml | 5 ++--- 4 files changed, 25 insertions(+), 7 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index c7199ef20..9128e9ceb 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -1306,6 +1306,12 @@ function cleanup_baremetal_basic_ops { local vm_name for vm_name in $(_ironic_bm_vm_names); do sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/cleanup-node.sh $vm_name" + # Cleanup node bridge/interfaces + sudo ip link set ovs-$vm_name down + sudo ip link set br-$vm_name down + sudo ovs-vsctl del-port ovs-$vm_name + sudo ip link del dev ovs-$vm_name + sudo ip link del dev br-$vm_name done sudo ovs-vsctl --if-exists del-br $IRONIC_VM_NETWORK_BRIDGE diff --git a/devstack/tools/ironic/scripts/configure-vm.py b/devstack/tools/ironic/scripts/configure-vm.py index 24cb04b0f..26feadd73 100755 --- a/devstack/tools/ironic/scripts/configure-vm.py +++ b/devstack/tools/ironic/scripts/configure-vm.py @@ -68,10 +68,11 @@ def main(): help="CPU count for the VM.") parser.add_argument('--bootdev', default='hd', help="What boot device to use (hd/network).") - parser.add_argument('--network', default="brbm", - help='The libvirt network name to use') parser.add_argument('--libvirt-nic-driver', default='virtio', help='The libvirt network driver to use') + parser.add_argument('--bridge', default="br-seed", + help='The linux bridge name to use for seeding \ + the baremetal pseudo-node\'s OS image') parser.add_argument('--console-log', help='File to log console') 
parser.add_argument('--emulator', default=None, @@ -89,7 +90,7 @@ def main(): 'memory': args.memory, 'cpus': args.cpus, 'bootdev': args.bootdev, - 'network': args.network, + 'bridge': args.bridge, 'nicdriver': args.libvirt_nic_driver, 'emulator': args.emulator, 'disk_format': args.disk_format diff --git a/devstack/tools/ironic/scripts/create-node.sh b/devstack/tools/ironic/scripts/create-node.sh index 172a22ee3..20793db91 100755 --- a/devstack/tools/ironic/scripts/create-node.sh +++ b/devstack/tools/ironic/scripts/create-node.sh @@ -62,6 +62,18 @@ else fi VOL_NAME="${NAME}.${DISK_FORMAT}" +# Create bridge and add VM interface to it. +# Additional interface will be added to this bridge and +# it will be plugged to OVS. +# This is needed in order to have interface in OVS even +# when VM is in shutdown state + +sudo brctl addbr br-$NAME +sudo ip link set br-$NAME up +sudo ovs-vsctl add-port $BRIDGE ovs-$NAME -- set Interface ovs-$NAME type=internal +sudo ip link set ovs-$NAME up +sudo brctl addif br-$NAME ovs-$NAME + if ! virsh list --all | grep -q $NAME; then virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL >&2 @@ -73,7 +85,7 @@ if ! virsh list --all | grep -q $NAME; then $TOP_DIR/scripts/configure-vm.py \ --bootdev network --name $NAME --image "$volume_path" \ --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER \ - --emulator $EMULATOR --network $BRIDGE --disk-format $DISK_FORMAT $VM_LOGGING >&2 + --emulator $EMULATOR --bridge br-$NAME --disk-format $DISK_FORMAT $VM_LOGGING >&2 # Createa Virtual BMC for the node if IPMI is used if [[ $(type -P vbmc) != "" ]]; then diff --git a/devstack/tools/ironic/templates/vm.xml b/devstack/tools/ironic/templates/vm.xml index cbfa39192..64d7899ba 100644 --- a/devstack/tools/ironic/templates/vm.xml +++ b/devstack/tools/ironic/templates/vm.xml @@ -28,9 +28,8 @@
- - - + +
From 75fc071b54a07fbdbaabaebd359299170bd7fda2 Mon Sep 17 00:00:00 2001 From: vsaienko Date: Fri, 11 Dec 2015 13:14:00 +0200 Subject: [PATCH 073/166] Add configure_provision_network function This change allows to configure ironic provision network by setting IRONIC_PROVISION_NETWORK_NAME variable. In this case additional interface $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID will be configured with IRONIC_PROVISION_SUBNET_GATEWAY ip address. Additional configuration parameters are: IRONIC_PROVISION_PROVIDER_NETWORK_TYPE IRONIC_PROVISION_SEGMENTATION_ID IRONIC_PROVISION_ALLOCATION_POOL IRONIC_PROVISION_SUBNET_GATEWAY IRONIC_PROVISION_SUBNET_PREFIX Change-Id: I619f2fb92aafe7348b0a47eaaaad1790df5ae5c7 Partial-bug: #1526403 --- devstack/lib/ironic | 80 +++++++++++++++++++++++++++++++++++++++++++-- devstack/plugin.sh | 10 ++++++ 2 files changed, 87 insertions(+), 3 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 9128e9ceb..50d8a57ce 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -250,6 +250,30 @@ IRONIC_ENABLED_NETWORK_INTERFACES=${IRONIC_ENABLED_NETWORK_INTERFACES:-} # This is the network interface to use for a node IRONIC_NETWORK_INTERFACE=${IRONIC_NETWORK_INTERFACE:-} +# Ironic provision network name +IRONIC_PROVISION_NETWORK_NAME=${IRONIC_PROVISION_NETWORK_NAME:-} + +# Provision network provider type. Can be flat or vlan. +IRONIC_PROVISION_PROVIDER_NETWORK_TYPE=${IRONIC_PROVISION_PROVIDER_NETWORK_TYPE:-'vlan'} + +# If IRONIC_PROVISION_PROVIDER_NETWORK_TYPE is vlan. VLAN_ID may be specified. If it is not set, +# vlan will be allocated dynamically. +IRONIC_PROVISION_SEGMENTATION_ID=${IRONIC_PROVISION_SEGMENTATION_ID:-} + +# Allocation network pool for provision network +# Example: IRONIC_PROVISION_ALLOCATION_POOL=start=10.0.5.10,end=10.0.5.100 +IRONIC_PROVISION_ALLOCATION_POOL=${IRONIC_PROVISION_ALLOCATION_POOL:-} + +# Ironic provision subnet name. 
+IRONIC_PROVISION_PROVIDER_SUBNET_NAME=${IRONIC_PROVISION_PROVIDER_SUBNET_NAME:-${IRONIC_PROVISION_NETWORK_NAME}-subnet} + +# Ironic provision subnet gateway. +IRONIC_PROVISION_SUBNET_GATEWAY=${IRONIC_PROVISION_SUBNET_GATEWAY:-} + +# Ironic provision subnet prefix +# Example: IRONIC_PROVISION_SUBNET_PREFIX=10.0.5.0/24 +IRONIC_PROVISION_SUBNET_PREFIX=${IRONIC_PROVISION_SUBNET_PREFIX:-} + # get_pxe_boot_file() - Get the PXE/iPXE boot file path function get_pxe_boot_file { local relpath=syslinux/pxelinux.0 @@ -452,6 +476,59 @@ function configure_ironic_dirs { fi } +function configure_ironic_provision_network { + + die_if_not_set $LINENO IRONIC_PROVISION_SUBNET_PREFIX "You must specify the IRONIC_PROVISION_SUBNET_PREFIX" + die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" + die_if_not_set $LINENO IRONIC_PROVISION_SUBNET_GATEWAY "You must specify the IRONIC_PROVISION_SUBNET_GATEWAY" + + local net_id + net_id=$(neutron net-create --provider:network_type $IRONIC_PROVISION_PROVIDER_NETWORK_TYPE \ + --provider:physical_network "$PHYSICAL_NETWORK" \ + ${IRONIC_PROVISION_SEGMENTATION_ID:+--provider:segmentation_id $IRONIC_PROVISION_SEGMENTATION_ID} \ + ${IRONIC_PROVISION_NETWORK_NAME} | grep ' id ' | get_field 2) + + die_if_not_set $LINENO net_id "Failure creating net_id for $IRONIC_PROVISION_NETWORK_NAME" + local subnet_id + subnet_id="$(neutron subnet-create --ip_version 4 \ + ${IRONIC_PROVISION_ALLOCATION_POOL:+--allocation-pool $IRONIC_PROVISION_ALLOCATION_POOL} \ + --name $IRONIC_PROVISION_PROVIDER_SUBNET_NAME \ + --gateway $IRONIC_PROVISION_SUBNET_GATEWAY $net_id \ + $IRONIC_PROVISION_SUBNET_PREFIX | grep ' id ' | get_field 2)" + + die_if_not_set $LINENO subnet_id "Failure creating SUBNET_ID for $IRONIC_PROVISION_NETWORK_NAME" + + iniset $IRONIC_CONF_FILE neutron provisioning_network_uuid $net_id + + IRONIC_PROVISION_SEGMENTATION_ID=${IRONIC_PROVISION_SEGMENTATION_ID:-`neutron net-show ${net_id} | grep -w 'provider:segmentation_id'| 
get_field 2`} + provision_net_prefix=${IRONIC_PROVISION_SUBNET_PREFIX##*/} + + # Set provision network GW on physical interface + # Add vlan on br interface in case of IRONIC_PROVISION_PROVIDER_NETWORK_TYPE==vlan + # othervise assign ip to br interface directly. + if [[ "$IRONIC_PROVISION_PROVIDER_NETWORK_TYPE" == "vlan" ]]; then + sudo vconfig add $OVS_PHYSICAL_BRIDGE $IRONIC_PROVISION_SEGMENTATION_ID + sudo ip link set dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID up + sudo ip addr add dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID $IRONIC_PROVISION_SUBNET_GATEWAY/$provision_net_prefix + else + sudo ip link set dev $OVS_PHYSICAL_BRIDGE up + sudo ip addr add dev $OVS_PHYSICAL_BRIDGE $IRONIC_PROVISION_SUBNET_GATEWAY/$provision_net_prefix + fi +} + +function cleanup_ironic_provision_network { + if [[ -z "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then + return 0 + fi + # Cleanup OVS_PHYSICAL_BRIDGE subinterfaces + local bridge_subint + bridge_subint=$(cat /proc/net/dev | sed -n "s/^\(${OVS_PHYSICAL_BRIDGE}\.[0-9]*\).*/\1/p") + for sub_int in $bridge_subint; do + sudo ip link set dev $sub_int down + sudo ip link del dev $sub_int + done +} + # configure_ironic() - Set config files, create data dirs, etc function configure_ironic { configure_ironic_dirs @@ -1281,9 +1358,6 @@ function prepare_baremetal_basic_ops { configure_ironic_auxiliary fi upload_baremetal_ironic_deploy - if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then - create_bridge_and_vms - fi enroll_nodes configure_tftpd configure_iptables diff --git a/devstack/plugin.sh b/devstack/plugin.sh index f10358ba5..bd635c487 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -37,6 +37,15 @@ if is_service_enabled ir-api ir-cond; then # Initialize ironic init_ironic + if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then + echo_summary "Creating bridge and VMs" + create_bridge_and_vms + fi + if [[ -n "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then + echo_summary "Configuring Ironic provisioning 
network" + configure_ironic_provision_network + fi + # Start the ironic API and ironic taskmgr components echo_summary "Starting Ironic" start_ironic @@ -51,6 +60,7 @@ if is_service_enabled ir-api ir-cond; then # unstack - Called by unstack.sh before other services are shut down. stop_ironic + cleanup_ironic_provision_network cleanup_baremetal_basic_ops fi From 0fcf2e8b51e7dbbcde6d4480b8a7b9c807651546 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Thu, 14 Jul 2016 12:20:40 +0100 Subject: [PATCH 074/166] Deprecate [ilo]/clean_priority_erase_devices config The [ilo]/clean_priority_erase_devices configuration option is duplicated by [deploy]/erase_devices_priority, this patch is marking the ilo configuration as deprecated. The ironic.conf.sample was also updated to reflect the changes. Closes-Bug: #1515871 Change-Id: I2bf18e35d97160d31a51c8447745bfd60c099af2 --- etc/ironic/ironic.conf.sample | 32 +++++++++++++------ ironic/conf/ilo.py | 4 +++ ironic/drivers/modules/ilo/deploy.py | 10 +++--- ...vice-priority-config-509661955a11c28e.yaml | 5 +++ 4 files changed, 37 insertions(+), 14 deletions(-) create mode 100644 releasenotes/notes/ilo-erase-device-priority-config-509661955a11c28e.yaml diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 796358e9f..d5298e646 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -920,17 +920,18 @@ # Size of EFI system partition in MiB when configuring UEFI # systems for local boot. (integer value) -# Deprecated group/name - [deploy]/efi_system_partition_size #efi_system_partition_size = 200 +# Size of BIOS Boot partition in MiB when configuring GPT +# partitioned systems for local boot in BIOS. (integer value) +#bios_boot_partition_size = 1 + # Block size to use when writing to the nodes disk. 
(string # value) -# Deprecated group/name - [deploy]/dd_block_size #dd_block_size = 1M # Maximum attempts to verify an iSCSI connection is active, # sleeping 1 second between attempts. (integer value) -# Deprecated group/name - [deploy]/iscsi_verify_attempts #iscsi_verify_attempts = 3 @@ -1108,9 +1109,13 @@ # (boolean value) #use_web_server_for_images = false -# Priority for erase devices clean step. If unset, it defaults -# to 10. If set to 0, the step will be disabled and will not -# run during cleaning. (integer value) +# DEPRECATED: Priority for erase devices clean step. If unset, +# it defaults to 10. If set to 0, the step will be disabled +# and will not run during cleaning. (integer value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: This configuration option is duplicated by [deploy] +# erase_devices_priority, please use that instead. #clean_priority_erase_devices = # Priority for reset_ilo clean step. (integer value) @@ -1289,7 +1294,16 @@ # From keystonemiddleware.auth_token # -# Complete public Identity API endpoint. (string value) +# Complete "public" Identity API endpoint. This endpoint +# should not be an "admin" endpoint, as it should be +# accessible by all end users. Unauthenticated clients are +# redirected to this endpoint to authenticate. Although this +# endpoint should ideally be unversioned, client support in +# the wild varies. If you're using a versioned v2 endpoint +# here, then this should *not* be the same endpoint the +# service user utilizes for validating tokens, because normal +# end users may not be able to reach that endpoint. (string +# value) #auth_uri = # API version of the admin Identity API endpoint. 
(string @@ -1428,12 +1442,12 @@ # (list value) #hash_algorithms = md5 -# Authentication type to load (unknown value) +# Authentication type to load (string value) # Deprecated group/name - [keystone_authtoken]/auth_plugin #auth_type = # Config Section from which to load plugin specific options -# (unknown value) +# (string value) #auth_section = diff --git a/ironic/conf/ilo.py b/ironic/conf/ilo.py index 2276e94ed..54f86cbaf 100644 --- a/ironic/conf/ilo.py +++ b/ironic/conf/ilo.py @@ -41,6 +41,10 @@ opts = [ 'to host the floppy images and generated ' 'boot_iso.')), cfg.IntOpt('clean_priority_erase_devices', + deprecated_for_removal=True, + deprecated_reason=_('This configuration option is duplicated ' + 'by [deploy] erase_devices_priority, ' + 'please use that instead.'), help=_('Priority for erase devices clean step. If unset, ' 'it defaults to 10. If set to 0, the step will be ' 'disabled and will not run during cleaning.')), diff --git a/ironic/drivers/modules/ilo/deploy.py b/ironic/drivers/modules/ilo/deploy.py index 9dc57f262..cd4fa4c48 100644 --- a/ironic/drivers/modules/ilo/deploy.py +++ b/ironic/drivers/modules/ilo/deploy.py @@ -293,12 +293,12 @@ class IloVirtualMediaAgentDeploy(agent.AgentDeploy): :returns: A list of clean step dictionaries """ - # TODO(stendulker): All drivers use CONF.deploy.erase_devices_priority - # agent_ilo driver should also use the same. Defect has been filed for - # the same. 
- # https://bugs.launchpad.net/ironic/+bug/1515871 + priority = CONF.ilo.clean_priority_erase_devices + if priority is None: + priority = CONF.deploy.erase_devices_priority + new_priorities = { - 'erase_devices': CONF.ilo.clean_priority_erase_devices, + 'erase_devices': priority, } return deploy_utils.agent_get_clean_steps( task, interface='deploy', diff --git a/releasenotes/notes/ilo-erase-device-priority-config-509661955a11c28e.yaml b/releasenotes/notes/ilo-erase-device-priority-config-509661955a11c28e.yaml new file mode 100644 index 000000000..24f223140 --- /dev/null +++ b/releasenotes/notes/ilo-erase-device-priority-config-509661955a11c28e.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - The [ilo]/clean_priority_erase_devices config is deprecated and will be + removed in the Ocata cycle. Please use the [deploy]/erase_devices_priority + config instead. From 2bfd13643c870b66800734f7fafeb6e1e9204896 Mon Sep 17 00:00:00 2001 From: Akilan Pughazhendi Date: Thu, 14 Jul 2016 20:25:37 +0000 Subject: [PATCH 075/166] Grammar fix in code contribution guide Change-Id: Iac29eb7f0b8217476ebc87c2da455efe01d72417 --- doc/source/dev/code-contribution-guide.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/code-contribution-guide.rst b/doc/source/dev/code-contribution-guide.rst index 43017916e..e669b7ed3 100644 --- a/doc/source/dev/code-contribution-guide.rst +++ b/doc/source/dev/code-contribution-guide.rst @@ -158,7 +158,7 @@ Agent driver attributes: These are only some fields in use. Other vendor drivers might expose more ``driver_internal_info`` properties, please check their development documentation and/or module docstring for details. 
It is important for developers to make sure these properties follow the precedent of prefixing their - variable names with a specific interface name(e.g., iboot_bar, amt_xyz), so as to minimize or avoid + variable names with a specific interface name (e.g., iboot_bar, amt_xyz), so as to minimize or avoid any conflicts between interfaces. From 8b2a7bd2aac5e67820e6644a20df19af48eb5015 Mon Sep 17 00:00:00 2001 From: Chris Krelle Date: Tue, 12 Jul 2016 18:13:11 -0700 Subject: [PATCH 076/166] Update devstack section of quickstart to use agent_ipmitool This patch updates the sample local.conf to use the agent_ipmitool driver instead of the pxe_ssh driver. It also adds a note about *_ssh drivers being phased out in favor of ipmitool + vbmc. Change-Id: I1bc58b6b97f2aa0c864ebe112ca3c5a88b9ca561 --- doc/source/dev/dev-quickstart.rst | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/doc/source/dev/dev-quickstart.rst b/doc/source/dev/dev-quickstart.rst index df4e78f9d..21120d1ae 100644 --- a/doc/source/dev/dev-quickstart.rst +++ b/doc/source/dev/dev-quickstart.rst @@ -377,9 +377,9 @@ Switch to the stack user and clone DevStack:: git clone https://git.openstack.org/openstack-dev/devstack.git devstack Create devstack/local.conf with minimal settings required to enable Ironic. -You can use either of two drivers for deploy: pxe_* or agent_*, see :ref:`IPA` +You can use either of two drivers for deploy: agent\_\* or pxe\_\*, see :ref:`IPA` for explanation. An example local.conf that enables both types of drivers -and uses the ``pxe_ssh`` driver by default:: +and uses the ``agent_ipmitool`` driver by default:: cd devstack cat >local.conf <`_ + to control the power state of the virtual baremetal nodes. + .. note:: When running QEMU as non-root user (e.g. ``qemu`` on Fedora or ``libvirt-qemu`` on Ubuntu), make sure ``IRONIC_VM_LOG_DIR`` points to a directory where QEMU will be able to write. 
From c62e1bee29e795ece05b6be8c613910d75a7543e Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 17 May 2016 13:59:50 +0300 Subject: [PATCH 077/166] Expose node's network_interface field in API This patch exposes the node's network_interface field in the REST API. It also adds restrictions on the node states in which network interface change is possible and whether the requested network interface is enabled. As a temporary solution until the driver composition work is completed, we have taken an approach that requires all API and Conductor nodes to have the same setting for enabled_network_interfaces. There are inline notes in the code indicating where we will address this in the future. Partial-bug: #1526403 Co-Authored-By: Om Kumar Co-Authored-By: Vasyl Saienko Co-Authored-By: Sivaramakrishna Garimella Co-Authored-By: Vladyslav Drok Co-Authored-By: Zhenguo Niu Change-Id: I67495196c3334f51ed034f4ca6e32a3e01a58f15 --- doc/source/webapi/v1.rst | 4 + etc/ironic/ironic.conf.sample | 29 ++++-- ironic/api/controllers/v1/node.py | 54 ++++++++++- ironic/api/controllers/v1/utils.py | 37 ++++++++ ironic/api/controllers/v1/versions.py | 4 +- ironic/common/driver_factory.py | 6 +- ironic/conductor/manager.py | 24 ++++- ironic/tests/unit/api/v1/test_nodes.py | 90 +++++++++++++++++++ ironic/tests/unit/api/v1/test_utils.py | 23 +++++ ironic/tests/unit/conductor/test_manager.py | 45 ++++++++++ ...etwork-interface-api-a3a56b8d0c796d88.yaml | 7 ++ 11 files changed, 310 insertions(+), 13 deletions(-) create mode 100644 releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml diff --git a/doc/source/webapi/v1.rst b/doc/source/webapi/v1.rst index d1c6b536e..0b4f929a9 100644 --- a/doc/source/webapi/v1.rst +++ b/doc/source/webapi/v1.rst @@ -32,6 +32,10 @@ always requests the newest supported API version. API Versions History -------------------- +**1.20** + + Add node ``network_interface`` field. 
+ **1.19** Add ``local_link_connection`` and ``pxe_enabled`` fields to the port object. diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 796358e9f..cde6ae681 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -37,8 +37,11 @@ # recommended set of production-oriented network interfaces. A # complete list of network interfaces present on your system # may be found by enumerating the -# "ironic.hardware.interfaces.network" entrypoint. (list -# value) +# "ironic.hardware.interfaces.network" entrypoint.This value +# must be the same on all ironic-conductor and ironic-api +# services, because it is used by ironic-api service to +# validate a new or updated node's network_interface value. +# (list value) #enabled_network_interfaces = flat,noop # Default network interface to be used for nodes that do not @@ -920,17 +923,18 @@ # Size of EFI system partition in MiB when configuring UEFI # systems for local boot. (integer value) -# Deprecated group/name - [deploy]/efi_system_partition_size #efi_system_partition_size = 200 +# Size of BIOS Boot partition in MiB when configuring GPT +# partitioned systems for local boot in BIOS. (integer value) +#bios_boot_partition_size = 1 + # Block size to use when writing to the nodes disk. (string # value) -# Deprecated group/name - [deploy]/dd_block_size #dd_block_size = 1M # Maximum attempts to verify an iSCSI connection is active, # sleeping 1 second between attempts. (integer value) -# Deprecated group/name - [deploy]/iscsi_verify_attempts #iscsi_verify_attempts = 3 @@ -1289,7 +1293,16 @@ # From keystonemiddleware.auth_token # -# Complete public Identity API endpoint. (string value) +# Complete "public" Identity API endpoint. This endpoint +# should not be an "admin" endpoint, as it should be +# accessible by all end users. Unauthenticated clients are +# redirected to this endpoint to authenticate. 
Although this +# endpoint should ideally be unversioned, client support in +# the wild varies. If you're using a versioned v2 endpoint +# here, then this should *not* be the same endpoint the +# service user utilizes for validating tokens, because normal +# end users may not be able to reach that endpoint. (string +# value) #auth_uri = # API version of the admin Identity API endpoint. (string @@ -1428,12 +1441,12 @@ # (list value) #hash_algorithms = md5 -# Authentication type to load (unknown value) +# Authentication type to load (string value) # Deprecated group/name - [keystone_authtoken]/auth_plugin #auth_type = # Config Section from which to load plugin specific options -# (unknown value) +# (string value) #auth_section = diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py index 303bbd3ca..021aa4de8 100644 --- a/ironic/api/controllers/v1/node.py +++ b/ironic/api/controllers/v1/node.py @@ -45,6 +45,7 @@ from ironic import objects CONF = cfg.CONF CONF.import_opt('heartbeat_timeout', 'ironic.conductor.manager', group='conductor') +CONF.import_opt('enabled_network_interfaces', 'ironic.common.driver_factory') LOG = log.getLogger(__name__) _CLEAN_STEPS_SCHEMA = { @@ -109,7 +110,12 @@ def get_nodes_controller_reserved_names(): def hide_fields_in_newer_versions(obj): - # if requested version is < 1.3, hide driver_internal_info + """This method hides fields that were added in newer API versions. + + Certain node fields were introduced at certain API versions. + These fields are only made available when the request's API version + matches or exceeds the versions when these fields were introduced. 
+ """ if pecan.request.version.minor < versions.MINOR_3_DRIVER_INTERNAL_INFO: obj.driver_internal_info = wsme.Unset @@ -128,6 +134,9 @@ def hide_fields_in_newer_versions(obj): obj.raid_config = wsme.Unset obj.target_raid_config = wsme.Unset + if pecan.request.version.minor < versions.MINOR_20_NETWORK_INTERFACE: + obj.network_interface = wsme.Unset + def update_state_in_older_versions(obj): """Change provision state names for API backwards compatability. @@ -696,6 +705,9 @@ class Node(base.APIBase): states = wsme.wsattr([link.Link], readonly=True) """Links to endpoint for retrieving and setting node states""" + network_interface = wsme.wsattr(wtypes.text) + """The network interface to be used for this node""" + # NOTE(deva): "conductor_affinity" shouldn't be presented on the # API because it's an internal value. Don't add it here. @@ -794,7 +806,8 @@ class Node(base.APIBase): maintenance=False, maintenance_reason=None, inspection_finished_at=None, inspection_started_at=time, console_enabled=False, clean_step={}, - raid_config=None, target_raid_config=None) + raid_config=None, target_raid_config=None, + network_interface='flat') # NOTE(matty_dubs): The chassis_uuid getter() is based on the # _chassis_uuid variable: sample._chassis_uuid = 'edcad704-b2da-41d5-96d9-afd580ecfa12' @@ -1129,6 +1142,7 @@ class NodesController(rest.RestController): api_utils.check_allow_specify_fields(fields) api_utils.check_for_invalid_state_and_allow_filter(provision_state) api_utils.check_allow_specify_driver(driver) + api_utils.check_allow_specify_network_interface_in_fields(fields) if fields is None: fields = _DEFAULT_RETURN_FIELDS return self._get_nodes_collection(chassis_uuid, instance_uuid, @@ -1213,6 +1227,7 @@ class NodesController(rest.RestController): raise exception.OperationNotPermitted() api_utils.check_allow_specify_fields(fields) + api_utils.check_allow_specify_network_interface_in_fields(fields) rpc_node = api_utils.get_rpc_node(node_ident) return 
Node.convert_with_links(rpc_node, fields=fields) @@ -1226,6 +1241,26 @@ class NodesController(rest.RestController): if self.from_chassis: raise exception.OperationNotPermitted() + n_interface = node.network_interface + if (not api_utils.allow_network_interface() and + n_interface is not wtypes.Unset): + raise exception.NotAcceptable() + + # NOTE(vsaienko) The validation is performed on API side, + # all conductors and api should have the same list of + # enabled_network_interfaces. + # TODO(vsaienko) remove it once driver-composition-reform + # is implemented. + if (n_interface is not wtypes.Unset and + not api_utils.is_valid_network_interface(n_interface)): + error_msg = _("Cannot create node with the invalid network " + "interface '%(n_interface)s'. Enabled network " + "interfaces are: %(enabled_int)s") + raise wsme.exc.ClientSideError( + error_msg % {'n_interface': n_interface, + 'enabled_int': CONF.enabled_network_interfaces}, + status_code=http_client.BAD_REQUEST) + # NOTE(deva): get_topic_for checks if node.driver is in the hash ring # and raises NoValidHost if it is not. 
# We need to ensure that node has a UUID before it can @@ -1265,6 +1300,21 @@ class NodesController(rest.RestController): if self.from_chassis: raise exception.OperationNotPermitted() + n_interfaces = api_utils.get_patch_values(patch, '/network_interface') + if n_interfaces and not api_utils.allow_network_interface(): + raise exception.NotAcceptable() + + for n_interface in n_interfaces: + if (n_interface is not None and + not api_utils.is_valid_network_interface(n_interface)): + error_msg = _("Node %(node)s: Cannot change " + "network_interface to invalid value: " + "%(n_interface)s") + raise wsme.exc.ClientSideError( + error_msg % {'node': node_ident, + 'n_interface': n_interface}, + status_code=http_client.BAD_REQUEST) + rpc_node = api_utils.get_rpc_node(node_ident) remove_inst_uuid_patch = [{'op': 'remove', 'path': '/instance_uuid'}] diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py index 70098f488..51ccef062 100644 --- a/ironic/api/controllers/v1/utils.py +++ b/ironic/api/controllers/v1/utils.py @@ -240,6 +240,34 @@ def check_allow_specify_fields(fields): raise exception.NotAcceptable() +def check_allow_specify_network_interface_in_fields(fields): + """Check if fetching a network_interface attribute is allowed. + + Version 1.20 of the API allows to fetching a network_interface + attribute. This method check if the required version is being + requested. + """ + if (fields is not None + and 'network_interface' in fields + and not allow_network_interface()): + raise exception.NotAcceptable() + + +# NOTE(vsaienko) The validation is performed on API side, all conductors +# and api should have the same list of enabled_network_interfaces. +# TODO(vsaienko) remove it once driver-composition-reform is implemented. +def is_valid_network_interface(network_interface): + """Determine if the provided network_interface is valid. + + Check to see that the provided network_interface is in the enabled + network interfaces list. 
+ + :param: network_interface: the node network interface to check. + :returns: True if the network_interface is valid, False otherwise. + """ + return network_interface in CONF.enabled_network_interfaces + + def check_allow_management_verbs(verb): min_version = MIN_VERB_VERSIONS.get(verb) if min_version is not None and pecan.request.version.minor < min_version: @@ -322,6 +350,15 @@ def allow_port_advanced_net_fields(): versions.MINOR_19_PORT_ADVANCED_NET_FIELDS) +def allow_network_interface(): + """Check if we should support network_interface node field. + + Version 1.20 of the API added support for network interfaces. + """ + return (pecan.request.version.minor >= + versions.MINOR_20_NETWORK_INTERFACE) + + def get_controller_reserved_names(cls): """Get reserved names for a given controller. diff --git a/ironic/api/controllers/v1/versions.py b/ironic/api/controllers/v1/versions.py index 9bbfd32c7..d2e75862b 100644 --- a/ironic/api/controllers/v1/versions.py +++ b/ironic/api/controllers/v1/versions.py @@ -49,6 +49,7 @@ BASE_VERSION = 1 # v1.17: Add 'adopt' verb for ADOPTING active nodes. # v1.18: Add port.internal_info. # v1.19: Add port.local_link_connection and port.pxe_enabled. +# v1.20: Add node.network_interface MINOR_0_JUNO = 0 MINOR_1_INITIAL_VERSION = 1 @@ -70,11 +71,12 @@ MINOR_16_DRIVER_FILTER = 16 MINOR_17_ADOPT_VERB = 17 MINOR_18_PORT_INTERNAL_INFO = 18 MINOR_19_PORT_ADVANCED_NET_FIELDS = 19 +MINOR_20_NETWORK_INTERFACE = 20 # When adding another version, update MINOR_MAX_VERSION and also update # doc/source/webapi/v1.rst with a detailed explanation of what the version has # changed. 
-MINOR_MAX_VERSION = MINOR_19_PORT_ADVANCED_NET_FIELDS +MINOR_MAX_VERSION = MINOR_20_NETWORK_INTERFACE # String representations of the minor and maximum versions MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION) diff --git a/ironic/common/driver_factory.py b/ironic/common/driver_factory.py index bd4439f9e..967ac9252 100644 --- a/ironic/common/driver_factory.py +++ b/ironic/common/driver_factory.py @@ -51,7 +51,11 @@ driver_opts = [ 'production-oriented network interfaces. A complete ' 'list of network interfaces present on your system may ' 'be found by enumerating the ' - '"ironic.hardware.interfaces.network" entrypoint.')), + '"ironic.hardware.interfaces.network" entrypoint.' + 'This value must be the same on all ironic-conductor ' + 'and ironic-api services, because it is used by ' + 'ironic-api service to validate a new or updated ' + 'node\'s network_interface value.')), cfg.StrOpt('default_network_interface', help=_('Default network interface to be used for nodes that ' 'do not have network_interface field set. A complete ' diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py index 5b277b887..28e348751 100644 --- a/ironic/conductor/manager.py +++ b/ironic/conductor/manager.py @@ -91,7 +91,8 @@ class ConductorManager(base_manager.BaseConductorManager): @messaging.expected_exceptions(exception.InvalidParameterValue, exception.MissingParameterValue, - exception.NodeLocked) + exception.NodeLocked, + exception.InvalidState) def update_node(self, context, node_obj): """Update a node with the supplied data. 
@@ -113,6 +114,27 @@ class ConductorManager(base_manager.BaseConductorManager): if 'maintenance' in delta and not node_obj.maintenance: node_obj.maintenance_reason = None + if 'network_interface' in delta: + allowed_update_states = [states.ENROLL, states.INSPECTING, + states.MANAGEABLE] + if not (node_obj.provision_state in allowed_update_states or + node_obj.maintenance): + action = _("Node %(node)s can not have network_interface " + "updated unless it is in one of allowed " + "(%(allowed)s) states or in maintenance mode.") + raise exception.InvalidState( + action % {'node': node_obj.uuid, + 'allowed': ', '.join(allowed_update_states)}) + net_iface = node_obj.network_interface + if net_iface not in CONF.enabled_network_interfaces: + raise exception.InvalidParameterValue( + _("Cannot change network_interface to invalid value " + "%(n_interface)s for node %(node)s, valid interfaces " + "are: %(valid_choices)s.") % { + 'n_interface': net_iface, 'node': node_obj.uuid, + 'valid_choices': CONF.enabled_network_interfaces, + }) + driver_name = node_obj.driver if 'driver' in delta else None with task_manager.acquire(context, node_id, shared=False, driver_name=driver_name, diff --git a/ironic/tests/unit/api/v1/test_nodes.py b/ironic/tests/unit/api/v1/test_nodes.py index 483cd069a..5fa6cf2e6 100644 --- a/ironic/tests/unit/api/v1/test_nodes.py +++ b/ironic/tests/unit/api/v1/test_nodes.py @@ -110,6 +110,7 @@ class TestListNodes(test_api_base.BaseApiTest): self.assertNotIn('clean_step', data['nodes'][0]) self.assertNotIn('raid_config', data['nodes'][0]) self.assertNotIn('target_raid_config', data['nodes'][0]) + self.assertNotIn('network_interface', data['nodes'][0]) # never expose the chassis_id self.assertNotIn('chassis_id', data['nodes'][0]) @@ -135,6 +136,7 @@ class TestListNodes(test_api_base.BaseApiTest): self.assertIn('inspection_started_at', data) self.assertIn('clean_step', data) self.assertIn('states', data) + self.assertIn('network_interface', data) # never expose the 
chassis_id self.assertNotIn('chassis_id', data) @@ -206,6 +208,25 @@ class TestListNodes(test_api_base.BaseApiTest): self.assertItemsEqual(['driver_info', 'links'], data) self.assertEqual('******', data['driver_info']['fake_password']) + def test_get_network_interface_fields_invalid_api_version(self): + node = obj_utils.create_test_node(self.context, + chassis_id=self.chassis.id) + fields = 'network_interface' + response = self.get_json( + '/nodes/%s?fields=%s' % (node.uuid, fields), + headers={api_base.Version.string: str(api_v1.MIN_VER)}, + expect_errors=True) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + + def test_get_network_interface_fields(self): + node = obj_utils.create_test_node(self.context, + chassis_id=self.chassis.id) + fields = 'network_interface' + response = self.get_json( + '/nodes/%s?fields=%s' % (node.uuid, fields), + headers={api_base.Version.string: str(api_v1.MAX_VER)}) + self.assertIn('network_interface', response) + def test_detail(self): node = obj_utils.create_test_node(self.context, chassis_id=self.chassis.id) @@ -229,6 +250,7 @@ class TestListNodes(test_api_base.BaseApiTest): self.assertIn('inspection_started_at', data['nodes'][0]) self.assertIn('raid_config', data['nodes'][0]) self.assertIn('target_raid_config', data['nodes'][0]) + self.assertIn('network_interface', data['nodes'][0]) # never expose the chassis_id self.assertNotIn('chassis_id', data['nodes'][0]) @@ -303,6 +325,17 @@ class TestListNodes(test_api_base.BaseApiTest): headers={api_base.Version.string: "1.7"}) self.assertEqual({"foo": "bar"}, data['clean_step']) + def test_hide_fields_in_newer_versions_network_interface(self): + node = obj_utils.create_test_node(self.context, + network_interface='flat') + data = self.get_json( + '/nodes/detail', headers={api_base.Version.string: '1.19'}) + self.assertNotIn('network_interface', data['nodes'][0]) + new_data = self.get_json( + '/nodes/detail', headers={api_base.Version.string: '1.20'}) + 
self.assertEqual(node.network_interface, + new_data['nodes'][0]["network_interface"]) + def test_many(self): nodes = [] for id in range(5): @@ -1390,6 +1423,35 @@ class TestPatch(test_api_base.BaseApiTest): self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) + def test_update_network_interface(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + network_interface = 'flat' + headers = {api_base.Version.string: str(api_v1.MAX_VER)} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/network_interface', + 'value': network_interface, + 'op': 'add'}], + headers=headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + + def test_update_network_interface_old_api(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + network_interface = 'flat' + headers = {api_base.Version.string: '1.15'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/network_interface', + 'value': network_interface, + 'op': 'add'}], + headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + class TestPost(test_api_base.BaseApiTest): @@ -1703,6 +1765,34 @@ class TestPost(test_api_base.BaseApiTest): # Assert RPC method wasn't called this time self.assertFalse(get_methods_mock.called) + def test_create_node_network_interface(self): + ndict = test_api_utils.post_get_test_node( + network_interface='flat') + response = self.post_json('/nodes', ndict, + headers={api_base.Version.string: + str(api_v1.MAX_VER)}) + self.assertEqual(http_client.CREATED, response.status_int) + result = self.get_json('/nodes/%s' % ndict['uuid'], + headers={api_base.Version.string: + 
str(api_v1.MAX_VER)}) + self.assertEqual('flat', result['network_interface']) + + def test_create_node_network_interface_old_api_version(self): + ndict = test_api_utils.post_get_test_node( + network_interface='flat') + response = self.post_json('/nodes', ndict, expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + + def test_create_node_invalid_network_interface(self): + ndict = test_api_utils.post_get_test_node( + network_interface='foo') + response = self.post_json('/nodes', ndict, expect_errors=True, + headers={api_base.Version.string: + str(api_v1.MAX_VER)}) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + class TestDelete(test_api_base.BaseApiTest): diff --git a/ironic/tests/unit/api/v1/test_utils.py b/ironic/tests/unit/api/v1/test_utils.py index 0e6998bd3..68d896d08 100644 --- a/ironic/tests/unit/api/v1/test_utils.py +++ b/ironic/tests/unit/api/v1/test_utils.py @@ -130,6 +130,22 @@ class TestApiUtils(base.TestCase): self.assertRaises(exception.NotAcceptable, utils.check_allow_specify_fields, ['foo']) + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_check_allow_specify_network_interface(self, mock_request): + mock_request.version.minor = 20 + self.assertIsNone( + utils.check_allow_specify_network_interface_in_fields( + ['network_interface'])) + + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_check_allow_specify_network_interface_in_fields_fail( + self, mock_request): + mock_request.version.minor = 19 + self.assertRaises( + exception.NotAcceptable, + utils.check_allow_specify_network_interface_in_fields, + ['network_interface']) + @mock.patch.object(pecan, 'request', spec_set=['version']) def test_check_allow_specify_driver(self, mock_request): mock_request.version.minor = 16 @@ -232,6 +248,13 @@ class TestApiUtils(base.TestCase): 
mock_request.version.minor = 18 self.assertFalse(utils.allow_port_advanced_net_fields()) + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_allow_network_interface(self, mock_request): + mock_request.version.minor = 20 + self.assertTrue(utils.allow_network_interface()) + mock_request.version.minor = 19 + self.assertFalse(utils.allow_network_interface()) + class TestNodeIdent(base.TestCase): diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py index 74483b815..6553afa7d 100644 --- a/ironic/tests/unit/conductor/test_manager.py +++ b/ironic/tests/unit/conductor/test_manager.py @@ -285,6 +285,51 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, node.refresh() self.assertEqual(existing_driver, node.driver) + def test_update_network_node_deleting_state(self): + node = obj_utils.create_test_node(self.context, driver='fake', + provision_state=states.DELETING, + network_interface='flat') + old_iface = node.network_interface + node.network_interface = 'noop' + exc = self.assertRaises(messaging.rpc.ExpectedException, + self.service.update_node, + self.context, node) + self.assertEqual(exception.InvalidState, exc.exc_info[0]) + node.refresh() + self.assertEqual(old_iface, node.network_interface) + + def test_update_network_node_manageable_state(self): + node = obj_utils.create_test_node(self.context, driver='fake', + provision_state=states.MANAGEABLE, + network_interface='flat') + node.network_interface = 'noop' + self.service.update_node(self.context, node) + node.refresh() + self.assertEqual('noop', node.network_interface) + + def test_update_network_node_active_state_and_maintenance(self): + node = obj_utils.create_test_node(self.context, driver='fake', + provision_state=states.ACTIVE, + network_interface='flat', + maintenance=True) + node.network_interface = 'noop' + self.service.update_node(self.context, node) + node.refresh() + self.assertEqual('noop', node.network_interface) + + def 
test_update_node_invalid_network_interface(self): + node = obj_utils.create_test_node(self.context, driver='fake', + provision_state=states.MANAGEABLE, + network_interface='flat') + old_iface = node.network_interface + node.network_interface = 'cosci' + exc = self.assertRaises(messaging.rpc.ExpectedException, + self.service.update_node, + self.context, node) + self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0]) + node.refresh() + self.assertEqual(old_iface, node.network_interface) + @mgr_utils.mock_record_keepalive class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, diff --git a/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml b/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml new file mode 100644 index 000000000..d778eb480 --- /dev/null +++ b/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml @@ -0,0 +1,7 @@ +--- +features: + - Bumped API version to 1.20. It adds API methods to work with + ``network_interface`` node object field, that specifies the network + interface to use for that node. Its value must be identical and + present in the ``[DEFAULT]enabled_network_interfaces`` list option + on conductor and api nodes. 
From b9ff26261b9fbf3d7ace8cd11e0de75259d213ad Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 15 Jul 2016 03:41:47 +0000 Subject: [PATCH 078/166] Updated from global requirements Change-Id: I35bf616f3ebd699b121123be85863a204efac4c2 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 61ea5200a..f1cad9928 100644 --- a/requirements.txt +++ b/requirements.txt @@ -43,4 +43,4 @@ retrying!=1.3.0,>=1.2.3 # Apache-2.0 oslo.versionedobjects>=1.9.1 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT psutil<2.0.0,>=1.1.1 # BSD -futurist>=0.11.0 # Apache-2.0 +futurist!=0.15.0,>=0.11.0 # Apache-2.0 From c46da47af9bd0c0609650e3edc36b05546657f15 Mon Sep 17 00:00:00 2001 From: Shivanand Tendulker Date: Thu, 14 Jul 2016 21:00:33 -0700 Subject: [PATCH 079/166] Follow-up patch of 0fcf2e8b51e7dbbcde6d4480b8a7b9c807651546 This commit removes documentation related to deprecated config parameter [ilo]/clean_priority_erase_devices Closes-Bug: #1515871 Change-Id: I9a4e063138db53c9ce2a18877e86d5873cb30a85 --- doc/source/drivers/ilo.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/drivers/ilo.rst b/doc/source/drivers/ilo.rst index 19e5680d9..352eaea7e 100644 --- a/doc/source/drivers/ilo.rst +++ b/doc/source/drivers/ilo.rst @@ -775,7 +775,6 @@ Supported **Automated** Cleaning Operations - clean_priority_reset_secure_boot_keys_to_default=20 - clean_priority_clear_secure_boot_keys=0 - clean_priority_reset_ilo_credential=30 - - clean_priority_erase_devices=10 For more information on node automated cleaning, see :ref:`automated_cleaning` From 47985ea077e59c23adb05056e84fd38b8ffa5534 Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Thu, 14 Jul 2016 17:03:18 -0700 Subject: [PATCH 080/166] Follow-up to 317392 Fixing two nits on the last review of 317392. 
Change-Id: Ib6e7a696b6ae2b4a5197d20009ee21f06ebba811 --- ironic/common/driver_factory.py | 2 +- .../add-network-interface-api-a3a56b8d0c796d88.yaml | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/ironic/common/driver_factory.py b/ironic/common/driver_factory.py index 967ac9252..28906f464 100644 --- a/ironic/common/driver_factory.py +++ b/ironic/common/driver_factory.py @@ -51,7 +51,7 @@ driver_opts = [ 'production-oriented network interfaces. A complete ' 'list of network interfaces present on your system may ' 'be found by enumerating the ' - '"ironic.hardware.interfaces.network" entrypoint.' + '"ironic.hardware.interfaces.network" entrypoint. ' 'This value must be the same on all ironic-conductor ' 'and ironic-api services, because it is used by ' 'ironic-api service to validate a new or updated ' diff --git a/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml b/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml index d778eb480..fb891a3f0 100644 --- a/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml +++ b/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml @@ -1,7 +1,9 @@ --- features: - - Bumped API version to 1.20. It adds API methods to work with - ``network_interface`` node object field, that specifies the network - interface to use for that node. Its value must be identical and - present in the ``[DEFAULT]enabled_network_interfaces`` list option - on conductor and api nodes. + - | + Exposes the ``network_interface`` field of the Node resource to the REST + API, raising the API maximum version to 1.20. This field is the network + interface to use for a node. Its possible values are from the + configuration option ``[DEFAULT]enabled_network_interfaces``. Note that + the value of this option must be the same on all the ironic-conductor and + ironic-api service nodes. 
From 6c1aa56429f9ce27428b9bf55791903228e24083 Mon Sep 17 00:00:00 2001 From: Igor Kalnitsky Date: Tue, 12 Jul 2016 18:45:01 +0300 Subject: [PATCH 081/166] Fix fake.FakeBoot.prepare_ramdisk() signature According to `BootInterface` [1], the `prepare_ramdisk` method must receive 3 arguments (self, task, ramdisk_params) while `FakeBoot` implementation receives only 2 arguments (self, task). This cause a runtime error when one wants one to use `FakeBoot` driver as a part of its own driver (for test purposes or in case of standalone Ironic with static PXE configuration). The reason why it works now is, no one use `FakeBoot` without other fake parts, so `prepare_ramdisk` is never called. [1]: http://git.openstack.org/cgit/openstack/ironic/tree/ironic/drivers/base.py#n416 Change-Id: I0a55bfaf8aa58747ad2cfcd14ec84430d139e68c --- ironic/drivers/modules/fake.py | 2 +- ironic/tests/unit/drivers/test_fake.py | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/ironic/drivers/modules/fake.py b/ironic/drivers/modules/fake.py index 2497cb8fc..bb2aab875 100644 --- a/ironic/drivers/modules/fake.py +++ b/ironic/drivers/modules/fake.py @@ -63,7 +63,7 @@ class FakeBoot(base.BootInterface): def validate(self, task): pass - def prepare_ramdisk(self, task): + def prepare_ramdisk(self, task, ramdisk_params): pass def clean_up_ramdisk(self, task): diff --git a/ironic/tests/unit/drivers/test_fake.py b/ironic/tests/unit/drivers/test_fake.py index 68acc5942..c0a849f5a 100644 --- a/ironic/tests/unit/drivers/test_fake.py +++ b/ironic/tests/unit/drivers/test_fake.py @@ -43,9 +43,9 @@ class FakeDriverTestCase(db_base.DbTestCase): self.task.driver = self.driver def test_driver_interfaces(self): - # fake driver implements only 5 out of 6 interfaces self.assertIsInstance(self.driver.power, driver_base.PowerInterface) self.assertIsInstance(self.driver.deploy, driver_base.DeployInterface) + self.assertIsInstance(self.driver.boot, driver_base.BootInterface) 
self.assertIsInstance(self.driver.vendor, driver_base.VendorInterface) self.assertIsInstance(self.driver.console, driver_base.ConsoleInterface) @@ -78,6 +78,14 @@ class FakeDriverTestCase(db_base.DbTestCase): self.driver.deploy.clean_up(None) self.driver.deploy.tear_down(None) + def test_boot_interface(self): + self.assertEqual({}, self.driver.boot.get_properties()) + self.driver.boot.validate(self.task) + self.driver.boot.prepare_ramdisk(self.task, {}) + self.driver.boot.clean_up_ramdisk(self.task) + self.driver.boot.prepare_instance(self.task) + self.driver.boot.clean_up_instance(self.task) + def test_console_interface(self): self.assertEqual({}, self.driver.console.get_properties()) self.driver.console.validate(self.task) From 15c366580a18c77963d75cbc37e0805c96bc2a50 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Tue, 5 Apr 2016 21:10:49 +0000 Subject: [PATCH 082/166] Metric chassis, driver, node, and port API calls This change adds initial metrics for Ironic based on new support in ironic-lib. Emits timing metrics for basic Ironic API calls. Bumps ironic-lib to 2.0.0 in requirements to add metrics support, as well as adding ironic_lib.metrics and ironic_lib.metrics_statds to ironic-config-generator.conf to get them in the sample config, which is also regenerated. 
Change-Id: Ic35802e4cd11763ebbedb8ddc28f7e8dc535cc2f Partial-bug: #1526219 --- etc/ironic/ironic.conf.sample | 68 +++++++++++++++++-- ironic/api/controllers/v1/chassis.py | 9 +++ ironic/api/controllers/v1/driver.py | 9 +++ ironic/api/controllers/v1/node.py | 23 +++++++ ironic/api/controllers/v1/port.py | 9 +++ ...etrics-for-api-calls-69f18fd1b9d54b05.yaml | 5 ++ requirements.txt | 2 +- tools/config/ironic-config-generator.conf | 2 + 8 files changed, 121 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/emit-metrics-for-api-calls-69f18fd1b9d54b05.yaml diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 1ff5b0974..31063978d 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -135,6 +135,7 @@ # is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) +# Note: This option can be changed without restarting. # Deprecated group/name - [DEFAULT]/log_config #log_config_append = @@ -713,8 +714,12 @@ # From oslo.db # -# The file name to use with SQLite. (string value) +# DEPRECATED: The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Should use config option connection or +# slave_connection to connect the database. #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) @@ -902,17 +907,18 @@ # Size of EFI system partition in MiB when configuring UEFI # systems for local boot. (integer value) -# Deprecated group/name - [deploy]/efi_system_partition_size #efi_system_partition_size = 200 +# Size of BIOS Boot partition in MiB when configuring GPT +# partitioned systems for local boot in BIOS. (integer value) +#bios_boot_partition_size = 1 + # Block size to use when writing to the nodes disk. 
(string # value) -# Deprecated group/name - [deploy]/dd_block_size #dd_block_size = 1M # Maximum attempts to verify an iSCSI connection is active, # sleeping 1 second between attempts. (integer value) -# Deprecated group/name - [deploy]/iscsi_verify_attempts #iscsi_verify_attempts = 3 @@ -1271,7 +1277,16 @@ # From keystonemiddleware.auth_token # -# Complete public Identity API endpoint. (string value) +# Complete "public" Identity API endpoint. This endpoint +# should not be an "admin" endpoint, as it should be +# accessible by all end users. Unauthenticated clients are +# redirected to this endpoint to authenticate. Although this +# endpoint should ideally be unversioned, client support in +# the wild varies. If you're using a versioned v2 endpoint +# here, then this should *not* be the same endpoint the +# service user utilizes for validating tokens, because normal +# end users may not be able to reach that endpoint. (string +# value) #auth_uri = # API version of the admin Identity API endpoint. (string @@ -1470,6 +1485,49 @@ #socket_timeout = 10000 +[metrics] + +# +# From ironic_lib.metrics +# + +# Backend to use for the metrics system. (string value) +# Allowed values: noop, statsd +#backend = noop + +# Prepend the hostname to all metric names. The format of +# metric names is +# [global_prefix.][host_name.]prefix.metric_name. (boolean +# value) +#prepend_host = false + +# Split the prepended host value by "." and reverse it (to +# better match the reverse hierarchical form of domain names). +# (boolean value) +#prepend_host_reverse = true + +# Prefix all metric names with this value. By default, there +# is no global prefix. The format of metric names is +# [global_prefix.][host_name.]prefix.metric_name. (string +# value) +#global_prefix = + + +[metrics_statsd] + +# +# From ironic_lib.metrics_statsd +# + +# Host for use with the statsd backend. (string value) +#statsd_host = localhost + +# Port to use with the statsd backend. 
(port value) +# Minimum value: 0 +# Maximum value: 65535 +#statsd_port = 8125 + + [neutron] # diff --git a/ironic/api/controllers/v1/chassis.py b/ironic/api/controllers/v1/chassis.py index 4c5ffe5e3..e43841b5f 100644 --- a/ironic/api/controllers/v1/chassis.py +++ b/ironic/api/controllers/v1/chassis.py @@ -15,6 +15,7 @@ import datetime +from ironic_lib import metrics_utils import pecan from pecan import rest from six.moves import http_client @@ -32,6 +33,8 @@ from ironic.common import exception from ironic.common.i18n import _ from ironic import objects +METRICS = metrics_utils.get_metrics_logger(__name__) + _DEFAULT_RETURN_FIELDS = ('uuid', 'description') @@ -190,6 +193,7 @@ class ChassisController(rest.RestController): sort_key=sort_key, sort_dir=sort_dir) + @METRICS.timer('ChassisController.get_all') @expose.expose(ChassisCollection, types.uuid, int, wtypes.text, wtypes.text, types.listtype) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', @@ -209,6 +213,7 @@ class ChassisController(rest.RestController): return self._get_chassis_collection(marker, limit, sort_key, sort_dir, fields=fields) + @METRICS.timer('ChassisController.detail') @expose.expose(ChassisCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): @@ -228,6 +233,7 @@ class ChassisController(rest.RestController): return self._get_chassis_collection(marker, limit, sort_key, sort_dir, resource_url) + @METRICS.timer('ChassisController.get_one') @expose.expose(Chassis, types.uuid, types.listtype) def get_one(self, chassis_uuid, fields=None): """Retrieve information about the given chassis. @@ -241,6 +247,7 @@ class ChassisController(rest.RestController): chassis_uuid) return Chassis.convert_with_links(rpc_chassis, fields=fields) + @METRICS.timer('ChassisController.post') @expose.expose(Chassis, body=Chassis, status_code=http_client.CREATED) def post(self, chassis): """Create a new chassis. 
@@ -254,6 +261,7 @@ class ChassisController(rest.RestController): pecan.response.location = link.build_url('chassis', new_chassis.uuid) return Chassis.convert_with_links(new_chassis) + @METRICS.timer('ChassisController.patch') @wsme.validate(types.uuid, [ChassisPatchType]) @expose.expose(Chassis, types.uuid, body=[ChassisPatchType]) def patch(self, chassis_uuid, patch): @@ -286,6 +294,7 @@ class ChassisController(rest.RestController): rpc_chassis.save() return Chassis.convert_with_links(rpc_chassis) + @METRICS.timer('ChassisController.delete') @expose.expose(None, types.uuid, status_code=http_client.NO_CONTENT) def delete(self, chassis_uuid): """Delete a chassis. diff --git a/ironic/api/controllers/v1/driver.py b/ironic/api/controllers/v1/driver.py index f84c1aad2..4d327e9b2 100644 --- a/ironic/api/controllers/v1/driver.py +++ b/ironic/api/controllers/v1/driver.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +from ironic_lib import metrics_utils import pecan from pecan import rest from six.moves import http_client @@ -27,6 +28,8 @@ from ironic.api import expose from ironic.common import exception +METRICS = metrics_utils.get_metrics_logger(__name__) + # Property information for drivers: # key = driver name; # value = dictionary of properties of that driver: @@ -139,6 +142,7 @@ class DriverPassthruController(rest.RestController): 'methods': ['GET'] } + @METRICS.timer('DriverPassthruController.methods') @expose.expose(wtypes.text, wtypes.text) def methods(self, driver_name): """Retrieve information about vendor methods of the given driver. 
@@ -157,6 +161,7 @@ class DriverPassthruController(rest.RestController): return _VENDOR_METHODS[driver_name] + @METRICS.timer('DriverPassthruController._default') @expose.expose(wtypes.text, wtypes.text, wtypes.text, body=wtypes.text) def _default(self, driver_name, method, data=None): @@ -178,6 +183,7 @@ class DriverRaidController(rest.RestController): 'logical_disk_properties': ['GET'] } + @METRICS.timer('DriverRaidController.logical_disk_properties') @expose.expose(types.jsontype, wtypes.text) def logical_disk_properties(self, driver_name): """Returns the logical disk properties for the driver. @@ -222,6 +228,7 @@ class DriversController(rest.RestController): 'properties': ['GET'], } + @METRICS.timer('DriversController.get_all') @expose.expose(DriverList) def get_all(self): """Retrieve a list of drivers.""" @@ -232,6 +239,7 @@ class DriversController(rest.RestController): driver_list = pecan.request.dbapi.get_active_driver_dict() return DriverList.convert_with_links(driver_list) + @METRICS.timer('DriversController.get_one') @expose.expose(Driver, wtypes.text) def get_one(self, driver_name): """Retrieve a single driver.""" @@ -247,6 +255,7 @@ class DriversController(rest.RestController): raise exception.DriverNotFound(driver_name=driver_name) + @METRICS.timer('DriversController.properties') @expose.expose(wtypes.text, wtypes.text) def properties(self, driver_name): """Retrieve property information of the given driver. 
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py index 303bbd3ca..bd0978987 100644 --- a/ironic/api/controllers/v1/node.py +++ b/ironic/api/controllers/v1/node.py @@ -16,6 +16,7 @@ import ast import datetime +from ironic_lib import metrics_utils import jsonschema from oslo_config import cfg from oslo_log import log @@ -78,6 +79,8 @@ _CLEAN_STEPS_SCHEMA = { } } +METRICS = metrics_utils.get_metrics_logger(__name__) + # Vendor information for node's driver: # key = driver name; # value = dictionary of node vendor methods of that driver: @@ -167,6 +170,7 @@ class BootDeviceController(rest.RestController): return pecan.request.rpcapi.get_boot_device(pecan.request.context, rpc_node.uuid, topic) + @METRICS.timer('BootDeviceController.put') @expose.expose(None, types.uuid_or_name, wtypes.text, types.boolean, status_code=http_client.NO_CONTENT) def put(self, node_ident, boot_device, persistent=False): @@ -190,6 +194,7 @@ class BootDeviceController(rest.RestController): persistent=persistent, topic=topic) + @METRICS.timer('BootDeviceController.get') @expose.expose(wtypes.text, types.uuid_or_name) def get(self, node_ident): """Get the current boot device for a node. @@ -205,6 +210,7 @@ class BootDeviceController(rest.RestController): """ return self._get_boot_device(node_ident) + @METRICS.timer('BootDeviceController.supported') @expose.expose(wtypes.text, types.uuid_or_name) def supported(self, node_ident): """Get a list of the supported boot devices. @@ -242,6 +248,7 @@ class ConsoleInfo(base.APIBase): class NodeConsoleController(rest.RestController): + @METRICS.timer('NodeConsoleController.get') @expose.expose(ConsoleInfo, types.uuid_or_name) def get(self, node_ident): """Get connection information about the console. 
@@ -260,6 +267,7 @@ class NodeConsoleController(rest.RestController): return ConsoleInfo(console_enabled=console_state, console_info=console) + @METRICS.timer('NodeConsoleController.put') @expose.expose(None, types.uuid_or_name, types.boolean, status_code=http_client.ACCEPTED) def put(self, node_ident, enabled): @@ -350,6 +358,7 @@ class NodeStatesController(rest.RestController): console = NodeConsoleController() """Expose console as a sub-element of states""" + @METRICS.timer('NodeStatesController.get') @expose.expose(NodeStates, types.uuid_or_name) def get(self, node_ident): """List the states of the node. @@ -362,6 +371,7 @@ class NodeStatesController(rest.RestController): rpc_node = api_utils.get_rpc_node(node_ident) return NodeStates.convert(rpc_node) + @METRICS.timer('NodeStatesController.raid') @expose.expose(None, types.uuid_or_name, body=types.jsontype) def raid(self, node_ident, target_raid_config): """Set the target raid config of the node. @@ -390,6 +400,7 @@ class NodeStatesController(rest.RestController): e.code = http_client.NOT_FOUND raise + @METRICS.timer('NodeStatesController.power') @expose.expose(None, types.uuid_or_name, wtypes.text, status_code=http_client.ACCEPTED) def power(self, node_ident, target): @@ -429,6 +440,7 @@ class NodeStatesController(rest.RestController): url_args = '/'.join([node_ident, 'states']) pecan.response.location = link.build_url('nodes', url_args) + @METRICS.timer('NodeStatesController.provision') @expose.expose(None, types.uuid_or_name, wtypes.text, wtypes.text, types.jsontype, status_code=http_client.ACCEPTED) @@ -860,6 +872,7 @@ class NodeVendorPassthruController(rest.RestController): 'methods': ['GET'] } + @METRICS.timer('NodeVendorPassthruController.methods') @expose.expose(wtypes.text, types.uuid_or_name) def methods(self, node_ident): """Retrieve information about vendor methods of the given node. 
@@ -880,6 +893,7 @@ class NodeVendorPassthruController(rest.RestController): return _VENDOR_METHODS[rpc_node.driver] + @METRICS.timer('NodeVendorPassthruController._default') @expose.expose(wtypes.text, types.uuid_or_name, wtypes.text, body=wtypes.text) def _default(self, node_ident, method, data=None): @@ -911,6 +925,7 @@ class NodeMaintenanceController(rest.RestController): pecan.request.rpcapi.update_node(pecan.request.context, rpc_node, topic=topic) + @METRICS.timer('NodeMaintenanceController.put') @expose.expose(None, types.uuid_or_name, wtypes.text, status_code=http_client.ACCEPTED) def put(self, node_ident, reason=None): @@ -922,6 +937,7 @@ class NodeMaintenanceController(rest.RestController): """ self._set_maintenance(node_ident, True, reason=reason) + @METRICS.timer('NodeMaintenanceController.delete') @expose.expose(None, types.uuid_or_name, status_code=http_client.ACCEPTED) def delete(self, node_ident): """Remove the node from maintenance mode. @@ -1096,6 +1112,7 @@ class NodesController(rest.RestController): "enabled. Please stop the console first.") % node_ident, status_code=http_client.CONFLICT) + @METRICS.timer('NodesController.get_all') @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean, types.boolean, wtypes.text, types.uuid, int, wtypes.text, wtypes.text, wtypes.text, types.listtype) @@ -1137,6 +1154,7 @@ class NodesController(rest.RestController): limit, sort_key, sort_dir, driver, fields=fields) + @METRICS.timer('NodesController.detail') @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean, types.boolean, wtypes.text, types.uuid, int, wtypes.text, wtypes.text, wtypes.text) @@ -1178,6 +1196,7 @@ class NodesController(rest.RestController): limit, sort_key, sort_dir, driver, resource_url) + @METRICS.timer('NodesController.validate') @expose.expose(wtypes.text, types.uuid_or_name, types.uuid) def validate(self, node=None, node_uuid=None): """Validate the driver interfaces, using the node's UUID or name. 
@@ -1201,6 +1220,7 @@ class NodesController(rest.RestController): return pecan.request.rpcapi.validate_driver_interfaces( pecan.request.context, rpc_node.uuid, topic) + @METRICS.timer('NodesController.get_one') @expose.expose(Node, types.uuid_or_name, types.listtype) def get_one(self, node_ident, fields=None): """Retrieve information about the given node. @@ -1217,6 +1237,7 @@ class NodesController(rest.RestController): rpc_node = api_utils.get_rpc_node(node_ident) return Node.convert_with_links(rpc_node, fields=fields) + @METRICS.timer('NodesController.post') @expose.expose(Node, body=Node, status_code=http_client.CREATED) def post(self, node): """Create a new node. @@ -1254,6 +1275,7 @@ class NodesController(rest.RestController): pecan.response.location = link.build_url('nodes', new_node.uuid) return Node.convert_with_links(new_node) + @METRICS.timer('NodesController.patch') @wsme.validate(types.uuid, [NodePatchType]) @expose.expose(Node, types.uuid_or_name, body=[NodePatchType]) def patch(self, node_ident, patch): @@ -1316,6 +1338,7 @@ class NodesController(rest.RestController): return Node.convert_with_links(new_node) + @METRICS.timer('NodesController.delete') @expose.expose(None, types.uuid_or_name, status_code=http_client.NO_CONTENT) def delete(self, node_ident): diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py index b5f332c2c..b764def95 100644 --- a/ironic/api/controllers/v1/port.py +++ b/ironic/api/controllers/v1/port.py @@ -15,6 +15,7 @@ import datetime +from ironic_lib import metrics_utils from oslo_utils import uuidutils import pecan from pecan import rest @@ -32,6 +33,8 @@ from ironic.common import exception from ironic.common.i18n import _ from ironic import objects +METRICS = metrics_utils.get_metrics_logger(__name__) + _DEFAULT_RETURN_FIELDS = ('uuid', 'address') @@ -263,6 +266,7 @@ class PortsController(rest.RestController): except exception.PortNotFound: return [] + @METRICS.timer('PortsController.get_all') 
@expose.expose(PortCollection, types.uuid_or_name, types.uuid, types.macaddress, types.uuid, int, wtypes.text, wtypes.text, types.listtype) @@ -302,6 +306,7 @@ class PortsController(rest.RestController): limit, sort_key, sort_dir, fields=fields) + @METRICS.timer('PortsController.detail') @expose.expose(PortCollection, types.uuid_or_name, types.uuid, types.macaddress, types.uuid, int, wtypes.text, wtypes.text) @@ -341,6 +346,7 @@ class PortsController(rest.RestController): limit, sort_key, sort_dir, resource_url) + @METRICS.timer('PortsController.get_one') @expose.expose(Port, types.uuid, types.listtype) def get_one(self, port_uuid, fields=None): """Retrieve information about the given port. @@ -357,6 +363,7 @@ class PortsController(rest.RestController): rpc_port = objects.Port.get_by_uuid(pecan.request.context, port_uuid) return Port.convert_with_links(rpc_port, fields=fields) + @METRICS.timer('PortsController.post') @expose.expose(Port, body=Port, status_code=http_client.CREATED) def post(self, port): """Create a new port. @@ -373,6 +380,7 @@ class PortsController(rest.RestController): pecan.response.location = link.build_url('ports', new_port.uuid) return Port.convert_with_links(new_port) + @METRICS.timer('PortsController.patch') @wsme.validate(types.uuid, [PortPatchType]) @expose.expose(Port, types.uuid, body=[PortPatchType]) def patch(self, port_uuid, patch): @@ -417,6 +425,7 @@ class PortsController(rest.RestController): return Port.convert_with_links(new_port) + @METRICS.timer('PortsController.delete') @expose.expose(None, types.uuid, status_code=http_client.NO_CONTENT) def delete(self, port_uuid): """Delete a port. 
diff --git a/releasenotes/notes/emit-metrics-for-api-calls-69f18fd1b9d54b05.yaml b/releasenotes/notes/emit-metrics-for-api-calls-69f18fd1b9d54b05.yaml new file mode 100644 index 000000000..2397950fd --- /dev/null +++ b/releasenotes/notes/emit-metrics-for-api-calls-69f18fd1b9d54b05.yaml @@ -0,0 +1,5 @@ +--- +features: + - With this change, ironic now emits timing metrics + for all API methods to statsd, if enabled by config + in the [metrics] and [metrics_statsd] sections. diff --git a/requirements.txt b/requirements.txt index 3264502f8..61ea5200a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ paramiko>=2.0 # LGPLv2.1+ python-neutronclient>=4.2.0 # Apache-2.0 python-glanceclient>=2.0.0 # Apache-2.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.7.0 # Apache-2.0 -ironic-lib>=1.3.0 # Apache-2.0 +ironic-lib>=2.0.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 pytz>=2013.6 # MIT stevedore>=1.10.0 # Apache-2.0 diff --git a/tools/config/ironic-config-generator.conf b/tools/config/ironic-config-generator.conf index 4196cda88..088c234bf 100644 --- a/tools/config/ironic-config-generator.conf +++ b/tools/config/ironic-config-generator.conf @@ -4,6 +4,8 @@ wrap_width = 62 namespace = ironic namespace = ironic_lib.disk_utils namespace = ironic_lib.disk_partitioner +namespace = ironic_lib.metrics +namespace = ironic_lib.metrics_statsd namespace = ironic_lib.utils namespace = oslo.db namespace = oslo.messaging From dac5aee05d6f04f0828e2630357398b93132f81e Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Fri, 15 Jul 2016 20:58:47 +0000 Subject: [PATCH 083/166] Add py35 to tox environments Since we're running python 3.5 tests now, we should also be running thme when running all tests with just `tox`. 
Change-Id: I6ec077878906fd48e9e4dc654c63da3dbc83173b --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index beb8a7182..ef60d136e 100644 --- a/tox.ini +++ b/tox.ini @@ -1,7 +1,7 @@ [tox] minversion = 1.8 skipsdist = True -envlist = py34,py27,pep8 +envlist = py35,py34,py27,pep8 [testenv] usedevelop = True From 6f0aea966b49ec4c1470d18e91fb2bf95cfb5963 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Fri, 15 Apr 2016 15:51:42 -0700 Subject: [PATCH 084/166] Centralize config options - [DEFAULT] Nova style refactor of config options in Ironic. Change-Id: I6baebf1bbc829238441ddd1399f6487fad33a15e Partial-Bug: #1561100 --- etc/ironic/ironic.conf.sample | 37 +++-- ironic/api/app.py | 24 +-- ironic/cmd/dbsync.py | 4 +- ironic/common/driver_factory.py | 41 +----- ironic/common/exception.py | 15 +- ironic/common/hash_ring.py | 33 +---- ironic/common/images.py | 23 +-- ironic/common/paths.py | 22 +-- ironic/common/service.py | 18 +-- ironic/common/utils.py | 17 +-- ironic/conf/__init__.py | 2 + ironic/conf/default.py | 203 ++++++++++++++++++++++++++ ironic/conf/opts.py | 31 ++-- ironic/drivers/base.py | 4 - ironic/drivers/modules/agent.py | 1 - ironic/drivers/modules/image_cache.py | 13 +- ironic/netconf.py | 34 ----- ironic/tests/base.py | 5 +- 18 files changed, 247 insertions(+), 280 deletions(-) create mode 100644 ironic/conf/default.py diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 10e7b8ac7..397e4ceb3 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -37,7 +37,7 @@ # recommended set of production-oriented network interfaces. A # complete list of network interfaces present on your system # may be found by enumerating the -# "ironic.hardware.interfaces.network" entrypoint.This value +# "ironic.hardware.interfaces.network" entrypoint. 
This value # must be the same on all ironic-conductor and ironic-api # services, because it is used by ironic-api service to # validate a new or updated node's network_interface value. @@ -96,6 +96,15 @@ # Template file for grub configuration file. (string value) #grub_config_template = $pybasedir/common/grub_conf.template +# Run image downloads and raw format conversions in parallel. +# (boolean value) +#parallel_image_downloads = false + +# IP address of this host. If unset, will determine the IP +# programmatically. If unable to do so, will use "127.0.0.1". +# (string value) +#my_ip = 127.0.0.1 + # Directory where the ironic python module is installed. # (string value) #pybasedir = /usr/lib/python/site-packages/ironic/ironic @@ -123,15 +132,6 @@ # (string value) #tempdir = /tmp -# Run image downloads and raw format conversions in parallel. -# (boolean value) -#parallel_image_downloads = false - -# IP address of this host. If unset, will determine the IP -# programmatically. If unable to do so, will use "127.0.0.1". -# (string value) -#my_ip = 127.0.0.1 - # # From oslo.log # @@ -156,6 +156,7 @@ # is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) +# Note: This option can be changed without restarting. # Deprecated group/name - [DEFAULT]/log_config #log_config_append = @@ -635,13 +636,13 @@ # From ironic # -# Path to serial console terminal program. Used only by -# Shell In A Box console. (string value) +# Path to serial console terminal program. Used only by Shell +# In A Box console. (string value) #terminal = shellinaboxd -# Directory containing the terminal SSL cert(PEM) for serial -# console access. Used only by Shell In A Box console. -# (string value) +# Directory containing the terminal SSL cert (PEM) for serial +# console access. Used only by Shell In A Box console. (string +# value) #terminal_cert_dir = # Directory for holding terminal pid files. 
If not specified, @@ -736,8 +737,12 @@ # From oslo.db # -# The file name to use with SQLite. (string value) +# DEPRECATED: The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Should use config option connection or +# slave_connection to connect the database. #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) diff --git a/ironic/api/app.py b/ironic/api/app.py index b390334e1..f81b3e6c6 100644 --- a/ironic/api/app.py +++ b/ironic/api/app.py @@ -24,29 +24,7 @@ from ironic.api import config from ironic.api.controllers.base import Version from ironic.api import hooks from ironic.api import middleware -from ironic.common.i18n import _ - -api_opts = [ - cfg.StrOpt( - 'auth_strategy', - default='keystone', - choices=['noauth', 'keystone'], - help=_('Authentication strategy used by ironic-api. "noauth" should ' - 'not be used in a production environment because all ' - 'authentication will be disabled.')), - cfg.BoolOpt('debug_tracebacks_in_api', - default=False, - help=_('Return server tracebacks in the API response for any ' - 'error responses. WARNING: this is insecure ' - 'and should not be used in a production environment.')), - cfg.BoolOpt('pecan_debug', - default=False, - help=_('Enable pecan debug mode. 
WARNING: this is insecure ' - 'and should not be used in a production environment.')), -] - -CONF = cfg.CONF -CONF.register_opts(api_opts) +from ironic.conf import CONF def get_pecan_config(): diff --git a/ironic/cmd/dbsync.py b/ironic/cmd/dbsync.py index 732be6c51..601737a7b 100644 --- a/ironic/cmd/dbsync.py +++ b/ironic/cmd/dbsync.py @@ -25,12 +25,10 @@ from oslo_config import cfg from ironic.common.i18n import _ from ironic.common import service +from ironic.conf import CONF from ironic.db import migration -CONF = cfg.CONF - - class DBCommand(object): def upgrade(self): diff --git a/ironic/common/driver_factory.py b/ironic/common/driver_factory.py index 28906f464..98800c44c 100644 --- a/ironic/common/driver_factory.py +++ b/ironic/common/driver_factory.py @@ -16,57 +16,18 @@ import collections from oslo_concurrency import lockutils -from oslo_config import cfg from oslo_log import log from stevedore import dispatch from ironic.common import exception -from ironic.common.i18n import _ from ironic.common.i18n import _LI from ironic.common.i18n import _LW +from ironic.conf import CONF from ironic.drivers import base as driver_base LOG = log.getLogger(__name__) -driver_opts = [ - cfg.ListOpt('enabled_drivers', - default=['pxe_ipmitool'], - help=_('Specify the list of drivers to load during service ' - 'initialization. Missing drivers, or drivers which ' - 'fail to initialize, will prevent the conductor ' - 'service from starting. The option default is a ' - 'recommended set of production-oriented drivers. A ' - 'complete list of drivers present on your system may ' - 'be found by enumerating the "ironic.drivers" ' - 'entrypoint. An example may be found in the ' - 'developer documentation online.')), - cfg.ListOpt('enabled_network_interfaces', - default=['flat', 'noop'], - help=_('Specify the list of network interfaces to load during ' - 'service initialization. 
Missing network interfaces, ' - 'or network interfaces which fail to initialize, will ' - 'prevent the conductor service from starting. The ' - 'option default is a recommended set of ' - 'production-oriented network interfaces. A complete ' - 'list of network interfaces present on your system may ' - 'be found by enumerating the ' - '"ironic.hardware.interfaces.network" entrypoint. ' - 'This value must be the same on all ironic-conductor ' - 'and ironic-api services, because it is used by ' - 'ironic-api service to validate a new or updated ' - 'node\'s network_interface value.')), - cfg.StrOpt('default_network_interface', - help=_('Default network interface to be used for nodes that ' - 'do not have network_interface field set. A complete ' - 'list of network interfaces present on your system may ' - 'be found by enumerating the ' - '"ironic.hardware.interfaces.network" entrypoint.')) -] - -CONF = cfg.CONF -CONF.register_opts(driver_opts) - EM_SEMAPHORE = 'extension_manager' diff --git a/ironic/common/exception.py b/ironic/common/exception.py index 10d3ac408..f93305d21 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -20,29 +20,16 @@ SHOULD include dedicated exception logging. """ -from oslo_config import cfg from oslo_log import log as logging import six from six.moves import http_client from ironic.common.i18n import _ from ironic.common.i18n import _LE - +from ironic.conf import CONF LOG = logging.getLogger(__name__) -exc_log_opts = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help=_('Used if there is a formatting error when generating ' - 'an exception message (a programming error). 
If True, ' - 'raise an exception; if False, use the unformatted ' - 'message.')), -] - -CONF = cfg.CONF -CONF.register_opts(exc_log_opts) - class IronicException(Exception): """Base Ironic Exception diff --git a/ironic/common/hash_ring.py b/ironic/common/hash_ring.py index 20304d394..bf58a1cda 100644 --- a/ironic/common/hash_ring.py +++ b/ironic/common/hash_ring.py @@ -18,44 +18,13 @@ import hashlib import threading import time -from oslo_config import cfg import six from ironic.common import exception from ironic.common.i18n import _ +from ironic.conf import CONF from ironic.db import api as dbapi -hash_opts = [ - cfg.IntOpt('hash_partition_exponent', - default=5, - help=_('Exponent to determine number of hash partitions to use ' - 'when distributing load across conductors. Larger ' - 'values will result in more even distribution of load ' - 'and less load when rebalancing the ring, but more ' - 'memory usage. Number of partitions per conductor is ' - '(2^hash_partition_exponent). This determines the ' - 'granularity of rebalancing: given 10 hosts, and an ' - 'exponent of the 2, there are 40 partitions in the ring.' - 'A few thousand partitions should make rebalancing ' - 'smooth in most cases. The default is suitable for up ' - 'to a few hundred conductors. Too many partitions has a ' - 'CPU impact.')), - cfg.IntOpt('hash_distribution_replicas', - default=1, - help=_('[Experimental Feature] ' - 'Number of hosts to map onto each hash partition. ' - 'Setting this to more than one will cause additional ' - 'conductor services to prepare deployment environments ' - 'and potentially allow the Ironic cluster to recover ' - 'more quickly if a conductor instance is terminated.')), - cfg.IntOpt('hash_ring_reset_interval', - default=180, - help=_('Interval (in seconds) between hash ring resets.')), -] - -CONF = cfg.CONF -CONF.register_opts(hash_opts) - class HashRing(object): """A stable hash ring. 
diff --git a/ironic/common/images.py b/ironic/common/images.py index 682a1a7a2..2505ea53e 100644 --- a/ironic/common/images.py +++ b/ironic/common/images.py @@ -26,7 +26,6 @@ from ironic_lib import disk_utils from ironic_lib import utils as ironic_utils import jinja2 from oslo_concurrency import processutils -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils @@ -35,31 +34,11 @@ from ironic.common.glance_service import service_utils as glance_utils from ironic.common.i18n import _ from ironic.common.i18n import _LE from ironic.common import image_service as service -from ironic.common import paths from ironic.common import utils +from ironic.conf import CONF LOG = logging.getLogger(__name__) -image_opts = [ - cfg.BoolOpt('force_raw_images', - default=True, - help=_('If True, convert backing images to "raw" disk image ' - 'format.')), - cfg.StrOpt('isolinux_bin', - default='/usr/lib/syslinux/isolinux.bin', - help=_('Path to isolinux binary file.')), - cfg.StrOpt('isolinux_config_template', - default=paths.basedir_def('common/isolinux_config.template'), - help=_('Template file for isolinux configuration file.')), - cfg.StrOpt('grub_config_template', - default=paths.basedir_def('common/grub_conf.template'), - help=_('Template file for grub configuration file.')), -] - - -CONF = cfg.CONF -CONF.register_opts(image_opts) - def _create_root_fs(root_directory, files_info): """Creates a filesystem root in given directory. 
diff --git a/ironic/common/paths.py b/ironic/common/paths.py index c35d3f495..b4118ba36 100644 --- a/ironic/common/paths.py +++ b/ironic/common/paths.py @@ -17,27 +17,7 @@ import os -from oslo_config import cfg - -from ironic.common.i18n import _ - -path_opts = [ - cfg.StrOpt('pybasedir', - default=os.path.abspath(os.path.join(os.path.dirname(__file__), - '../')), - sample_default='/usr/lib/python/site-packages/ironic/ironic', - help=_('Directory where the ironic python module is ' - 'installed.')), - cfg.StrOpt('bindir', - default='$pybasedir/bin', - help=_('Directory where ironic binaries are installed.')), - cfg.StrOpt('state_path', - default='$pybasedir', - help=_("Top-level directory for maintaining ironic's state.")), -] - -CONF = cfg.CONF -CONF.register_opts(path_opts) +from ironic.conf import CONF def basedir_def(*args): diff --git a/ironic/common/service.py b/ironic/common/service.py index fa79f8cbd..e2d4d3c45 100644 --- a/ironic/common/service.py +++ b/ironic/common/service.py @@ -15,10 +15,8 @@ # under the License. import signal -import socket from oslo_concurrency import processutils -from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging from oslo_service import service @@ -33,26 +31,12 @@ from ironic.common.i18n import _ from ironic.common.i18n import _LE from ironic.common.i18n import _LI from ironic.common import rpc +from ironic.conf import CONF from ironic import objects from ironic.objects import base as objects_base - -service_opts = [ - cfg.StrOpt('host', - default=socket.getfqdn(), - sample_default='localhost', - help=_('Name of this node. This can be an opaque identifier. ' - 'It is not necessarily a hostname, FQDN, or IP address. 
' - 'However, the node name must be valid within ' - 'an AMQP key, and if using ZeroMQ, a valid ' - 'hostname, FQDN, or IP address.')), -] - -CONF = cfg.CONF LOG = log.getLogger(__name__) -CONF.register_opts(service_opts) - class RPCService(service.Service): diff --git a/ironic/common/utils.py b/ironic/common/utils.py index ac2642e9c..e6daf7027 100644 --- a/ironic/common/utils.py +++ b/ironic/common/utils.py @@ -30,7 +30,6 @@ import tempfile import netaddr from oslo_concurrency import processutils -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils import paramiko @@ -41,21 +40,7 @@ from ironic.common import exception from ironic.common.i18n import _ from ironic.common.i18n import _LE from ironic.common.i18n import _LW - -utils_opts = [ - cfg.StrOpt('rootwrap_config', - default="/etc/ironic/rootwrap.conf", - help=_('Path to the rootwrap configuration file to use for ' - 'running commands as root.')), - cfg.StrOpt('tempdir', - default=tempfile.gettempdir(), - sample_default='/tmp', - help=_('Temporary working directory, default is Python temp ' - 'dir.')), -] - -CONF = cfg.CONF -CONF.register_opts(utils_opts) +from ironic.conf import CONF LOG = logging.getLogger(__name__) diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index aaccf4933..d172c9205 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -20,6 +20,7 @@ from ironic.conf import cisco_ucs from ironic.conf import conductor from ironic.conf import console from ironic.conf import database +from ironic.conf import default from ironic.conf import dhcp from ironic.conf import glance from ironic.conf import iboot @@ -43,6 +44,7 @@ cisco_ucs.register_opts(CONF) conductor.register_opts(CONF) console.register_opts(CONF) database.register_opts(CONF) +default.register_opts(CONF) dhcp.register_opts(CONF) glance.register_opts(CONF) iboot.register_opts(CONF) diff --git a/ironic/conf/default.py b/ironic/conf/default.py new file mode 100644 index 
000000000..8e4a94309 --- /dev/null +++ b/ironic/conf/default.py @@ -0,0 +1,203 @@ +# Copyright 2016 Intel Corporation +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# Copyright 2013 Red Hat, Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import socket +import tempfile + +from oslo_config import cfg +from oslo_utils import netutils + +from ironic.common.i18n import _ + +api_opts = [ + cfg.StrOpt( + 'auth_strategy', + default='keystone', + choices=['noauth', 'keystone'], + help=_('Authentication strategy used by ironic-api. "noauth" should ' + 'not be used in a production environment because all ' + 'authentication will be disabled.')), + cfg.BoolOpt('debug_tracebacks_in_api', + default=False, + help=_('Return server tracebacks in the API response for any ' + 'error responses. WARNING: this is insecure ' + 'and should not be used in a production environment.')), + cfg.BoolOpt('pecan_debug', + default=False, + help=_('Enable pecan debug mode. WARNING: this is insecure ' + 'and should not be used in a production environment.')), +] + +driver_opts = [ + cfg.ListOpt('enabled_drivers', + default=['pxe_ipmitool'], + help=_('Specify the list of drivers to load during service ' + 'initialization. 
Missing drivers, or drivers which ' + 'fail to initialize, will prevent the conductor ' + 'service from starting. The option default is a ' + 'recommended set of production-oriented drivers. A ' + 'complete list of drivers present on your system may ' + 'be found by enumerating the "ironic.drivers" ' + 'entrypoint. An example may be found in the ' + 'developer documentation online.')), + cfg.ListOpt('enabled_network_interfaces', + default=['flat', 'noop'], + help=_('Specify the list of network interfaces to load during ' + 'service initialization. Missing network interfaces, ' + 'or network interfaces which fail to initialize, will ' + 'prevent the conductor service from starting. The ' + 'option default is a recommended set of ' + 'production-oriented network interfaces. A complete ' + 'list of network interfaces present on your system may ' + 'be found by enumerating the ' + '"ironic.hardware.interfaces.network" entrypoint. ' + 'This value must be the same on all ironic-conductor ' + 'and ironic-api services, because it is used by ' + 'ironic-api service to validate a new or updated ' + 'node\'s network_interface value.')), + cfg.StrOpt('default_network_interface', + help=_('Default network interface to be used for nodes that ' + 'do not have network_interface field set. A complete ' + 'list of network interfaces present on your system may ' + 'be found by enumerating the ' + '"ironic.hardware.interfaces.network" entrypoint.')) +] + +exc_log_opts = [ + cfg.BoolOpt('fatal_exception_format_errors', + default=False, + help=_('Used if there is a formatting error when generating ' + 'an exception message (a programming error). If True, ' + 'raise an exception; if False, use the unformatted ' + 'message.')), +] + +hash_opts = [ + cfg.IntOpt('hash_partition_exponent', + default=5, + help=_('Exponent to determine number of hash partitions to use ' + 'when distributing load across conductors. 
Larger ' + 'values will result in more even distribution of load ' + 'and less load when rebalancing the ring, but more ' + 'memory usage. Number of partitions per conductor is ' + '(2^hash_partition_exponent). This determines the ' + 'granularity of rebalancing: given 10 hosts, and an ' + 'exponent of the 2, there are 40 partitions in the ring.' + 'A few thousand partitions should make rebalancing ' + 'smooth in most cases. The default is suitable for up ' + 'to a few hundred conductors. Too many partitions has a ' + 'CPU impact.')), + cfg.IntOpt('hash_distribution_replicas', + default=1, + help=_('[Experimental Feature] ' + 'Number of hosts to map onto each hash partition. ' + 'Setting this to more than one will cause additional ' + 'conductor services to prepare deployment environments ' + 'and potentially allow the Ironic cluster to recover ' + 'more quickly if a conductor instance is terminated.')), + cfg.IntOpt('hash_ring_reset_interval', + default=180, + help=_('Interval (in seconds) between hash ring resets.')), +] + +image_opts = [ + cfg.BoolOpt('force_raw_images', + default=True, + help=_('If True, convert backing images to "raw" disk image ' + 'format.')), + cfg.StrOpt('isolinux_bin', + default='/usr/lib/syslinux/isolinux.bin', + help=_('Path to isolinux binary file.')), + cfg.StrOpt('isolinux_config_template', + default=os.path.join('$pybasedir', + 'common/isolinux_config.template'), + help=_('Template file for isolinux configuration file.')), + cfg.StrOpt('grub_config_template', + default=os.path.join('$pybasedir', + 'common/grub_conf.template'), + help=_('Template file for grub configuration file.')), +] + +img_cache_opts = [ + cfg.BoolOpt('parallel_image_downloads', + default=False, + help=_('Run image downloads and raw format conversions in ' + 'parallel.')), +] + +netconf_opts = [ + cfg.StrOpt('my_ip', + default=netutils.get_my_ipv4(), + sample_default='127.0.0.1', + help=_('IP address of this host. 
If unset, will determine the ' + 'IP programmatically. If unable to do so, will use ' + '"127.0.0.1".')), +] + +path_opts = [ + cfg.StrOpt('pybasedir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), + '../')), + sample_default='/usr/lib/python/site-packages/ironic/ironic', + help=_('Directory where the ironic python module is ' + 'installed.')), + cfg.StrOpt('bindir', + default='$pybasedir/bin', + help=_('Directory where ironic binaries are installed.')), + cfg.StrOpt('state_path', + default='$pybasedir', + help=_("Top-level directory for maintaining ironic's state.")), +] + +service_opts = [ + cfg.StrOpt('host', + default=socket.getfqdn(), + sample_default='localhost', + help=_('Name of this node. This can be an opaque identifier. ' + 'It is not necessarily a hostname, FQDN, or IP address. ' + 'However, the node name must be valid within ' + 'an AMQP key, and if using ZeroMQ, a valid ' + 'hostname, FQDN, or IP address.')), +] + +utils_opts = [ + cfg.StrOpt('rootwrap_config', + default="/etc/ironic/rootwrap.conf", + help=_('Path to the rootwrap configuration file to use for ' + 'running commands as root.')), + cfg.StrOpt('tempdir', + default=tempfile.gettempdir(), + sample_default='/tmp', + help=_('Temporary working directory, default is Python temp ' + 'dir.')), +] + + +def register_opts(conf): + conf.register_opts(api_opts) + conf.register_opts(driver_opts) + conf.register_opts(exc_log_opts) + conf.register_opts(hash_opts) + conf.register_opts(image_opts) + conf.register_opts(img_cache_opts) + conf.register_opts(netconf_opts) + conf.register_opts(path_opts) + conf.register_opts(service_opts) + conf.register_opts(utils_opts) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 5043cb007..1be7cd349 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -13,37 +13,26 @@ import itertools import ironic.api -import ironic.api.app -import ironic.common.driver_factory -import ironic.common.exception -import ironic.common.hash_ring 
-import ironic.common.images -import ironic.common.neutron -import ironic.common.paths -import ironic.common.service -import ironic.common.utils import ironic.drivers.modules.agent import ironic.drivers.modules.agent_base_vendor import ironic.drivers.modules.agent_client import ironic.drivers.modules.amt.common import ironic.drivers.modules.amt.power import ironic.drivers.modules.deploy_utils -import ironic.drivers.modules.image_cache import ironic.drivers.modules.iscsi_deploy import ironic.drivers.modules.pxe -import ironic.netconf _default_opt_lists = [ - ironic.api.app.api_opts, - ironic.common.driver_factory.driver_opts, - ironic.common.exception.exc_log_opts, - ironic.common.hash_ring.hash_opts, - ironic.common.images.image_opts, - ironic.common.paths.path_opts, - ironic.common.service.service_opts, - ironic.common.utils.utils_opts, - ironic.drivers.modules.image_cache.img_cache_opts, - ironic.netconf.netconf_opts, + ironic.conf.default.api_opts, + ironic.conf.default.driver_opts, + ironic.conf.default.exc_log_opts, + ironic.conf.default.hash_opts, + ironic.conf.default.image_opts, + ironic.conf.default.img_cache_opts, + ironic.conf.default.netconf_opts, + ironic.conf.default.path_opts, + ironic.conf.default.service_opts, + ironic.conf.default.utils_opts, ] _opts = [ diff --git a/ironic/drivers/base.py b/ironic/drivers/base.py index b59c82e14..63e44f94b 100644 --- a/ironic/drivers/base.py +++ b/ironic/drivers/base.py @@ -24,7 +24,6 @@ import inspect import json import os -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import six @@ -39,9 +38,6 @@ RAID_CONFIG_SCHEMA = os.path.join(os.path.dirname(__file__), 'raid_config_schema.json') -CONF = cfg.CONF - - @six.add_metaclass(abc.ABCMeta) class BaseDriver(object): """Base class for all drivers. 
diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py index 2cf56809f..eddd81c08 100644 --- a/ironic/drivers/modules/agent.py +++ b/ironic/drivers/modules/agent.py @@ -65,7 +65,6 @@ agent_opts = [ ] CONF = cfg.CONF -CONF.import_opt('my_ip', 'ironic.netconf') CONF.import_opt('erase_devices_priority', 'ironic.drivers.modules.deploy_utils', group='deploy') CONF.register_opts(agent_opts, group='agent') diff --git a/ironic/drivers/modules/image_cache.py b/ironic/drivers/modules/image_cache.py index 183542590..533ce7583 100644 --- a/ironic/drivers/modules/image_cache.py +++ b/ironic/drivers/modules/image_cache.py @@ -24,33 +24,22 @@ import time import uuid from oslo_concurrency import lockutils -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils import six from ironic.common import exception from ironic.common.glance_service import service_utils -from ironic.common.i18n import _ from ironic.common.i18n import _LI from ironic.common.i18n import _LW from ironic.common import image_service from ironic.common import images from ironic.common import utils +from ironic.conf import CONF LOG = logging.getLogger(__name__) -img_cache_opts = [ - cfg.BoolOpt('parallel_image_downloads', - default=False, - help=_('Run image downloads and raw format conversions in ' - 'parallel.')), -] - -CONF = cfg.CONF -CONF.register_opts(img_cache_opts) - # This would contain a sorted list of instances of ImageCache to be # considered for cleanup. This list will be kept sorted in non-increasing # order of priority. diff --git a/ironic/netconf.py b/ironic/netconf.py index bfebd1a55..e69de29bb 100644 --- a/ironic/netconf.py +++ b/ironic/netconf.py @@ -1,34 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2012 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_utils import netutils - -from ironic.common.i18n import _ - -CONF = cfg.CONF - -netconf_opts = [ - cfg.StrOpt('my_ip', - default=netutils.get_my_ipv4(), - sample_default='127.0.0.1', - help=_('IP address of this host. If unset, will determine the ' - 'IP programmatically. If unable to do so, will use ' - '"127.0.0.1".')), -] - -CONF.register_opts(netconf_opts) diff --git a/ironic/tests/base.py b/ironic/tests/base.py index 8a205ba63..e46ec07d2 100644 --- a/ironic/tests/base.py +++ b/ironic/tests/base.py @@ -29,7 +29,6 @@ import tempfile import eventlet eventlet.monkey_patch(os=False) import fixtures -from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_log import log as logging from oslo_utils import uuidutils @@ -38,13 +37,11 @@ import testtools from ironic.common import config as ironic_config from ironic.common import context as ironic_context from ironic.common import hash_ring +from ironic.conf import CONF from ironic.objects import base as objects_base from ironic.tests.unit import policy_fixture -CONF = cfg.CONF -CONF.import_opt('host', 'ironic.common.service') -CONF.import_opt('cleaning_network_uuid', 'ironic.common.neutron', 'neutron') logging.register_options(CONF) logging.setup(CONF, 'ironic') From 598dee274c975f3544e09c9bfe4f676ff4643a49 Mon Sep 17 00:00:00 2001 From: Bharath kumar Date: Fri, 15 Jul 2016 15:20:19 +0000 Subject: [PATCH 
085/166] Following the hacking rule for string interpolation at logging String interpolation should be delayed to be handled by the logging code, rather than being done at the point of the logging call. See the oslo i18n guideline. * http://docs.openstack.org/developer/oslo.i18n/guidelines.html#adding-variables-to-log-messages * http://docs.openstack.org/developer/oslo.i18n/guidelines.html#avoid-forcing-the-translation-of-translatable-variables Change-Id: I96eaede777ca60c7ba2b83ea6968108910b62953 Closes-Bug: #1596829 --- ironic/conductor/manager.py | 11 +++++------ ironic/drivers/modules/ilo/common.py | 2 +- ironic/drivers/modules/seamicro.py | 3 +-- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py index 633c6c283..45a384715 100644 --- a/ironic/conductor/manager.py +++ b/ironic/conductor/manager.py @@ -164,8 +164,8 @@ class ConductorManager(base_manager.BaseConductorManager): """ LOG.debug("RPC change_node_power_state called for node %(node)s. " - "The desired new state is %(state)s." 
- % {'node': node_id, 'state': new_state}) + "The desired new state is %(state)s.", + {'node': node_id, 'state': new_state}) with task_manager.acquire(context, node_id, shared=False, purpose='changing node power state') as task: @@ -1580,8 +1580,7 @@ class ConductorManager(base_manager.BaseConductorManager): async task """ LOG.debug('RPC set_console_mode called for node %(node)s with ' - 'enabled %(enabled)s' % {'node': node_id, - 'enabled': enabled}) + 'enabled %(enabled)s', {'node': node_id, 'enabled': enabled}) with task_manager.acquire(context, node_id, shared=False, purpose='setting console mode') as task: @@ -2526,8 +2525,8 @@ def _do_inspect_hardware(task): if new_state == states.MANAGEABLE: task.process_event('done') - LOG.info(_LI('Successfully inspected node %(node)s') - % {'node': node.uuid}) + LOG.info(_LI('Successfully inspected node %(node)s'), + {'node': node.uuid}) elif new_state != states.INSPECTING: error = (_("During inspection, driver returned unexpected " "state %(state)s") % {'state': new_state}) diff --git a/ironic/drivers/modules/ilo/common.py b/ironic/drivers/modules/ilo/common.py index f48407a70..32c74ecdc 100644 --- a/ironic/drivers/modules/ilo/common.py +++ b/ironic/drivers/modules/ilo/common.py @@ -708,7 +708,7 @@ def verify_image_checksum(image_location, expected_checksum): LOG.error(_LE("Error opening file: %(file)s"), {'file': image_location}) raise exception.ImageRefValidationFailed(image_href=image_location, - reason=six.text_type(e)) + reason=e) if actual_checksum != expected_checksum: msg = (_('Error verifying image checksum. 
Image %(image)s failed to ' diff --git a/ironic/drivers/modules/seamicro.py b/ironic/drivers/modules/seamicro.py index 02b05378e..a57c7a163 100644 --- a/ironic/drivers/modules/seamicro.py +++ b/ironic/drivers/modules/seamicro.py @@ -24,7 +24,6 @@ import re from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import importutils -import six from six.moves.urllib import parse as urlparse from ironic.common import boot_devices @@ -564,7 +563,7 @@ class Management(base.ManagementInterface): LOG.error(_LE("Seamicro set boot device failed for node " "%(node)s with the following error: %(error)s"), {'node': task.node.uuid, 'error': ex}) - raise exception.IronicException(message=six.text_type(ex)) + raise exception.IronicException(ex) def get_boot_device(self, task): """Get the current boot device for the task's node. From ad5a06c53422bf9872e884974461cb5a307bd20c Mon Sep 17 00:00:00 2001 From: vsaienko Date: Wed, 16 Dec 2015 17:53:58 +0200 Subject: [PATCH 086/166] Add multitenancy devstack configuration examples. The guide shows how to setup Devstack with Ironic multitenancy and ``networking-generic-switch`` Neutron ML2 driver. 
Depends-On: If2fb996783b9ac26a5bae2aadd6387207750def9 Change-Id: I24ef394536e5109cf1499ec87f6a495063a87f5d Closes-bug: #1526403 --- .../dev/ironic-multitenant-networking.rst | 136 ++++++++++++++++++ doc/source/index.rst | 2 + 2 files changed, 138 insertions(+) create mode 100644 doc/source/dev/ironic-multitenant-networking.rst diff --git a/doc/source/dev/ironic-multitenant-networking.rst b/doc/source/dev/ironic-multitenant-networking.rst new file mode 100644 index 000000000..583619e13 --- /dev/null +++ b/doc/source/dev/ironic-multitenant-networking.rst @@ -0,0 +1,136 @@ +========================================== +Ironic multitenant networking and DevStack +========================================== + +This guide will walk you through using OpenStack Ironic/Neutron with the ML2 +``networking-generic-switch`` plugin. + + +Using VMs as baremetal servers +============================== + +This scenario shows how to setup Devstack to use Ironic/Neutron integration +with VMs as baremetal servers and ML2 ``networking-generic-switch`` +that interacts with OVS. + + +DevStack Configuration +---------------------- +The following is ``local.conf`` that will setup Devstack with 3 VMs that are +registered in ironic. ``networking-generic-switch`` driver will be installed and +configured in Neutron. + +:: + + [[local|localrc]] + + # Configure ironic from ironic devstack plugin. 
+ enable_plugin ironic https://review.openstack.org/openstack/ironic + + # Install networking-generic-switch Neutron ML2 driver that interacts with OVS + enable_plugin networking-generic-switch https://review.openstack.org/openstack/networking-generic-switch + Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron/plugins/ml2 + Q_PLUGIN_EXTRA_CONF_FILES['networking-generic-switch']=ml2_conf_genericswitch.ini + + # Add link local info when registering Ironic node + IRONIC_USE_LINK_LOCAL=True + + IRONIC_ENABLED_NETWORK_INTERFACES=flat,neutron + IRONIC_NETWORK_INTERFACE=neutron + + #Networking configuration + OVS_PHYSICAL_BRIDGE=brbm + PHYSICAL_NETWORK=mynetwork + IRONIC_PROVISION_NETWORK_NAME=ironic-provision + IRONIC_PROVISION_SUBNET_PREFIX=10.0.5.0/24 + IRONIC_PROVISION_SUBNET_GATEWAY=10.0.5.1 + + Q_PLUGIN=ml2 + ENABLE_TENANT_VLANS=True + Q_ML2_TENANT_NETWORK_TYPE=vlan + TENANT_VLAN_RANGE=100:150 + + # Credentials + ADMIN_PASSWORD=password + RABBIT_PASSWORD=password + DATABASE_PASSWORD=password + SERVICE_PASSWORD=password + SERVICE_TOKEN=password + SWIFT_HASH=password + SWIFT_TEMPURL_KEY=password + + # Enable Ironic API and Ironic Conductor + enable_service ironic + enable_service ir-api + enable_service ir-cond + + # Enable Neutron which is required by Ironic and disable nova-network. + disable_service n-net + disable_service n-novnc + enable_service q-svc + enable_service q-agt + enable_service q-dhcp + enable_service q-l3 + enable_service q-meta + enable_service neutron + + # Enable Swift for agent_* drivers + enable_service s-proxy + enable_service s-object + enable_service s-container + enable_service s-account + + # Disable Horizon + disable_service horizon + + # Disable Heat + disable_service heat h-api h-api-cfn h-api-cw h-eng + + # Disable Cinder + disable_service cinder c-sch c-api c-vol + + # Disable Tempest + disable_service tempest + + # Swift temp URL's are required for agent_* drivers. 
+ SWIFT_ENABLE_TEMPURLS=True + + # Create 3 virtual machines to pose as Ironic's baremetal nodes. + IRONIC_VM_COUNT=3 + IRONIC_VM_SSH_PORT=22 + IRONIC_BAREMETAL_BASIC_OPS=True + + # Enable Ironic drivers. + IRONIC_ENABLED_DRIVERS=fake,agent_ssh,agent_ipmitool,pxe_ssh,pxe_ipmitool + + # Change this to alter the default driver for nodes created by devstack. + # This driver should be in the enabled list above. + IRONIC_DEPLOY_DRIVER=agent_ssh + + # The parameters below represent the minimum possible values to create + # functional nodes. + IRONIC_VM_SPECS_RAM=1024 + IRONIC_VM_SPECS_DISK=10 + + # Size of the ephemeral partition in GB. Use 0 for no ephemeral partition. + IRONIC_VM_EPHEMERAL_DISK=0 + + # To build your own IPA ramdisk from source, set this to True + IRONIC_BUILD_DEPLOY_RAMDISK=False + + VIRT_DRIVER=ironic + + # By default, DevStack creates a 10.0.0.0/24 network for instances. + # If this overlaps with the hosts network, you may adjust with the + # following. + NETWORK_GATEWAY=10.1.0.1 + FIXED_RANGE=10.1.0.0/24 + FIXED_NETWORK_SIZE=256 + + # Log all output to files + LOGFILE=$HOME/devstack.log + LOGDIR=$HOME/logs + IRONIC_VM_LOG_DIR=$HOME/ironic-bm-logs + + IRONIC_ENABLED_NETWORK_INTERFACES=neutron + IRONIC_NETWORK_INTERFACE=neutron diff --git a/doc/source/index.rst b/doc/source/index.rst index c3263df3b..a970bbeea 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -68,6 +68,8 @@ Developer's Guide dev/code-contribution-guide dev/dev-quickstart dev/vendor-passthru + dev/ironic-multitenant-networking + dev/faq Indices and tables From 9084c647a95d71aabce89377ae56bdfca9ba3116 Mon Sep 17 00:00:00 2001 From: Yuriy Yekovenko Date: Mon, 18 Jan 2016 17:49:47 +0200 Subject: [PATCH 087/166] Add test to verify ironic multitenancy * Create two tenants with own networks. * Boot 2 baremetal instances in the same IP subnet in different tenants. 
* Verify L3 connectivity between instances IP's * Verify L3 connectivity between instances floating IP's Co-Authored-By: Vasyl Saienko (vsaienko@mirantis.com) Change-Id: I4fe31ecae3393abc2779a5e80e348899f9113f1b Related-Bug: 1520230 --- devstack/lib/ironic | 3 + ironic_tempest_plugin/config.py | 3 + .../tests/scenario/baremetal_manager.py | 61 +++++-- .../scenario/test_baremetal_basic_ops.py | 4 +- .../scenario/test_baremetal_multitenancy.py | 155 ++++++++++++++++++ 5 files changed, 207 insertions(+), 19 deletions(-) create mode 100644 ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 50d8a57ce..dd19a1f19 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -1409,6 +1409,9 @@ function ironic_configure_tempest { iniset $TEMPEST_CONFIG compute flavor_ref $bm_flavor_id iniset $TEMPEST_CONFIG compute flavor_ref_alt $bm_flavor_id iniset $TEMPEST_CONFIG compute-feature-enabled disk_config False + if [[ -n "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then + iniset $TEMPEST_CONFIG baremetal use_provision_network True + fi } # Restore xtrace + pipefail diff --git a/ironic_tempest_plugin/config.py b/ironic_tempest_plugin/config.py index e39f8f883..00e9a35d2 100644 --- a/ironic_tempest_plugin/config.py +++ b/ironic_tempest_plugin/config.py @@ -81,4 +81,7 @@ BaremetalGroup = [ "min_microversion and max_microversion. 
" "If both values are None, Tempest avoids tests which " "require a microversion."), + cfg.BoolOpt('use_provision_network', + default=False, + help="Whether the Ironic/Neutron tenant isolation is enabled") ] diff --git a/ironic_tempest_plugin/tests/scenario/baremetal_manager.py b/ironic_tempest_plugin/tests/scenario/baremetal_manager.py index a56c94148..3f7c04418 100644 --- a/ironic_tempest_plugin/tests/scenario/baremetal_manager.py +++ b/ironic_tempest_plugin/tests/scenario/baremetal_manager.py @@ -145,38 +145,65 @@ class BaremetalScenarioTest(manager.ScenarioTest): dest = self.get_remote_client(self.instance) dest.validate_authentication() - def boot_instance(self): - self.instance = self.create_server( - key_name=self.keypair['name']) + def boot_instance(self, clients=None, keypair=None, + net_id=None, fixed_ip=None): + if clients is None: + servers_client = self.servers_client + else: + servers_client = clients.servers_client + if keypair is None: + keypair = self.keypair - self.wait_node(self.instance['id']) - self.node = self.get_node(instance_id=self.instance['id']) + if any([net_id, fixed_ip]): + network = {} + if net_id: + network['uuid'] = net_id + if fixed_ip: + network['fixed_ip'] = fixed_ip + instance = self.create_server( + key_name=keypair['name'], + networks=[network], + clients=clients + ) + else: + instance = self.create_server( + key_name=keypair['name'], + clients=clients + ) - self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON) + self.wait_node(instance['id']) + node = self.get_node(instance_id=instance['id']) + + self.wait_power_state(node['uuid'], BaremetalPowerStates.POWER_ON) self.wait_provisioning_state( - self.node['uuid'], + node['uuid'], [BaremetalProvisionStates.DEPLOYWAIT, BaremetalProvisionStates.ACTIVE], timeout=CONF.baremetal.deploywait_timeout) - self.wait_provisioning_state(self.node['uuid'], + self.wait_provisioning_state(node['uuid'], BaremetalProvisionStates.ACTIVE, timeout=CONF.baremetal.active_timeout, 
interval=30) - waiters.wait_for_server_status(self.servers_client, - self.instance['id'], 'ACTIVE') - self.node = self.get_node(instance_id=self.instance['id']) - self.instance = (self.servers_client.show_server(self.instance['id']) - ['server']) + waiters.wait_for_server_status(servers_client, + instance['id'], 'ACTIVE') + node = self.get_node(instance_id=instance['id']) + instance = servers_client.show_server(instance['id'])['server'] - def terminate_instance(self): - self.servers_client.delete_server(self.instance['id']) - self.wait_power_state(self.node['uuid'], + return instance, node + + def terminate_instance(self, instance, servers_client=None): + if servers_client is None: + servers_client = self.servers_client + + node = self.get_node(instance_id=instance['id']) + servers_client.delete_server(instance['id']) + self.wait_power_state(node['uuid'], BaremetalPowerStates.POWER_OFF) self.wait_provisioning_state( - self.node['uuid'], + node['uuid'], [BaremetalProvisionStates.NOSTATE, BaremetalProvisionStates.AVAILABLE], timeout=CONF.baremetal.unprovision_timeout, diff --git a/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py b/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py index 0564c1049..a95b3d43a 100644 --- a/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py +++ b/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py @@ -110,7 +110,7 @@ class BaremetalBasicOps(baremetal_manager.BaremetalScenarioTest): @test.services('baremetal', 'compute', 'image', 'network') def test_baremetal_server_ops(self): self.add_keypair() - self.boot_instance() + self.instance, self.node = self.boot_instance() self.validate_ports() ip_address = self.get_server_ip(self.instance) self.get_remote_client(ip_address).validate_authentication() @@ -125,4 +125,4 @@ class BaremetalBasicOps(baremetal_manager.BaremetalScenarioTest): self.create_timestamp( ip_address, private_key=self.keypair['private_key']) - self.terminate_instance() + 
self.terminate_instance(self.instance) diff --git a/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py b/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py new file mode 100644 index 000000000..551c885d1 --- /dev/null +++ b/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py @@ -0,0 +1,155 @@ +# +# Copyright (c) 2015 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from ironic_tempest_plugin.tests.scenario import baremetal_manager +from tempest import config +from tempest.lib.common.utils import data_utils +from tempest.scenario import manager +from tempest import test + +CONF = config.CONF + +LOG = logging.getLogger(__name__) + + +class BaremetalMultitenancy(baremetal_manager.BaremetalScenarioTest, + manager.NetworkScenarioTest): + """Check L2 isolation of baremetal instances in different tenants: + + * Create a keypair, network, subnet and router for the primary tenant + * Boot 2 instances in the different tenant's network using the keypair + * Associate floating ips to both instance + * Verify there is no L3 connectivity between instances of different tenants + * Verify connectivity between instances floating IP's + * Delete both instances + """ + + credentials = ['primary', 'alt', 'admin'] + + @classmethod + def skip_checks(cls): + super(BaremetalMultitenancy, cls).skip_checks() + if not CONF.baremetal.use_provision_network: + msg = 'Ironic/Neutron tenant 
isolation is not configured.' + raise cls.skipException(msg) + + def create_tenant_network(self, clients, tenant_cidr): + network = self._create_network( + networks_client=clients.networks_client, + tenant_id=clients.credentials.tenant_id) + router = self._get_router( + client=clients.routers_client, + tenant_id=clients.credentials.tenant_id) + + result = clients.subnets_client.create_subnet( + name=data_utils.rand_name('subnet'), + network_id=network['id'], + tenant_id=clients.credentials.tenant_id, + ip_version=4, + cidr=tenant_cidr) + subnet = result['subnet'] + clients.routers_client.add_router_interface(router['id'], + subnet_id=subnet['id']) + self.addCleanup(clients.subnets_client.delete_subnet, subnet['id']) + self.addCleanup(clients.routers_client.remove_router_interface, + router['id'], subnet_id=subnet['id']) + + return network, subnet, router + + def verify_l3_connectivity(self, source_ip, private_key, + destination_ip, conn_expected=True): + remote = self.get_remote_client(source_ip, private_key=private_key) + remote.validate_authentication() + + cmd = 'ping %s -c4 -w4 || exit 0' % destination_ip + success_substring = "64 bytes from %s" % destination_ip + output = remote.exec_command(cmd) + if conn_expected: + self.assertIn(success_substring, output) + else: + self.assertNotIn(success_substring, output) + + @test.idempotent_id('26e2f145-2a8e-4dc7-8457-7f2eb2c6749d') + @test.services('baremetal', 'compute', 'image', 'network') + def test_baremetal_multitenancy(self): + + tenant_cidr = '10.0.100.0/24' + fixed_ip1 = '10.0.100.3' + fixed_ip2 = '10.0.100.5' + keypair = self.create_keypair() + network, subnet, router = self.create_tenant_network( + self.manager, tenant_cidr) + + # Boot 2 instances in the primary tenant network + # and check L2 connectivity between them + instance1, node1 = self.boot_instance( + clients=self.manager, + keypair=keypair, + net_id=network['id'], + fixed_ip=fixed_ip1 + ) + floating_ip1 = self.create_floating_ip( + instance1, + 
)['floating_ip_address'] + self.check_vm_connectivity(ip_address=floating_ip1, + private_key=keypair['private_key']) + + # Boot instance in the alt tenant network and ensure there is no + # L2 connectivity between instances of the different tenants + alt_keypair = self.create_keypair(self.alt_manager.keypairs_client) + alt_network, alt_subnet, alt_router = self.create_tenant_network( + self.alt_manager, tenant_cidr) + + alt_instance, alt_node = self.boot_instance( + keypair=alt_keypair, + clients=self.alt_manager, + net_id=alt_network['id'], + fixed_ip=fixed_ip2 + ) + alt_floating_ip = self.create_floating_ip( + alt_instance, + client=self.alt_manager.floating_ips_client + )['floating_ip_address'] + + self.check_vm_connectivity(ip_address=alt_floating_ip, + private_key=alt_keypair['private_key']) + + self.verify_l3_connectivity( + alt_floating_ip, + alt_keypair['private_key'], + fixed_ip1, + conn_expected=False + ) + + self.verify_l3_connectivity( + floating_ip1, + keypair['private_key'], + fixed_ip2, + conn_expected=False + ) + + self.verify_l3_connectivity( + floating_ip1, + keypair['private_key'], + alt_floating_ip, + conn_expected=True + ) + + self.terminate_instance( + instance=alt_instance, + servers_client=self.alt_manager.servers_client) + self.terminate_instance(instance=instance1) From 4cb336506a252044d8789627e5e4817fdfc11742 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Mon, 18 Jul 2016 15:37:24 +0300 Subject: [PATCH 088/166] Update {configure|cleanup}ironic_provision_network. Use OSC instead of Neutron client in configure_ironic_provision_network. Do not rely on local.conf variable when perform cleaning in cleanup_ironic_provision_network. 
Change-Id: Ie6f5df657231ea16691a6ab88481c34bbdb1f281 --- devstack/lib/ironic | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index dd19a1f19..c22f8516d 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -483,24 +483,24 @@ function configure_ironic_provision_network { die_if_not_set $LINENO IRONIC_PROVISION_SUBNET_GATEWAY "You must specify the IRONIC_PROVISION_SUBNET_GATEWAY" local net_id - net_id=$(neutron net-create --provider:network_type $IRONIC_PROVISION_PROVIDER_NETWORK_TYPE \ - --provider:physical_network "$PHYSICAL_NETWORK" \ - ${IRONIC_PROVISION_SEGMENTATION_ID:+--provider:segmentation_id $IRONIC_PROVISION_SEGMENTATION_ID} \ - ${IRONIC_PROVISION_NETWORK_NAME} | grep ' id ' | get_field 2) + net_id=$(openstack network create --provider-network-type $IRONIC_PROVISION_PROVIDER_NETWORK_TYPE \ + --provider-physical-network "$PHYSICAL_NETWORK" \ + ${IRONIC_PROVISION_SEGMENTATION_ID:+--provider-segment $IRONIC_PROVISION_SEGMENTATION_ID} \ + ${IRONIC_PROVISION_NETWORK_NAME} -f value -c id) die_if_not_set $LINENO net_id "Failure creating net_id for $IRONIC_PROVISION_NETWORK_NAME" local subnet_id - subnet_id="$(neutron subnet-create --ip_version 4 \ + subnet_id="$(openstack subnet create --ip-version 4 \ ${IRONIC_PROVISION_ALLOCATION_POOL:+--allocation-pool $IRONIC_PROVISION_ALLOCATION_POOL} \ - --name $IRONIC_PROVISION_PROVIDER_SUBNET_NAME \ - --gateway $IRONIC_PROVISION_SUBNET_GATEWAY $net_id \ - $IRONIC_PROVISION_SUBNET_PREFIX | grep ' id ' | get_field 2)" + $IRONIC_PROVISION_PROVIDER_SUBNET_NAME \ + --gateway $IRONIC_PROVISION_SUBNET_GATEWAY --network $net_id \ + --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX -f value -c id)" die_if_not_set $LINENO subnet_id "Failure creating SUBNET_ID for $IRONIC_PROVISION_NETWORK_NAME" iniset $IRONIC_CONF_FILE neutron provisioning_network_uuid $net_id - IRONIC_PROVISION_SEGMENTATION_ID=${IRONIC_PROVISION_SEGMENTATION_ID:-`neutron 
net-show ${net_id} | grep -w 'provider:segmentation_id'| get_field 2`} + IRONIC_PROVISION_SEGMENTATION_ID=${IRONIC_PROVISION_SEGMENTATION_ID:-`openstack network show ${net_id} -f value -c provider:segmentation_id`} provision_net_prefix=${IRONIC_PROVISION_SUBNET_PREFIX##*/} # Set provision network GW on physical interface @@ -517,9 +517,6 @@ function configure_ironic_provision_network { } function cleanup_ironic_provision_network { - if [[ -z "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then - return 0 - fi # Cleanup OVS_PHYSICAL_BRIDGE subinterfaces local bridge_subint bridge_subint=$(cat /proc/net/dev | sed -n "s/^\(${OVS_PHYSICAL_BRIDGE}\.[0-9]*\).*/\1/p") From d36bd6f74aa12f6d460fbf086893150a2dc92d63 Mon Sep 17 00:00:00 2001 From: Shivanand Tendulker Date: Wed, 6 Jul 2016 21:26:33 -0700 Subject: [PATCH 089/166] Support to validate iLO SSL certificate in iLO drivers iLO drivers do not validate iLO SSL certificate. This commit adds support in iLO drivers to validate iLO SSL certificate. Change-Id: Iff0d02799d3d9338b7dbdd77eab1f12f709a7765 Closes-Bug: #1599710 --- doc/source/drivers/ilo.rst | 3 ++ etc/ironic/ironic.conf.sample | 22 +++++++++----- ironic/conf/ilo.py | 2 ++ ironic/drivers/modules/ilo/common.py | 10 ++++++- ironic/tests/unit/conductor/test_manager.py | 7 +++-- .../unit/drivers/modules/ilo/test_common.py | 30 +++++++++++++++++-- ...ate-ilo-certificates-3ab98bb8cfad7d60.yaml | 8 +++++ 7 files changed, 69 insertions(+), 13 deletions(-) create mode 100644 releasenotes/notes/validate-ilo-certificates-3ab98bb8cfad7d60.yaml diff --git a/doc/source/drivers/ilo.rst b/doc/source/drivers/ilo.rst index 352eaea7e..7d181cf07 100644 --- a/doc/source/drivers/ilo.rst +++ b/doc/source/drivers/ilo.rst @@ -288,6 +288,7 @@ Nodes configured for iLO driver should have the ``driver`` property set to - ``ilo_username``: Username for the iLO with administrator privileges. - ``ilo_password``: Password for the above iLO user. 
- ``ilo_deploy_iso``: The glance UUID of the deploy ramdisk ISO image. +- ``ilo_ca_file``: (optional) CA certificate file to validate iLO. - ``client_port``: (optional) Port to be used for iLO operations if you are using a custom port on the iLO. Default port used is 443. - ``client_timeout``: (optional) Timeout for iLO operations. Default timeout @@ -425,6 +426,7 @@ Nodes configured for iLO driver should have the ``driver`` property set to - ``ilo_username``: Username for the iLO with administrator privileges. - ``ilo_password``: Password for the above iLO user. - ``ilo_deploy_iso``: The glance UUID of the deploy ramdisk ISO image. +- ``ilo_ca_file``: (optional) CA certificate file to validate iLO. - ``client_port``: (optional) Port to be used for iLO operations if you are using a custom port on the iLO. Default port used is 443. - ``client_timeout``: (optional) Timeout for iLO operations. Default timeout @@ -543,6 +545,7 @@ Nodes configured for iLO driver should have the ``driver`` property set to - ``ilo_password``: Password for the above iLO user. - ``deploy_kernel``: The glance UUID of the deployment kernel. - ``deploy_ramdisk``: The glance UUID of the deployment ramdisk. +- ``ilo_ca_file``: (optional) CA certificate file to validate iLO. - ``client_port``: (optional) Port to be used for iLO operations if you are using a custom port on the iLO. Default port used is 443. - ``client_timeout``: (optional) Timeout for iLO operations. Default timeout diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 10e7b8ac7..a0f8cca2a 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -37,7 +37,7 @@ # recommended set of production-oriented network interfaces. A # complete list of network interfaces present on your system # may be found by enumerating the -# "ironic.hardware.interfaces.network" entrypoint.This value +# "ironic.hardware.interfaces.network" entrypoint. 
This value # must be the same on all ironic-conductor and ironic-api # services, because it is used by ironic-api service to # validate a new or updated node's network_interface value. @@ -156,6 +156,7 @@ # is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) +# Note: This option can be changed without restarting. # Deprecated group/name - [DEFAULT]/log_config #log_config_append = @@ -635,13 +636,13 @@ # From ironic # -# Path to serial console terminal program. Used only by -# Shell In A Box console. (string value) +# Path to serial console terminal program. Used only by Shell +# In A Box console. (string value) #terminal = shellinaboxd -# Directory containing the terminal SSL cert(PEM) for serial -# console access. Used only by Shell In A Box console. -# (string value) +# Directory containing the terminal SSL cert (PEM) for serial +# console access. Used only by Shell In A Box console. (string +# value) #terminal_cert_dir = # Directory for holding terminal pid files. If not specified, @@ -736,8 +737,12 @@ # From oslo.db # -# The file name to use with SQLite. (string value) +# DEPRECATED: The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Should use config option connection or +# slave_connection to connect the database. #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) @@ -1153,6 +1158,9 @@ # operations (integer value) #power_wait = 2 +# CA certificate file to validate iLO. 
(string value) +#ca_file = + [inspector] diff --git a/ironic/conf/ilo.py b/ironic/conf/ilo.py index 54f86cbaf..172b138d9 100644 --- a/ironic/conf/ilo.py +++ b/ironic/conf/ilo.py @@ -78,6 +78,8 @@ opts = [ default=2, help=_('Amount of time in seconds to wait in between power ' 'operations')), + cfg.StrOpt('ca_file', + help=_('CA certificate file to validate iLO.')), ] diff --git a/ironic/drivers/modules/ilo/common.py b/ironic/drivers/modules/ilo/common.py index f48407a70..69060f41c 100644 --- a/ironic/drivers/modules/ilo/common.py +++ b/ironic/drivers/modules/ilo/common.py @@ -59,6 +59,7 @@ REQUIRED_PROPERTIES = { OPTIONAL_PROPERTIES = { 'client_port': _("port to be used for iLO operations. Optional."), 'client_timeout': _("timeout (in seconds) for iLO operations. Optional."), + 'ca_file': _("CA certificate file to validate iLO. optional"), } CONSOLE_PROPERTIES = { 'console_port': _("node's UDP port to connect to. Only required for " @@ -211,6 +212,12 @@ def parse_driver_info(node): value = info.get(param, CONF.ilo.get(param)) if param == "client_port": d_info[param] = utils.validate_network_port(value, param) + elif param == "ca_file": + if value and not os.path.isfile(value): + raise exception.InvalidParameterValue(_( + '%(param)s "%(value)s" is not found.') % + {'param': param, 'value': value}) + d_info[param] = value else: try: d_info[param] = int(value) @@ -250,7 +257,8 @@ def get_ilo_object(node): driver_info['ilo_username'], driver_info['ilo_password'], driver_info['client_timeout'], - driver_info['client_port']) + driver_info['client_port'], + cacert=driver_info.get('ca_file')) return ilo_object diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py index 6553afa7d..75c194751 100644 --- a/ironic/tests/unit/conductor/test_manager.py +++ b/ironic/tests/unit/conductor/test_manager.py @@ -4022,21 +4022,22 @@ class ManagerTestProperties(tests_db_base.DbTestCase): def test_driver_properties_fake_ilo(self): expected = 
['ilo_address', 'ilo_username', 'ilo_password', - 'client_port', 'client_timeout', 'ilo_change_password'] + 'client_port', 'client_timeout', 'ilo_change_password', + 'ca_file'] self._check_driver_properties("fake_ilo", expected) def test_driver_properties_ilo_iscsi(self): expected = ['ilo_address', 'ilo_username', 'ilo_password', 'client_port', 'client_timeout', 'ilo_deploy_iso', 'console_port', 'ilo_change_password', - 'deploy_forces_oob_reboot'] + 'deploy_forces_oob_reboot', 'ca_file'] self._check_driver_properties("iscsi_ilo", expected) def test_driver_properties_agent_ilo(self): expected = ['ilo_address', 'ilo_username', 'ilo_password', 'client_port', 'client_timeout', 'ilo_deploy_iso', 'console_port', 'ilo_change_password', - 'deploy_forces_oob_reboot'] + 'deploy_forces_oob_reboot', 'ca_file'] self._check_driver_properties("agent_ilo", expected) def test_driver_properties_fail(self): diff --git a/ironic/tests/unit/drivers/modules/ilo/test_common.py b/ironic/tests/unit/drivers/modules/ilo/test_common.py index 5dc492973..cde0808a3 100644 --- a/ironic/tests/unit/drivers/modules/ilo/test_common.py +++ b/ironic/tests/unit/drivers/modules/ilo/test_common.py @@ -61,7 +61,9 @@ class IloValidateParametersTestCase(db_base.DbTestCase): self.context, driver='fake_ilo', driver_info=INFO_DICT) - def test_parse_driver_info(self): + @mock.patch.object(os.path, 'isfile', return_value=True, autospec=True) + def _test_parse_driver_info(self, isFile_mock): + info = ilo_common.parse_driver_info(self.node) self.assertEqual(INFO_DICT['ilo_address'], info['ilo_address']) @@ -69,6 +71,15 @@ class IloValidateParametersTestCase(db_base.DbTestCase): self.assertEqual(INFO_DICT['ilo_password'], info['ilo_password']) self.assertEqual(60, info['client_timeout']) self.assertEqual(443, info['client_port']) + self.assertEqual('/home/user/cafile.pem', info['ca_file']) + + def test_parse_driver_info_ca_file_in_driver_info(self): + self.node.driver_info['ca_file'] = '/home/user/cafile.pem' + 
self._test_parse_driver_info() + + def test_parse_driver_info_ca_file_in_conf_file(self): + self.config(ca_file='/home/user/cafile.pem', group='ilo') + self._test_parse_driver_info() def test_parse_driver_info_missing_address(self): del self.node.driver_info['ilo_address'] @@ -85,6 +96,13 @@ class IloValidateParametersTestCase(db_base.DbTestCase): self.assertRaises(exception.MissingParameterValue, ilo_common.parse_driver_info, self.node) + @mock.patch.object(os.path, 'isfile', return_value=False, autospec=True) + def test_parse_driver_info_invalid_cafile(self, isFile_mock): + self.node.driver_info['ca_file'] = '/home/missing.pem' + self.assertRaisesRegex(exception.InvalidParameterValue, + 'ca_file "/home/missing.pem" is not found.', + ilo_common.parse_driver_info, self.node) + def test_parse_driver_info_invalid_timeout(self): self.node.driver_info['client_timeout'] = 'qwe' self.assertRaises(exception.InvalidParameterValue, @@ -132,11 +150,13 @@ class IloCommonMethodsTestCase(db_base.DbTestCase): self.node = obj_utils.create_test_node( self.context, driver='fake_ilo', driver_info=self.info) + @mock.patch.object(os.path, 'isfile', return_value=True, autospec=True) @mock.patch.object(ilo_client, 'IloClient', spec_set=True, autospec=True) - def test_get_ilo_object(self, ilo_client_mock): + def _test_get_ilo_object(self, ilo_client_mock, isFile_mock, ca_file=None): self.info['client_timeout'] = 60 self.info['client_port'] = 443 + self.info['ca_file'] = ca_file ilo_client_mock.return_value = 'ilo_object' returned_ilo_object = ilo_common.get_ilo_object(self.node) ilo_client_mock.assert_called_with( @@ -147,6 +167,12 @@ class IloCommonMethodsTestCase(db_base.DbTestCase): self.info['client_port']) self.assertEqual('ilo_object', returned_ilo_object) + def test_get_ilo_object_cafile(self): + self._test_get_ilo_object(ca_file='/home/user/ilo.pem') + + def test_get_ilo_object_no_cafile(self): + self._test_get_ilo_object() + @mock.patch.object(ilo_common, 'get_ilo_object', 
spec_set=True, autospec=True) def test_get_ilo_license(self, get_ilo_object_mock): diff --git a/releasenotes/notes/validate-ilo-certificates-3ab98bb8cfad7d60.yaml b/releasenotes/notes/validate-ilo-certificates-3ab98bb8cfad7d60.yaml new file mode 100644 index 000000000..9d9d7d1ab --- /dev/null +++ b/releasenotes/notes/validate-ilo-certificates-3ab98bb8cfad7d60.yaml @@ -0,0 +1,8 @@ +--- +features: + - Added support to validate iLO SSL certificate in iLO + drivers. + New config parameter '[ilo]/ca_file' added to + specify iLO CA certificate file. + If 'ca_file' is specified, iLO drivers will validate + iLO SSL certificates. From 662c5666d67757dd3c7ba29cd1c1ec5e9b678fee Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 18 Jul 2016 18:09:20 +0000 Subject: [PATCH 090/166] Updated from global requirements Change-Id: Id129fce21a0455bc8357b9c578ede22b758619f2 --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index f1cad9928..4dcb71959 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,12 +16,12 @@ python-keystoneclient!=1.8.0,!=2.1.0,>=1.7.0 # Apache-2.0 ironic-lib>=2.0.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 pytz>=2013.6 # MIT -stevedore>=1.10.0 # Apache-2.0 +stevedore>=1.16.0 # Apache-2.0 pysendfile>=2.0.0 # MIT websockify>=0.8.0 # LGPLv3 oslo.concurrency>=3.8.0 # Apache-2.0 oslo.config>=3.12.0 # Apache-2.0 -oslo.context>=2.4.0 # Apache-2.0 +oslo.context!=2.6.0,>=2.4.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 From 67f2eaf3b5eb14a6852dadeb9022027c64ebafcf Mon Sep 17 00:00:00 2001 From: Aparna Date: Thu, 7 Jul 2016 07:13:25 +0000 Subject: [PATCH 091/166] Doc update for in-band cleaning support on more drivers This commit updates the documentation on iLO drivers support for in-band cleaning. Also updates the cleaning documentation with in-band cleaning support on more drivers. 
Change-Id: I2c2677ce996b3016c4709afcfec78e5f7e2bdf1c --- doc/source/deploy/cleaning.rst | 4 ++-- doc/source/drivers/ilo.rst | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/source/deploy/cleaning.rst b/doc/source/deploy/cleaning.rst index 3b0fc28e7..f5cf1af6f 100644 --- a/doc/source/deploy/cleaning.rst +++ b/doc/source/deploy/cleaning.rst @@ -201,8 +201,8 @@ out-of-band. Ironic supports using both methods to clean a node. In-band ------- In-band steps are performed by ironic making API calls to a ramdisk running -on the node using a Deploy driver. Currently, only the ironic-python-agent -ramdisk used with an agent_* driver supports in-band cleaning. By default, +on the node using a Deploy driver. Currently, all the drivers using +ironic-python-agent ramdisk support in-band cleaning. By default, ironic-python-agent ships with a minimal cleaning configuration, only erasing disks. However, with this ramdisk, you can add your own cleaning steps and/or override default cleaning steps with a custom Hardware Manager. diff --git a/doc/source/drivers/ilo.rst b/doc/source/drivers/ilo.rst index 19e5680d9..264aca2fd 100644 --- a/doc/source/drivers/ilo.rst +++ b/doc/source/drivers/ilo.rst @@ -245,7 +245,8 @@ Features image provisioning is done using iSCSI over data network, so this driver has the benefit of security enhancement with the same performance. It segregates management info from data channel. -* Support for out-of-band cleaning operations. +* Supports both out-of-band and in-band cleaning operations. For more details, + see :ref:`InbandvsOutOfBandCleaning`. * Remote Console * HW Sensors * Works well for machines with resource constraints (lesser amount of memory). @@ -503,7 +504,8 @@ Features * Automatic detection of current boot mode. * Automatic setting of the required boot mode, if UEFI boot mode is requested by the nova flavor's extra spec. -* Support for out-of-band cleaning operations. 
+* Supports both out-of-band and in-band cleaning operations. For more details, + see :ref:`InbandvsOutOfBandCleaning`. * Support for out-of-band hardware inspection. * Supports UEFI Boot mode * Supports UEFI Secure Boot From f2eda991ba842563e2b47f7f222922e60299e197 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 19 Jul 2016 21:07:56 +0000 Subject: [PATCH 092/166] Updated from global requirements Change-Id: I681bd205dc77ef0abae3142d4112e3ada5320766 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4dcb71959..374414c4d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -40,7 +40,7 @@ Jinja2>=2.8 # BSD License (3 clause) keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0 oslo.messaging>=5.2.0 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 -oslo.versionedobjects>=1.9.1 # Apache-2.0 +oslo.versionedobjects>=1.13.0 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT psutil<2.0.0,>=1.1.1 # BSD futurist!=0.15.0,>=0.11.0 # Apache-2.0 From 430b815ad52c37500de70e49d41ce80a82f87efe Mon Sep 17 00:00:00 2001 From: Yatin Kumbhare Date: Tue, 12 Jul 2016 17:33:02 +0530 Subject: [PATCH 093/166] Add Python 3.5 tox venv Now that there is a passing gate job, we can claim support for Python 3.5. This patch also adds the convenience for py35 venv. 
Change-Id: I38ae3ae11ff0d9ece94d9ca684f5485225d9dc45 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index beb8a7182..ef60d136e 100644 --- a/tox.ini +++ b/tox.ini @@ -1,7 +1,7 @@ [tox] minversion = 1.8 skipsdist = True -envlist = py34,py27,pep8 +envlist = py35,py34,py27,pep8 [testenv] usedevelop = True From a9216bb07f4ccd4dc202fd1f5c14b36a08ac6046 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Fri, 24 Jun 2016 17:24:58 +0200 Subject: [PATCH 094/166] Implement new heartbeat for AgentDeploy Refactor heartbeat and related calls to a new AgentDeployMixin used in AgentDeploy, ISCSIDeploy and BaseAgentVendor. Also removes excessive passing of kwargs to where they are not used. It has a potential of breaking out-of-tree drivers, but it also greatly simplifies tracking how values are passed during deployment. Also made several methods just functions. Change-Id: I56c728c13b06f1aea0baeec0dddc7ba160a7a211 Partial-Bug: #1570841 --- ironic/drivers/modules/agent.py | 308 ++++----- ironic/drivers/modules/agent_base_vendor.py | 596 ++++++++--------- ironic/drivers/modules/amt/vendor.py | 4 +- ironic/drivers/modules/ilo/vendor.py | 4 +- ironic/drivers/modules/iscsi_deploy.py | 55 +- ironic/drivers/modules/oneview/vendor.py | 3 +- .../unit/drivers/modules/amt/test_vendor.py | 10 +- .../unit/drivers/modules/ilo/test_vendor.py | 15 +- .../tests/unit/drivers/modules/test_agent.py | 6 + .../drivers/modules/test_agent_base_vendor.py | 598 +++++++++--------- .../unit/drivers/modules/test_iscsi_deploy.py | 6 + .../notes/agent-api-bf9f18d8d38075e4.yaml | 5 + 12 files changed, 825 insertions(+), 785 deletions(-) create mode 100644 releasenotes/notes/agent-api-bf9f18d8d38075e4.yaml diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py index 2cf56809f..ae98653e6 100644 --- a/ironic/drivers/modules/agent.py +++ b/ironic/drivers/modules/agent.py @@ -224,7 +224,159 @@ def validate_image_proxies(node): raise 
exception.InvalidParameterValue(msg) -class AgentDeploy(base.DeployInterface): +class AgentDeployMixin(agent_base_vendor.AgentDeployMixin): + + def deploy_has_started(self, task): + commands = self._client.get_commands_status(task.node) + + for command in commands: + if command['command_name'] == 'prepare_image': + # deploy did start at some point + return True + return False + + def deploy_is_done(self, task): + commands = self._client.get_commands_status(task.node) + if not commands: + return False + + last_command = commands[-1] + + if last_command['command_name'] != 'prepare_image': + # catches race condition where prepare_image is still processing + # so deploy hasn't started yet + return False + + if last_command['command_status'] != 'RUNNING': + return True + + return False + + @task_manager.require_exclusive_lock + def continue_deploy(self, task): + task.process_event('resume') + node = task.node + image_source = node.instance_info.get('image_source') + LOG.debug('Continuing deploy for node %(node)s with image %(img)s', + {'node': node.uuid, 'img': image_source}) + + image_info = { + 'id': image_source.split('/')[-1], + 'urls': [node.instance_info['image_url']], + 'checksum': node.instance_info['image_checksum'], + # NOTE(comstud): Older versions of ironic do not set + # 'disk_format' nor 'container_format', so we use .get() + # to maintain backwards compatibility in case code was + # upgraded in the middle of a build request. 
+ 'disk_format': node.instance_info.get('image_disk_format'), + 'container_format': node.instance_info.get( + 'image_container_format'), + 'stream_raw_images': CONF.agent.stream_raw_images, + } + + proxies = {} + for scheme in ('http', 'https'): + proxy_param = 'image_%s_proxy' % scheme + proxy = node.driver_info.get(proxy_param) + if proxy: + proxies[scheme] = proxy + if proxies: + image_info['proxies'] = proxies + no_proxy = node.driver_info.get('image_no_proxy') + if no_proxy is not None: + image_info['no_proxy'] = no_proxy + + iwdi = node.driver_internal_info.get('is_whole_disk_image') + if not iwdi: + for label in PARTITION_IMAGE_LABELS: + image_info[label] = node.instance_info.get(label) + boot_option = deploy_utils.get_boot_option(node) + boot_mode = deploy_utils.get_boot_mode_for_deploy(node) + if boot_mode: + image_info['deploy_boot_mode'] = boot_mode + else: + image_info['deploy_boot_mode'] = 'bios' + image_info['boot_option'] = boot_option + disk_label = deploy_utils.get_disk_label(node) + if disk_label is not None: + image_info['disk_label'] = disk_label + image_info['node_uuid'] = node.uuid + + # Tell the client to download and write the image with the given args + self._client.prepare_image(node, image_info) + + task.process_event('wait') + + def _get_uuid_from_result(self, task, type_uuid): + command = self._client.get_commands_status(task.node)[-1] + + if command['command_result'] is not None: + words = command['command_result']['result'].split() + for word in words: + if type_uuid in word: + result = word.split('=')[1] + if not result: + msg = (_('Command result did not return %(type_uuid)s ' + 'for node %(node)s. 
The version of the IPA ' + 'ramdisk used in the deployment might not ' + 'have support for provisioning of ' + 'partition images.') % + {'type_uuid': type_uuid, + 'node': task.node.uuid}) + LOG.error(msg) + deploy_utils.set_failed_state(task, msg) + return + return result + + def check_deploy_success(self, node): + # should only ever be called after we've validated that + # the prepare_image command is complete + command = self._client.get_commands_status(node)[-1] + if command['command_status'] == 'FAILED': + return command['command_error'] + + def reboot_to_instance(self, task): + task.process_event('resume') + node = task.node + iwdi = task.node.driver_internal_info.get('is_whole_disk_image') + error = self.check_deploy_success(node) + if error is not None: + # TODO(jimrollenhagen) power off if using neutron dhcp to + # align with pxe driver? + msg = (_('node %(node)s command status errored: %(error)s') % + {'node': node.uuid, 'error': error}) + LOG.error(msg) + deploy_utils.set_failed_state(task, msg) + return + if not iwdi: + root_uuid = self._get_uuid_from_result(task, 'root_uuid') + if deploy_utils.get_boot_mode_for_deploy(node) == 'uefi': + efi_sys_uuid = ( + self._get_uuid_from_result(task, + 'efi_system_partition_uuid')) + else: + efi_sys_uuid = None + task.node.driver_internal_info['root_uuid_or_disk_id'] = root_uuid + task.node.save() + self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid) + LOG.info(_LI('Image successfully written to node %s'), node.uuid) + LOG.debug('Rebooting node %s to instance', node.uuid) + if iwdi: + manager_utils.node_set_boot_device(task, 'disk', persistent=True) + + self.reboot_and_finish_deploy(task) + + # NOTE(TheJulia): If we deployed a whole disk image, we + # should expect a whole disk image and clean-up the tftp files + # on-disk incase the node is disregarding the boot preference. + # TODO(rameshg87): Not all in-tree drivers using reboot_to_instance + # have a boot interface. So include a check for now. 
Remove this + # check once all in-tree drivers have a boot interface. + if task.driver.boot and iwdi: + task.driver.boot.clean_up_ramdisk(task) + + +class AgentDeploy(AgentDeployMixin, base.DeployInterface): """Interface for deploy-related actions.""" def get_properties(self): @@ -422,156 +574,12 @@ class AgentDeploy(base.DeployInterface): task, manage_boot=CONF.agent.manage_agent_boot) -class AgentVendorInterface(agent_base_vendor.BaseAgentVendor): +class AgentVendorInterface(agent_base_vendor.BaseAgentVendor, + AgentDeployMixin): + """Implementation of agent vendor interface. - def deploy_has_started(self, task): - commands = self._client.get_commands_status(task.node) - - for command in commands: - if command['command_name'] == 'prepare_image': - # deploy did start at some point - return True - return False - - def deploy_is_done(self, task): - commands = self._client.get_commands_status(task.node) - if not commands: - return False - - last_command = commands[-1] - - if last_command['command_name'] != 'prepare_image': - # catches race condition where prepare_image is still processing - # so deploy hasn't started yet - return False - - if last_command['command_status'] != 'RUNNING': - return True - - return False - - @task_manager.require_exclusive_lock - def continue_deploy(self, task, **kwargs): - task.process_event('resume') - node = task.node - image_source = node.instance_info.get('image_source') - LOG.debug('Continuing deploy for node %(node)s with image %(img)s', - {'node': node.uuid, 'img': image_source}) - - image_info = { - 'id': image_source.split('/')[-1], - 'urls': [node.instance_info['image_url']], - 'checksum': node.instance_info['image_checksum'], - # NOTE(comstud): Older versions of ironic do not set - # 'disk_format' nor 'container_format', so we use .get() - # to maintain backwards compatibility in case code was - # upgraded in the middle of a build request. 
- 'disk_format': node.instance_info.get('image_disk_format'), - 'container_format': node.instance_info.get( - 'image_container_format'), - 'stream_raw_images': CONF.agent.stream_raw_images, - } - - proxies = {} - for scheme in ('http', 'https'): - proxy_param = 'image_%s_proxy' % scheme - proxy = node.driver_info.get(proxy_param) - if proxy: - proxies[scheme] = proxy - if proxies: - image_info['proxies'] = proxies - no_proxy = node.driver_info.get('image_no_proxy') - if no_proxy is not None: - image_info['no_proxy'] = no_proxy - - iwdi = node.driver_internal_info.get('is_whole_disk_image') - if not iwdi: - for label in PARTITION_IMAGE_LABELS: - image_info[label] = node.instance_info.get(label) - boot_option = deploy_utils.get_boot_option(node) - boot_mode = deploy_utils.get_boot_mode_for_deploy(node) - if boot_mode: - image_info['deploy_boot_mode'] = boot_mode - else: - image_info['deploy_boot_mode'] = 'bios' - image_info['boot_option'] = boot_option - disk_label = deploy_utils.get_disk_label(node) - if disk_label is not None: - image_info['disk_label'] = disk_label - image_info['node_uuid'] = node.uuid - - # Tell the client to download and write the image with the given args - self._client.prepare_image(node, image_info) - - task.process_event('wait') - - def _get_uuid_from_result(self, task, type_uuid): - command = self._client.get_commands_status(task.node)[-1] - - if command['command_result'] is not None: - words = command['command_result']['result'].split() - for word in words: - if type_uuid in word: - result = word.split('=')[1] - if not result: - msg = (_('Command result did not return %(type_uuid)s ' - 'for node %(node)s. 
The version of the IPA ' - 'ramdisk used in the deployment might not ' - 'have support for provisioning of ' - 'partition images.') % - {'type_uuid': type_uuid, - 'node': task.node.uuid}) - LOG.error(msg) - deploy_utils.set_failed_state(task, msg) - return - return result - - def check_deploy_success(self, node): - # should only ever be called after we've validated that - # the prepare_image command is complete - command = self._client.get_commands_status(node)[-1] - if command['command_status'] == 'FAILED': - return command['command_error'] - - def reboot_to_instance(self, task, **kwargs): - task.process_event('resume') - node = task.node - iwdi = task.node.driver_internal_info.get('is_whole_disk_image') - error = self.check_deploy_success(node) - if error is not None: - # TODO(jimrollenhagen) power off if using neutron dhcp to - # align with pxe driver? - msg = (_('node %(node)s command status errored: %(error)s') % - {'node': node.uuid, 'error': error}) - LOG.error(msg) - deploy_utils.set_failed_state(task, msg) - return - if not iwdi: - root_uuid = self._get_uuid_from_result(task, 'root_uuid') - if deploy_utils.get_boot_mode_for_deploy(node) == 'uefi': - efi_sys_uuid = ( - self._get_uuid_from_result(task, - 'efi_system_partition_uuid')) - else: - efi_sys_uuid = None - task.node.driver_internal_info['root_uuid_or_disk_id'] = root_uuid - task.node.save() - self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid) - LOG.info(_LI('Image successfully written to node %s'), node.uuid) - LOG.debug('Rebooting node %s to instance', node.uuid) - if iwdi: - manager_utils.node_set_boot_device(task, 'disk', persistent=True) - - self.reboot_and_finish_deploy(task) - - # NOTE(TheJulia): If we deployed a whole disk image, we - # should expect a whole disk image and clean-up the tftp files - # on-disk incase the node is disregarding the boot preference. - # TODO(rameshg87): Not all in-tree drivers using reboot_to_instance - # have a boot interface. So include a check for now. 
Remove this - # check once all in-tree drivers have a boot interface. - if task.driver.boot and iwdi: - task.driver.boot.clean_up_ramdisk(task) + Contains old lookup and heartbeat endpoints currently pending deprecation. + """ class AgentRAID(base.RAIDInterface): diff --git a/ironic/drivers/modules/agent_base_vendor.py b/ironic/drivers/modules/agent_base_vendor.py index a5614d609..4f4dc79cf 100644 --- a/ironic/drivers/modules/agent_base_vendor.py +++ b/ironic/drivers/modules/agent_base_vendor.py @@ -142,87 +142,125 @@ def _get_post_clean_step_hook(node): pass -class BaseAgentVendor(base.VendorInterface): +def _cleaning_reboot(task): + """Reboots a node out of band after a clean step that requires it. + + If an agent clean step has 'reboot_requested': True, reboots the + node when the step is completed. Will put the node in CLEANFAIL + if the node cannot be rebooted. + + :param task: a TaskManager instance + """ + try: + manager_utils.node_power_action(task, states.REBOOT) + except Exception as e: + msg = (_('Reboot requested by clean step %(step)s failed for ' + 'node %(node)s: %(err)s') % + {'step': task.node.clean_step, + 'node': task.node.uuid, + 'err': e}) + LOG.error(msg) + # do not set cleaning_reboot if we didn't reboot + manager_utils.cleaning_error_handler(task, msg) + return + + # Signify that we've rebooted + driver_internal_info = task.node.driver_internal_info + driver_internal_info['cleaning_reboot'] = True + task.node.driver_internal_info = driver_internal_info + task.node.save() + + +def _notify_conductor_resume_clean(task): + LOG.debug('Sending RPC to conductor to resume cleaning for node %s', + task.node.uuid) + uuid = task.node.uuid + rpc = rpcapi.ConductorAPI() + topic = rpc.get_topic_for(task.node) + # Need to release the lock to let the conductor take it + task.release_resources() + rpc.continue_node_clean(task.context, uuid, topic=topic) + + +def _get_completed_cleaning_command(task, commands): + """Returns None or a completed cleaning 
command from the agent. + + :param task: a TaskManager instance to act on. + :param commands: a set of command results from the agent, typically + fetched with agent_client.get_commands_status(). + """ + if not commands: + return + + last_command = commands[-1] + + if last_command['command_name'] != 'execute_clean_step': + # catches race condition where execute_clean_step is still + # processing so the command hasn't started yet + LOG.debug('Expected agent last command to be "execute_clean_step" ' + 'for node %(node)s, instead got "%(command)s". Waiting ' + 'for next heartbeat.', + {'node': task.node.uuid, + 'command': last_command['command_name']}) + return + + last_result = last_command.get('command_result') or {} + last_step = last_result.get('clean_step') + if last_command['command_status'] == 'RUNNING': + LOG.debug('Clean step still running for node %(node)s: %(step)s', + {'step': last_step, 'node': task.node.uuid}) + return + elif (last_command['command_status'] == 'SUCCEEDED' and + last_step != task.node.clean_step): + # A previous clean_step was running, the new command has not yet + # started. + LOG.debug('Clean step not yet started for node %(node)s: %(step)s', + {'step': last_step, 'node': task.node.uuid}) + return + else: + return last_command + + +def log_and_raise_deployment_error(task, msg): + """Helper method to log the error and raise exception.""" + LOG.error(msg) + deploy_utils.set_failed_state(task, msg) + raise exception.InstanceDeployFailure(msg) + + +class AgentDeployMixin(object): + """Mixin with deploy methods.""" def __init__(self): - self.supported_payload_versions = ['2'] self._client = _get_client() - def continue_deploy(self, task, **kwargs): + def continue_deploy(self, task): """Continues the deployment of baremetal node. This method continues the deployment of the baremetal node after the ramdisk have been booted. 
:param task: a TaskManager instance - """ - pass def deploy_has_started(self, task): """Check if the deployment has started already. :returns: True if the deploy has started, False otherwise. """ - pass def deploy_is_done(self, task): """Check if the deployment is already completed. :returns: True if the deployment is completed. False otherwise - """ - pass - def reboot_to_instance(self, task, **kwargs): + def reboot_to_instance(self, task): """Method invoked after the deployment is completed. :param task: a TaskManager instance """ - pass - - def get_properties(self): - """Return the properties of the interface. - - :returns: dictionary of : entries. - """ - return VENDOR_PROPERTIES - - def validate(self, task, method, **kwargs): - """Validate the driver-specific Node deployment info. - - No validation necessary. - - :param task: a TaskManager instance - :param method: method to be validated - """ - pass - - def driver_validate(self, method, **kwargs): - """Validate the driver deployment info. - - :param method: method to be validated. - """ - version = kwargs.get('version') - - if not version: - raise exception.MissingParameterValue(_('Missing parameter ' - 'version')) - if version not in self.supported_payload_versions: - raise exception.InvalidParameterValue(_('Unknown lookup ' - 'payload version: %s') - % version) - - def notify_conductor_resume_clean(self, task): - LOG.debug('Sending RPC to conductor to resume cleaning for node %s', - task.node.uuid) - uuid = task.node.uuid - rpc = rpcapi.ConductorAPI() - topic = rpc.get_topic_for(task.node) - # Need to release the lock to let the conductor take it - task.release_resources() - rpc.continue_node_clean(task.context, uuid, topic=topic) def _refresh_clean_steps(self, task): """Refresh the node's cached clean steps from the booted agent. 
@@ -308,13 +346,13 @@ class BaseAgentVendor(base.VendorInterface): info = task.node.driver_internal_info info.pop('cleaning_reboot', None) task.node.driver_internal_info = info - self.notify_conductor_resume_clean(task) + _notify_conductor_resume_clean(task) return else: # Agent has no commands whatsoever return - command = self._get_completed_cleaning_command(task, agent_commands) + command = _get_completed_cleaning_command(task, agent_commands) LOG.debug('Cleaning command status for node %(node)s on step %(step)s:' ' %(command)s', {'node': node.uuid, 'step': node.clean_step, @@ -374,7 +412,7 @@ class BaseAgentVendor(base.VendorInterface): LOG.exception(msg) return manager_utils.cleaning_error_handler(task, msg) - self.notify_conductor_resume_clean(task) + _notify_conductor_resume_clean(task) elif command.get('command_status') == 'SUCCEEDED': clean_step_hook = _get_post_clean_step_hook(node) @@ -398,12 +436,12 @@ class BaseAgentVendor(base.VendorInterface): return manager_utils.cleaning_error_handler(task, msg) if task.node.clean_step.get('reboot_requested'): - self._cleaning_reboot(task) + _cleaning_reboot(task) return LOG.info(_LI('Agent on node %s returned cleaning command success, ' 'moving to next clean step'), node.uuid) - self.notify_conductor_resume_clean(task) + _notify_conductor_resume_clean(task) else: msg = (_('Agent returned unknown status for clean step %(step)s ' 'on node %(node)s : %(err)s.') % @@ -413,48 +451,16 @@ class BaseAgentVendor(base.VendorInterface): LOG.error(msg) return manager_utils.cleaning_error_handler(task, msg) - def _cleaning_reboot(self, task): - """Reboots a node out of band after a clean step that requires it. + def heartbeat(self, task, callback_url): + """Process a heartbeat. - If an agent clean step has 'reboot_requested': True, reboots the - node when the step is completed. Will put the node in CLEANFAIL - if the node cannot be rebooted. - - :param task: a TaskManager instance + :param task: task to work with. 
+ :param callback_url: agent HTTP API URL. """ - try: - manager_utils.node_power_action(task, states.REBOOT) - except Exception as e: - msg = (_('Reboot requested by clean step %(step)s failed for ' - 'node %(node)s: %(err)s') % - {'step': task.node.clean_step, - 'node': task.node.uuid, - 'err': e}) - LOG.error(msg) - # do not set cleaning_reboot if we didn't reboot - manager_utils.cleaning_error_handler(task, msg) - return + # TODO(dtantsur): upgrade lock only if we actually take action other + # than updating the last timestamp. + task.upgrade_lock() - # Signify that we've rebooted - driver_internal_info = task.node.driver_internal_info - driver_internal_info['cleaning_reboot'] = True - task.node.driver_internal_info = driver_internal_info - task.node.save() - - @base.passthru(['POST']) - @task_manager.require_exclusive_lock - def heartbeat(self, task, **kwargs): - """Method for agent to periodically check in. - - The agent should be sending its agent_url (so Ironic can talk back) - as a kwarg. kwargs should have the following format:: - - { - 'agent_url': 'http://AGENT_HOST:AGENT_PORT' - } - - AGENT_PORT defaults to 9999. 
- """ node = task.node driver_internal_info = node.driver_internal_info LOG.debug( @@ -463,7 +469,7 @@ class BaseAgentVendor(base.VendorInterface): 'heartbeat': driver_internal_info.get('agent_last_heartbeat')}) driver_internal_info['agent_last_heartbeat'] = int(time.time()) try: - driver_internal_info['agent_url'] = kwargs['agent_url'] + driver_internal_info['agent_url'] = callback_url except KeyError: raise exception.MissingParameterValue(_('For heartbeat operation, ' '"agent_url" must be ' @@ -484,11 +490,11 @@ class BaseAgentVendor(base.VendorInterface): elif (node.provision_state == states.DEPLOYWAIT and not self.deploy_has_started(task)): msg = _('Node failed to get image for deploy.') - self.continue_deploy(task, **kwargs) + self.continue_deploy(task) elif (node.provision_state == states.DEPLOYWAIT and self.deploy_is_done(task)): msg = _('Node failed to move to active state.') - self.reboot_to_instance(task, **kwargs) + self.reboot_to_instance(task) elif (node.provision_state == states.DEPLOYWAIT and self.deploy_has_started(task)): node.touch_provisioning() @@ -504,10 +510,10 @@ class BaseAgentVendor(base.VendorInterface): self._refresh_clean_steps(task) # Then set/verify node clean steps and start cleaning manager_utils.set_node_cleaning_steps(task) - self.notify_conductor_resume_clean(task) + _notify_conductor_resume_clean(task) else: msg = _('Node failed to check cleaning progress.') - self.continue_cleaning(task, **kwargs) + self.continue_cleaning(task) except exception.NoFreeConductorWorker: # waiting for the next heartbeat, node.last_error and # logging message is filled already via conductor's hook @@ -523,6 +529,210 @@ class BaseAgentVendor(base.VendorInterface): elif node.provision_state in (states.DEPLOYING, states.DEPLOYWAIT): deploy_utils.set_failed_state(task, last_error) + def reboot_and_finish_deploy(self, task): + """Helper method to trigger reboot on the node and finish deploy. + + This method initiates a reboot on the node. 
On success, it + marks the deploy as complete. On failure, it logs the error + and marks deploy as failure. + + :param task: a TaskManager object containing the node + :raises: InstanceDeployFailure, if node reboot failed. + """ + wait = CONF.agent.post_deploy_get_power_state_retry_interval * 1000 + attempts = CONF.agent.post_deploy_get_power_state_retries + 1 + + @retrying.retry( + stop_max_attempt_number=attempts, + retry_on_result=lambda state: state != states.POWER_OFF, + wait_fixed=wait + ) + def _wait_until_powered_off(task): + return task.driver.power.get_power_state(task) + + node = task.node + # Whether ironic should power off the node via out-of-band or + # in-band methods + oob_power_off = strutils.bool_from_string( + node.driver_info.get('deploy_forces_oob_reboot', False)) + + try: + if not oob_power_off: + try: + self._client.power_off(node) + _wait_until_powered_off(task) + except Exception as e: + LOG.warning( + _LW('Failed to soft power off node %(node_uuid)s ' + 'in at least %(timeout)d seconds. ' + 'Error: %(error)s'), + {'node_uuid': node.uuid, + 'timeout': (wait * (attempts - 1)) / 1000, + 'error': e}) + manager_utils.node_power_action(task, states.POWER_OFF) + else: + # Flush the file system prior to hard rebooting the node + result = self._client.sync(node) + error = result.get('faultstring') + if error: + if 'Unknown command' in error: + error = _('The version of the IPA ramdisk used in ' + 'the deployment do not support the ' + 'command "sync"') + LOG.warning(_LW( + 'Failed to flush the file system prior to hard ' + 'rebooting the node %(node)s. Error: %(error)s'), + {'node': node.uuid, 'error': error}) + + manager_utils.node_power_action(task, states.POWER_OFF) + + task.driver.network.remove_provisioning_network(task) + task.driver.network.configure_tenant_networks(task) + + manager_utils.node_power_action(task, states.POWER_ON) + except Exception as e: + msg = (_('Error rebooting node %(node)s after deploy. 
' + 'Error: %(error)s') % + {'node': node.uuid, 'error': e}) + log_and_raise_deployment_error(task, msg) + + task.process_event('done') + LOG.info(_LI('Deployment to node %s done'), task.node.uuid) + + def prepare_instance_to_boot(self, task, root_uuid, efi_sys_uuid): + """Prepares instance to boot. + + :param task: a TaskManager object containing the node + :param root_uuid: the UUID for root partition + :param efi_sys_uuid: the UUID for the efi partition + :raises: InvalidState if fails to prepare instance + """ + + node = task.node + if deploy_utils.get_boot_option(node) == "local": + # Install the boot loader + self.configure_local_boot( + task, root_uuid=root_uuid, + efi_system_part_uuid=efi_sys_uuid) + try: + task.driver.boot.prepare_instance(task) + except Exception as e: + LOG.error(_LE('Deploy failed for instance %(instance)s. ' + 'Error: %(error)s'), + {'instance': node.instance_uuid, 'error': e}) + msg = _('Failed to continue agent deployment.') + log_and_raise_deployment_error(task, msg) + + def configure_local_boot(self, task, root_uuid=None, + efi_system_part_uuid=None): + """Helper method to configure local boot on the node. + + This method triggers bootloader installation on the node. + On successful installation of bootloader, this method sets the + node to boot from disk. + + :param task: a TaskManager object containing the node + :param root_uuid: The UUID of the root partition. This is used + for identifying the partition which contains the image deployed + or None in case of whole disk images which we expect to already + have a bootloader installed. + :param efi_system_part_uuid: The UUID of the efi system partition. + This is used only in uefi boot mode. + :raises: InstanceDeployFailure if bootloader installation failed or + on encountering error while setting the boot device on the node. 
+ """ + node = task.node + LOG.debug('Configuring local boot for node %s', node.uuid) + if not node.driver_internal_info.get( + 'is_whole_disk_image') and root_uuid: + LOG.debug('Installing the bootloader for node %(node)s on ' + 'partition %(part)s, EFI system partition %(efi)s', + {'node': node.uuid, 'part': root_uuid, + 'efi': efi_system_part_uuid}) + result = self._client.install_bootloader( + node, root_uuid=root_uuid, + efi_system_part_uuid=efi_system_part_uuid) + if result['command_status'] == 'FAILED': + msg = (_("Failed to install a bootloader when " + "deploying node %(node)s. Error: %(error)s") % + {'node': node.uuid, + 'error': result['command_error']}) + log_and_raise_deployment_error(task, msg) + + try: + deploy_utils.try_set_boot_device(task, boot_devices.DISK) + except Exception as e: + msg = (_("Failed to change the boot device to %(boot_dev)s " + "when deploying node %(node)s. Error: %(error)s") % + {'boot_dev': boot_devices.DISK, 'node': node.uuid, + 'error': e}) + log_and_raise_deployment_error(task, msg) + + LOG.info(_LI('Local boot successfully configured for node %s'), + node.uuid) + + +class BaseAgentVendor(AgentDeployMixin, base.VendorInterface): + + def __init__(self): + self.supported_payload_versions = ['2'] + super(BaseAgentVendor, self).__init__() + + def get_properties(self): + """Return the properties of the interface. + + :returns: dictionary of : entries. + """ + return VENDOR_PROPERTIES + + def validate(self, task, method, **kwargs): + """Validate the driver-specific Node deployment info. + + No validation necessary. + + :param task: a TaskManager instance + :param method: method to be validated + """ + pass + + def driver_validate(self, method, **kwargs): + """Validate the driver deployment info. + + :param method: method to be validated. 
+ """ + version = kwargs.get('version') + + if not version: + raise exception.MissingParameterValue(_('Missing parameter ' + 'version')) + if version not in self.supported_payload_versions: + raise exception.InvalidParameterValue(_('Unknown lookup ' + 'payload version: %s') + % version) + + @base.passthru(['POST']) + @task_manager.require_exclusive_lock + def heartbeat(self, task, **kwargs): + """Method for agent to periodically check in. + + The agent should be sending its agent_url (so Ironic can talk back) + as a kwarg. kwargs should have the following format:: + + { + 'agent_url': 'http://AGENT_HOST:AGENT_PORT' + } + + AGENT_PORT defaults to 9999. + """ + try: + callback_url = kwargs['agent_url'] + except KeyError: + raise exception.MissingParameterValue(_('For heartbeat operation, ' + '"agent_url" must be ' + 'specified.')) + + super(BaseAgentVendor, self).heartbeat(task, callback_url) + @base.driver_passthru(['POST'], async=False) def lookup(self, context, **kwargs): """Find a matching node for the agent. @@ -593,43 +803,6 @@ class BaseAgentVendor(base.VendorInterface): 'node': ndict, } - def _get_completed_cleaning_command(self, task, commands): - """Returns None or a completed cleaning command from the agent. - - :param commands: a set of command results from the agent, typically - fetched with agent_client.get_commands_status() - """ - if not commands: - return - - last_command = commands[-1] - - if last_command['command_name'] != 'execute_clean_step': - # catches race condition where execute_clean_step is still - # processing so the command hasn't started yet - LOG.debug('Expected agent last command to be "execute_clean_step" ' - 'for node %(node)s, instead got "%(command)s". 
Waiting ' - 'for next heartbeat.', - {'node': task.node.uuid, - 'command': last_command['command_name']}) - return - - last_result = last_command.get('command_result') or {} - last_step = last_result.get('clean_step') - if last_command['command_status'] == 'RUNNING': - LOG.debug('Clean step still running for node %(node)s: %(step)s', - {'step': last_step, 'node': task.node.uuid}) - return - elif (last_command['command_status'] == 'SUCCEEDED' and - last_step != task.node.clean_step): - # A previous clean_step was running, the new command has not yet - # started. - LOG.debug('Clean step not yet started for node %(node)s: %(step)s', - {'step': last_step, 'node': task.node.uuid}) - return - else: - return last_command - def _get_interfaces(self, inventory): interfaces = [] try: @@ -721,150 +894,3 @@ class BaseAgentVendor(base.VendorInterface): # Only have one node_id left, return it. return node_ids.pop() - - def _log_and_raise_deployment_error(self, task, msg): - """Helper method to log the error and raise exception.""" - LOG.error(msg) - deploy_utils.set_failed_state(task, msg) - raise exception.InstanceDeployFailure(msg) - - def reboot_and_finish_deploy(self, task): - """Helper method to trigger reboot on the node and finish deploy. - - This method initiates a reboot on the node. On success, it - marks the deploy as complete. On failure, it logs the error - and marks deploy as failure. - - :param task: a TaskManager object containing the node - :raises: InstanceDeployFailure, if node reboot failed. 
- """ - wait = CONF.agent.post_deploy_get_power_state_retry_interval * 1000 - attempts = CONF.agent.post_deploy_get_power_state_retries + 1 - - @retrying.retry( - stop_max_attempt_number=attempts, - retry_on_result=lambda state: state != states.POWER_OFF, - wait_fixed=wait - ) - def _wait_until_powered_off(task): - return task.driver.power.get_power_state(task) - - node = task.node - # Whether ironic should power off the node via out-of-band or - # in-band methods - oob_power_off = strutils.bool_from_string( - node.driver_info.get('deploy_forces_oob_reboot', False)) - - try: - if not oob_power_off: - try: - self._client.power_off(node) - _wait_until_powered_off(task) - except Exception as e: - LOG.warning( - _LW('Failed to soft power off node %(node_uuid)s ' - 'in at least %(timeout)d seconds. ' - 'Error: %(error)s'), - {'node_uuid': node.uuid, - 'timeout': (wait * (attempts - 1)) / 1000, - 'error': e}) - manager_utils.node_power_action(task, states.POWER_OFF) - else: - # Flush the file system prior to hard rebooting the node - result = self._client.sync(node) - error = result.get('faultstring') - if error: - if 'Unknown command' in error: - error = _('The version of the IPA ramdisk used in ' - 'the deployment do not support the ' - 'command "sync"') - LOG.warning(_LW( - 'Failed to flush the file system prior to hard ' - 'rebooting the node %(node)s. Error: %(error)s'), - {'node': node.uuid, 'error': error}) - manager_utils.node_power_action(task, states.POWER_OFF) - - task.driver.network.remove_provisioning_network(task) - task.driver.network.configure_tenant_networks(task) - - manager_utils.node_power_action(task, states.POWER_ON) - except Exception as e: - msg = (_('Error rebooting node %(node)s after deploy. 
' - 'Error: %(error)s') % - {'node': node.uuid, 'error': e}) - self._log_and_raise_deployment_error(task, msg) - - task.process_event('done') - LOG.info(_LI('Deployment to node %s done'), task.node.uuid) - - def prepare_instance_to_boot(self, task, root_uuid, efi_sys_uuid): - """Prepares instance to boot. - - :param task: a TaskManager object containing the node - :param root_uuid: the UUID for root partition - :param efi_sys_uuid: the UUID for the efi partition - :raises: InvalidState if fails to prepare instance - """ - - node = task.node - if deploy_utils.get_boot_option(node) == "local": - # Install the boot loader - self.configure_local_boot( - task, root_uuid=root_uuid, - efi_system_part_uuid=efi_sys_uuid) - try: - task.driver.boot.prepare_instance(task) - except Exception as e: - LOG.error(_LE('Deploy failed for instance %(instance)s. ' - 'Error: %(error)s'), - {'instance': node.instance_uuid, 'error': e}) - msg = _('Failed to continue agent deployment.') - self._log_and_raise_deployment_error(task, msg) - - def configure_local_boot(self, task, root_uuid=None, - efi_system_part_uuid=None): - """Helper method to configure local boot on the node. - - This method triggers bootloader installation on the node. - On successful installation of bootloader, this method sets the - node to boot from disk. - - :param task: a TaskManager object containing the node - :param root_uuid: The UUID of the root partition. This is used - for identifying the partition which contains the image deployed - or None in case of whole disk images which we expect to already - have a bootloader installed. - :param efi_system_part_uuid: The UUID of the efi system partition. - This is used only in uefi boot mode. - :raises: InstanceDeployFailure if bootloader installation failed or - on encountering error while setting the boot device on the node. 
- """ - node = task.node - LOG.debug('Configuring local boot for node %s', node.uuid) - if not node.driver_internal_info.get( - 'is_whole_disk_image') and root_uuid: - LOG.debug('Installing the bootloader for node %(node)s on ' - 'partition %(part)s, EFI system partition %(efi)s', - {'node': node.uuid, 'part': root_uuid, - 'efi': efi_system_part_uuid}) - result = self._client.install_bootloader( - node, root_uuid=root_uuid, - efi_system_part_uuid=efi_system_part_uuid) - if result['command_status'] == 'FAILED': - msg = (_("Failed to install a bootloader when " - "deploying node %(node)s. Error: %(error)s") % - {'node': node.uuid, - 'error': result['command_error']}) - self._log_and_raise_deployment_error(task, msg) - - try: - deploy_utils.try_set_boot_device(task, boot_devices.DISK) - except Exception as e: - msg = (_("Failed to change the boot device to %(boot_dev)s " - "when deploying node %(node)s. Error: %(error)s") % - {'boot_dev': boot_devices.DISK, 'node': node.uuid, - 'error': e}) - self._log_and_raise_deployment_error(task, msg) - - LOG.info(_LI('Local boot successfully configured for node %s'), - node.uuid) diff --git a/ironic/drivers/modules/amt/vendor.py b/ironic/drivers/modules/amt/vendor.py index 08124268c..365f5e363 100644 --- a/ironic/drivers/modules/amt/vendor.py +++ b/ironic/drivers/modules/amt/vendor.py @@ -23,8 +23,8 @@ from ironic.drivers.modules import iscsi_deploy class AMTPXEVendorPassthru(iscsi_deploy.VendorPassthru): @task_manager.require_exclusive_lock - def continue_deploy(self, task, **kwargs): + def continue_deploy(self, task): if deploy_utils.get_boot_option(task.node) == "netboot": task.driver.management.ensure_next_boot_device(task.node, boot_devices.PXE) - super(AMTPXEVendorPassthru, self).continue_deploy(task, **kwargs) + super(AMTPXEVendorPassthru, self).continue_deploy(task) diff --git a/ironic/drivers/modules/ilo/vendor.py b/ironic/drivers/modules/ilo/vendor.py index 525f73264..55e7290ad 100644 --- 
a/ironic/drivers/modules/ilo/vendor.py +++ b/ironic/drivers/modules/ilo/vendor.py @@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__) class IloVirtualMediaAgentVendorInterface(agent.AgentVendorInterface): """Interface for vendor passthru related actions.""" - def reboot_to_instance(self, task, **kwargs): + def reboot_to_instance(self, task): node = task.node LOG.debug('Preparing to reboot to instance for node %s', node.uuid) @@ -48,7 +48,7 @@ class IloVirtualMediaAgentVendorInterface(agent.AgentVendorInterface): ilo_common.update_secure_boot_mode(task, True) super(IloVirtualMediaAgentVendorInterface, - self).reboot_to_instance(task, **kwargs) + self).reboot_to_instance(task) class VendorPassthru(iscsi_deploy.VendorPassthru): diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py index 957368eb3..9f215dc19 100644 --- a/ironic/drivers/modules/iscsi_deploy.py +++ b/ironic/drivers/modules/iscsi_deploy.py @@ -403,7 +403,34 @@ def validate(task): deploy_utils.parse_instance_info(task.node) -class ISCSIDeploy(base.DeployInterface): +class AgentDeployMixin(agent_base_vendor.AgentDeployMixin): + + @task_manager.require_exclusive_lock + def continue_deploy(self, task): + """Method invoked when deployed using iSCSI. + + This method is invoked during a heartbeat from an agent when + the node is in wait-call-back state. This deploys the image on + the node and then configures the node to boot according to the + desired boot option (netboot or localboot). + + :param task: a TaskManager object containing the node. + :param kwargs: the kwargs passed from the heartbeat method. + :raises: InstanceDeployFailure, if it encounters some error during + the deploy. 
+ """ + task.process_event('resume') + node = task.node + LOG.debug('Continuing the deployment on node %s', node.uuid) + + uuid_dict_returned = do_agent_iscsi_deploy(task, self._client) + root_uuid = uuid_dict_returned.get('root uuid') + efi_sys_uuid = uuid_dict_returned.get('efi system partition uuid') + self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid) + self.reboot_and_finish_deploy(task) + + +class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): """iSCSI Deploy Interface for deploy-related actions.""" def get_properties(self): @@ -562,29 +589,5 @@ class ISCSIDeploy(base.DeployInterface): task, manage_boot=True) -class VendorPassthru(agent_base_vendor.BaseAgentVendor): +class VendorPassthru(AgentDeployMixin, agent_base_vendor.BaseAgentVendor): """Interface to mix IPMI and PXE vendor-specific interfaces.""" - - @task_manager.require_exclusive_lock - def continue_deploy(self, task, **kwargs): - """Method invoked when deployed using iSCSI. - - This method is invoked during a heartbeat from an agent when - the node is in wait-call-back state. This deploys the image on - the node and then configures the node to boot according to the - desired boot option (netboot or localboot). - - :param task: a TaskManager object containing the node. - :param kwargs: the kwargs passed from the heartbeat method. - :raises: InstanceDeployFailure, if it encounters some error during - the deploy. 
- """ - task.process_event('resume') - node = task.node - LOG.debug('Continuing the deployment on node %s', node.uuid) - - uuid_dict_returned = do_agent_iscsi_deploy(task, self._client) - root_uuid = uuid_dict_returned.get('root uuid') - efi_sys_uuid = uuid_dict_returned.get('efi system partition uuid') - self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid) - self.reboot_and_finish_deploy(task) diff --git a/ironic/drivers/modules/oneview/vendor.py b/ironic/drivers/modules/oneview/vendor.py index 98b0be218..18001a6c6 100644 --- a/ironic/drivers/modules/oneview/vendor.py +++ b/ironic/drivers/modules/oneview/vendor.py @@ -23,6 +23,7 @@ from ironic.common.i18n import _LW from ironic.common import states from ironic.conductor import utils as manager_utils from ironic.drivers.modules import agent +from ironic.drivers.modules import agent_base_vendor from ironic.drivers.modules import deploy_utils LOG = log.getLogger(__name__) @@ -107,7 +108,7 @@ class AgentVendorInterface(agent.AgentVendorInterface): msg = (_('Error rebooting node %(node)s after deploy. 
' 'Error: %(error)s') % {'node': node.uuid, 'error': e}) - self._log_and_raise_deployment_error(task, msg) + agent_base_vendor.log_and_raise_deployment_error(task, msg) task.process_event('done') LOG.info(_LI('Deployment to node %s done'), task.node.uuid) diff --git a/ironic/tests/unit/drivers/modules/amt/test_vendor.py b/ironic/tests/unit/drivers/modules/amt/test_vendor.py index d6f74ecbb..f8dbb1188 100644 --- a/ironic/tests/unit/drivers/modules/amt/test_vendor.py +++ b/ironic/tests/unit/drivers/modules/amt/test_vendor.py @@ -59,7 +59,6 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase): def test_vendorpassthru_continue_deploy_netboot(self, mock_pxe_vendorpassthru, mock_ensure): - kwargs = {'address': '123456'} with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: task.node.provision_state = states.DEPLOYWAIT @@ -67,11 +66,11 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase): task.node.instance_info['capabilities'] = { "boot_option": "netboot" } - task.driver.vendor.continue_deploy(task, **kwargs) + task.driver.vendor.continue_deploy(task) mock_ensure.assert_called_with( task.driver.management, task.node, boot_devices.PXE) mock_pxe_vendorpassthru.assert_called_once_with( - task.driver.vendor, task, **kwargs) + task.driver.vendor, task) @mock.patch.object(amt_mgmt.AMTManagement, 'ensure_next_boot_device', spec_set=True, autospec=True) @@ -80,13 +79,12 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase): def test_vendorpassthru_continue_deploy_localboot(self, mock_pxe_vendorpassthru, mock_ensure): - kwargs = {'address': '123456'} with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: task.node.provision_state = states.DEPLOYWAIT task.node.target_provision_state = states.ACTIVE task.node.instance_info['capabilities'] = {"boot_option": "local"} - task.driver.vendor.continue_deploy(task, **kwargs) + task.driver.vendor.continue_deploy(task) self.assertFalse(mock_ensure.called) 
mock_pxe_vendorpassthru.assert_called_once_with( - task.driver.vendor, task, **kwargs) + task.driver.vendor, task) diff --git a/ironic/tests/unit/drivers/modules/ilo/test_vendor.py b/ironic/tests/unit/drivers/modules/ilo/test_vendor.py index 7b0732a3f..87e72cfb5 100644 --- a/ironic/tests/unit/drivers/modules/ilo/test_vendor.py +++ b/ironic/tests/unit/drivers/modules/ilo/test_vendor.py @@ -117,16 +117,15 @@ class VendorPassthruTestCase(db_base.DbTestCase): func_update_boot_mode, func_update_secure_boot_mode, pxe_vendorpassthru_mock): - kwargs = {'address': '123456'} with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: task.node.provision_state = states.DEPLOYWAIT task.node.target_provision_state = states.ACTIVE - task.driver.vendor.continue_deploy(task, **kwargs) + task.driver.vendor.continue_deploy(task) func_update_boot_mode.assert_called_once_with(task) func_update_secure_boot_mode.assert_called_once_with(task, True) pxe_vendorpassthru_mock.assert_called_once_with( - mock.ANY, task, **kwargs) + mock.ANY, task) class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase): @@ -149,17 +148,16 @@ class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase): func_update_boot_mode, check_deploy_success_mock, agent_reboot_to_instance_mock): - kwargs = {'address': '123456'} check_deploy_success_mock.return_value = None with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: - task.driver.vendor.reboot_to_instance(task, **kwargs) + task.driver.vendor.reboot_to_instance(task) check_deploy_success_mock.assert_called_once_with( mock.ANY, task.node) func_update_boot_mode.assert_called_once_with(task) func_update_secure_boot_mode.assert_called_once_with(task, True) agent_reboot_to_instance_mock.assert_called_once_with( - mock.ANY, task, **kwargs) + mock.ANY, task) @mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance', spec_set=True, autospec=True) @@ -173,14 +171,13 @@ class 
IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase): func_update_boot_mode, check_deploy_success_mock, agent_reboot_to_instance_mock): - kwargs = {'address': '123456'} check_deploy_success_mock.return_value = "Error" with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: - task.driver.vendor.reboot_to_instance(task, **kwargs) + task.driver.vendor.reboot_to_instance(task) check_deploy_success_mock.assert_called_once_with( mock.ANY, task.node) self.assertFalse(func_update_boot_mode.called) self.assertFalse(func_update_secure_boot_mode.called) agent_reboot_to_instance_mock.assert_called_once_with( - mock.ANY, task, **kwargs) + mock.ANY, task) diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py index e2b647b89..9595f9830 100644 --- a/ironic/tests/unit/drivers/modules/test_agent.py +++ b/ironic/tests/unit/drivers/modules/test_agent.py @@ -608,6 +608,12 @@ class TestAgentDeploy(db_base.DbTestCase): tear_down_cleaning_mock.assert_called_once_with( task, manage_boot=False) + def test_heartbeat(self): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + self.driver.heartbeat(task, 'url') + self.assertFalse(task.shared) + class TestAgentVendor(db_base.DbTestCase): diff --git a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py index 08f91d447..389b9f0bc 100644 --- a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py +++ b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py @@ -312,243 +312,6 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.assertRaises(exception.MissingParameterValue, self.passthru.heartbeat, task, **kwargs) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, 'deploy_has_started', - autospec=True) - @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, 'deploy_is_done', 
- autospec=True) - @mock.patch.object(agent_base_vendor.LOG, 'exception', autospec=True) - def test_heartbeat_deploy_done_fails(self, log_mock, done_mock, - failed_mock, deploy_started_mock): - deploy_started_mock.return_value = True - kwargs = { - 'agent_url': 'http://127.0.0.1:9999/bar' - } - done_mock.side_effect = Exception('LlamaException') - with task_manager.acquire( - self.context, self.node['uuid'], shared=False) as task: - task.node.provision_state = states.DEPLOYWAIT - task.node.target_provision_state = states.ACTIVE - self.passthru.heartbeat(task, **kwargs) - failed_mock.assert_called_once_with(task, mock.ANY) - log_mock.assert_called_once_with( - 'Asynchronous exception for node ' - '1be26c0b-03f2-4d2e-ae87-c02d7f33c123: Failed checking if deploy ' - 'is done. Exception: LlamaException') - - @mock.patch.object(agent_base_vendor.BaseAgentVendor, 'deploy_has_started', - autospec=True) - @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, 'deploy_is_done', - autospec=True) - @mock.patch.object(agent_base_vendor.LOG, 'exception', autospec=True) - def test_heartbeat_deploy_done_raises_with_event(self, log_mock, done_mock, - failed_mock, - deploy_started_mock): - deploy_started_mock.return_value = True - kwargs = { - 'agent_url': 'http://127.0.0.1:9999/bar' - } - with task_manager.acquire( - self.context, self.node['uuid'], shared=False) as task: - - def driver_failure(*args, **kwargs): - # simulate driver failure that both advances the FSM - # and raises an exception - task.node.provision_state = states.DEPLOYFAIL - raise Exception('LlamaException') - - task.node.provision_state = states.DEPLOYWAIT - task.node.target_provision_state = states.ACTIVE - done_mock.side_effect = driver_failure - self.passthru.heartbeat(task, **kwargs) - # task.node.provision_state being set to DEPLOYFAIL - # within the driver_failue, hearbeat should not call - # deploy_utils.set_failed_state anymore - 
self.assertFalse(failed_mock.called) - log_mock.assert_called_once_with( - 'Asynchronous exception for node ' - '1be26c0b-03f2-4d2e-ae87-c02d7f33c123: Failed checking if deploy ' - 'is done. Exception: LlamaException') - - @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - '_refresh_clean_steps', autospec=True) - @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) - def test_heartbeat_resume_clean(self, mock_notify, mock_set_steps, - mock_refresh, mock_touch): - kwargs = { - 'agent_url': 'http://127.0.0.1:9999/bar' - } - self.node.clean_step = {} - self.node.provision_state = states.CLEANWAIT - self.node.save() - with task_manager.acquire( - self.context, self.node.uuid, shared=False) as task: - self.passthru.heartbeat(task, **kwargs) - - mock_touch.assert_called_once_with(mock.ANY) - mock_refresh.assert_called_once_with(mock.ANY, task) - mock_notify.assert_called_once_with(mock.ANY, task) - mock_set_steps.assert_called_once_with(task) - - @mock.patch.object(manager_utils, 'cleaning_error_handler') - @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - '_refresh_clean_steps', autospec=True) - @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) - def test_heartbeat_resume_clean_fails(self, mock_notify, mock_set_steps, - mock_refresh, mock_touch, - mock_handler): - mocks = [mock_refresh, mock_set_steps, mock_notify] - kwargs = { - 'agent_url': 'http://127.0.0.1:9999/bar' - } - self.node.clean_step = {} - self.node.provision_state = states.CLEANWAIT - self.node.save() - for i in range(len(mocks)): - before_failed_mocks = mocks[:i] - failed_mock = 
mocks[i] - after_failed_mocks = mocks[i + 1:] - failed_mock.side_effect = Exception() - with task_manager.acquire( - self.context, self.node.uuid, shared=False) as task: - self.passthru.heartbeat(task, **kwargs) - - mock_touch.assert_called_once_with(mock.ANY) - mock_handler.assert_called_once_with(task, mock.ANY) - for called in before_failed_mocks + [failed_mock]: - self.assertTrue(called.called) - for not_called in after_failed_mocks: - self.assertFalse(not_called.called) - - # Reset mocks for the next interaction - for m in mocks + [mock_touch, mock_handler]: - m.reset_mock() - failed_mock.side_effect = None - - @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'continue_cleaning', autospec=True) - def test_heartbeat_continue_cleaning(self, mock_continue, mock_touch): - kwargs = { - 'agent_url': 'http://127.0.0.1:9999/bar' - } - self.node.clean_step = { - 'priority': 10, - 'interface': 'deploy', - 'step': 'foo', - 'reboot_requested': False - } - self.node.provision_state = states.CLEANWAIT - self.node.save() - with task_manager.acquire( - self.context, self.node.uuid, shared=False) as task: - self.passthru.heartbeat(task, **kwargs) - - mock_touch.assert_called_once_with(mock.ANY) - mock_continue.assert_called_once_with(mock.ANY, task, **kwargs) - - @mock.patch.object(manager_utils, 'cleaning_error_handler') - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'continue_cleaning', autospec=True) - def test_heartbeat_continue_cleaning_fails(self, mock_continue, - mock_handler): - kwargs = { - 'agent_url': 'http://127.0.0.1:9999/bar' - } - self.node.clean_step = { - 'priority': 10, - 'interface': 'deploy', - 'step': 'foo', - 'reboot_requested': False - } - - mock_continue.side_effect = Exception() - - self.node.provision_state = states.CLEANWAIT - self.node.save() - with task_manager.acquire( - self.context, self.node.uuid, shared=False) as task: - self.passthru.heartbeat(task, 
**kwargs) - - mock_continue.assert_called_once_with(mock.ANY, task, **kwargs) - mock_handler.assert_called_once_with(task, mock.ANY) - - @mock.patch.object(manager_utils, 'cleaning_error_handler') - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'continue_cleaning', autospec=True) - def test_heartbeat_continue_cleaning_no_worker(self, mock_continue, - mock_handler): - kwargs = { - 'agent_url': 'http://127.0.0.1:9999/bar' - } - self.node.clean_step = { - 'priority': 10, - 'interface': 'deploy', - 'step': 'foo', - 'reboot_requested': False - } - - mock_continue.side_effect = exception.NoFreeConductorWorker() - - self.node.provision_state = states.CLEANWAIT - self.node.save() - with task_manager.acquire( - self.context, self.node.uuid, shared=False) as task: - self.passthru.heartbeat(task, **kwargs) - - mock_continue.assert_called_once_with(mock.ANY, task, **kwargs) - self.assertFalse(mock_handler.called) - - @mock.patch.object(agent_base_vendor.BaseAgentVendor, 'continue_deploy', - autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, 'reboot_to_instance', - autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) - def test_heartbeat_noops_maintenance_mode(self, ncrc_mock, rti_mock, - cd_mock): - """Ensures that heartbeat() no-ops for a maintenance node.""" - kwargs = { - 'agent_url': 'http://127.0.0.1:9999/bar' - } - self.node.maintenance = True - for state in (states.AVAILABLE, states.DEPLOYWAIT, states.DEPLOYING, - states.CLEANING): - self.node.provision_state = state - self.node.save() - with task_manager.acquire( - self.context, self.node['uuid'], shared=False) as task: - self.passthru.heartbeat(task, **kwargs) - - self.assertEqual(0, ncrc_mock.call_count) - self.assertEqual(0, rti_mock.call_count) - self.assertEqual(0, cd_mock.call_count) - - @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, 
'deploy_has_started', - autospec=True) - def test_heartbeat_touch_provisioning(self, mock_deploy_started, - mock_touch): - mock_deploy_started.return_value = True - kwargs = { - 'agent_url': 'http://127.0.0.1:9999/bar' - } - - self.node.provision_state = states.DEPLOYWAIT - self.node.save() - with task_manager.acquire( - self.context, self.node.uuid, shared=False) as task: - self.passthru.heartbeat(task, **kwargs) - - mock_touch.assert_called_once_with(mock.ANY) - def test_vendor_passthru_vendor_routes(self): expected = ['heartbeat'] with task_manager.acquire(self.context, self.node.uuid, @@ -565,6 +328,238 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.assertIsInstance(driver_routes, dict) self.assertEqual(expected, list(driver_routes)) + def test_get_properties(self): + expected = agent_base_vendor.VENDOR_PROPERTIES + self.assertEqual(expected, self.passthru.get_properties()) + + +class AgentDeployMixinBaseTest(db_base.DbTestCase): + + def setUp(self): + super(AgentDeployMixinBaseTest, self).setUp() + mgr_utils.mock_the_extension_manager(driver="fake_agent") + self.deploy = agent_base_vendor.AgentDeployMixin() + n = { + 'driver': 'fake_agent', + 'instance_info': INSTANCE_INFO, + 'driver_info': DRIVER_INFO, + 'driver_internal_info': DRIVER_INTERNAL_INFO, + } + self.node = object_utils.create_test_node(self.context, **n) + + +class TestHeartbeat(AgentDeployMixinBaseTest): + + @mock.patch.object(agent_base_vendor.AgentDeployMixin, + 'deploy_has_started', autospec=True) + @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True) + @mock.patch.object(agent_base_vendor.AgentDeployMixin, 'deploy_is_done', + autospec=True) + @mock.patch.object(agent_base_vendor.LOG, 'exception', autospec=True) + def test_heartbeat_deploy_done_fails(self, log_mock, done_mock, + failed_mock, deploy_started_mock): + deploy_started_mock.return_value = True + done_mock.side_effect = Exception('LlamaException') + with task_manager.acquire( + self.context, self.node['uuid'], 
shared=False) as task: + task.node.provision_state = states.DEPLOYWAIT + task.node.target_provision_state = states.ACTIVE + self.deploy.heartbeat(task, 'http://127.0.0.1:8080') + failed_mock.assert_called_once_with(task, mock.ANY) + log_mock.assert_called_once_with( + 'Asynchronous exception for node ' + '1be26c0b-03f2-4d2e-ae87-c02d7f33c123: Failed checking if deploy ' + 'is done. Exception: LlamaException') + + @mock.patch.object(agent_base_vendor.AgentDeployMixin, + 'deploy_has_started', autospec=True) + @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True) + @mock.patch.object(agent_base_vendor.AgentDeployMixin, 'deploy_is_done', + autospec=True) + @mock.patch.object(agent_base_vendor.LOG, 'exception', autospec=True) + def test_heartbeat_deploy_done_raises_with_event(self, log_mock, done_mock, + failed_mock, + deploy_started_mock): + deploy_started_mock.return_value = True + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + + def driver_failure(*args, **kwargs): + # simulate driver failure that both advances the FSM + # and raises an exception + task.node.provision_state = states.DEPLOYFAIL + raise Exception('LlamaException') + + task.node.provision_state = states.DEPLOYWAIT + task.node.target_provision_state = states.ACTIVE + done_mock.side_effect = driver_failure + self.deploy.heartbeat(task, 'http://127.0.0.1:8080') + # task.node.provision_state being set to DEPLOYFAIL + # within the driver_failue, hearbeat should not call + # deploy_utils.set_failed_state anymore + self.assertFalse(failed_mock.called) + log_mock.assert_called_once_with( + 'Asynchronous exception for node ' + '1be26c0b-03f2-4d2e-ae87-c02d7f33c123: Failed checking if deploy ' + 'is done. 
Exception: LlamaException') + + @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) + @mock.patch.object(agent_base_vendor.AgentDeployMixin, + '_refresh_clean_steps', autospec=True) + @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) + def test_heartbeat_resume_clean(self, mock_notify, mock_set_steps, + mock_refresh, mock_touch): + self.node.clean_step = {} + self.node.provision_state = states.CLEANWAIT + self.node.save() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.deploy.heartbeat(task, 'http://127.0.0.1:8080') + + mock_touch.assert_called_once_with(mock.ANY) + mock_refresh.assert_called_once_with(mock.ANY, task) + mock_notify.assert_called_once_with(task) + mock_set_steps.assert_called_once_with(task) + + @mock.patch.object(manager_utils, 'cleaning_error_handler') + @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) + @mock.patch.object(agent_base_vendor.AgentDeployMixin, + '_refresh_clean_steps', autospec=True) + @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) + def test_heartbeat_resume_clean_fails(self, mock_notify, mock_set_steps, + mock_refresh, mock_touch, + mock_handler): + mocks = [mock_refresh, mock_set_steps, mock_notify] + self.node.clean_step = {} + self.node.provision_state = states.CLEANWAIT + self.node.save() + for i in range(len(mocks)): + before_failed_mocks = mocks[:i] + failed_mock = mocks[i] + after_failed_mocks = mocks[i + 1:] + failed_mock.side_effect = Exception() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.deploy.heartbeat(task, 'http://127.0.0.1:8080') + + mock_touch.assert_called_once_with(mock.ANY) + mock_handler.assert_called_once_with(task, mock.ANY) + for called 
in before_failed_mocks + [failed_mock]: + self.assertTrue(called.called) + for not_called in after_failed_mocks: + self.assertFalse(not_called.called) + + # Reset mocks for the next interaction + for m in mocks + [mock_touch, mock_handler]: + m.reset_mock() + failed_mock.side_effect = None + + @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) + @mock.patch.object(agent_base_vendor.AgentDeployMixin, + 'continue_cleaning', autospec=True) + def test_heartbeat_continue_cleaning(self, mock_continue, mock_touch): + self.node.clean_step = { + 'priority': 10, + 'interface': 'deploy', + 'step': 'foo', + 'reboot_requested': False + } + self.node.provision_state = states.CLEANWAIT + self.node.save() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.deploy.heartbeat(task, 'http://127.0.0.1:8080') + + mock_touch.assert_called_once_with(mock.ANY) + mock_continue.assert_called_once_with(mock.ANY, task) + + @mock.patch.object(manager_utils, 'cleaning_error_handler') + @mock.patch.object(agent_base_vendor.AgentDeployMixin, + 'continue_cleaning', autospec=True) + def test_heartbeat_continue_cleaning_fails(self, mock_continue, + mock_handler): + self.node.clean_step = { + 'priority': 10, + 'interface': 'deploy', + 'step': 'foo', + 'reboot_requested': False + } + + mock_continue.side_effect = Exception() + + self.node.provision_state = states.CLEANWAIT + self.node.save() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.deploy.heartbeat(task, 'http://127.0.0.1:8080') + + mock_continue.assert_called_once_with(mock.ANY, task) + mock_handler.assert_called_once_with(task, mock.ANY) + + @mock.patch.object(manager_utils, 'cleaning_error_handler') + @mock.patch.object(agent_base_vendor.AgentDeployMixin, + 'continue_cleaning', autospec=True) + def test_heartbeat_continue_cleaning_no_worker(self, mock_continue, + mock_handler): + self.node.clean_step = { + 'priority': 10, + 'interface': 
'deploy', + 'step': 'foo', + 'reboot_requested': False + } + + mock_continue.side_effect = exception.NoFreeConductorWorker() + + self.node.provision_state = states.CLEANWAIT + self.node.save() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.deploy.heartbeat(task, 'http://127.0.0.1:8080') + + mock_continue.assert_called_once_with(mock.ANY, task) + self.assertFalse(mock_handler.called) + + @mock.patch.object(agent_base_vendor.AgentDeployMixin, 'continue_deploy', + autospec=True) + @mock.patch.object(agent_base_vendor.AgentDeployMixin, + 'reboot_to_instance', autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) + def test_heartbeat_noops_maintenance_mode(self, ncrc_mock, rti_mock, + cd_mock): + """Ensures that heartbeat() no-ops for a maintenance node.""" + self.node.maintenance = True + for state in (states.AVAILABLE, states.DEPLOYWAIT, states.DEPLOYING, + states.CLEANING): + self.node.provision_state = state + self.node.save() + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + self.deploy.heartbeat(task, 'http://127.0.0.1:8080') + + self.assertEqual(0, ncrc_mock.call_count) + self.assertEqual(0, rti_mock.call_count) + self.assertEqual(0, cd_mock.call_count) + + @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) + @mock.patch.object(agent_base_vendor.AgentDeployMixin, + 'deploy_has_started', autospec=True) + def test_heartbeat_touch_provisioning(self, mock_deploy_started, + mock_touch): + mock_deploy_started.return_value = True + + self.node.provision_state = states.DEPLOYWAIT + self.node.save() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.deploy.heartbeat(task, 'http://127.0.0.1:8080') + + mock_touch.assert_called_once_with(mock.ANY) + @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) 
@mock.patch.object(fake.FakePower, 'get_power_state', @@ -581,7 +576,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): shared=True) as task: get_power_state_mock.side_effect = [states.POWER_ON, states.POWER_OFF] - self.passthru.reboot_and_finish_deploy(task) + self.deploy.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(2, get_power_state_mock.call_count) node_power_action_mock.assert_called_once_with( @@ -609,7 +604,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: get_power_state_mock.return_value = states.POWER_ON - self.passthru.reboot_and_finish_deploy(task) + self.deploy.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(7, get_power_state_mock.call_count) node_power_action_mock.assert_has_calls([ @@ -637,7 +632,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - self.passthru.reboot_and_finish_deploy(task) + self.deploy.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) node_power_action_mock.assert_has_calls([ mock.call(task, states.POWER_OFF), @@ -667,7 +662,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: get_power_state_mock.side_effect = RuntimeError("boom") - self.passthru.reboot_and_finish_deploy(task) + self.deploy.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(7, get_power_state_mock.call_count) node_power_action_mock.assert_has_calls([ @@ -696,7 +691,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): get_power_state_mock.return_value = states.POWER_ON node_power_action_mock.side_effect = RuntimeError("boom") self.assertRaises(exception.InstanceDeployFailure, - self.passthru.reboot_and_finish_deploy, + 
self.deploy.reboot_and_finish_deploy, task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(7, get_power_state_mock.call_count) @@ -720,7 +715,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - self.passthru.reboot_and_finish_deploy(task) + self.deploy.reboot_and_finish_deploy(task) sync_mock.assert_called_once_with(task.node) node_power_action_mock.assert_has_calls([ @@ -747,7 +742,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: sync_mock.return_value = {'faultstring': 'Unknown command: blah'} - self.passthru.reboot_and_finish_deploy(task) + self.deploy.reboot_and_finish_deploy(task) sync_mock.assert_called_once_with(task.node) node_power_action_mock.assert_has_calls([ @@ -773,8 +768,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: task.node.driver_internal_info['is_whole_disk_image'] = False - self.passthru.configure_local_boot(task, - root_uuid='some-root-uuid') + self.deploy.configure_local_boot(task, root_uuid='some-root-uuid') try_set_boot_device_mock.assert_called_once_with( task, boot_devices.DISK) install_bootloader_mock.assert_called_once_with( @@ -791,7 +785,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: task.node.driver_internal_info['is_whole_disk_image'] = False - self.passthru.configure_local_boot( + self.deploy.configure_local_boot( task, root_uuid='some-root-uuid', efi_system_part_uuid='efi-system-part-uuid') try_set_boot_device_mock.assert_called_once_with( @@ -807,7 +801,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): self, install_bootloader_mock, try_set_boot_device_mock): with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - 
self.passthru.configure_local_boot(task) + self.deploy.configure_local_boot(task) self.assertFalse(install_bootloader_mock.called) try_set_boot_device_mock.assert_called_once_with( task, boot_devices.DISK) @@ -820,7 +814,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: task.node.driver_internal_info['is_whole_disk_image'] = False - self.passthru.configure_local_boot(task) + self.deploy.configure_local_boot(task) self.assertFalse(install_bootloader_mock.called) try_set_boot_device_mock.assert_called_once_with( task, boot_devices.DISK) @@ -838,7 +832,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): shared=False) as task: task.node.driver_internal_info['is_whole_disk_image'] = False self.assertRaises(exception.InstanceDeployFailure, - self.passthru.configure_local_boot, + self.deploy.configure_local_boot, task, root_uuid='some-root-uuid') install_bootloader_mock.assert_called_once_with( mock.ANY, task.node, root_uuid='some-root-uuid', @@ -861,7 +855,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): shared=False) as task: task.node.driver_internal_info['is_whole_disk_image'] = False self.assertRaises(exception.InstanceDeployFailure, - self.passthru.configure_local_boot, + self.deploy.configure_local_boot, task, root_uuid='some-root-uuid') install_bootloader_mock.assert_called_once_with( mock.ANY, task.node, root_uuid='some-root-uuid', @@ -874,7 +868,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True) @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, + @mock.patch.object(agent_base_vendor.AgentDeployMixin, 'configure_local_boot', autospec=True) def test_prepare_instance_to_boot_netboot(self, configure_mock, boot_option_mock, @@ -889,8 +883,8 @@ class 
TestBaseAgentVendor(db_base.DbTestCase): efi_system_part_uuid = 'efi_sys_uuid' with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru.prepare_instance_to_boot(task, root_uuid, - efi_system_part_uuid) + self.deploy.prepare_instance_to_boot(task, root_uuid, + efi_system_part_uuid) self.assertFalse(configure_mock.called) boot_option_mock.assert_called_once_with(task.node) prepare_instance_mock.assert_called_once_with(task.driver.boot, @@ -900,7 +894,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True) @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, + @mock.patch.object(agent_base_vendor.AgentDeployMixin, 'configure_local_boot', autospec=True) def test_prepare_instance_to_boot_localboot(self, configure_mock, boot_option_mock, @@ -915,9 +909,9 @@ class TestBaseAgentVendor(db_base.DbTestCase): efi_system_part_uuid = 'efi_sys_uuid' with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru.prepare_instance_to_boot(task, root_uuid, - efi_system_part_uuid) - configure_mock.assert_called_once_with(self.passthru, task, + self.deploy.prepare_instance_to_boot(task, root_uuid, + efi_system_part_uuid) + configure_mock.assert_called_once_with(self.deploy, task, root_uuid, efi_system_part_uuid) boot_option_mock.assert_called_once_with(task.node) @@ -928,7 +922,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True) @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, + @mock.patch.object(agent_base_vendor.AgentDeployMixin, 'configure_local_boot', autospec=True) def 
test_prepare_instance_to_boot_configure_fails(self, configure_mock, boot_option_mock, @@ -949,17 +943,17 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: self.assertRaises(exception.InstanceDeployFailure, - self.passthru.prepare_instance_to_boot, task, + self.deploy.prepare_instance_to_boot, task, root_uuid, efi_system_part_uuid) - configure_mock.assert_called_once_with(self.passthru, task, + configure_mock.assert_called_once_with(self.deploy, task, root_uuid, efi_system_part_uuid) boot_option_mock.assert_called_once_with(task.node) self.assertFalse(prepare_mock.called) self.assertFalse(failed_state_mock.called) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) def test_continue_cleaning(self, status_mock, notify_mock): @@ -980,14 +974,14 @@ class TestBaseAgentVendor(db_base.DbTestCase): }] with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru.continue_cleaning(task) - notify_mock.assert_called_once_with(mock.ANY, task) + self.deploy.continue_cleaning(task) + notify_mock.assert_called_once_with(task) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) def test__cleaning_reboot(self, mock_reboot): with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru._cleaning_reboot(task) + agent_base_vendor._cleaning_reboot(task) mock_reboot.assert_called_once_with(task, states.REBOOT) self.assertTrue(task.node.driver_internal_info['cleaning_reboot']) @@ -998,7 +992,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru._cleaning_reboot(task) + 
agent_base_vendor._cleaning_reboot(task) mock_reboot.assert_called_once_with(task, states.REBOOT) mock_handler.assert_called_once_with(task, mock.ANY) self.assertNotIn('cleaning_reboot', @@ -1025,11 +1019,11 @@ class TestBaseAgentVendor(db_base.DbTestCase): }] with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru.continue_cleaning(task) + self.deploy.continue_cleaning(task) reboot_mock.assert_called_once_with(task, states.REBOOT) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) def test_continue_cleaning_after_reboot(self, status_mock, notify_mock): @@ -1048,15 +1042,15 @@ class TestBaseAgentVendor(db_base.DbTestCase): status_mock.return_value = [] with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru.continue_cleaning(task) - notify_mock.assert_called_once_with(mock.ANY, task) + self.deploy.continue_cleaning(task) + notify_mock.assert_called_once_with(task) self.assertNotIn('cleaning_reboot', task.node.driver_internal_info) @mock.patch.object(agent_base_vendor, '_get_post_clean_step_hook', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) def test_continue_cleaning_with_hook( @@ -1076,14 +1070,14 @@ class TestBaseAgentVendor(db_base.DbTestCase): get_hook_mock.return_value = hook_mock with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: - self.passthru.continue_cleaning(task) + self.deploy.continue_cleaning(task) get_hook_mock.assert_called_once_with(task.node) 
hook_mock.assert_called_once_with(task, command_status) - notify_mock.assert_called_once_with(mock.ANY, task) + notify_mock.assert_called_once_with(task) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) @mock.patch.object(agent_base_vendor, '_get_post_clean_step_hook', autospec=True) @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) @@ -1108,15 +1102,15 @@ class TestBaseAgentVendor(db_base.DbTestCase): get_hook_mock.return_value = hook_mock with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: - self.passthru.continue_cleaning(task) + self.deploy.continue_cleaning(task) get_hook_mock.assert_called_once_with(task.node) hook_mock.assert_called_once_with(task, command_status) error_handler_mock.assert_called_once_with(task, mock.ANY) self.assertFalse(notify_mock.called) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) def test_continue_cleaning_old_command(self, status_mock, notify_mock): @@ -1141,11 +1135,11 @@ class TestBaseAgentVendor(db_base.DbTestCase): }] with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru.continue_cleaning(task) + self.deploy.continue_cleaning(task) self.assertFalse(notify_mock.called) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) def test_continue_cleaning_running(self, status_mock, notify_mock): @@ -1157,7 +1151,7 @@ class 
TestBaseAgentVendor(db_base.DbTestCase): }] with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru.continue_cleaning(task) + self.deploy.continue_cleaning(task) self.assertFalse(notify_mock.called) @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) @@ -1172,13 +1166,13 @@ class TestBaseAgentVendor(db_base.DbTestCase): }] with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru.continue_cleaning(task) + self.deploy.continue_cleaning(task) error_mock.assert_called_once_with(task, mock.ANY) @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + autospec=True) + @mock.patch.object(agent_base_vendor.AgentDeployMixin, '_refresh_clean_steps', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) @@ -1195,8 +1189,8 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: - self.passthru.continue_cleaning(task) - notify_mock.assert_called_once_with(mock.ANY, task) + self.deploy.continue_cleaning(task) + notify_mock.assert_called_once_with(task) refresh_steps_mock.assert_called_once_with(mock.ANY, task) if manual: self.assertFalse( @@ -1215,9 +1209,9 @@ class TestBaseAgentVendor(db_base.DbTestCase): @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, - 'notify_conductor_resume_clean', autospec=True) - @mock.patch.object(agent_base_vendor.BaseAgentVendor, + @mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean', + 
autospec=True) + @mock.patch.object(agent_base_vendor.AgentDeployMixin, '_refresh_clean_steps', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) @@ -1236,7 +1230,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: - self.passthru.continue_cleaning(task) + self.deploy.continue_cleaning(task) status_mock.assert_called_once_with(mock.ANY, task.node) refresh_steps_mock.assert_called_once_with(mock.ANY, task) @@ -1256,7 +1250,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): }] with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - self.passthru.continue_cleaning(task) + self.deploy.continue_cleaning(task) error_mock.assert_called_once_with(task, mock.ANY) def _test_clean_step_hook(self, hook_dict_mock): @@ -1339,7 +1333,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.assertIsNone(hook_returned) -class TestRefreshCleanSteps(TestBaseAgentVendor): +class TestRefreshCleanSteps(AgentDeployMixinBaseTest): def setUp(self): super(TestRefreshCleanSteps, self).setUp() @@ -1374,7 +1368,7 @@ class TestRefreshCleanSteps(TestBaseAgentVendor): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: - self.passthru._refresh_clean_steps(task) + self.deploy._refresh_clean_steps(task) client_mock.assert_called_once_with(mock.ANY, task.node, task.ports) @@ -1404,7 +1398,7 @@ class TestRefreshCleanSteps(TestBaseAgentVendor): self.context, self.node.uuid, shared=False) as task: self.assertRaisesRegex(exception.NodeCleaningFailure, 'invalid result', - self.passthru._refresh_clean_steps, + self.deploy._refresh_clean_steps, task) client_mock.assert_called_once_with(mock.ANY, task.node, task.ports) @@ -1421,11 +1415,7 @@ class TestRefreshCleanSteps(TestBaseAgentVendor): self.context, self.node.uuid, shared=False) as task: self.assertRaisesRegex(exception.NodeCleaningFailure, 'invalid clean 
step', - self.passthru._refresh_clean_steps, + self.deploy._refresh_clean_steps, task) client_mock.assert_called_once_with(mock.ANY, task.node, task.ports) - - def test_get_properties(self): - expected = agent_base_vendor.VENDOR_PROPERTIES - self.assertEqual(expected, self.passthru.get_properties()) diff --git a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py index a4f6d9991..558fc4a06 100644 --- a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py +++ b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py @@ -649,6 +649,12 @@ class ISCSIDeployTestCase(db_base.DbTestCase): agent_execute_clean_step_mock.assert_called_once_with( task, {'some-step': 'step-info'}) + def test_heartbeat(self): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + self.driver.deploy.heartbeat(task, 'url') + self.assertFalse(task.shared) + class TestVendorPassthru(db_base.DbTestCase): diff --git a/releasenotes/notes/agent-api-bf9f18d8d38075e4.yaml b/releasenotes/notes/agent-api-bf9f18d8d38075e4.yaml new file mode 100644 index 000000000..6cc426f40 --- /dev/null +++ b/releasenotes/notes/agent-api-bf9f18d8d38075e4.yaml @@ -0,0 +1,5 @@ +--- +other: + - The "continue_deploy" and "reboot_to_instance" methods in the + "BaseAgentVendor" class stopped accepting ** arguments. They were never + used anyway; the drivers should stop passing anything there. From a9e48a3378d6d01ac5a24b7948c03851bdc08104 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Wed, 20 Jul 2016 14:45:30 -0700 Subject: [PATCH 095/166] Add build-essential to required packages for development I ran into gcc not being available recently on a fresh Ubuntu image. Adding it allowed tox to run fine and install ironic's dependencies. Add it to the dev quickstart to make sure nobody else gets tripped up. 
Change-Id: Ibd6e3bc4772af382c8df92d54050af4934e49ab4 --- doc/source/dev/dev-quickstart.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/dev-quickstart.rst b/doc/source/dev/dev-quickstart.rst index 21120d1ae..72e22245b 100644 --- a/doc/source/dev/dev-quickstart.rst +++ b/doc/source/dev/dev-quickstart.rst @@ -24,7 +24,7 @@ Install prerequisites (for python 2.7): - Ubuntu/Debian:: - sudo apt-get install python-dev libssl-dev python-pip libmysqlclient-dev libxml2-dev libxslt-dev libpq-dev git git-review libffi-dev gettext ipmitool psmisc graphviz libjpeg-dev + sudo apt-get install build-essential python-dev libssl-dev python-pip libmysqlclient-dev libxml2-dev libxslt-dev libpq-dev git git-review libffi-dev gettext ipmitool psmisc graphviz libjpeg-dev - Fedora 21/RHEL7/CentOS7:: From 4136f65d5d2c6230c834674301a5ef7d595fe3fe Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Thu, 21 Jul 2016 10:40:26 +0300 Subject: [PATCH 096/166] Remove duplicate copyright. This patch removes duplicate copyright string from ironic/conf/ipmi.py Change-Id: I966d6a498af3647d54e944015824f63594d41219 --- ironic/conf/ipmi.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ironic/conf/ipmi.py b/ironic/conf/ipmi.py index 4dad3c2d6..10629c5b9 100644 --- a/ironic/conf/ipmi.py +++ b/ironic/conf/ipmi.py @@ -2,7 +2,6 @@ # # Copyright 2013 International Business Machines Corporation # All Rights Reserved. -# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain From 2f3864ff28ead6bfe227bc530241e715bb0a3875 Mon Sep 17 00:00:00 2001 From: TuanLAF Date: Wed, 20 Jul 2016 08:38:30 +0700 Subject: [PATCH 097/166] Remove unused import This patch is removing the "import *" from ironic/tests/unit/db/__init__.py because it was not used. 
Change-Id: I754447d63e0e1162ae6cd0506158e95bf0ba7cdd --- ironic/tests/unit/db/__init__.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/ironic/tests/unit/db/__init__.py b/ironic/tests/unit/db/__init__.py index 0f96bcc9c..e69de29bb 100644 --- a/ironic/tests/unit/db/__init__.py +++ b/ironic/tests/unit/db/__init__.py @@ -1,16 +0,0 @@ -# Copyright (c) 2012 NTT DOCOMO, INC. -# All Rights Reserved. -# flake8: noqa -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from ironic.tests.unit.db import * From 56d75b9654bb680b7345c3cffc2bcee7ad640f50 Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Thu, 21 Jul 2016 14:39:43 +0300 Subject: [PATCH 098/166] Do the VM setup only when requested After the recent change, we started calling create_bridge_and_vms no matter if it was requested by setting IRONIC_BAREMETAL_BASIC_OPS to True or not. This breaks the ironicclient functional job that does not need any vms created. 
Change-Id: Iabd2794fc0aad2ca752e04d87d53f641f26e9ffb Closes-bug: #1605194 --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index bd635c487..958b65a6a 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -37,7 +37,7 @@ if is_service_enabled ir-api ir-cond; then # Initialize ironic init_ironic - if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" && "$IRONIC_IS_HARDWARE" == "False" ]]; then echo_summary "Creating bridge and VMs" create_bridge_and_vms fi From f95be90248ff64cb37cd0e2dbe220719c1df346c Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Wed, 20 Jul 2016 14:00:11 -0400 Subject: [PATCH 099/166] Bug fixes and doc updates for adoption During the creation of a tempest test for the adoption feature, some minor issues were identified with the adoption functionality. Namely, the default logic was to create ramdisks, however that logic path is more intended for deployment ramdisks. Logic was switched to the instance preparation logic which is the default for nodes in active state, which is realistically exactly what is desired. Validation behavior ultimately remains unchanged and tests were added to validate that the expected methods are called. Additionally, it was identified that it would be ideal to encourage the user to set the node to local boot, and as such the documentation was updated as part of this change, coupled with a note mentioning changes in API version 1.20 that a user may wish to leverage. 
Change-Id: Id6053e0fa68deb431f4543005421982c795401f2 Closes-Bug: #1605239 --- doc/source/deploy/adoption.rst | 14 +++++++++++++- ironic/drivers/modules/agent.py | 2 +- ironic/drivers/modules/iscsi_deploy.py | 2 +- .../tests/unit/drivers/modules/test_agent.py | 19 +++++++++++++++++++ .../unit/drivers/modules/test_iscsi_deploy.py | 14 ++++++++++++++ ...ption-feature-update-d2160954a2c36b0a.yaml | 11 +++++++++++ 6 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/adoption-feature-update-d2160954a2c36b0a.yaml diff --git a/doc/source/deploy/adoption.rst b/doc/source/deploy/adoption.rst index aed8bd284..b3d769f0e 100644 --- a/doc/source/deploy/adoption.rst +++ b/doc/source/deploy/adoption.rst @@ -132,7 +132,8 @@ from the ``manageable`` state to ``active`` state.:: ironic port-create --node -a ironic node-update testnode add \ - instance_info/image_source="http://localhost:8080/blankimage" + instance_info/image_source="http://localhost:8080/blankimage" \ + instance_info/capabilities="{\"boot_option\": \"local\"}" ironic node-set-provision-state testnode manage @@ -142,6 +143,11 @@ from the ``manageable`` state to ``active`` state.:: In the above example, the image_source setting must reference a valid image or file, however that image or file can ultimately be empty. +.. NOTE:: + The above example utilizes a capability that defines the boot operation + to be local. It is recommended to define the node as such unless network + booting is desired. + .. NOTE:: The above example will fail a re-deployment as a fake image is defined and no instance_info/image_checksum value is defined. @@ -156,6 +162,12 @@ from the ``manageable`` state to ``active`` state.:: ironic node-update add instance_uuid= +.. NOTE:: + In Newton, coupled with API version 1.20, the concept of a + network_interface was introduced. 
A user of this feature may wish to + add new nodes with a network_interface of ``noop`` and then change + the interface at a later point and time. + Troubleshooting =============== diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py index 2cf56809f..1e55a8495 100644 --- a/ironic/drivers/modules/agent.py +++ b/ironic/drivers/modules/agent.py @@ -331,7 +331,7 @@ class AgentDeploy(base.DeployInterface): # options get added for the provisioning port. manager_utils.node_power_action(task, states.POWER_OFF) task.driver.network.add_provisioning_network(task) - if node.provision_state != states.ACTIVE: + if node.provision_state not in [states.ACTIVE, states.ADOPTING]: node.instance_info = build_instance_info_for_deploy(task) node.save() if CONF.agent.manage_agent_boot: diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py index 957368eb3..a049f3f2c 100644 --- a/ironic/drivers/modules/iscsi_deploy.py +++ b/ironic/drivers/modules/iscsi_deploy.py @@ -484,7 +484,7 @@ class ISCSIDeploy(base.DeployInterface): :raises: any boot interface's prepare_ramdisk exceptions. """ node = task.node - if node.provision_state == states.ACTIVE: + if node.provision_state in [states.ACTIVE, states.ADOPTING]: task.driver.boot.prepare_instance(task) else: if node.provision_state == states.DEPLOYING: diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py index e2b647b89..81e34448c 100644 --- a/ironic/tests/unit/drivers/modules/test_agent.py +++ b/ironic/tests/unit/drivers/modules/test_agent.py @@ -515,6 +515,25 @@ class TestAgentDeploy(db_base.DbTestCase): self.assertFalse(pxe_prepare_ramdisk_mock.called) self.assertFalse(add_provisioning_net_mock.called) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'add_provisioning_network', autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk') + @mock.patch.object(deploy_utils, 'build_agent_options') + @mock.patch.object(agent, 'build_instance_info_for_deploy') + def test_prepare_adopting( + self, build_instance_info_mock, build_options_mock, + pxe_prepare_ramdisk_mock, add_provisioning_net_mock): + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + task.node.provision_state = states.ADOPTING + + self.driver.prepare(task) + + self.assertFalse(build_instance_info_mock.called) + self.assertFalse(build_options_mock.called) + self.assertFalse(pxe_prepare_ramdisk_mock.called) + self.assertFalse(add_provisioning_net_mock.called) + @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider') @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp') @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk') diff --git a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py index a4f6d9991..64b38e063 100644 --- a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py +++ b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py @@ -541,6 +541,20 @@ class ISCSIDeployTestCase(db_base.DbTestCase): task.driver.boot, task) self.assertEqual(0, add_provisioning_net_mock.call_count) + @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
+ 'add_provisioning_network', spec_set=True, autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True) + def test_prepare_node_adopting(self, prepare_instance_mock, + add_provisioning_net_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + task.node.provision_state = states.ADOPTING + + task.driver.deploy.prepare(task) + + prepare_instance_mock.assert_called_once_with( + task.driver.boot, task) + self.assertEqual(0, add_provisioning_net_mock.call_count) + @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True) @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' diff --git a/releasenotes/notes/adoption-feature-update-d2160954a2c36b0a.yaml b/releasenotes/notes/adoption-feature-update-d2160954a2c36b0a.yaml new file mode 100644 index 000000000..bcc5247c9 --- /dev/null +++ b/releasenotes/notes/adoption-feature-update-d2160954a2c36b0a.yaml @@ -0,0 +1,11 @@ +--- +fixes: + - Adoption feature logic was updated to prevent ramdisk + creation and default to instance creation where appropriate + based on the driver. + - Adoption documentation has been updated to note that the + boot_option should likely be defined for nodes by a user + leveraging the feature. + - Adoption documentation has been updated to note that a user + may wish to utilize the ``noop`` network interface that + arrived with API version 1.20. From 7a0d22c0fd70408f6a2ba94bf2af7d6e652c1ca3 Mon Sep 17 00:00:00 2001 From: Varun Gadiraju Date: Wed, 20 Jul 2016 15:23:59 +0000 Subject: [PATCH 100/166] Added note to local.conf addressing firewall/proxy blocking Git protocol local.conf file for deploying ironic with devstack uses Git protocol to enable ironic plugin. Git protocol requires access to port 9418 which corporate firewalls commonly block. 
Added a note to the local.conf file informing users who face this problem how to modify the local.conf file to use https:// instead of git:// so ironic plugin will be enabled. Change-Id: Ia0ab1c23cf1e6979eb7e03bb9746aa94dd3f0257 Closes-Bug: #1604243 --- doc/source/dev/dev-quickstart.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/doc/source/dev/dev-quickstart.rst b/doc/source/dev/dev-quickstart.rst index 21120d1ae..68782aaba 100644 --- a/doc/source/dev/dev-quickstart.rst +++ b/doc/source/dev/dev-quickstart.rst @@ -464,6 +464,17 @@ and uses the ``agent_ipmitool`` driver by default:: END +.. note:: + Git protocol requires access to port 9418, which is not a standard port that + corporate firewalls always allow. If you are behind a firewall or on a proxy that + blocks Git protocol, modify the ``enable_plugin`` line to use ``https://`` instead + of ``git://`` and add ``GIT_BASE=https://git.openstack.org`` to the credentials:: + + GIT_BASE=https://git.openstack.org + + # Enable Ironic plugin + enable_plugin ironic https://git.openstack.org/openstack/ironic + .. note:: The agent_ssh and pxe_ssh drivers are being deprecated in favor of the more production-like agent_ipmitool and pxe_ipmitool drivers. When a From 2de6e17cf202b01e7134ffd94a16d0ed4a69745b Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Thu, 21 Apr 2016 14:33:26 -0700 Subject: [PATCH 101/166] Centralize config options - [api] Nova style refactor of config options in Ironic. 
Change-Id: I67ebeca9a4efb6065d2b4f0a7bcf5830b0cc5e3f Partial-Bug: #1561100 --- ironic/api/__init__.py | 57 --------------------------------------- ironic/conf/__init__.py | 2 ++ ironic/conf/api.py | 60 +++++++++++++++++++++++++++++++++++++++++ ironic/conf/opts.py | 3 +-- 4 files changed, 63 insertions(+), 59 deletions(-) create mode 100644 ironic/conf/api.py diff --git a/ironic/api/__init__.py b/ironic/api/__init__.py index eba90ad5b..e69de29bb 100644 --- a/ironic/api/__init__.py +++ b/ironic/api/__init__.py @@ -1,57 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from ironic.common.i18n import _ - -API_SERVICE_OPTS = [ - cfg.StrOpt('host_ip', - default='0.0.0.0', - help=_('The IP address on which ironic-api listens.')), - cfg.PortOpt('port', - default=6385, - help=_('The TCP port on which ironic-api listens.')), - cfg.IntOpt('max_limit', - default=1000, - help=_('The maximum number of items returned in a single ' - 'response from a collection resource.')), - cfg.StrOpt('public_endpoint', - help=_("Public URL to use when building the links to the API " - "resources (for example, \"https://ironic.rocks:6384\")." - " If None the links will be built using the request's " - "host URL. If the API is operating behind a proxy, you " - "will want to change this to represent the proxy's URL. 
" - "Defaults to None.")), - cfg.IntOpt('api_workers', - help=_('Number of workers for OpenStack Ironic API service. ' - 'The default is equal to the number of CPUs available ' - 'if that can be determined, else a default worker ' - 'count of 1 is returned.')), - cfg.BoolOpt('enable_ssl_api', - default=False, - help=_("Enable the integrated stand-alone API to service " - "requests via HTTPS instead of HTTP. If there is a " - "front-end service performing HTTPS offloading from " - "the service, this option should be False; note, you " - "will want to change public API endpoint to represent " - "SSL termination URL with 'public_endpoint' option.")), -] - -CONF = cfg.CONF -opt_group = cfg.OptGroup(name='api', - title='Options for the ironic-api service') -CONF.register_group(opt_group) -CONF.register_opts(API_SERVICE_OPTS, opt_group) diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index d172c9205..df6a81ac9 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -15,6 +15,7 @@ from oslo_config import cfg +from ironic.conf import api from ironic.conf import cimc from ironic.conf import cisco_ucs from ironic.conf import conductor @@ -39,6 +40,7 @@ from ironic.conf import virtualbox CONF = cfg.CONF +api.register_opts(CONF) cimc.register_opts(CONF) cisco_ucs.register_opts(CONF) conductor.register_opts(CONF) diff --git a/ironic/conf/api.py b/ironic/conf/api.py new file mode 100644 index 000000000..7ec6f36c2 --- /dev/null +++ b/ironic/conf/api.py @@ -0,0 +1,60 @@ +# Copyright 2016 Intel Corporation +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.StrOpt('host_ip', + default='0.0.0.0', + help=_('The IP address on which ironic-api listens.')), + cfg.PortOpt('port', + default=6385, + help=_('The TCP port on which ironic-api listens.')), + cfg.IntOpt('max_limit', + default=1000, + help=_('The maximum number of items returned in a single ' + 'response from a collection resource.')), + cfg.StrOpt('public_endpoint', + help=_("Public URL to use when building the links to the API " + "resources (for example, \"https://ironic.rocks:6384\")." + " If None the links will be built using the request's " + "host URL. If the API is operating behind a proxy, you " + "will want to change this to represent the proxy's URL. " + "Defaults to None.")), + cfg.IntOpt('api_workers', + help=_('Number of workers for OpenStack Ironic API service. ' + 'The default is equal to the number of CPUs available ' + 'if that can be determined, else a default worker ' + 'count of 1 is returned.')), + cfg.BoolOpt('enable_ssl_api', + default=False, + help=_("Enable the integrated stand-alone API to service " + "requests via HTTPS instead of HTTP. 
If there is a " + "front-end service performing HTTPS offloading from " + "the service, this option should be False; note, you " + "will want to change public API endpoint to represent " + "SSL termination URL with 'public_endpoint' option.")), +] + +opt_group = cfg.OptGroup(name='api', + title='Options for the ironic-api service') + + +def register_opts(conf): + conf.register_group(opt_group) + conf.register_opts(opts, group=opt_group) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 1be7cd349..9458a2bfe 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -12,7 +12,6 @@ import itertools -import ironic.api import ironic.drivers.modules.agent import ironic.drivers.modules.agent_base_vendor import ironic.drivers.modules.agent_client @@ -44,7 +43,7 @@ _opts = [ ('amt', itertools.chain( ironic.drivers.modules.amt.common.opts, ironic.drivers.modules.amt.power.opts)), - ('api', ironic.api.API_SERVICE_OPTS), + ('api', ironic.conf.api.opts), ('cimc', ironic.conf.cimc.opts), ('cisco_ucs', ironic.conf.cisco_ucs.opts), ('conductor', ironic.conf.conductor.opts), From a351fd6a4d68864ad3655f00c779ffad0be04cdd Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Thu, 21 Apr 2016 15:34:34 -0700 Subject: [PATCH 102/166] Centralize config options - [deploy] Nova style refactor of config options in Ironic. 
Change-Id: Ic303847415d5f3564510a8f1b1a40168b8a165f8 Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 + ironic/conf/deploy.py | 68 ++++++++++++++++++++++++++ ironic/conf/opts.py | 3 +- ironic/drivers/modules/deploy_utils.py | 48 +----------------- 4 files changed, 72 insertions(+), 49 deletions(-) create mode 100644 ironic/conf/deploy.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index df6a81ac9..4bcb97319 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -22,6 +22,7 @@ from ironic.conf import conductor from ironic.conf import console from ironic.conf import database from ironic.conf import default +from ironic.conf import deploy from ironic.conf import dhcp from ironic.conf import glance from ironic.conf import iboot @@ -47,6 +48,7 @@ conductor.register_opts(CONF) console.register_opts(CONF) database.register_opts(CONF) default.register_opts(CONF) +deploy.register_opts(CONF) dhcp.register_opts(CONF) glance.register_opts(CONF) iboot.register_opts(CONF) diff --git a/ironic/conf/deploy.py b/ironic/conf/deploy.py new file mode 100644 index 000000000..d033ea23d --- /dev/null +++ b/ironic/conf/deploy.py @@ -0,0 +1,68 @@ +# Copyright 2016 Intel Corporation +# Copyright (c) 2012 NTT DOCOMO, INC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + + +opts = [ + cfg.StrOpt('http_url', + help=_("ironic-conductor node's HTTP server URL. 
" + "Example: http://192.1.2.3:8080")), + cfg.StrOpt('http_root', + default='/httpboot', + help=_("ironic-conductor node's HTTP root path.")), + cfg.IntOpt('erase_devices_priority', + help=_('Priority to run in-band erase devices via the Ironic ' + 'Python Agent ramdisk. If unset, will use the priority ' + 'set in the ramdisk (defaults to 10 for the ' + 'GenericHardwareManager). If set to 0, will not run ' + 'during cleaning.')), + # TODO(mmitchell): Remove the deprecated name/group during Ocata cycle. + cfg.IntOpt('shred_random_overwrite_iterations', + deprecated_name='erase_devices_iterations', + deprecated_group='deploy', + default=1, + min=0, + help=_('During shred, overwrite all block devices N times with ' + 'random data. This is only used if a device could not ' + 'be ATA Secure Erased. Defaults to 1.')), + cfg.BoolOpt('shred_final_overwrite_with_zeros', + default=True, + help=_("Whether to write zeros to a node's block devices " + "after writing random data. This will write zeros to " + "the device even when " + "deploy.shred_random_overwrite_interations is 0. This " + "option is only used if a device could not be ATA " + "Secure Erased. Defaults to True.")), + cfg.BoolOpt('continue_if_disk_secure_erase_fails', + default=False, + help=_('Defines what to do if an ATA secure erase operation ' + 'fails during cleaning in the Ironic Python Agent. ' + 'If False, the cleaning operation will fail and the ' + 'node will be put in ``clean failed`` state. ' + 'If True, shred will be invoked and cleaning will ' + 'continue.')), + cfg.BoolOpt('power_off_after_deploy_failure', + default=True, + help=_('Whether to power off a node after deploy failure. 
' + 'Defaults to True.')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='deploy') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 9458a2bfe..b1c105f48 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -17,7 +17,6 @@ import ironic.drivers.modules.agent_base_vendor import ironic.drivers.modules.agent_client import ironic.drivers.modules.amt.common import ironic.drivers.modules.amt.power -import ironic.drivers.modules.deploy_utils import ironic.drivers.modules.iscsi_deploy import ironic.drivers.modules.pxe @@ -49,7 +48,7 @@ _opts = [ ('conductor', ironic.conf.conductor.opts), ('console', ironic.conf.console.opts), ('database', ironic.conf.database.opts), - ('deploy', ironic.drivers.modules.deploy_utils.deploy_opts), + ('deploy', ironic.conf.deploy.opts), ('dhcp', ironic.conf.dhcp.opts), ('glance', ironic.conf.glance.opts), ('iboot', ironic.conf.iboot.opts), diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py index 87aaf65de..e7456f582 100644 --- a/ironic/drivers/modules/deploy_utils.py +++ b/ironic/drivers/modules/deploy_utils.py @@ -21,7 +21,6 @@ import time from ironic_lib import disk_utils from oslo_concurrency import processutils -from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils @@ -40,58 +39,13 @@ from ironic.common import keystone from ironic.common import states from ironic.common import utils from ironic.conductor import utils as manager_utils +from ironic.conf import CONF from ironic.drivers.modules import agent_client from ironic.drivers.modules import image_cache from ironic.drivers import utils as driver_utils from ironic import objects -deploy_opts = [ - cfg.StrOpt('http_url', - help=_("ironic-conductor node's HTTP server URL. 
" - "Example: http://192.1.2.3:8080")), - cfg.StrOpt('http_root', - default='/httpboot', - help=_("ironic-conductor node's HTTP root path.")), - cfg.IntOpt('erase_devices_priority', - help=_('Priority to run in-band erase devices via the Ironic ' - 'Python Agent ramdisk. If unset, will use the priority ' - 'set in the ramdisk (defaults to 10 for the ' - 'GenericHardwareManager). If set to 0, will not run ' - 'during cleaning.')), - # TODO(mmitchell): Remove the deprecated name/group during Ocata cycle. - cfg.IntOpt('shred_random_overwrite_iterations', - deprecated_name='erase_devices_iterations', - deprecated_group='deploy', - default=1, - min=0, - help=_('During shred, overwrite all block devices N times with ' - 'random data. This is only used if a device could not ' - 'be ATA Secure Erased. Defaults to 1.')), - cfg.BoolOpt('shred_final_overwrite_with_zeros', - default=True, - help=_("Whether to write zeros to a node's block devices " - "after writing random data. This will write zeros to " - "the device even when " - "deploy.shred_random_overwrite_interations is 0. This " - "option is only used if a device could not be ATA " - "Secure Erased. Defaults to True.")), - cfg.BoolOpt('continue_if_disk_secure_erase_fails', - default=False, - help=_('Defines what to do if an ATA secure erase operation ' - 'fails during cleaning in the Ironic Python Agent. ' - 'If False, the cleaning operation will fail and the ' - 'node will be put in ``clean failed`` state. ' - 'If True, shred will be invoked and cleaning will ' - 'continue.')), - cfg.BoolOpt('power_off_after_deploy_failure', - default=True, - help=_('Whether to power off a node after deploy failure. ' - 'Defaults to True.')), -] -CONF = cfg.CONF -CONF.register_opts(deploy_opts, group='deploy') - # TODO(Faizan): Move this logic to common/utils.py and deprecate # rootwrap_config. 
# This is required to set the default value of ironic_lib option From b90f3c3a37ca7ed6e8babfbbbfdfd1a6db41a7f5 Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Fri, 22 Jul 2016 13:46:38 +0800 Subject: [PATCH 103/166] Trivial: Fix a trivial flake8 error There is a flake8 error in context.py: "D200 One-line docstring should fit on one line with quotes" This patch fixes it. Change-Id: I412a7c3ee2d16eb4c87e981831ae616c29378180 --- ironic/common/context.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ironic/common/context.py b/ironic/common/context.py index 63f24c967..ffe11bd66 100644 --- a/ironic/common/context.py +++ b/ironic/common/context.py @@ -92,9 +92,8 @@ class RequestContext(context.RequestContext): def get_admin_context(): - """Create an administrator context. + """Create an administrator context.""" - """ context = RequestContext(None, tenant=None, is_admin=True, From 4ae92b46f74344ac404e71ac65145f4835383a4a Mon Sep 17 00:00:00 2001 From: Hironori Shiina Date: Sun, 24 Jul 2016 23:31:19 +0900 Subject: [PATCH 104/166] Remove unused code when failing to start console Processutils.ProcessExecutionError is never raised when it is unable to stop an old shellinabox process due to a fix[1][2]. Instead of the error, ConsoleError is raised. It would be better not to ignore the error and to fail to start a console for avoiding process leak. This patch removes a block for the exception which is never raised. 
[1]https://review.openstack.org/#/c/242319/ [2]https://review.openstack.org/#/c/244699/ Change-Id: Ia1226da652510c5655c9b8c270e6fc168efd935d --- ironic/drivers/modules/console_utils.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/ironic/drivers/modules/console_utils.py b/ironic/drivers/modules/console_utils.py index e7dde6709..799231232 100644 --- a/ironic/drivers/modules/console_utils.py +++ b/ironic/drivers/modules/console_utils.py @@ -27,7 +27,6 @@ import subprocess import time from ironic_lib import utils as ironic_utils -from oslo_concurrency import processutils from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import netutils @@ -151,7 +150,8 @@ def start_shellinabox_console(node_uuid, port, console_cmd): :param node_uuid: the uuid for the node. :param port: the terminal port for the node. :param console_cmd: the shell command that gets the console. - :raises: ConsoleError if the directory for the PID file cannot be created. + :raises: ConsoleError if the directory for the PID file cannot be created + or an old process cannot be stopped. :raises: ConsoleSubprocessFailed when invoking the subprocess failed. """ @@ -161,11 +161,6 @@ def start_shellinabox_console(node_uuid, port, console_cmd): _stop_console(node_uuid) except exception.NoConsolePid: pass - except processutils.ProcessExecutionError as exc: - LOG.warning(_LW("Failed to kill the old console process " - "before starting a new shellinabox console " - "for node %(node)s. Reason: %(err)s"), - {'node': node_uuid, 'err': exc}) _ensure_console_pid_dir_exists() pid_file = _get_console_pid_file(node_uuid) From cd88db1f7eb04ebfa6528a80be13027b201e4587 Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Mon, 25 Jul 2016 10:49:25 +0800 Subject: [PATCH 105/166] Use assertEqual() instead of assertDictEqual() In unittest2, assertDictEqual() is implemented by using != operator to compare two dicts. So is assertEqual() in testtools. 
assertEqual() in testtools is able to handle dict, list, set and so on. So we just call assertEqual() to make the unit tests simpler. Change-Id: I862fcc3665ed7efc17ce92afd8224134e0124f6e --- ironic/tests/unit/common/test_glance_service.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ironic/tests/unit/common/test_glance_service.py b/ironic/tests/unit/common/test_glance_service.py index 0490c05e2..5cdc5bb31 100644 --- a/ironic/tests/unit/common/test_glance_service.py +++ b/ironic/tests/unit/common/test_glance_service.py @@ -167,10 +167,10 @@ class TestGlanceImageService(base.TestCase): 'owner': None, } - self.assertDictEqual(expected, image_meta) + self.assertEqual(expected, image_meta) image_metas = self.service.detail() - self.assertDictEqual(expected, image_metas[0]) + self.assertEqual(expected, image_metas[0]) def test_create_without_instance_id(self): """Test creating an image without an instance ID. @@ -201,7 +201,7 @@ class TestGlanceImageService(base.TestCase): 'owner': None, } actual = self.service.show(image_id) - self.assertDictEqual(expected, actual) + self.assertEqual(expected, actual) def test_create(self): fixture = self._make_fixture(name='test image') @@ -271,7 +271,7 @@ class TestGlanceImageService(base.TestCase): 'owner': None, } - self.assertDictEqual(expected, meta) + self.assertEqual(expected, meta) i = i + 1 def test_detail_limit(self): @@ -327,7 +327,7 @@ class TestGlanceImageService(base.TestCase): 'deleted': None, 'owner': None, } - self.assertDictEqual(expected, meta) + self.assertEqual(expected, meta) i = i + 1 def test_detail_invalid_marker(self): From 857372a2269cdd0f8a1ae5b9e9f6e0ee193f01be Mon Sep 17 00:00:00 2001 From: Yuiko Takada Mori Date: Thu, 14 Jul 2016 11:17:24 +0900 Subject: [PATCH 106/166] IPMITool: add IPMISocatConsole and IPMIConsole class IPMISocatConsole is a new console interface class using ipmitool and socat. 
It has the same APIs of IPMIShellinaboxConsole class and provides TCP4/TCP6 connection service to connect serial-on-LAN of nodes. IPMIConsole is a new console interface class using ipmitool and used as parent class of IPMIShellinaboxConsole and IPMISocatConsol class. This patch set implements new console driver interfaces IPMISocatConsole. To use PXE + IPMItool + socat, specify pxe_ipmitool_socat. To use IPA + IPMItool + socat, specify agent_ipmitool_socat. Spec: https://review.openstack.org/#/c/319505/ Partial-Bug: #1553083 Change-Id: I35a7dcb7e89baf16d096501fd44dbc12adc8c61e --- ironic/drivers/agent.py | 19 + ironic/drivers/fake.py | 11 + ironic/drivers/modules/ipmitool.py | 92 +++- ironic/drivers/pxe.py | 19 + .../unit/drivers/modules/test_ipmitool.py | 402 ++++++++++++++---- ironic/tests/unit/drivers/test_agent.py | 37 ++ ironic/tests/unit/drivers/test_pxe.py | 13 + ...cat-console-ipmitool-ab4402ec976c5c96.yaml | 5 + setup.cfg | 3 + 9 files changed, 498 insertions(+), 103 deletions(-) create mode 100644 releasenotes/notes/add-socat-console-ipmitool-ab4402ec976c5c96.yaml diff --git a/ironic/drivers/agent.py b/ironic/drivers/agent.py index 56f3d5744..1b1a00056 100644 --- a/ironic/drivers/agent.py +++ b/ironic/drivers/agent.py @@ -66,6 +66,25 @@ class AgentAndIPMIToolDriver(base.BaseDriver): 'AgentAndIPMIToolDriver') +class AgentAndIPMIToolAndSocatDriver(AgentAndIPMIToolDriver): + """Agent + IPMITool + socat driver. + + This driver implements the `core` functionality, combining + :class:`ironic.drivers.modules.ipmitool.IPMIPower` (for power on/off and + reboot) with :class:`ironic.drivers.modules.agent.AgentDeploy` (for + image deployment) and with + :class:`ironic.drivers.modules.ipmitool.IPMISocatConsole`. + This driver uses the socat console interface instead of the shellinabox + one. + Implementations are in those respective classes; this class is merely the + glue between them. 
+ """ + + def __init__(self): + AgentAndIPMIToolDriver.__init__(self) + self.console = ipmitool.IPMISocatConsole() + + class AgentAndIPMINativeDriver(base.BaseDriver): """Agent + IPMINative driver. diff --git a/ironic/drivers/fake.py b/ironic/drivers/fake.py index 88ccefd61..46912cd41 100644 --- a/ironic/drivers/fake.py +++ b/ironic/drivers/fake.py @@ -90,6 +90,17 @@ class FakeIPMIToolDriver(base.BaseDriver): self.management = ipmitool.IPMIManagement() +class FakeIPMIToolSocatDriver(base.BaseDriver): + """Example implementation of a Driver.""" + + def __init__(self): + self.power = ipmitool.IPMIPower() + self.console = ipmitool.IPMISocatConsole() + self.deploy = fake.FakeDeploy() + self.vendor = ipmitool.VendorPassthru() + self.management = ipmitool.IPMIManagement() + + class FakePXEDriver(base.BaseDriver): """Example implementation of a Driver.""" diff --git a/ironic/drivers/modules/ipmitool.py b/ironic/drivers/modules/ipmitool.py index cef19d25b..173c56acf 100644 --- a/ironic/drivers/modules/ipmitool.py +++ b/ironic/drivers/modules/ipmitool.py @@ -355,11 +355,12 @@ def _parse_driver_info(node): } -def _exec_ipmitool(driver_info, command): +def _exec_ipmitool(driver_info, command, check_exit_code=None): """Execute the ipmitool command. :param driver_info: the ipmitool parameters for accessing a node. :param command: the ipmitool command to be executed. + :param check_exit_code: Single bool, int, or list of allowed exit codes. :returns: (stdout, stderr) from executing the command. :raises: PasswordFileFailedToCreate from creating or writing to the temporary file. @@ -414,6 +415,9 @@ def _exec_ipmitool(driver_info, command): # Resetting the list that will be utilized so the password arguments # from any previous execution are preserved. 
cmd_args = args[:] + extra_args = {} + if check_exit_code is not None: + extra_args['check_exit_code'] = check_exit_code # 'ipmitool' command will prompt password if there is no '-f' # option, we set it to '\0' to write a password file to support # empty password @@ -422,7 +426,7 @@ def _exec_ipmitool(driver_info, command): cmd_args.append(pw_file) cmd_args.extend(command.split(" ")) try: - out, err = utils.execute(*cmd_args) + out, err = utils.execute(*cmd_args, **extra_args) return out, err except processutils.ProcessExecutionError as e: with excutils.save_and_reraise_exception() as ctxt: @@ -1090,8 +1094,8 @@ class VendorPassthru(base.VendorInterface): _parse_driver_info(task.node) -class IPMIShellinaboxConsole(base.ConsoleInterface): - """A ConsoleInterface that uses ipmitool and shellinabox.""" +class IPMIConsole(base.ConsoleInterface): + """A base ConsoleInterface that uses ipmitool.""" def __init__(self): try: @@ -1128,10 +1132,11 @@ class IPMIShellinaboxConsole(base.ConsoleInterface): "Check the 'ipmi_protocol_version' parameter in " "node's driver_info")) - def start_console(self, task): + def _start_console(self, driver_info, start_method): """Start a remote console for the node. 
:param task: a task from TaskManager + :param start_method: console_utils method to start console :raises: InvalidParameterValue if required ipmi parameters are missing :raises: PasswordFileFailedToCreate if unable to create a file containing the password @@ -1139,8 +1144,6 @@ class IPMIShellinaboxConsole(base.ConsoleInterface): created :raises: ConsoleSubprocessFailed when invoking the subprocess failed """ - driver_info = _parse_driver_info(task.node) - path = _console_pwfile_path(driver_info['uuid']) pw_file = console_utils.make_persistent_password_file( path, driver_info['password'] or '\0') @@ -1162,13 +1165,30 @@ class IPMIShellinaboxConsole(base.ConsoleInterface): ipmi_cmd += " -v" ipmi_cmd += " sol activate" try: - console_utils.start_shellinabox_console(driver_info['uuid'], - driver_info['port'], - ipmi_cmd) + start_method(driver_info['uuid'], driver_info['port'], ipmi_cmd) except (exception.ConsoleError, exception.ConsoleSubprocessFailed): with excutils.save_and_reraise_exception(): ironic_utils.unlink_without_raise(path) + +class IPMIShellinaboxConsole(IPMIConsole): + """A ConsoleInterface that uses ipmitool and shellinabox.""" + + def start_console(self, task): + """Start a remote console for the node. + + :param task: a task from TaskManager + :raises: InvalidParameterValue if required ipmi parameters are missing + :raises: PasswordFileFailedToCreate if unable to create a file + containing the password + :raises: ConsoleError if the directory for the PID file cannot be + created + :raises: ConsoleSubprocessFailed when invoking the subprocess failed + """ + driver_info = _parse_driver_info(task.node) + self._start_console(driver_info, + console_utils.start_shellinabox_console) + def stop_console(self, task): """Stop the remote console session for the node. 
@@ -1186,3 +1206,55 @@ class IPMIShellinaboxConsole(base.ConsoleInterface): driver_info = _parse_driver_info(task.node) url = console_utils.get_shellinabox_console_url(driver_info['port']) return {'type': 'shellinabox', 'url': url} + + +class IPMISocatConsole(IPMIConsole): + """A ConsoleInterface that uses ipmitool and socat.""" + + def start_console(self, task): + """Start a remote console for the node. + + :param task: a task from TaskManager + :raises: InvalidParameterValue if required ipmi parameters are missing + :raises: PasswordFileFailedToCreate if unable to create a file + containing the password + :raises: ConsoleError if the directory for the PID file cannot be + created + :raises: ConsoleSubprocessFailed when invoking the subprocess failed + """ + driver_info = _parse_driver_info(task.node) + try: + self._exec_stop_console(driver_info) + except OSError: + # We need to drop any existing sol sessions with sol deactivate. + # OSError is raised when sol session is deactive, so we can + # ignore it. + pass + self._start_console(driver_info, console_utils.start_socat_console) + + def stop_console(self, task): + """Stop the remote console session for the node. + + :param task: a task from TaskManager + :raises: ConsoleError if unable to stop the console + """ + driver_info = _parse_driver_info(task.node) + try: + console_utils.stop_socat_console(task.node.uuid) + finally: + ironic_utils.unlink_without_raise( + _console_pwfile_path(task.node.uuid)) + self._exec_stop_console(driver_info) + + def _exec_stop_console(self, driver_info): + cmd = "sol deactivate" + _exec_ipmitool(driver_info, cmd, check_exit_code=[0, 1]) + + def get_console(self, task): + """Get the type and connection information about the console. 
+ + :param task: a task from TaskManager + """ + driver_info = _parse_driver_info(task.node) + url = console_utils.get_socat_console_url(driver_info['port']) + return {'type': 'socat', 'url': url} diff --git a/ironic/drivers/pxe.py b/ironic/drivers/pxe.py index 3ec78c635..d37fee90c 100644 --- a/ironic/drivers/pxe.py +++ b/ironic/drivers/pxe.py @@ -85,6 +85,25 @@ class PXEAndIPMIToolDriver(base.BaseDriver): self.raid = agent.AgentRAID() +class PXEAndIPMIToolAndSocatDriver(PXEAndIPMIToolDriver): + """PXE + IPMITool + socat driver. + + This driver implements the `core` functionality, combining + :class:`ironic.drivers.modules.ipmi.IPMI` for power on/off + and reboot with + :class:`ironic.drivers.modules.iscsi_deploy.ISCSIDeploy` (for + image deployment) and with + :class:`ironic.drivers.modules.ipmitool.IPMISocatConsole`. + This driver uses the socat console interface instead of the shellinabox + one. + Implementations are in those respective + classes; this class is merely the glue between them. + """ + def __init__(self): + PXEAndIPMIToolDriver.__init__(self) + self.console = ipmitool.IPMISocatConsole() + + class PXEAndSSHDriver(base.BaseDriver): """PXE + SSH driver. 
diff --git a/ironic/tests/unit/drivers/modules/test_ipmitool.py b/ironic/tests/unit/drivers/modules/test_ipmitool.py index aacc986f4..5fd3aee6b 100644 --- a/ironic/tests/unit/drivers/modules/test_ipmitool.py +++ b/ironic/tests/unit/drivers/modules/test_ipmitool.py @@ -28,6 +28,7 @@ import tempfile import time import types +from ironic_lib import utils as ironic_utils import mock from oslo_concurrency import processutils from oslo_config import cfg @@ -108,7 +109,7 @@ class IPMIToolCheckInitTestCase(base.TestCase): ipmi.TMP_DIR_CHECKED = True ipmi.IPMIPower() mock_support.assert_called_with(mock.ANY) - self.assertEqual(0, mock_check_dir.call_count) + self.assertFalse(mock_check_dir.called) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @mock.patch.object(utils, 'check_dir', autospec=True) @@ -130,7 +131,7 @@ class IPMIToolCheckInitTestCase(base.TestCase): ipmi.IPMIManagement() mock_support.assert_called_with(mock.ANY) - self.assertEqual(0, mock_check_dir.call_count) + self.assertFalse(mock_check_dir.called) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @mock.patch.object(utils, 'check_dir', autospec=True) @@ -150,7 +151,7 @@ class IPMIToolCheckInitTestCase(base.TestCase): ipmi.TMP_DIR_CHECKED = True ipmi.VendorPassthru() mock_support.assert_called_with(mock.ANY) - self.assertEqual(0, mock_check_dir.call_count) + self.assertFalse(mock_check_dir.called) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @mock.patch.object(utils, 'check_dir', autospec=True) @@ -170,7 +171,29 @@ class IPMIToolCheckInitTestCase(base.TestCase): ipmi.TMP_DIR_CHECKED = True ipmi.IPMIShellinaboxConsole() mock_support.assert_called_with(mock.ANY) - self.assertEqual(0, mock_check_dir.call_count) + self.assertFalse(mock_check_dir.called) + + @mock.patch.object(ipmi, '_is_option_supported', autospec=True) + @mock.patch.object(utils, 'check_dir', autospec=True) + def test_console_init_calls_for_socat(self, mock_check_dir, mock_support): + with 
mock.patch.object(ipmi, 'TMP_DIR_CHECKED'): + mock_support.return_value = True + ipmi.TMP_DIR_CHECKED = None + ipmi.IPMISocatConsole() + mock_support.assert_called_with(mock.ANY) + mock_check_dir.assert_called_once_with() + + @mock.patch.object(ipmi, '_is_option_supported', autospec=True) + @mock.patch.object(utils, 'check_dir', autospec=True) + def test_console_init_calls_for_socat_already_checked(self, + mock_check_dir, + mock_support): + with mock.patch.object(ipmi, 'TMP_DIR_CHECKED'): + mock_support.return_value = True + ipmi.TMP_DIR_CHECKED = True + ipmi.IPMISocatConsole() + mock_support.assert_called_with(mock.ANY) + self.assertFalse(mock_check_dir.call_count) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @@ -977,14 +1000,14 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): @mock.patch.object(utils, 'execute', autospec=True) def test__exec_ipmitool_with_single_bridging(self, mock_exec, - mock_support, - mock_sleep): + mock_pass, + mock_support): single_bridge_info = dict(BRIDGE_INFO_DICT) single_bridge_info['ipmi_bridging'] = 'single' node = obj_utils.get_test_node(self.context, driver='fake_ipmitool', driver_info=single_bridge_info) # when support for single bridge command is called returns True - mock_support.return_value = True + mock_pass.return_value = True info = ipmi._parse_driver_info(node) info['transit_channel'] = info['transit_address'] = None @@ -1004,17 +1027,17 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): expected = [mock.call('single_bridge'), mock.call('timing')] # When support for timing command is called returns False - mock_support.return_value = False + mock_pass.return_value = False mock_exec.return_value = (None, None) ipmi._exec_ipmitool(info, 'A B C') - self.assertEqual(expected, mock_support.call_args_list) + self.assertEqual(expected, mock_pass.call_args_list) mock_exec.assert_called_once_with(*args) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @mock.patch.object(ipmi, 
'_make_password_file', _make_password_file_stub) @mock.patch.object(utils, 'execute', autospec=True) def test__exec_ipmitool_exception( - self, mock_exec, mock_support, mock_sleep): + self, mock_exec, mock_pass, mock_support): args = [ 'ipmitool', '-I', 'lanplus', @@ -1025,12 +1048,12 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): 'A', 'B', 'C', ] - mock_support.return_value = False + mock_pass.return_value = False mock_exec.side_effect = processutils.ProcessExecutionError("x") self.assertRaises(processutils.ProcessExecutionError, ipmi._exec_ipmitool, self.info, 'A B C') - mock_support.assert_called_once_with('timing') + mock_pass.assert_called_once_with('timing') mock_exec.assert_called_once_with(*args) self.assertEqual(1, mock_exec.call_count) @@ -1117,7 +1140,7 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): @mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub) @mock.patch.object(utils, 'execute', autospec=True) def test__exec_ipmitool_IPMI_version_1_5( - self, mock_exec, mock_support, mock_sleep): + self, mock_exec, mock_pass, mock_support): self.info['protocol_version'] = '1.5' # Assert it uses "-I lan" (1.5) instead of "-I lanplus" (2.0) args = [ @@ -1130,17 +1153,17 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): 'A', 'B', 'C', ] - mock_support.return_value = False + mock_pass.return_value = False mock_exec.return_value = (None, None) ipmi._exec_ipmitool(self.info, 'A B C') - mock_support.assert_called_once_with('timing') + mock_pass.assert_called_once_with('timing') mock_exec.assert_called_once_with(*args) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub) @mock.patch.object(utils, 'execute', autospec=True) - def test__exec_ipmitool_with_port(self, mock_exec, mock_support, - mock_sleep): + def test__exec_ipmitool_with_port(self, mock_exec, mock_pass, + mock_support): self.info['dest_port'] = '1623' 
ipmi.LAST_CMD_TIME = {} args = [ @@ -1154,14 +1177,34 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): 'A', 'B', 'C', ] - mock_support.return_value = False + mock_pass.return_value = False mock_exec.return_value = (None, None) ipmi._exec_ipmitool(self.info, 'A B C') - mock_support.assert_called_once_with('timing') + mock_pass.assert_called_once_with('timing') mock_exec.assert_called_once_with(*args) - self.assertFalse(mock_sleep.called) + self.assertFalse(mock_support.called) + + @mock.patch.object(ipmi, '_is_option_supported', autospec=True) + @mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub) + @mock.patch.object(utils, 'execute', autospec=True) + def test__exec_ipmitool_with_check_exit_code(self, mock_exec, + mock_pass, mock_support): + args = [ + 'ipmitool', + '-I', 'lanplus', + '-H', self.info['address'], + '-L', self.info['priv_level'], + '-U', self.info['username'], + '-f', awesome_password_filename, + 'A', 'B', 'C', + ] + mock_pass.return_value = False + mock_exec.return_value = (None, None) + ipmi._exec_ipmitool(self.info, 'A B C', check_exit_code=[0, 1]) + mock_pass.assert_called_once_with('timing') + mock_exec.assert_called_once_with(*args, check_exit_code=[0, 1]) @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test__power_status_on(self, mock_exec, mock_sleep): @@ -1222,13 +1265,18 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase): class IPMIToolDriverTestCase(db_base.DbTestCase): - def setUp(self): + def setUp(self, terminal=None): super(IPMIToolDriverTestCase, self).setUp() - mgr_utils.mock_the_extension_manager(driver="fake_ipmitool") - self.driver = driver_factory.get_driver("fake_ipmitool") + if terminal is None: + self.driver_name = "fake_ipmitool" + else: + self.driver_name = "fake_ipmitool_socat" + + mgr_utils.mock_the_extension_manager(driver=self.driver_name) + self.driver = driver_factory.get_driver(self.driver_name) self.node = obj_utils.create_test_node(self.context, - 
driver='fake_ipmitool', + driver=self.driver_name, driver_info=INFO_DICT) self.info = ipmi._parse_driver_info(self.node) @@ -1290,7 +1338,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock_on.return_value = states.POWER_ON with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.driver.power.set_power_state(task, states.POWER_ON) @@ -1306,7 +1354,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock_on.return_value = states.POWER_ON with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.driver.power.set_power_state(task, states.POWER_ON) mock_next_boot.assert_called_once_with(task, self.info) @@ -1322,7 +1370,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock_off.return_value = states.POWER_OFF with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.driver.power.set_power_state(task, states.POWER_OFF) @@ -1336,7 +1384,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock_on.return_value = states.ERROR with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.assertRaises(exception.PowerStateFailure, self.driver.power.set_power_state, task, @@ -1346,7 +1394,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): self.assertFalse(mock_off.called) def test_set_power_invalid_state(self): - with task_manager.acquire(self.context, self.node['uuid']) as task: + with task_manager.acquire(self.context, self.node.uuid) as task: self.assertRaises(exception.InvalidParameterValue, self.driver.power.set_power_state, task, @@ -1357,7 +1405,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock_exec.return_value = [None, None] with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.driver.vendor.send_raw(task, http_method='POST', raw_bytes='0x00 0x01') @@ -1368,7 +1416,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): 
mock_exec.side_effect = exception.PasswordFileFailedToCreate('error') with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.assertRaises(exception.IPMIFailure, self.driver.vendor.send_raw, task, @@ -1380,7 +1428,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock_exec.return_value = [None, None] with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.driver.vendor.bmc_reset(task, 'POST') mock_exec.assert_called_once_with(self.info, 'bmc reset warm') @@ -1390,7 +1438,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock_exec.return_value = [None, None] with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.driver.vendor.bmc_reset(task, 'POST', warm=False) mock_exec.assert_called_once_with(self.info, 'bmc reset cold') @@ -1400,7 +1448,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock_exec.side_effect = processutils.ProcessExecutionError() with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.assertRaises(exception.IPMIFailure, self.driver.vendor.bmc_reset, task, 'POST') @@ -1418,7 +1466,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock.call.power_on(self.info)] with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.driver.power.reboot(task) mock_next_boot.assert_called_once_with(task, self.info) @@ -1436,7 +1484,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): mock.call.power_on(self.info)] with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.assertRaises(exception.PowerStateFailure, self.driver.power.reboot, task) @@ -1446,28 +1494,28 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(ipmi, '_parse_driver_info', autospec=True) def test_vendor_passthru_validate__parse_driver_info_fail(self, info_mock): info_mock.side_effect = 
exception.InvalidParameterValue("bad") - with task_manager.acquire(self.context, self.node['uuid']) as task: + with task_manager.acquire(self.context, self.node.uuid) as task: self.assertRaises(exception.InvalidParameterValue, self.driver.vendor.validate, task, method='send_raw', raw_bytes='0x00 0x01') info_mock.assert_called_once_with(task.node) def test_vendor_passthru_validate__send_raw_bytes_good(self): - with task_manager.acquire(self.context, self.node['uuid']) as task: + with task_manager.acquire(self.context, self.node.uuid) as task: self.driver.vendor.validate(task, method='send_raw', http_method='POST', raw_bytes='0x00 0x01') def test_vendor_passthru_validate__send_raw_bytes_fail(self): - with task_manager.acquire(self.context, self.node['uuid']) as task: + with task_manager.acquire(self.context, self.node.uuid) as task: self.assertRaises(exception.MissingParameterValue, self.driver.vendor.validate, task, method='send_raw') @mock.patch.object(ipmi.VendorPassthru, 'send_raw', autospec=True) def test_vendor_passthru_call_send_raw_bytes(self, raw_bytes_mock): - with task_manager.acquire(self.context, self.node['uuid'], + with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.driver.vendor.send_raw(task, http_method='POST', raw_bytes='0x00 0x01') @@ -1476,18 +1524,18 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): raw_bytes='0x00 0x01') def test_vendor_passthru_validate__bmc_reset_good(self): - with task_manager.acquire(self.context, self.node['uuid']) as task: + with task_manager.acquire(self.context, self.node.uuid) as task: self.driver.vendor.validate(task, method='bmc_reset') def test_vendor_passthru_validate__bmc_reset_warm_good(self): - with task_manager.acquire(self.context, self.node['uuid']) as task: + with task_manager.acquire(self.context, self.node.uuid) as task: self.driver.vendor.validate(task, method='bmc_reset', warm=True) def test_vendor_passthru_validate__bmc_reset_cold_good(self): - with 
task_manager.acquire(self.context, self.node['uuid']) as task: + with task_manager.acquire(self.context, self.node.uuid) as task: self.driver.vendor.validate(task, method='bmc_reset', warm=False) @@ -1496,7 +1544,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): def _vendor_passthru_call_bmc_reset(self, warm, expected, mock_exec): mock_exec.return_value = [None, None] - with task_manager.acquire(self.context, self.node['uuid'], + with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.driver.vendor.bmc_reset(task, 'POST', warm=warm) mock_exec.assert_called_once_with( @@ -1553,49 +1601,66 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): self.assertRaises(exception.InvalidParameterValue, task.driver.console.validate, task) - @mock.patch.object(console_utils, 'start_shellinabox_console', - autospec=True) - def test_start_console(self, mock_exec): - mock_exec.return_value = None - - with task_manager.acquire(self.context, - self.node['uuid']) as task: - self.driver.console.start_console(task) - - mock_exec.assert_called_once_with(self.info['uuid'], - self.info['port'], - mock.ANY) - self.assertTrue(mock_exec.called) - - @mock.patch.object(console_utils, 'start_shellinabox_console', - autospec=True) - def test_start_console_fail(self, mock_exec): - mock_exec.side_effect = exception.ConsoleSubprocessFailed( - error='error') - - with task_manager.acquire(self.context, - self.node['uuid']) as task: - self.assertRaises(exception.ConsoleSubprocessFailed, - self.driver.console.start_console, - task) - - @mock.patch.object(console_utils, 'start_shellinabox_console', - autospec=True) - def test_start_console_fail_nodir(self, mock_exec): - mock_exec.side_effect = exception.ConsoleError() + @mock.patch.object(ipmi.IPMIConsole, '_start_console', autospec=True) + def test_start_console(self, mock_start): + mock_start.return_value = None with task_manager.acquire(self.context, self.node.uuid) as task: + self.driver.console.start_console(task) + 
driver_info = ipmi._parse_driver_info(task.node) + mock_start.assert_called_once_with( + self.driver.console, driver_info, + console_utils.start_shellinabox_console) + + @mock.patch.object(console_utils, 'start_shellinabox_console', + autospec=True) + def test__start_console(self, mock_start): + mock_start.return_value = None + + with task_manager.acquire(self.context, + self.node.uuid) as task: + driver_info = ipmi._parse_driver_info(task.node) + self.driver.console._start_console( + driver_info, console_utils.start_shellinabox_console) + + mock_start.assert_called_once_with(self.info['uuid'], + self.info['port'], + mock.ANY) + + @mock.patch.object(console_utils, 'start_shellinabox_console', + autospec=True) + def test__start_console_fail(self, mock_start): + mock_start.side_effect = exception.ConsoleSubprocessFailed( + error='error') + + with task_manager.acquire(self.context, + self.node.uuid) as task: + driver_info = ipmi._parse_driver_info(task.node) + self.assertRaises(exception.ConsoleSubprocessFailed, + self.driver.console._start_console, + driver_info, + console_utils.start_shellinabox_console) + + @mock.patch.object(console_utils, 'start_shellinabox_console', + autospec=True) + def test__start_console_fail_nodir(self, mock_start): + mock_start.side_effect = exception.ConsoleError() + + with task_manager.acquire(self.context, + self.node.uuid) as task: + driver_info = ipmi._parse_driver_info(task.node) self.assertRaises(exception.ConsoleError, - self.driver.console.start_console, - task) - mock_exec.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY) + self.driver.console._start_console, + driver_info, + console_utils.start_shellinabox_console) + mock_start.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY) @mock.patch.object(console_utils, 'make_persistent_password_file', autospec=True) @mock.patch.object(console_utils, 'start_shellinabox_console', autospec=True) - def test_start_console_empty_password(self, mock_exec, mock_pass): + def 
test__start_console_empty_password(self, mock_start, mock_pass): driver_info = self.node.driver_info del driver_info['ipmi_password'] self.node.driver_info = driver_info @@ -1603,24 +1668,25 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): with task_manager.acquire(self.context, self.node.uuid) as task: - self.driver.console.start_console(task) + driver_info = ipmi._parse_driver_info(task.node) + self.driver.console._start_console( + driver_info, console_utils.start_shellinabox_console) mock_pass.assert_called_once_with(mock.ANY, '\0') - mock_exec.assert_called_once_with(self.info['uuid'], - self.info['port'], - mock.ANY) + mock_start.assert_called_once_with(self.info['uuid'], + self.info['port'], + mock.ANY) @mock.patch.object(console_utils, 'stop_shellinabox_console', autospec=True) - def test_stop_console(self, mock_exec): - mock_exec.return_value = None + def test_stop_console(self, mock_stop): + mock_stop.return_value = None with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: self.driver.console.stop_console(task) - mock_exec.assert_called_once_with(self.info['uuid']) - self.assertTrue(mock_exec.called) + mock_stop.assert_called_once_with(self.info['uuid']) @mock.patch.object(console_utils, 'stop_shellinabox_console', autospec=True) @@ -1637,18 +1703,17 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): @mock.patch.object(console_utils, 'get_shellinabox_console_url', autospec=True) - def test_get_console(self, mock_exec): + def test_get_console(self, mock_get): url = 'http://localhost:4201' - mock_exec.return_value = url + mock_get.return_value = url expected = {'type': 'shellinabox', 'url': url} with task_manager.acquire(self.context, - self.node['uuid']) as task: + self.node.uuid) as task: console_info = self.driver.console.get_console(task) self.assertEqual(expected, console_info) - mock_exec.assert_called_once_with(self.info['port']) - self.assertTrue(mock_exec.called) + 
mock_get.assert_called_once_with(self.info['port']) @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test_management_interface_set_boot_device_ok(self, mock_exec): @@ -1810,7 +1875,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): # Missing IPMI driver_info information node = obj_utils.create_test_node(self.context, uuid=uuidutils.generate_uuid(), - driver='fake_ipmitool') + driver=self.driver_name) with task_manager.acquire(self.context, node.uuid) as task: self.assertRaises(exception.MissingParameterValue, task.driver.management.validate, task) @@ -2046,3 +2111,154 @@ class IPMIToolDriverTestCase(db_base.DbTestCase): ret = ipmi.send_raw(task, 'fake raw') self.assertEqual(fake_ret, ret) + + +class IPMIToolSocatDriverTestCase(IPMIToolDriverTestCase): + + def setUp(self): + super(IPMIToolSocatDriverTestCase, self).setUp(terminal="socat") + + @mock.patch.object(ipmi.IPMIConsole, '_start_console', autospec=True) + @mock.patch.object(ipmi.IPMISocatConsole, '_exec_stop_console', + autospec=True) + def test_start_console(self, mock_stop, mock_start): + mock_start.return_value = None + mock_stop.return_value = None + + with task_manager.acquire(self.context, + self.node.uuid) as task: + self.driver.console.start_console(task) + driver_info = ipmi._parse_driver_info(task.node) + mock_stop.assert_called_once_with(self.driver.console, driver_info) + mock_start.assert_called_once_with( + self.driver.console, driver_info, + console_utils.start_socat_console) + + @mock.patch.object(console_utils, 'start_socat_console', + autospec=True) + def test__start_console(self, mock_start): + mock_start.return_value = None + + with task_manager.acquire(self.context, + self.node.uuid) as task: + driver_info = ipmi._parse_driver_info(task.node) + self.driver.console._start_console( + driver_info, console_utils.start_socat_console) + + mock_start.assert_called_once_with(self.info['uuid'], + self.info['port'], + mock.ANY) + + @mock.patch.object(console_utils, 
'start_socat_console', + autospec=True) + def test__start_console_fail(self, mock_start): + mock_start.side_effect = exception.ConsoleSubprocessFailed( + error='error') + + with task_manager.acquire(self.context, + self.node.uuid) as task: + driver_info = ipmi._parse_driver_info(task.node) + self.assertRaises(exception.ConsoleSubprocessFailed, + self.driver.console._start_console, + driver_info, + console_utils.start_socat_console) + + mock_start.assert_called_once_with(self.info['uuid'], + self.info['port'], + mock.ANY) + + @mock.patch.object(console_utils, 'start_socat_console', + autospec=True) + def test__start_console_fail_nodir(self, mock_start): + mock_start.side_effect = exception.ConsoleError() + + with task_manager.acquire(self.context, + self.node.uuid) as task: + driver_info = ipmi._parse_driver_info(task.node) + self.assertRaises(exception.ConsoleError, + self.driver.console._start_console, + driver_info, + console_utils.start_socat_console) + mock_start.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY) + + @mock.patch.object(console_utils, 'make_persistent_password_file', + autospec=True) + @mock.patch.object(console_utils, 'start_socat_console', + autospec=True) + def test__start_console_empty_password(self, mock_start, mock_pass): + driver_info = self.node.driver_info + del driver_info['ipmi_password'] + self.node.driver_info = driver_info + self.node.save() + + with task_manager.acquire(self.context, + self.node.uuid) as task: + driver_info = ipmi._parse_driver_info(task.node) + self.driver.console._start_console( + driver_info, console_utils.start_socat_console) + + mock_pass.assert_called_once_with(mock.ANY, '\0') + mock_start.assert_called_once_with(self.info['uuid'], + self.info['port'], + mock.ANY) + + @mock.patch.object(ipmi.IPMISocatConsole, '_exec_stop_console', + autospec=True) + @mock.patch.object(console_utils, 'stop_socat_console', + autospec=True) + def test_stop_console(self, mock_stop, mock_exec_stop): + 
mock_stop.return_value = None + + with task_manager.acquire(self.context, + self.node.uuid) as task: + driver_info = ipmi._parse_driver_info(task.node) + self.driver.console.stop_console(task) + + mock_stop.assert_called_once_with(self.info['uuid']) + mock_exec_stop.assert_called_once_with(self.driver.console, + driver_info) + + @mock.patch.object(ipmi.IPMISocatConsole, '_exec_stop_console', + autospec=True) + @mock.patch.object(ironic_utils, 'unlink_without_raise', + autospec=True) + @mock.patch.object(console_utils, 'stop_socat_console', + autospec=True) + def test_stop_console_fail(self, mock_stop, mock_unlink, mock_exec_stop): + mock_stop.side_effect = exception.ConsoleError() + + with task_manager.acquire(self.context, + self.node.uuid) as task: + self.assertRaises(exception.ConsoleError, + self.driver.console.stop_console, + task) + + mock_stop.assert_called_once_with(self.node.uuid) + mock_unlink.assert_called_once_with( + ipmi._console_pwfile_path(self.node.uuid)) + self.assertFalse(mock_exec_stop.call_count) + + @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) + def test__exec_stop_console(self, mock_exec): + with task_manager.acquire(self.context, + self.node.uuid) as task: + + driver_info = ipmi._parse_driver_info(task.node) + self.driver.console._exec_stop_console(driver_info) + + mock_exec.assert_called_once_with( + driver_info, 'sol deactivate', check_exit_code=[0, 1]) + + @mock.patch.object(console_utils, 'get_socat_console_url', + autospec=True) + def test_get_console(self, mock_get_url): + url = 'tcp://localhost:4201' + mock_get_url.return_value = url + expected = {'type': 'socat', 'url': url} + + with task_manager.acquire(self.context, + self.node.uuid) as task: + console_info = self.driver.console.get_console(task) + + self.assertEqual(expected, console_info) + mock_get_url.assert_called_once_with(self.info['port']) diff --git a/ironic/tests/unit/drivers/test_agent.py b/ironic/tests/unit/drivers/test_agent.py index 1a1439282..23cdb8751 
100644 --- a/ironic/tests/unit/drivers/test_agent.py +++ b/ironic/tests/unit/drivers/test_agent.py @@ -25,8 +25,45 @@ from ironic.drivers.modules import agent as agent_module from ironic.drivers.modules.amt import management as amt_management from ironic.drivers.modules.amt import power as amt_power from ironic.drivers.modules import iboot +from ironic.drivers.modules import ipmitool from ironic.drivers.modules import pxe from ironic.drivers.modules import wol +from ironic.drivers import utils +from ironic.tests import base + + +class AgentAndIPMIToolDriverTestCase(base.TestCase): + + def test___init__(self): + driver = agent.AgentAndIPMIToolDriver() + + self.assertIsInstance(driver.power, ipmitool.IPMIPower) + self.assertIsInstance(driver.console, ipmitool.IPMIShellinaboxConsole) + self.assertIsInstance(driver.boot, pxe.PXEBoot) + self.assertIsInstance(driver.deploy, agent_module.AgentDeploy) + self.assertIsInstance(driver.management, ipmitool.IPMIManagement) + self.assertIsInstance(driver.agent_vendor, + agent_module.AgentVendorInterface) + self.assertIsInstance(driver.ipmi_vendor, ipmitool.VendorPassthru) + self.assertIsInstance(driver.vendor, utils.MixinVendorInterface) + self.assertIsInstance(driver.raid, agent_module.AgentRAID) + + +class AgentAndIPMIToolAndSocatDriverTestCase(base.TestCase): + + def test___init__(self): + driver = agent.AgentAndIPMIToolAndSocatDriver() + + self.assertIsInstance(driver.power, ipmitool.IPMIPower) + self.assertIsInstance(driver.console, ipmitool.IPMISocatConsole) + self.assertIsInstance(driver.boot, pxe.PXEBoot) + self.assertIsInstance(driver.deploy, agent_module.AgentDeploy) + self.assertIsInstance(driver.management, ipmitool.IPMIManagement) + self.assertIsInstance(driver.agent_vendor, + agent_module.AgentVendorInterface) + self.assertIsInstance(driver.ipmi_vendor, ipmitool.VendorPassthru) + self.assertIsInstance(driver.vendor, utils.MixinVendorInterface) + self.assertIsInstance(driver.raid, agent_module.AgentRAID) class 
AgentAndAMTDriverTestCase(testtools.TestCase): diff --git a/ironic/tests/unit/drivers/test_pxe.py b/ironic/tests/unit/drivers/test_pxe.py index 1039e7a6b..13ec8144c 100644 --- a/ironic/tests/unit/drivers/test_pxe.py +++ b/ironic/tests/unit/drivers/test_pxe.py @@ -65,6 +65,19 @@ class PXEDriversTestCase(testtools.TestCase): self.assertIsInstance(driver.vendor, utils.MixinVendorInterface) self.assertIsInstance(driver.raid, agent.AgentRAID) + def test_pxe_ipmitool_socat_driver(self): + driver = pxe.PXEAndIPMIToolAndSocatDriver() + + self.assertIsInstance(driver.power, ipmitool.IPMIPower) + self.assertIsInstance(driver.console, ipmitool.IPMISocatConsole) + self.assertIsInstance(driver.boot, pxe_module.PXEBoot) + self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy) + self.assertIsInstance(driver.management, ipmitool.IPMIManagement) + self.assertIsNone(driver.inspect) + # TODO(rameshg87): Need better way of asserting the routes. + self.assertIsInstance(driver.vendor, utils.MixinVendorInterface) + self.assertIsInstance(driver.raid, agent.AgentRAID) + def test_pxe_ssh_driver(self): driver = pxe.PXEAndSSHDriver() diff --git a/releasenotes/notes/add-socat-console-ipmitool-ab4402ec976c5c96.yaml b/releasenotes/notes/add-socat-console-ipmitool-ab4402ec976c5c96.yaml new file mode 100644 index 000000000..3fbc66ab9 --- /dev/null +++ b/releasenotes/notes/add-socat-console-ipmitool-ab4402ec976c5c96.yaml @@ -0,0 +1,5 @@ +--- +features: + - Adds support for socat-based serial console to ipmitool-based drivers. + These are available by using the agent_ipmitool_socat and + pxe_ipmitool_socat drivers. 
diff --git a/setup.cfg b/setup.cfg index becd3df3e..de639b38c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -40,6 +40,7 @@ ironic.drivers = agent_iboot = ironic.drivers.agent:AgentAndIBootDriver agent_ilo = ironic.drivers.ilo:IloVirtualMediaAgentDriver agent_ipmitool = ironic.drivers.agent:AgentAndIPMIToolDriver + agent_ipmitool_socat = ironic.drivers.agent:AgentAndIPMIToolAndSocatDriver agent_irmc = ironic.drivers.irmc:IRMCVirtualMediaAgentDriver agent_pxe_oneview = ironic.drivers.oneview:AgentPXEOneViewDriver agent_pyghmi = ironic.drivers.agent:AgentAndIPMINativeDriver @@ -51,6 +52,7 @@ ironic.drivers = fake_agent = ironic.drivers.fake:FakeAgentDriver fake_inspector = ironic.drivers.fake:FakeIPMIToolInspectorDriver fake_ipmitool = ironic.drivers.fake:FakeIPMIToolDriver + fake_ipmitool_socat = ironic.drivers.fake:FakeIPMIToolSocatDriver fake_ipminative = ironic.drivers.fake:FakeIPMINativeDriver fake_ssh = ironic.drivers.fake:FakeSSHDriver fake_pxe = ironic.drivers.fake:FakePXEDriver @@ -71,6 +73,7 @@ ironic.drivers = iscsi_irmc = ironic.drivers.irmc:IRMCVirtualMediaIscsiDriver iscsi_pxe_oneview = ironic.drivers.oneview:ISCSIPXEOneViewDriver pxe_ipmitool = ironic.drivers.pxe:PXEAndIPMIToolDriver + pxe_ipmitool_socat = ironic.drivers.pxe:PXEAndIPMIToolAndSocatDriver pxe_ipminative = ironic.drivers.pxe:PXEAndIPMINativeDriver pxe_ssh = ironic.drivers.pxe:PXEAndSSHDriver pxe_vbox = ironic.drivers.pxe:PXEAndVirtualBoxDriver From c5a738524264f76266fe9435856e425f8a5ece72 Mon Sep 17 00:00:00 2001 From: Ruby Loo Date: Mon, 25 Jul 2016 11:10:39 -0400 Subject: [PATCH 107/166] Use 'UUID', not 'uuid' in exception strings Use the grammatically correct 'UUID' (since it is an abbreviation) in exception strings. 
Change-Id: Ib840b649b6ba0a7ba8dc935be6f06b9d36bb87d7 --- ironic/common/exception.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ironic/common/exception.py b/ironic/common/exception.py index f93305d21..49a643eac 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -160,11 +160,11 @@ class DuplicateName(Conflict): class InvalidUUID(Invalid): - _msg_fmt = _("Expected a uuid but received %(uuid)s.") + _msg_fmt = _("Expected a UUID but received %(uuid)s.") class InvalidUuidOrName(Invalid): - _msg_fmt = _("Expected a logical name or uuid but received %(name)s.") + _msg_fmt = _("Expected a logical name or UUID but received %(name)s.") class InvalidName(Invalid): @@ -172,7 +172,7 @@ class InvalidName(Invalid): class InvalidIdentity(Invalid): - _msg_fmt = _("Expected an uuid or int but received %(identity)s.") + _msg_fmt = _("Expected a UUID or int but received %(identity)s.") class InvalidMAC(Invalid): From 7706af37e0f7077dd514da9e956fdba180534e66 Mon Sep 17 00:00:00 2001 From: Ruby Loo Date: Mon, 25 Jul 2016 20:37:17 -0400 Subject: [PATCH 108/166] Fix rendering for version 1.14 The description for version 1.14 is rendered incorrectly. A blank line is added so that the two bullets are rendered correctly. 
Change-Id: I52fcbf51dd4b42c832e3ae65aefdf8ddea405492 --- doc/source/webapi/v1.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/webapi/v1.rst b/doc/source/webapi/v1.rst index 0b4f929a9..f64fe4e44 100644 --- a/doc/source/webapi/v1.rst +++ b/doc/source/webapi/v1.rst @@ -65,6 +65,7 @@ API Versions History **1.14** Make the following endpoints discoverable via Ironic API: + * '/v1/nodes//states' * '/v1/drivers//properties' From 641ab43e3ee8445e1492e4d2aefb01a5a39c3f96 Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Tue, 26 Jul 2016 18:00:02 +0300 Subject: [PATCH 109/166] Change comment regarding network_interface In order to be able to unittest older API versions without changing tests that do node post, let's only add network_interface field to the return value of node_post_data if it was explicitly requested. Change-Id: I6de525040ee83011d3ebaf3e6da714bd3cd30603 --- ironic/tests/unit/api/utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ironic/tests/unit/api/utils.py b/ironic/tests/unit/api/utils.py index 2b1035171..c63ff0807 100644 --- a/ironic/tests/unit/api/utils.py +++ b/ironic/tests/unit/api/utils.py @@ -94,7 +94,9 @@ def node_post_data(**kw): node.pop('conductor_affinity') node.pop('chassis_id') node.pop('tags') - # TODO(vdrok): Remove popping network_interface when it's exposed in API + # NOTE(vdrok): network_interface was introduced in API version 1.20, return + # it only if it was explicitly requested, so that tests using older API + # versions don't fail if 'network_interface' not in kw: node.pop('network_interface') internal = node_controller.NodePatchType.internal_attrs() From 295b35c48f7dcc2983a19d4114254bea85239b3a Mon Sep 17 00:00:00 2001 From: Lokesh S Date: Tue, 26 Jan 2016 17:08:42 +0000 Subject: [PATCH 110/166] Add support for the audit middleware This adds support for the audit middleware to Ironic, allowing the middleware to send two notifications per API request, one for the request and another for 
the response. This adds an option to enable or disable audit middleware. Also to properly audit API requests passing conf options via audit map file. AuditMiddleware docs: http://docs.openstack.org/developer/keystonemiddleware/audit.html Co-Authored-By: Chris Krelle Closes-Bug: #1540232 Change-Id: I6de4751aa6b25e8457cae3eeab95a15f417662c5 --- doc/source/deploy/api-audit-support.rst | 110 ++++++++++++++++++ doc/source/index.rst | 1 + etc/ironic/ironic.conf.sample | 21 ++++ etc/ironic/ironic_api_audit_map.conf.sample | 29 +++++ ironic/api/app.py | 16 +++ ironic/common/exception.py | 4 + ironic/conf/__init__.py | 2 + ironic/conf/audit.py | 38 ++++++ ironic/conf/opts.py | 1 + ironic/tests/unit/api/test_audit.py | 59 ++++++++++ ...ing-audit-middleware-b95f2a00baed9750.yaml | 10 ++ 11 files changed, 291 insertions(+) create mode 100644 doc/source/deploy/api-audit-support.rst create mode 100644 etc/ironic/ironic_api_audit_map.conf.sample create mode 100644 ironic/conf/audit.py create mode 100644 ironic/tests/unit/api/test_audit.py create mode 100644 releasenotes/notes/adding-audit-middleware-b95f2a00baed9750.yaml diff --git a/doc/source/deploy/api-audit-support.rst b/doc/source/deploy/api-audit-support.rst new file mode 100644 index 000000000..a83204c91 --- /dev/null +++ b/doc/source/deploy/api-audit-support.rst @@ -0,0 +1,110 @@ +.. _api-audit-support: + +API Audit Logging +================= + +Audit middleware supports delivery of CADF audit events via Oslo messaging +notifier capability. Based on `notification_driver` configuration, audit events +can be routed to messaging infrastructure (notification_driver = messagingv2) +or can be routed to a log file (notification_driver = log). + +Audit middleware creates two events per REST API interaction. First event has +information extracted from request data and the second one has request outcome +(response). 
+ +Enabling API Audit Logging +========================== + +Audit middleware is available as part of `keystonemiddleware` (>= 1.6) library. +For information regarding how audit middleware functions refer `here. +`_ + +Auditing can be enabled for the Bare Metal service by making the following changes +to ``/etc/ironic/ironic.conf``. + +#. To enable audit logging of API requests:: + + [audit] + ... + enabled=true + +#. To customize auditing API requests, the audit middleware requires the audit_map_file setting + to be defined. Update the value of configuration setting 'audit_map_file' to set its + location. Audit map file configuration options for the Bare Metal service are included + in the etc/ironic/ironic_api_audit_map.conf.sample file. To understand CADF format + specified in ironic_api_audit_map.conf file refer to `CADF Format. + `_:: + + [audit] + ... + audit_map_file=/etc/ironic/ironic_api_audit_map.conf + +#. Comma separated list of Ironic REST API HTTP methods to be ignored during audit. + For example: GET,POST. It is used only when API audit is enabled. + + [audit] + ... + ignore_req_list=GET,POST + +Sample Audit Event +================== + +Following is the sample of audit event for ironic node list request. + +..
code-block:: json + + { + "event_type":"audit.http.request", + "timestamp":"2016-06-15 06:04:30.904397", + "payload":{ + "typeURI":"http://schemas.dmtf.org/cloud/audit/1.0/event", + "eventTime":"2016-06-15T06:04:30.903071+0000", + "target":{ + "id":"ironic", + "typeURI":"unknown", + "addresses":[ + { + "url":"http://{ironic_admin_host}:6385", + "name":"admin" + }, + { + "url":"http://{ironic_internal_host}:6385", + "name":"private" + }, + { + "url":"http://{ironic_public_host}:6385", + "name":"public" + } + ], + "name":"ironic" + }, + "observer":{ + "id":"target" + }, + "tags":[ + "correlation_id?value=685f1abb-620e-5d5d-b74a-b4135fb32373" + ], + "eventType":"activity", + "initiator":{ + "typeURI":"service/security/account/user", + "name":"admin", + "credential":{ + "token":"***", + "identity_status":"Confirmed" + }, + "host":{ + "agent":"python-ironicclient", + "address":"10.1.200.129" + }, + "project_id":"d8f52dd7d9e1475dbbf3ba47a4a83313", + "id":"8c1a948bad3948929aa5d5b50627a174" + }, + "action":"read", + "outcome":"pending", + "id":"061b7aa7-5879-5225-a331-c002cf23cb6c", + "requestPath":"/v1/nodes/?associated=True" + }, + "priority":"INFO", + "publisher_id":"ironic-api", + "message_id":"2f61ebaa-2d3e-4023-afba-f9fca6f21fc2" + } diff --git a/doc/source/index.rst b/doc/source/index.rst index a970bbeea..12d2f54f9 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -42,6 +42,7 @@ Administrator's Guide deploy/inspection deploy/security deploy/adoption + deploy/api-audit-support deploy/troubleshooting Release Notes Dashboard (horizon) plugin diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index fda45054a..cec6de3fa 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -487,6 +487,27 @@ #enable_ssl_api = false +[audit] + +# +# From ironic +# + +# Enable auditing of API requests (for ironic-api service). +# (boolean value) +#enabled = false + +# Path to audit map file for ironic-api service. 
Used only +# when API audit is enabled. (string value) +#audit_map_file = /etc/ironic/ironic_api_audit_map.conf + +# Comma separated list of Ironic REST API HTTP methods to be +# ignored during audit. For example: auditing will not be done +# on any GET or POST requests if this is set to "GET,POST". It +# is used only when API audit is enabled. (string value) +#ignore_req_list = + + [cimc] # diff --git a/etc/ironic/ironic_api_audit_map.conf.sample b/etc/ironic/ironic_api_audit_map.conf.sample new file mode 100644 index 000000000..a8076e2ab --- /dev/null +++ b/etc/ironic/ironic_api_audit_map.conf.sample @@ -0,0 +1,29 @@ +[DEFAULT] +# default target endpoint type +# should match the endpoint type defined in service catalog +target_endpoint_type = None + +# possible end path of API requests +# path of api requests for CADF target typeURI +# Just need to include top resource path to identify class +# of resources. Ex: Log audit event for API requests +# path containing "nodes" keyword and node uuid. +[path_keywords] +nodes = node +drivers = driver +chassis = chassis +ports = port +states = state +power = None +provision = None +maintenance = None +validate = None +boot_device = None +supported = None +console = None +vendor_passthrus = vendor_passthru + + +# map endpoint type defined in service catalog to CADF typeURI +[service_endpoints] +baremetal = service/compute/baremetal diff --git a/ironic/api/app.py b/ironic/api/app.py index f81b3e6c6..5621d9759 100644 --- a/ironic/api/app.py +++ b/ironic/api/app.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. 
+import keystonemiddleware.audit as audit_middleware +from keystonemiddleware.audit import PycadfAuditApiConfigError from oslo_config import cfg import oslo_middleware.cors as cors_middleware import pecan @@ -24,6 +26,7 @@ from ironic.api import config from ironic.api.controllers.base import Version from ironic.api import hooks from ironic.api import middleware +from ironic.common import exception from ironic.conf import CONF @@ -60,6 +63,19 @@ def setup_app(pecan_config=None, extra_hooks=None): wrap_app=middleware.ParsableErrorMiddleware, ) + if CONF.audit.enabled: + try: + app = audit_middleware.AuditMiddleware( + app, + audit_map_file=CONF.audit.audit_map_file, + ignore_req_list=CONF.audit.ignore_req_list + ) + except (EnvironmentError, OSError, PycadfAuditApiConfigError) as e: + raise exception.InputFileError( + file_name=CONF.audit.audit_map_file, + reason=e + ) + if pecan_config.app.enable_acl: app = acl.install(app, cfg.CONF, pecan_config.app.acl_public_routes) diff --git a/ironic/common/exception.py b/ironic/common/exception.py index f93305d21..cb8e6c631 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -255,6 +255,10 @@ class InstanceNotFound(NotFound): _msg_fmt = _("Instance %(instance)s could not be found.") +class InputFileError(IronicException): + _msg_fmt = _("Error with file %(file_name)s. 
Reason: %(reason)s") + + class NodeNotFound(NotFound): _msg_fmt = _("Node %(node)s could not be found.") diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 4bcb97319..e80854a92 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -16,6 +16,7 @@ from oslo_config import cfg from ironic.conf import api +from ironic.conf import audit from ironic.conf import cimc from ironic.conf import cisco_ucs from ironic.conf import conductor @@ -42,6 +43,7 @@ from ironic.conf import virtualbox CONF = cfg.CONF api.register_opts(CONF) +audit.register_opts(CONF) cimc.register_opts(CONF) cisco_ucs.register_opts(CONF) conductor.register_opts(CONF) diff --git a/ironic/conf/audit.py b/ironic/conf/audit.py new file mode 100644 index 000000000..5e1d4b5a0 --- /dev/null +++ b/ironic/conf/audit.py @@ -0,0 +1,38 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.BoolOpt('enabled', + default=False, + help=_('Enable auditing of API requests' + ' (for ironic-api service).')), + + cfg.StrOpt('audit_map_file', + default='/etc/ironic/ironic_api_audit_map.conf', + help=_('Path to audit map file for ironic-api service. ' + 'Used only when API audit is enabled.')), + + cfg.StrOpt('ignore_req_list', + help=_('Comma separated list of Ironic REST API HTTP methods ' + 'to be ignored during audit. 
For example: auditing ' + 'will not be done on any GET or POST requests ' + 'if this is set to "GET,POST". It is used ' + 'only when API audit is enabled.')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='audit') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index b1c105f48..67a0bee3a 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -43,6 +43,7 @@ _opts = [ ironic.drivers.modules.amt.common.opts, ironic.drivers.modules.amt.power.opts)), ('api', ironic.conf.api.opts), + ('audit', ironic.conf.audit.opts), ('cimc', ironic.conf.cimc.opts), ('cisco_ucs', ironic.conf.cisco_ucs.opts), ('conductor', ironic.conf.conductor.opts), diff --git a/ironic/tests/unit/api/test_audit.py b/ironic/tests/unit/api/test_audit.py new file mode 100644 index 000000000..6e53fbfb1 --- /dev/null +++ b/ironic/tests/unit/api/test_audit.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests to assert that audit middleware works as expected. +""" + +from keystonemiddleware import audit +import mock +from oslo_config import cfg + +from ironic.common import exception +from ironic.tests.unit.api import base + + +CONF = cfg.CONF + + +class TestAuditMiddleware(base.BaseApiTest): + """Provide a basic smoke test to ensure audit middleware is active. + + The tests below provide minimal confirmation that the audit middleware + is called, and may be configured. For comprehensive tests, please consult + the test suite in keystone audit_middleware. 
+ """ + + def setUp(self): + super(TestAuditMiddleware, self).setUp() + + @mock.patch.object(audit, 'AuditMiddleware') + def test_enable_audit_request(self, mock_audit): + CONF.audit.enabled = True + self._make_app(enable_acl=True) + mock_audit.assert_called_once_with( + mock.ANY, + audit_map_file=CONF.audit.audit_map_file, + ignore_req_list=CONF.audit.ignore_req_list) + + @mock.patch.object(audit, 'AuditMiddleware') + def test_enable_audit_request_error(self, mock_audit): + CONF.audit.enabled = True + mock_audit.side_effect = IOError("file access error") + + self.assertRaises(exception.InputFileError, + self._make_app, enable_acl=True) + + @mock.patch.object(audit, 'AuditMiddleware') + def test_disable_audit_request(self, mock_audit): + CONF.audit.enabled = False + self._make_app(enable_acl=True) + self.assertFalse(mock_audit.called) diff --git a/releasenotes/notes/adding-audit-middleware-b95f2a00baed9750.yaml b/releasenotes/notes/adding-audit-middleware-b95f2a00baed9750.yaml new file mode 100644 index 000000000..ef804c304 --- /dev/null +++ b/releasenotes/notes/adding-audit-middleware-b95f2a00baed9750.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + The ironic-api service now supports logging audit messages of + api calls. The following configuration parameters have been added. + By default auditing of ironic-api service is turned off. + + * [audit]/enabled + * [audit]/ignore_req_list + * [audit]/audit_map_file From 395069cbef7f92e0ee191320f626a32756e6835b Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Fri, 3 Jun 2016 16:32:55 +0300 Subject: [PATCH 111/166] Add api-ref for new port fields API microversions 1.18, 1.19 introduced new fields in Port resource, this change adds them to api-ref documentation. 
Partial-bug: #1526403 Change-Id: Ib7059fd1ec8ffc54fa34647cc558fb8c8e80f8d7 --- .../source/baremetal-api-v1-nodes-ports.inc | 3 +++ api-ref/source/baremetal-api-v1-ports.inc | 12 +++++++++ api-ref/source/parameters.yaml | 25 +++++++++++++++++++ .../samples/node-port-detail-response.json | 9 ++++++- .../source/samples/port-create-request.json | 7 +++++- .../source/samples/port-create-response.json | 9 ++++++- .../samples/port-list-detail-response.json | 9 ++++++- .../source/samples/port-update-response.json | 9 ++++++- 8 files changed, 78 insertions(+), 5 deletions(-) diff --git a/api-ref/source/baremetal-api-v1-nodes-ports.inc b/api-ref/source/baremetal-api-v1-nodes-ports.inc index 9bfba9be0..58addd24c 100644 --- a/api-ref/source/baremetal-api-v1-nodes-ports.inc +++ b/api-ref/source/baremetal-api-v1-nodes-ports.inc @@ -81,6 +81,9 @@ Response - uuid: uuid - address: port_address - node_uuid: node_uuid + - local_link_connection: local_link_connection + - pxe_enabled: pxe_enabled + - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at diff --git a/api-ref/source/baremetal-api-v1-ports.inc b/api-ref/source/baremetal-api-v1-ports.inc index 390bd3bb1..d2b741167 100644 --- a/api-ref/source/baremetal-api-v1-ports.inc +++ b/api-ref/source/baremetal-api-v1-ports.inc @@ -102,6 +102,9 @@ Response - uuid: uuid - address: port_address - node_uuid: node_uuid + - local_link_connection: local_link_connection + - pxe_enabled: pxe_enabled + - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at @@ -148,6 +151,9 @@ Response - uuid: uuid - address: port_address - node_uuid: node_uuid + - local_link_connection: local_link_connection + - pxe_enabled: pxe_enabled + - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at @@ -188,6 +194,9 @@ Response - uuid: uuid - address: port_address - node_uuid: node_uuid + - local_link_connection: local_link_connection + - 
pxe_enabled: pxe_enabled + - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at @@ -231,6 +240,9 @@ Response - uuid: uuid - address: port_address - node_uuid: node_uuid + - local_link_connection: local_link_connection + - pxe_enabled: pxe_enabled + - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml index 20c9192ac..128d98338 100644 --- a/api-ref/source/parameters.yaml +++ b/api-ref/source/parameters.yaml @@ -336,6 +336,13 @@ instance_uuid: in: body required: true type: string +internal_info: + description: | + Internal metadata set and stored by the Port. This field is read-only. + Added in API microversion 1.18. + in: body + required: true + type: JSON last_error: description: | Any error from the most recent (last) transaction that started but failed to finish. @@ -349,6 +356,17 @@ links: in: body required: true type: array +local_link_connection: + description: | + The Port binding profile. If specified, must contain ``switch_id`` (only + a MAC address or an OpenFlow based datapath_id of the switch are accepted + in this field) and ``port_id`` (identifier of the physical port on the + switch to which node's port is connected to) fields. ``switch_info`` is an + optional string field to be used to store any vendor-specific information. + Added in API microversion 1.19. + in: body + required: true + type: JSON maintenance: description: | Whether or not this Node is currently in "maintenance mode". Setting a Node @@ -485,6 +503,13 @@ provision_updated_at: in: body required: true type: string +pxe_enabled: + description: | + Indicates whether PXE is enabled or disabled on the Port. Added in API + microversion 1.19. + in: body + required: true + type: boolean r_driver_name: description: | The name of the driver used to manage this Node. 
diff --git a/api-ref/source/samples/node-port-detail-response.json b/api-ref/source/samples/node-port-detail-response.json index 0cb336717..1b8e57a4e 100644 --- a/api-ref/source/samples/node-port-detail-response.json +++ b/api-ref/source/samples/node-port-detail-response.json @@ -16,7 +16,14 @@ } ], "created_at" : "2016-05-05T22:30:57+00:00", - "uuid" : "c933a251-486f-4c27-adb2-8b5f59bd9cd2" + "uuid" : "c933a251-486f-4c27-adb2-8b5f59bd9cd2", + "pxe_enabled": true, + "local_link_connection": { + "switch_id": "0a:1b:2c:3d:4e:5f", + "port_id": "Ethernet3/1", + "switch_info": "switch1" + }, + "internal_info": {} } ] } diff --git a/api-ref/source/samples/port-create-request.json b/api-ref/source/samples/port-create-request.json index b4dad2776..664f20e0d 100644 --- a/api-ref/source/samples/port-create-request.json +++ b/api-ref/source/samples/port-create-request.json @@ -1,4 +1,9 @@ { "node_uuid": "ecddf26d-8c9c-4ddf-8f45-fd57e09ccddb", - "address": "11:11:11:11:11:11" + "address": "11:11:11:11:11:11", + "local_link_connection": { + "switch_id": "0a:1b:2c:3d:4e:5f", + "port_id": "Ethernet3/1", + "switch_info": "switch1" + } } \ No newline at end of file diff --git a/api-ref/source/samples/port-create-response.json b/api-ref/source/samples/port-create-response.json index 95ccdc6d5..946fbd551 100644 --- a/api-ref/source/samples/port-create-response.json +++ b/api-ref/source/samples/port-create-response.json @@ -14,5 +14,12 @@ "address" : "11:11:11:11:11:11", "updated_at" : null, "node_uuid" : "ecddf26d-8c9c-4ddf-8f45-fd57e09ccddb", - "uuid" : "c933a251-486f-4c27-adb2-8b5f59bd9cd2" + "uuid" : "c933a251-486f-4c27-adb2-8b5f59bd9cd2", + "pxe_enabled": true, + "local_link_connection": { + "switch_id": "0a:1b:2c:3d:4e:5f", + "port_id": "Ethernet3/1", + "switch_info": "switch1" + }, + "internal_info": {} } diff --git a/api-ref/source/samples/port-list-detail-response.json b/api-ref/source/samples/port-list-detail-response.json index 00b6add81..1decc1af6 100644 --- 
a/api-ref/source/samples/port-list-detail-response.json +++ b/api-ref/source/samples/port-list-detail-response.json @@ -16,7 +16,14 @@ "rel" : "bookmark" } ], - "created_at" : "2016-05-05T22:30:57+00:00" + "created_at" : "2016-05-05T22:30:57+00:00", + "pxe_enabled": true, + "local_link_connection": { + "switch_id": "0a:1b:2c:3d:4e:5f", + "port_id": "Ethernet3/1", + "switch_info": "switch1" + }, + "internal_info": {} } ] } diff --git a/api-ref/source/samples/port-update-response.json b/api-ref/source/samples/port-update-response.json index 98d135929..5895e9fe5 100644 --- a/api-ref/source/samples/port-update-response.json +++ b/api-ref/source/samples/port-update-response.json @@ -14,5 +14,12 @@ "rel" : "bookmark" } ], - "created_at" : "2016-05-05T22:30:57+00:00" + "created_at" : "2016-05-05T22:30:57+00:00", + "pxe_enabled": true, + "local_link_connection": { + "switch_id": "0a:1b:2c:3d:4e:5f", + "port_id": "Ethernet3/1", + "switch_info": "switch1" + }, + "internal_info": {} } From f16c6570bf0ffe70f83b85b963a4210c3990c573 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Wed, 20 Jul 2016 14:20:45 -0700 Subject: [PATCH 112/166] Add node.resource_class field This adds the "resource_class" field to the node table, object, and API, as well as a database migration to go with it. 
Change-Id: I936f2e7b2f4d26e01354e826e5595ff021c3a55c Partial-Bug: #1604916 --- doc/source/webapi/v1.rst | 4 + ironic/api/controllers/v1/node.py | 48 +++++- ironic/api/controllers/v1/utils.py | 40 ++++- ironic/api/controllers/v1/versions.py | 4 +- ...dd34e1f1303b_add_resource_class_to_node.py | 33 ++++ ironic/db/sqlalchemy/api.py | 2 + ironic/db/sqlalchemy/models.py | 1 + ironic/objects/node.py | 6 +- ironic/tests/unit/api/utils.py | 10 +- ironic/tests/unit/api/v1/test_nodes.py | 159 ++++++++++++++++++ ironic/tests/unit/api/v1/test_utils.py | 42 ++++- .../unit/db/sqlalchemy/test_migrations.py | 7 + ironic/tests/unit/db/test_nodes.py | 6 +- ironic/tests/unit/db/utils.py | 1 + ironic/tests/unit/objects/test_objects.py | 2 +- ...-node-resource-class-c31e26df4196293e.yaml | 13 ++ 16 files changed, 348 insertions(+), 30 deletions(-) create mode 100644 ironic/db/sqlalchemy/alembic/versions/dd34e1f1303b_add_resource_class_to_node.py create mode 100644 releasenotes/notes/add-node-resource-class-c31e26df4196293e.yaml diff --git a/doc/source/webapi/v1.rst b/doc/source/webapi/v1.rst index 0b4f929a9..2f0a87bd4 100644 --- a/doc/source/webapi/v1.rst +++ b/doc/source/webapi/v1.rst @@ -32,6 +32,10 @@ always requests the newest supported API version. API Versions History -------------------- +**1.21** + + Add node ``resource_class`` field. + **1.20** Add node ``network_interface`` field. diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py index aa5231901..d714ae839 100644 --- a/ironic/api/controllers/v1/node.py +++ b/ironic/api/controllers/v1/node.py @@ -140,6 +140,9 @@ def hide_fields_in_newer_versions(obj): if pecan.request.version.minor < versions.MINOR_20_NETWORK_INTERFACE: obj.network_interface = wsme.Unset + if not api_utils.allow_resource_class(): + obj.resource_class = wsme.Unset + def update_state_in_older_versions(obj): """Change provision state names for API backwards compatability. 
@@ -699,6 +702,11 @@ class Node(base.APIBase): extra = {wtypes.text: types.jsontype} """This node's meta data""" + resource_class = wsme.wsattr(wtypes.StringType(max_length=80)) + """The resource class for the node, useful for classifying or grouping + nodes. Used, for example, to classify nodes in Nova's placement + engine.""" + # NOTE: properties should use a class to enforce required properties # current list: arch, cpus, disk, ram, image properties = {wtypes.text: types.jsontype} @@ -819,7 +827,7 @@ class Node(base.APIBase): inspection_finished_at=None, inspection_started_at=time, console_enabled=False, clean_step={}, raid_config=None, target_raid_config=None, - network_interface='flat') + network_interface='flat', resource_class='baremetal-gold') # NOTE(matty_dubs): The chassis_uuid getter() is based on the # _chassis_uuid variable: sample._chassis_uuid = 'edcad704-b2da-41d5-96d9-afd580ecfa12' @@ -1006,6 +1014,7 @@ class NodesController(rest.RestController): def _get_nodes_collection(self, chassis_uuid, instance_uuid, associated, maintenance, provision_state, marker, limit, sort_key, sort_dir, driver=None, + resource_class=None, resource_url=None, fields=None): if self.from_chassis and not chassis_uuid: raise exception.MissingParameterValue( @@ -1038,6 +1047,8 @@ class NodesController(rest.RestController): filters['provision_state'] = provision_state if driver: filters['driver'] = driver + if resource_class is not None: + filters['resource_class'] = resource_class nodes = objects.Node.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, @@ -1128,11 +1139,11 @@ class NodesController(rest.RestController): @METRICS.timer('NodesController.get_all') @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean, types.boolean, wtypes.text, types.uuid, int, wtypes.text, - wtypes.text, wtypes.text, types.listtype) + wtypes.text, wtypes.text, types.listtype, wtypes.text) def get_all(self, chassis_uuid=None, instance_uuid=None, 
associated=None, maintenance=None, provision_state=None, marker=None, limit=None, sort_key='id', sort_dir='asc', driver=None, - fields=None): + fields=None, resource_class=None): """Retrieve a list of nodes. :param chassis_uuid: Optional UUID of a chassis, to get only nodes for @@ -1153,28 +1164,34 @@ class NodesController(rest.RestController): :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param driver: Optional string value to get only nodes using that driver. + :param resource_class: Optional string value to get only nodes with + that resource_class. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ api_utils.check_allow_specify_fields(fields) + api_utils.check_allowed_fields(fields) api_utils.check_for_invalid_state_and_allow_filter(provision_state) api_utils.check_allow_specify_driver(driver) - api_utils.check_allow_specify_network_interface_in_fields(fields) + api_utils.check_allow_specify_resource_class(resource_class) if fields is None: fields = _DEFAULT_RETURN_FIELDS return self._get_nodes_collection(chassis_uuid, instance_uuid, associated, maintenance, provision_state, marker, limit, sort_key, sort_dir, - driver, fields=fields) + driver=driver, + resource_class=resource_class, + fields=fields) @METRICS.timer('NodesController.detail') @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean, types.boolean, wtypes.text, types.uuid, int, wtypes.text, - wtypes.text, wtypes.text) + wtypes.text, wtypes.text, wtypes.text) def detail(self, chassis_uuid=None, instance_uuid=None, associated=None, maintenance=None, provision_state=None, marker=None, - limit=None, sort_key='id', sort_dir='asc', driver=None): + limit=None, sort_key='id', sort_dir='asc', driver=None, + resource_class=None): """Retrieve a list of nodes with detail. 
:param chassis_uuid: Optional UUID of a chassis, to get only nodes for @@ -1195,9 +1212,12 @@ class NodesController(rest.RestController): :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param driver: Optional string value to get only nodes using that driver. + :param resource_class: Optional string value to get only nodes with + that resource_class. """ api_utils.check_for_invalid_state_and_allow_filter(provision_state) api_utils.check_allow_specify_driver(driver) + api_utils.check_allow_specify_resource_class(resource_class) # /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "nodes": @@ -1208,7 +1228,9 @@ class NodesController(rest.RestController): associated, maintenance, provision_state, marker, limit, sort_key, sort_dir, - driver, resource_url) + driver=driver, + resource_class=resource_class, + resource_url=resource_url) @METRICS.timer('NodesController.validate') @expose.expose(wtypes.text, types.uuid_or_name, types.uuid) @@ -1247,7 +1269,7 @@ class NodesController(rest.RestController): raise exception.OperationNotPermitted() api_utils.check_allow_specify_fields(fields) - api_utils.check_allow_specify_network_interface_in_fields(fields) + api_utils.check_allowed_fields(fields) rpc_node = api_utils.get_rpc_node(node_ident) return Node.convert_with_links(rpc_node, fields=fields) @@ -1262,6 +1284,10 @@ class NodesController(rest.RestController): if self.from_chassis: raise exception.OperationNotPermitted() + if (not api_utils.allow_resource_class() and + node.resource_class is not wtypes.Unset): + raise exception.NotAcceptable() + n_interface = node.network_interface if (not api_utils.allow_network_interface() and n_interface is not wtypes.Unset): @@ -1322,6 +1348,10 @@ class NodesController(rest.RestController): if self.from_chassis: raise exception.OperationNotPermitted() + resource_class = api_utils.get_patch_values(patch, '/resource_class') + if resource_class and not 
api_utils.allow_resource_class(): + raise exception.NotAcceptable() + n_interfaces = api_utils.get_patch_values(patch, '/network_interface') if n_interfaces and not api_utils.allow_network_interface(): raise exception.NotAcceptable() diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py index 51ccef062..00b6a17d9 100644 --- a/ironic/api/controllers/v1/utils.py +++ b/ironic/api/controllers/v1/utils.py @@ -240,16 +240,17 @@ def check_allow_specify_fields(fields): raise exception.NotAcceptable() -def check_allow_specify_network_interface_in_fields(fields): - """Check if fetching a network_interface attribute is allowed. +def check_allowed_fields(fields): + """Check if fetching a particular field is allowed. - Version 1.20 of the API allows to fetching a network_interface - attribute. This method check if the required version is being - requested. + This method checks if the required version is being requested for fields + that are only allowed to be fetched in a particular API version. """ - if (fields is not None - and 'network_interface' in fields - and not allow_network_interface()): + if fields is None: + return + if 'network_interface' in fields and not allow_network_interface(): + raise exception.NotAcceptable() + if 'resource_class' in fields and not allow_resource_class(): raise exception.NotAcceptable() @@ -303,6 +304,20 @@ def check_allow_specify_driver(driver): 'opr': versions.MINOR_16_DRIVER_FILTER}) +def check_allow_specify_resource_class(resource_class): + """Check if filtering nodes by resource_class is allowed. + + Version 1.21 of the API allows filtering nodes by resource_class. + """ + if (resource_class is not None and pecan.request.version.minor < + versions.MINOR_21_RESOURCE_CLASS): + raise exception.NotAcceptable(_( + "Request not acceptable. 
The minimal required API version " + "should be %(base)s.%(opr)s") % + {'base': versions.BASE_VERSION, + 'opr': versions.MINOR_21_RESOURCE_CLASS}) + + def initial_node_provision_state(): """Return node state to use by default when creating new nodes. @@ -359,6 +374,15 @@ def allow_network_interface(): versions.MINOR_20_NETWORK_INTERFACE) +def allow_resource_class(): + """Check if we should support resource_class node field. + + Version 1.21 of the API added support for resource_class. + """ + return (pecan.request.version.minor >= + versions.MINOR_21_RESOURCE_CLASS) + + def get_controller_reserved_names(cls): """Get reserved names for a given controller. diff --git a/ironic/api/controllers/v1/versions.py b/ironic/api/controllers/v1/versions.py index d2e75862b..152f5e64a 100644 --- a/ironic/api/controllers/v1/versions.py +++ b/ironic/api/controllers/v1/versions.py @@ -50,6 +50,7 @@ BASE_VERSION = 1 # v1.18: Add port.internal_info. # v1.19: Add port.local_link_connection and port.pxe_enabled. # v1.20: Add node.network_interface +# v1.21: Add node.resource_class MINOR_0_JUNO = 0 MINOR_1_INITIAL_VERSION = 1 @@ -72,11 +73,12 @@ MINOR_17_ADOPT_VERB = 17 MINOR_18_PORT_INTERNAL_INFO = 18 MINOR_19_PORT_ADVANCED_NET_FIELDS = 19 MINOR_20_NETWORK_INTERFACE = 20 +MINOR_21_RESOURCE_CLASS = 21 # When adding another version, update MINOR_MAX_VERSION and also update # doc/source/webapi/v1.rst with a detailed explanation of what the version has # changed. 
-MINOR_MAX_VERSION = MINOR_20_NETWORK_INTERFACE +MINOR_MAX_VERSION = MINOR_21_RESOURCE_CLASS # String representations of the minor and maximum versions MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION) diff --git a/ironic/db/sqlalchemy/alembic/versions/dd34e1f1303b_add_resource_class_to_node.py b/ironic/db/sqlalchemy/alembic/versions/dd34e1f1303b_add_resource_class_to_node.py new file mode 100644 index 000000000..020b3277d --- /dev/null +++ b/ironic/db/sqlalchemy/alembic/versions/dd34e1f1303b_add_resource_class_to_node.py @@ -0,0 +1,33 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add resource_class to node + +Revision ID: dd34e1f1303b +Revises: 10b163d4481e +Create Date: 2016-07-20 21:48:12.475320 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'dd34e1f1303b' +down_revision = '10b163d4481e' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.add_column('nodes', sa.Column('resource_class', sa.String(80), + nullable=True)) diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py index 79b7aa8f2..a8030dd87 100644 --- a/ironic/db/sqlalchemy/api.py +++ b/ironic/db/sqlalchemy/api.py @@ -214,6 +214,8 @@ class Connection(api.Connection): query = query.filter_by(maintenance=filters['maintenance']) if 'driver' in filters: query = query.filter_by(driver=filters['driver']) + if 'resource_class' in filters: + query = query.filter_by(resource_class=filters['resource_class']) if 'provision_state' in filters: query = query.filter_by(provision_state=filters['provision_state']) if 'provisioned_before' in filters: diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py index 911c01f9c..46f5c9dc2 100644 --- a/ironic/db/sqlalchemy/models.py +++ b/ironic/db/sqlalchemy/models.py @@ -118,6 +118,7 @@ class Node(Base): driver_info = Column(db_types.JsonEncodedDict) driver_internal_info = Column(db_types.JsonEncodedDict) clean_step = Column(db_types.JsonEncodedDict) + resource_class = Column(String(80), nullable=True) raid_config = Column(db_types.JsonEncodedDict) target_raid_config = Column(db_types.JsonEncodedDict) diff --git a/ironic/objects/node.py b/ironic/objects/node.py index 6077dd2fe..3d4c5d83d 100644 --- a/ironic/objects/node.py +++ b/ironic/objects/node.py @@ -47,7 +47,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat): # and save() validate the input of property values. # Version 1.15: Add get_by_port_addresses # Version 1.16: Add network_interface field - VERSION = '1.16' + # Version 1.17: Add resource_class field + VERSION = '1.17' dbapi = db_api.get_instance() @@ -99,6 +100,9 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat): # that started but failed to finish. 
'last_error': object_fields.StringField(nullable=True), + # Used by nova to relate the node to a flavor + 'resource_class': object_fields.StringField(nullable=True), + 'inspection_finished_at': object_fields.DateTimeField(nullable=True), 'inspection_started_at': object_fields.DateTimeField(nullable=True), diff --git a/ironic/tests/unit/api/utils.py b/ironic/tests/unit/api/utils.py index c63ff0807..433d1b54a 100644 --- a/ironic/tests/unit/api/utils.py +++ b/ironic/tests/unit/api/utils.py @@ -94,11 +94,15 @@ def node_post_data(**kw): node.pop('conductor_affinity') node.pop('chassis_id') node.pop('tags') - # NOTE(vdrok): network_interface was introduced in API version 1.20, return - # it only if it was explicitly requested, so that tests using older API - # versions don't fail + + # NOTE(jroll): pop out fields that were introduced in later API versions, + # unless explicitly requested. Otherwise, these will cause tests using + # older API versions to fail. if 'network_interface' not in kw: node.pop('network_interface') + if 'resource_class' not in kw: + node.pop('resource_class') + internal = node_controller.NodePatchType.internal_attrs() return remove_internal(node, internal) diff --git a/ironic/tests/unit/api/v1/test_nodes.py b/ironic/tests/unit/api/v1/test_nodes.py index 5fa6cf2e6..6250e7193 100644 --- a/ironic/tests/unit/api/v1/test_nodes.py +++ b/ironic/tests/unit/api/v1/test_nodes.py @@ -111,6 +111,7 @@ class TestListNodes(test_api_base.BaseApiTest): self.assertNotIn('raid_config', data['nodes'][0]) self.assertNotIn('target_raid_config', data['nodes'][0]) self.assertNotIn('network_interface', data['nodes'][0]) + self.assertNotIn('resource_class', data['nodes'][0]) # never expose the chassis_id self.assertNotIn('chassis_id', data['nodes'][0]) @@ -137,6 +138,7 @@ class TestListNodes(test_api_base.BaseApiTest): self.assertIn('clean_step', data) self.assertIn('states', data) self.assertIn('network_interface', data) + self.assertIn('resource_class', data) # never 
expose the chassis_id self.assertNotIn('chassis_id', data) @@ -336,6 +338,17 @@ class TestListNodes(test_api_base.BaseApiTest): self.assertEqual(node.network_interface, new_data['nodes'][0]["network_interface"]) + def test_hide_fields_in_newer_versions_resource_class(self): + node = obj_utils.create_test_node(self.context, + resource_class='foo') + data = self.get_json( + '/nodes/detail', headers={api_base.Version.string: '1.20'}) + self.assertNotIn('resource_class', data['nodes'][0]) + new_data = self.get_json( + '/nodes/detail', headers={api_base.Version.string: '1.21'}) + self.assertEqual(node.resource_class, + new_data['nodes'][0]["resource_class"]) + def test_many(self): nodes = [] for id in range(5): @@ -756,6 +769,75 @@ class TestListNodes(test_api_base.BaseApiTest): self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) self.assertTrue(response.json['error_message']) + def _test_get_nodes_by_resource_class(self, detail=False): + if detail: + base_url = '/nodes/detail?resource_class=%s' + else: + base_url = '/nodes?resource_class=%s' + + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + driver='fake', + resource_class='foo') + node1 = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + driver='fake', + resource_class='bar') + + data = self.get_json(base_url % 'foo', + headers={api_base.Version.string: "1.21"}) + uuids = [n['uuid'] for n in data['nodes']] + self.assertIn(node.uuid, uuids) + self.assertNotIn(node1.uuid, uuids) + data = self.get_json(base_url % 'bar', + headers={api_base.Version.string: "1.21"}) + uuids = [n['uuid'] for n in data['nodes']] + self.assertIn(node1.uuid, uuids) + self.assertNotIn(node.uuid, uuids) + + def test_get_nodes_by_resource_class(self): + self._test_get_nodes_by_resource_class(detail=False) + + def test_get_nodes_by_resource_class_detail(self): + self._test_get_nodes_by_resource_class(detail=True) + + def _test_get_nodes_by_invalid_resource_class(self, 
detail=False): + if detail: + base_url = '/nodes/detail?resource_class=%s' + else: + base_url = '/nodes?resource_class=%s' + + data = self.get_json(base_url % 'test', + headers={api_base.Version.string: "1.21"}) + self.assertEqual(0, len(data['nodes'])) + + def test_get_nodes_by_invalid_resource_class(self): + self._test_get_nodes_by_invalid_resource_class(detail=False) + + def test_get_nodes_by_invalid_resource_class_detail(self): + self._test_get_nodes_by_invalid_resource_class(detail=True) + + def _test_get_nodes_by_resource_class_invalid_api_version(self, + detail=False): + if detail: + base_url = '/nodes/detail?resource_class=%s' + else: + base_url = '/nodes?resource_class=%s' + + response = self.get_json( + base_url % 'fake', + headers={api_base.Version.string: str(api_v1.MIN_VER)}, + expect_errors=True) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + self.assertTrue(response.json['error_message']) + + def test_get_nodes_by_resource_class_invalid_api_version(self): + self._test_get_nodes_by_resource_class_invalid_api_version( + detail=False) + + def test_get_nodes_by_resource_class_invalid_api_version_detail(self): + self._test_get_nodes_by_resource_class_invalid_api_version(detail=True) + def test_get_console_information(self): node = obj_utils.create_test_node(self.context) expected_console_info = {'test': 'test-data'} @@ -1452,6 +1534,64 @@ class TestPatch(test_api_base.BaseApiTest): self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + def test_update_resource_class(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + resource_class = 'foo' + headers = {api_base.Version.string: '1.21'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/resource_class', + 'value': resource_class, + 'op': 'add'}], + headers=headers) + self.assertEqual('application/json', 
response.content_type) + self.assertEqual(http_client.OK, response.status_code) + + def test_update_resource_class_old_api(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + resource_class = 'foo' + headers = {api_base.Version.string: '1.20'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/resource_class', + 'value': resource_class, + 'op': 'add'}], + headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + + def test_update_resource_class_max_length(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + resource_class = 'f' * 80 + headers = {api_base.Version.string: '1.21'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/resource_class', + 'value': resource_class, + 'op': 'add'}], + headers=headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + + def test_update_resource_class_too_long(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + resource_class = 'f' * 81 + headers = {api_base.Version.string: '1.21'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/resource_class', + 'value': resource_class, + 'op': 'add'}], + headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_code) + class TestPost(test_api_base.BaseApiTest): @@ -1793,6 +1933,25 @@ class TestPost(test_api_base.BaseApiTest): self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) + def test_create_node_resource_class(self): + ndict = 
test_api_utils.post_get_test_node( + resource_class='foo') + response = self.post_json('/nodes', ndict, + headers={api_base.Version.string: + str(api_v1.MAX_VER)}) + self.assertEqual(http_client.CREATED, response.status_int) + result = self.get_json('/nodes/%s' % ndict['uuid'], + headers={api_base.Version.string: + str(api_v1.MAX_VER)}) + self.assertEqual('foo', result['resource_class']) + + def test_create_node_resource_class_old_api_version(self): + ndict = test_api_utils.post_get_test_node( + resource_class='foo') + response = self.post_json('/nodes', ndict, expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + class TestDelete(test_api_base.BaseApiTest): diff --git a/ironic/tests/unit/api/v1/test_utils.py b/ironic/tests/unit/api/v1/test_utils.py index 68d896d08..e8e777bce 100644 --- a/ironic/tests/unit/api/v1/test_utils.py +++ b/ironic/tests/unit/api/v1/test_utils.py @@ -131,21 +131,33 @@ class TestApiUtils(base.TestCase): utils.check_allow_specify_fields, ['foo']) @mock.patch.object(pecan, 'request', spec_set=['version']) - def test_check_allow_specify_network_interface(self, mock_request): + def test_check_allowed_fields_network_interface(self, mock_request): mock_request.version.minor = 20 self.assertIsNone( - utils.check_allow_specify_network_interface_in_fields( - ['network_interface'])) + utils.check_allowed_fields(['network_interface'])) @mock.patch.object(pecan, 'request', spec_set=['version']) - def test_check_allow_specify_network_interface_in_fields_fail( - self, mock_request): + def test_check_allowed_fields_network_interface_fail(self, mock_request): mock_request.version.minor = 19 self.assertRaises( exception.NotAcceptable, - utils.check_allow_specify_network_interface_in_fields, + utils.check_allowed_fields, ['network_interface']) + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_check_allowed_fields_resource_class(self, 
mock_request): + mock_request.version.minor = 21 + self.assertIsNone( + utils.check_allowed_fields(['resource_class'])) + + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_check_allowed_fields_resource_class_fail(self, mock_request): + mock_request.version.minor = 20 + self.assertRaises( + exception.NotAcceptable, + utils.check_allowed_fields, + ['resource_class']) + @mock.patch.object(pecan, 'request', spec_set=['version']) def test_check_allow_specify_driver(self, mock_request): mock_request.version.minor = 16 @@ -157,6 +169,17 @@ class TestApiUtils(base.TestCase): self.assertRaises(exception.NotAcceptable, utils.check_allow_specify_driver, ['fake']) + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_check_allow_specify_resource_class(self, mock_request): + mock_request.version.minor = 21 + self.assertIsNone(utils.check_allow_specify_resource_class(['foo'])) + + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_check_allow_specify_resource_class_fail(self, mock_request): + mock_request.version.minor = 20 + self.assertRaises(exception.NotAcceptable, + utils.check_allow_specify_resource_class, ['foo']) + @mock.patch.object(pecan, 'request', spec_set=['version']) def test_check_allow_manage_verbs(self, mock_request): mock_request.version.minor = 4 @@ -255,6 +278,13 @@ class TestApiUtils(base.TestCase): mock_request.version.minor = 19 self.assertFalse(utils.allow_network_interface()) + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_allow_resource_class(self, mock_request): + mock_request.version.minor = 21 + self.assertTrue(utils.allow_resource_class()) + mock_request.version.minor = 20 + self.assertFalse(utils.allow_resource_class()) + class TestNodeIdent(base.TestCase): diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py index 2785d771e..8b1acf158 100644 --- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py +++ 
b/ironic/tests/unit/db/sqlalchemy/test_migrations.py @@ -433,6 +433,13 @@ class MigrationCheckersMixin(object): self.assertIsInstance(portgroups.c.internal_info.type, sqlalchemy.types.TEXT) + def _check_dd34e1f1303b(self, engine, data): + nodes = db_utils.get_table(engine, 'nodes') + col_names = [column.name for column in nodes.c] + self.assertIn('resource_class', col_names) + self.assertIsInstance(nodes.c.resource_class.type, + sqlalchemy.types.String) + def test_upgrade_and_version(self): with patch_with_engine(self.engine): self.migration_api.upgrade('head') diff --git a/ironic/tests/unit/db/test_nodes.py b/ironic/tests/unit/db/test_nodes.py index ddac51db0..9e5f7fbc9 100644 --- a/ironic/tests/unit/db/test_nodes.py +++ b/ironic/tests/unit/db/test_nodes.py @@ -123,7 +123,8 @@ class DbNodeTestCase(base.DbTestCase): node2 = utils.create_test_node( driver='driver-two', uuid=uuidutils.generate_uuid(), - maintenance=True) + maintenance=True, + resource_class='foo') node3 = utils.create_test_node( driver='driver-one', uuid=uuidutils.generate_uuid(), @@ -157,6 +158,9 @@ class DbNodeTestCase(base.DbTestCase): self.assertEqual(sorted([node1.id, node3.id]), sorted([r.id for r in res])) + res = self.dbapi.get_node_list(filters={'resource_class': 'foo'}) + self.assertEqual([node2.id], [r.id for r in res]) + res = self.dbapi.get_node_list( filters={'reserved_by_any_of': ['fake-host', 'another-fake-host']}) diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py index 2e2285180..2d8b4c9e8 100644 --- a/ironic/tests/unit/db/utils.py +++ b/ironic/tests/unit/db/utils.py @@ -226,6 +226,7 @@ def get_test_node(**kw): 'raid_config': kw.get('raid_config'), 'target_raid_config': kw.get('target_raid_config'), 'tags': kw.get('tags', []), + 'resource_class': kw.get('resource_class'), 'network_interface': kw.get('network_interface'), } diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py index 2a99424a9..b734031d7 100644 --- 
a/ironic/tests/unit/objects/test_objects.py +++ b/ironic/tests/unit/objects/test_objects.py @@ -404,7 +404,7 @@ class TestObject(_LocalTest, _TestObject): # version bump. It is md5 hash of object fields and remotable methods. # The fingerprint values should only be changed if there is a version bump. expected_object_fingerprints = { - 'Node': '1.16-2a6646627cb937f083f428f5d54e6458', + 'Node': '1.17-ed09e704576dc1b5a74abcbb727bf722', 'MyObj': '1.5-4f5efe8f0fcaf182bbe1c7fe3ba858db', 'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905', 'Port': '1.6-609504503d68982a10f495659990084b', diff --git a/releasenotes/notes/add-node-resource-class-c31e26df4196293e.yaml b/releasenotes/notes/add-node-resource-class-c31e26df4196293e.yaml new file mode 100644 index 000000000..d52eca3d2 --- /dev/null +++ b/releasenotes/notes/add-node-resource-class-c31e26df4196293e.yaml @@ -0,0 +1,13 @@ +--- +features: + - Adds a `resource_class` field to the node resource, + which will be used by Nova to define which nodes may + quantitatively match a Nova flavor. Operators should + populate this accordingly before deploying the Ocata + version of Nova. +upgrade: + - Adds a `resource_class` field to the node resource, + which will be used by Nova to define which nodes may + quantitatively match a Nova flavor. Operators should + populate this accordingly before deploying the Ocata + version of Nova. From ae6e628b2d38821a7ad42a45ea38655c14dc3339 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Thu, 28 Jul 2016 19:07:31 +0300 Subject: [PATCH 113/166] Log full config only once in conductor oslo.service's wait() already does that for us. 
Change-Id: I66b6a8c8daf32948badde555b12d2f0f9cd88c8a --- ironic/cmd/conductor.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ironic/cmd/conductor.py b/ironic/cmd/conductor.py index fb5239c60..39718e502 100644 --- a/ironic/cmd/conductor.py +++ b/ironic/cmd/conductor.py @@ -22,7 +22,6 @@ The Ironic Management Service import sys from oslo_config import cfg -from oslo_log import log from oslo_service import service from ironic.common import service as ironic_service @@ -38,10 +37,6 @@ def main(): 'ironic.conductor.manager', 'ConductorManager') - LOG = log.getLogger(__name__) - LOG.debug("Configuration:") - CONF.log_opt_values(LOG, log.DEBUG) - launcher = service.launch(CONF, mgr) launcher.wait() From 46fcfb3199254107df1dfe8295f8ad1e09624c64 Mon Sep 17 00:00:00 2001 From: Ramamani Yeleswarapu Date: Fri, 22 Apr 2016 14:37:24 -0700 Subject: [PATCH 114/166] Centralize config options - [agent] Nova style refactor of config options in Ironic. Change-Id: I97d3a616ca8b68a74d4ad546f81f3a94ce4bad2a Partial-Bug: #1561100 --- ironic/conf/__init__.py | 2 + ironic/conf/agent.py | 67 +++++++++++++++++++++ ironic/conf/opts.py | 8 +-- ironic/drivers/modules/agent.py | 34 +---------- ironic/drivers/modules/agent_base_vendor.py | 20 +----- ironic/drivers/modules/agent_client.py | 12 +--- 6 files changed, 73 insertions(+), 70 deletions(-) create mode 100644 ironic/conf/agent.py diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 4bcb97319..889da64c9 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -15,6 +15,7 @@ from oslo_config import cfg +from ironic.conf import agent from ironic.conf import api from ironic.conf import cimc from ironic.conf import cisco_ucs @@ -41,6 +42,7 @@ from ironic.conf import virtualbox CONF = cfg.CONF +agent.register_opts(CONF) api.register_opts(CONF) cimc.register_opts(CONF) cisco_ucs.register_opts(CONF) diff --git a/ironic/conf/agent.py b/ironic/conf/agent.py new file mode 100644 index 000000000..9555ca973 --- 
/dev/null +++ b/ironic/conf/agent.py @@ -0,0 +1,67 @@ +# Copyright 2016 Intel Corporation +# Copyright 2014 Rackspace, Inc. +# Copyright 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + + +opts = [ + cfg.BoolOpt('manage_agent_boot', + default=True, + help=_('Whether Ironic will manage booting of the agent ' + 'ramdisk. If set to False, you will need to configure ' + 'your mechanism to allow booting the agent ' + 'ramdisk.')), + cfg.IntOpt('memory_consumed_by_agent', + default=0, + help=_('The memory size in MiB consumed by agent when it is ' + 'booted on a bare metal node. This is used for ' + 'checking if the image can be downloaded and deployed ' + 'on the bare metal node after booting agent ramdisk. ' + 'This may be set according to the memory consumed by ' + 'the agent ramdisk image.')), + cfg.BoolOpt('stream_raw_images', + default=True, + help=_('Whether the agent ramdisk should stream raw images ' + 'directly onto the disk or not. By streaming raw ' + 'images directly onto the disk the agent ramdisk will ' + 'not spend time copying the image to a tmpfs partition ' + '(therefore consuming less memory) prior to writing it ' + 'to the disk. Unless the disk where the image will be ' + 'copied to is really slow, this option should be set ' + 'to True. 
Defaults to True.')), + cfg.IntOpt('heartbeat_timeout', + default=300, + help=_('Maximum interval (in seconds) for agent heartbeats.')), + cfg.IntOpt('post_deploy_get_power_state_retries', + default=6, + help=_('Number of times to retry getting power state to check ' + 'if bare metal node has been powered off after a soft ' + 'power off.')), + cfg.IntOpt('post_deploy_get_power_state_retry_interval', + default=5, + help=_('Amount of time (in seconds) to wait between polling ' + 'power state after trigger soft poweroff.')), + cfg.StrOpt('agent_api_version', + default='v1', + help=_('API version to use for communicating with the ramdisk ' + 'agent.')) +] + + +def register_opts(conf): + conf.register_opts(opts, group='agent') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index b1c105f48..48ea26c3f 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -12,9 +12,6 @@ import itertools -import ironic.drivers.modules.agent -import ironic.drivers.modules.agent_base_vendor -import ironic.drivers.modules.agent_client import ironic.drivers.modules.amt.common import ironic.drivers.modules.amt.power import ironic.drivers.modules.iscsi_deploy @@ -35,10 +32,7 @@ _default_opt_lists = [ _opts = [ ('DEFAULT', itertools.chain(*_default_opt_lists)), - ('agent', itertools.chain( - ironic.drivers.modules.agent.agent_opts, - ironic.drivers.modules.agent_base_vendor.agent_opts, - ironic.drivers.modules.agent_client.agent_opts)), + ('agent', ironic.conf.agent.opts), ('amt', itertools.chain( ironic.drivers.modules.amt.common.opts, ironic.drivers.modules.amt.power.opts)), diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py index 79346e155..4a84f4bf2 100644 --- a/ironic/drivers/modules/agent.py +++ b/ironic/drivers/modules/agent.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import units @@ -32,43 +31,12 @@ from ironic.common import states from ironic.common import utils from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils +from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules import agent_base_vendor from ironic.drivers.modules import deploy_utils -agent_opts = [ - cfg.BoolOpt('manage_agent_boot', - default=True, - help=_('Whether Ironic will manage booting of the agent ' - 'ramdisk. If set to False, you will need to configure ' - 'your mechanism to allow booting the agent ' - 'ramdisk.')), - cfg.IntOpt('memory_consumed_by_agent', - default=0, - help=_('The memory size in MiB consumed by agent when it is ' - 'booted on a bare metal node. This is used for ' - 'checking if the image can be downloaded and deployed ' - 'on the bare metal node after booting agent ramdisk. ' - 'This may be set according to the memory consumed by ' - 'the agent ramdisk image.')), - cfg.BoolOpt('stream_raw_images', - default=True, - help=_('Whether the agent ramdisk should stream raw images ' - 'directly onto the disk or not. By streaming raw ' - 'images directly onto the disk the agent ramdisk will ' - 'not spend time copying the image to a tmpfs partition ' - '(therefore consuming less memory) prior to writing it ' - 'to the disk. Unless the disk where the image will be ' - 'copied to is really slow, this option should be set ' - 'to True. 
Defaults to True.')), -] - -CONF = cfg.CONF -CONF.import_opt('erase_devices_priority', - 'ironic.drivers.modules.deploy_utils', group='deploy') -CONF.register_opts(agent_opts, group='agent') - LOG = log.getLogger(__name__) diff --git a/ironic/drivers/modules/agent_base_vendor.py b/ironic/drivers/modules/agent_base_vendor.py index a5614d609..cf4e69120 100644 --- a/ironic/drivers/modules/agent_base_vendor.py +++ b/ironic/drivers/modules/agent_base_vendor.py @@ -20,7 +20,6 @@ import ast import collections import time -from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import strutils @@ -38,29 +37,12 @@ from ironic.common import utils from ironic.conductor import rpcapi from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils +from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules import agent_client from ironic.drivers.modules import deploy_utils from ironic import objects -agent_opts = [ - cfg.IntOpt('heartbeat_timeout', - default=300, - help=_('Maximum interval (in seconds) for agent heartbeats.')), - cfg.IntOpt('post_deploy_get_power_state_retries', - default=6, - help=_('Number of times to retry getting power state to check ' - 'if bare metal node has been powered off after a soft ' - 'power off.')), - cfg.IntOpt('post_deploy_get_power_state_retry_interval', - default=5, - help=_('Amount of time (in seconds) to wait between polling ' - 'power state after trigger soft poweroff.')), -] - -CONF = cfg.CONF -CONF.register_opts(agent_opts, group='agent') - LOG = log.getLogger(__name__) # This contains a nested dictionary containing the post clean step diff --git a/ironic/drivers/modules/agent_client.py b/ironic/drivers/modules/agent_client.py index be31dc45b..06ffe6569 100644 --- a/ironic/drivers/modules/agent_client.py +++ b/ironic/drivers/modules/agent_client.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and 
# limitations under the License. -from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils import requests @@ -21,16 +20,7 @@ from ironic.common import exception from ironic.common.i18n import _ from ironic.common.i18n import _LE from ironic.common.i18n import _LW - -agent_opts = [ - cfg.StrOpt('agent_api_version', - default='v1', - help=_('API version to use for communicating with the ramdisk ' - 'agent.')) -] - -CONF = cfg.CONF -CONF.register_opts(agent_opts, group='agent') +from ironic.conf import CONF LOG = log.getLogger(__name__) From 8baf29fe6a5d067f1061cc2f5eced0ed48188b21 Mon Sep 17 00:00:00 2001 From: Galyna Zholtkevych Date: Wed, 20 Jul 2016 14:45:30 -0700 Subject: [PATCH 115/166] Updated tests for db migration scripts Updated tests for db migration scripts taking into account previous reviews. Change-Id: Ie4b46cf264a2199e1b422862fa47bc41789fd729 --- .../unit/db/sqlalchemy/test_migrations.py | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py index 2785d771e..fea7b310b 100644 --- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py +++ b/ironic/tests/unit/db/sqlalchemy/test_migrations.py @@ -261,6 +261,33 @@ class MigrationCheckersMixin(object): self.assertRaises(db_exc.DBDuplicateEntry, nodes.insert().execute, data) + def _check_487deb87cc9d(self, engine, data): + conductors = db_utils.get_table(engine, 'conductors') + column_names = [column.name for column in conductors.c] + + self.assertIn('online', column_names) + self.assertIsInstance(conductors.c.online.type, + (sqlalchemy.types.Boolean, + sqlalchemy.types.Integer)) + nodes = db_utils.get_table(engine, 'nodes') + column_names = [column.name for column in nodes.c] + self.assertIn('conductor_affinity', column_names) + self.assertIsInstance(nodes.c.conductor_affinity.type, + sqlalchemy.types.Integer) + + data_conductor = {'hostname': 
'test_host'} + conductors.insert().execute(data_conductor) + conductor = conductors.select( + conductors.c.hostname == + data_conductor['hostname']).execute().first() + + data_node = {'uuid': uuidutils.generate_uuid(), + 'conductor_affinity': conductor['id']} + nodes.insert().execute(data_node) + node = nodes.select( + nodes.c.uuid == data_node['uuid']).execute().first() + self.assertEqual(conductor['id'], node['conductor_affinity']) + def _check_242cc6a923b3(self, engine, data): nodes = db_utils.get_table(engine, 'nodes') col_names = [column.name for column in nodes.c] @@ -307,6 +334,31 @@ class MigrationCheckersMixin(object): self.assertIsInstance(nodes.c.driver_internal_info.type, sqlalchemy.types.TEXT) + def _check_3ae36a5f5131(self, engine, data): + nodes = db_utils.get_table(engine, 'nodes') + column_names = [column.name for column in nodes.c] + self.assertIn('name', column_names) + self.assertIsInstance(nodes.c.name.type, + sqlalchemy.types.String) + data = {'driver': 'fake', + 'uuid': uuidutils.generate_uuid(), + 'name': 'node' + } + nodes.insert().values(data).execute() + data['uuid'] = uuidutils.generate_uuid() + self.assertRaises(db_exc.DBDuplicateEntry, + nodes.insert().execute, data) + + def _check_1e1d5ace7dc6(self, engine, data): + nodes = db_utils.get_table(engine, 'nodes') + column_names = [column.name for column in nodes.c] + self.assertIn('inspection_started_at', column_names) + self.assertIn('inspection_finished_at', column_names) + self.assertIsInstance(nodes.c.inspection_started_at.type, + sqlalchemy.types.DateTime) + self.assertIsInstance(nodes.c.inspection_finished_at.type, + sqlalchemy.types.DateTime) + def _check_4f399b21ae71(self, engine, data): nodes = db_utils.get_table(engine, 'nodes') col_names = [column.name for column in nodes.c] From 5b752d258a0402abfd7f2511874f42bdf7c77dc1 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Fri, 29 Jul 2016 16:03:26 -0700 Subject: [PATCH 116/166] Devstack should use a prebuilt ramdisk by default 
Ironic-python-agent has both supported ramdisks prebuilt and ready for download, leaving no reason for building this in devstack in the general case. This also resolves (via alternative means) the bug which is causing our deploy ramdisk to get built every time in the gate. Change-Id: I00cf31bf75343fec4d7ec11178b3a253da806ac0 Related-bug: #1607981 --- devstack/lib/ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index c22f8516d..12d036b4c 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -131,7 +131,7 @@ IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/} IRONIC_VM_LOG_ROTATE=$(trueorfalse True IRONIC_VM_LOG_ROTATE) # Whether to build the ramdisk or download a prebuilt one. -IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse True IRONIC_BUILD_DEPLOY_RAMDISK) +IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse False IRONIC_BUILD_DEPLOY_RAMDISK) # Ironic IPA ramdisk type, supported types are: IRONIC_SUPPORTED_RAMDISK_TYPES_RE="^(coreos|tinyipa|dib)$" From 10ba46f3d56add3edb57e389a2536edcc3efcea3 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 30 Jul 2016 01:18:28 +0000 Subject: [PATCH 117/166] Updated from global requirements Change-Id: I95f3c5a83940db310048592cdbe7444f9f3b5034 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 374414c4d..5a555fd92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ oslo.middleware>=3.0.0 # Apache-2.0 oslo.policy>=1.9.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.15.0 # Apache-2.0 +oslo.utils>=3.16.0 # Apache-2.0 pecan>=1.0.0 # BSD requests>=2.10.0 # Apache-2.0 six>=1.9.0 # MIT From cedff4e3780191f6d43d5ccfa6a1a363c728d859 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Mon, 1 Aug 2016 12:47:01 +0000 Subject: [PATCH 118/166] Move default network_interface logic in node object This patch 
moves the default network_interface logic into the node object using a field default value. This means if the network_interface on a node object is ever set to None it'll calculate the default to place into that field. Change-Id: I753d969b31ba2d12df4db3fc95a7d9e9ca1e4df6 Partial-Bug: #1608511 --- ironic/common/driver_factory.py | 4 ---- ironic/objects/node.py | 19 +++++++++++++-- .../unit/drivers/modules/test_deploy_utils.py | 4 ++++ ironic/tests/unit/objects/test_node.py | 24 +++++++++++++++++++ ironic/tests/unit/objects/test_objects.py | 2 +- 5 files changed, 46 insertions(+), 7 deletions(-) diff --git a/ironic/common/driver_factory.py b/ironic/common/driver_factory.py index 98800c44c..500396a92 100644 --- a/ironic/common/driver_factory.py +++ b/ironic/common/driver_factory.py @@ -59,10 +59,6 @@ def _attach_interfaces_to_driver(driver, node, driver_name=None): setattr(driver, iface, impl) network_iface = node.network_interface - if network_iface is None: - network_iface = (CONF.default_network_interface or - ('flat' if CONF.dhcp.dhcp_provider == 'neutron' - else 'noop')) network_factory = NetworkInterfaceFactory() try: net_driver = network_factory.get_driver(network_iface) diff --git a/ironic/objects/node.py b/ironic/objects/node.py index 3d4c5d83d..6bb03d5c8 100644 --- a/ironic/objects/node.py +++ b/ironic/objects/node.py @@ -19,6 +19,7 @@ from oslo_versionedobjects import base as object_base from ironic.common import exception from ironic.common.i18n import _ +from ironic.conf import CONF from ironic.db import api as db_api from ironic.objects import base from ironic.objects import fields as object_fields @@ -26,6 +27,13 @@ from ironic.objects import fields as object_fields REQUIRED_INT_PROPERTIES = ['local_gb', 'cpus', 'memory_mb'] +def _default_network_interface(): + network_iface = (CONF.default_network_interface or + ('flat' if CONF.dhcp.dhcp_provider == 'neutron' + else 'noop')) + return network_iface + + @base.IronicObjectRegistry.register class 
Node(base.IronicObject, object_base.VersionedObjectDictCompat): # Version 1.0: Initial version @@ -48,7 +56,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat): # Version 1.15: Add get_by_port_addresses # Version 1.16: Add network_interface field # Version 1.17: Add resource_class field - VERSION = '1.17' + # Version 1.18: Add default setting for network_interface + VERSION = '1.18' dbapi = db_api.get_instance() @@ -108,9 +117,15 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat): 'extra': object_fields.FlexibleDictField(nullable=True), - 'network_interface': object_fields.StringField(nullable=True), + 'network_interface': object_fields.StringField( + nullable=False, default=_default_network_interface()), } + def __init__(self, context=None, **kwargs): + self.fields['network_interface']._default = ( + _default_network_interface()) + super(Node, self).__init__(context, **kwargs) + def _validate_property_values(self, properties): """Check if the input of local_gb, cpus and memory_mb are valid. 
diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index 068c599a1..44626a1a7 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -1799,6 +1799,8 @@ class AgentMethodsTestCase(db_base.DbTestCase): def test_prepare_inband_cleaning_ports_provider_does_not_create( self, dhcp_factory_mock, add_clean_net_mock): self.config(group='dhcp', dhcp_provider='my_shiny_dhcp_provider') + self.node.network_interface = 'noop' + self.node.save() dhcp_provider = dhcp_factory_mock.return_value.provider del dhcp_provider.delete_cleaning_ports del dhcp_provider.create_cleaning_ports @@ -1846,6 +1848,8 @@ class AgentMethodsTestCase(db_base.DbTestCase): def test_tear_down_inband_cleaning_ports_provider_does_not_delete( self, dhcp_factory_mock, rm_clean_net_mock): self.config(group='dhcp', dhcp_provider='my_shiny_dhcp_provider') + self.node.network_interface = 'noop' + self.node.save() dhcp_provider = dhcp_factory_mock.return_value.provider del dhcp_provider.delete_cleaning_ports with task_manager.acquire( diff --git a/ironic/tests/unit/objects/test_node.py b/ironic/tests/unit/objects/test_node.py index 34690e8a6..74d95157e 100644 --- a/ironic/tests/unit/objects/test_node.py +++ b/ironic/tests/unit/objects/test_node.py @@ -16,17 +16,22 @@ import mock from testtools.matchers import HasLength +from ironic.common import context from ironic.common import exception +from ironic.conf import CONF from ironic import objects from ironic.tests.unit.db import base from ironic.tests.unit.db import utils +from ironic.tests.unit.objects import utils as obj_utils class TestNodeObject(base.DbTestCase): def setUp(self): super(TestNodeObject, self).setUp() + self.ctxt = context.get_admin_context() self.fake_node = utils.get_test_node() + self.node = obj_utils.get_test_node(self.ctxt, **self.fake_node) def test_get_by_id(self): node_id = 
self.fake_node['id'] @@ -191,3 +196,22 @@ class TestNodeObject(base.DbTestCase): } node._validate_property_values(values['properties']) self.assertEqual(expect, values['properties']) + + def test_get_network_interface_use_field(self): + CONF.set_override('default_network_interface', None) + for nif in ('neutron', 'flat', 'noop'): + self.node.network_interface = nif + self.assertEqual(nif, self.node.network_interface) + + def test_get_network_interface_use_conf(self): + for nif in ('neutron', 'flat', 'noop'): + CONF.set_override('default_network_interface', nif) + self.node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + self.assertEqual(nif, self.node.network_interface) + + def test_get_network_interface_use_dhcp_provider(self): + CONF.set_override('default_network_interface', None) + for dhcp, nif in (('neutron', 'flat'), ('none', 'noop')): + CONF.set_override('dhcp_provider', dhcp, 'dhcp') + self.node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + self.assertEqual(nif, self.node.network_interface) diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py index b734031d7..bd21bd950 100644 --- a/ironic/tests/unit/objects/test_objects.py +++ b/ironic/tests/unit/objects/test_objects.py @@ -404,7 +404,7 @@ class TestObject(_LocalTest, _TestObject): # version bump. It is md5 hash of object fields and remotable methods. # The fingerprint values should only be changed if there is a version bump. 
expected_object_fingerprints = { - 'Node': '1.17-ed09e704576dc1b5a74abcbb727bf722', + 'Node': '1.18-8cdb6010014b29f17ca636bef72b7800', 'MyObj': '1.5-4f5efe8f0fcaf182bbe1c7fe3ba858db', 'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905', 'Port': '1.6-609504503d68982a10f495659990084b', From c81bd9e5c7ef7263189f69d26070cf157461c5a9 Mon Sep 17 00:00:00 2001 From: Dao Cong Tien Date: Mon, 1 Aug 2016 15:20:51 +0700 Subject: [PATCH 119/166] Minor docstring and unittests fixes for IPMIConsole This is the follow-up patch of the commit 857372a2269cdd0f8a1ae5b9e9f6e0ee193f01be. This fix follows some minor comments of Lucas and Ruby from patch set 29 at the review: https://review.openstack.org/#/c/293873/. The changes are: * Fix the docstring of the _start_console() in ipmitool.py to reflect the real parameters being passed to the method. * Fix few unittests to use self.assertFalse(.called) instead of self.assertFalse(.call_count) Change-Id: I25caa9202c8bc867973205d2bf9642064d3ba492 --- ironic/drivers/modules/ipmitool.py | 2 +- ironic/tests/unit/drivers/modules/test_ipmitool.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ironic/drivers/modules/ipmitool.py b/ironic/drivers/modules/ipmitool.py index 173c56acf..578ea3568 100644 --- a/ironic/drivers/modules/ipmitool.py +++ b/ironic/drivers/modules/ipmitool.py @@ -1135,7 +1135,7 @@ class IPMIConsole(base.ConsoleInterface): def _start_console(self, driver_info, start_method): """Start a remote console for the node. 
- :param task: a task from TaskManager + :param driver_info: the parameters for accessing a node :param start_method: console_utils method to start console :raises: InvalidParameterValue if required ipmi parameters are missing :raises: PasswordFileFailedToCreate if unable to create a file diff --git a/ironic/tests/unit/drivers/modules/test_ipmitool.py b/ironic/tests/unit/drivers/modules/test_ipmitool.py index 5fd3aee6b..1877d0c32 100644 --- a/ironic/tests/unit/drivers/modules/test_ipmitool.py +++ b/ironic/tests/unit/drivers/modules/test_ipmitool.py @@ -193,7 +193,7 @@ class IPMIToolCheckInitTestCase(base.TestCase): ipmi.TMP_DIR_CHECKED = True ipmi.IPMISocatConsole() mock_support.assert_called_with(mock.ANY) - self.assertFalse(mock_check_dir.call_count) + self.assertFalse(mock_check_dir.called) @mock.patch.object(ipmi, '_is_option_supported', autospec=True) @@ -2236,7 +2236,7 @@ class IPMIToolSocatDriverTestCase(IPMIToolDriverTestCase): mock_stop.assert_called_once_with(self.node.uuid) mock_unlink.assert_called_once_with( ipmi._console_pwfile_path(self.node.uuid)) - self.assertFalse(mock_exec_stop.call_count) + self.assertFalse(mock_exec_stop.called) @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test__exec_stop_console(self, mock_exec): From 84bf48431c2b3cdee47dbb6bb46cf808ac79cf99 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Tue, 5 Apr 2016 21:03:50 +0000 Subject: [PATCH 120/166] Pass agent metrics config via conductor This adds agent config options for metrics as described in the spec, and allows those config options to be sent to IPA on lookup. We're configuring heartbeat timeout this way, and this change matches nicely with that style. Additionally, this sets heartbeat_timeout under the new config namespace for consistency, however, we'll allow the old way to be deprecated when the vendor_passthru for agent lookups is deprecated. 
Change-Id: I94d81b95feabe46999dbbc02522508cd542a89f8 Co-Authored-By: Josh Gachnang Partial-bug: #1526219 --- etc/ironic/ironic.conf.sample | 46 ++++++++++++++++ ironic/conf/__init__.py | 4 ++ ironic/conf/metrics.py | 55 +++++++++++++++++++ ironic/conf/metrics_statsd.py | 36 ++++++++++++ ironic/conf/opts.py | 2 + ironic/drivers/modules/agent_base_vendor.py | 23 ++++++++ .../drivers/modules/test_agent_base_vendor.py | 22 ++++++++ ...g-to-agent-on-lookup-6db9ae187c4e8151.yaml | 7 +++ 8 files changed, 195 insertions(+) create mode 100644 ironic/conf/metrics.py create mode 100644 ironic/conf/metrics_statsd.py create mode 100644 releasenotes/notes/pass-metrics-config-to-agent-on-lookup-6db9ae187c4e8151.yaml diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index b4b4937b6..998f29a7e 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -1538,6 +1538,37 @@ [metrics] +# +# From ironic +# + +# Backend for the agent ramdisk to use for metrics. Default +# possible backends are "noop" and "statsd". (string value) +#agent_backend = noop + +# Prepend the hostname to all metric names sent by the agent +# ramdisk. The format of metric names is +# [global_prefix.][uuid.][host_name.]prefix.metric_name. +# (boolean value) +#agent_prepend_host = false + +# Prepend the node's Ironic uuid to all metric names sent by +# the agent ramdisk. The format of metric names is +# [global_prefix.][uuid.][host_name.]prefix.metric_name. +# (boolean value) +#agent_prepend_uuid = false + +# Split the prepended host value by "." and reverse it for +# metrics sent by the agent ramdisk (to better match the +# reverse hierarchical form of domain names). (boolean value) +#agent_prepend_host_reverse = true + +# Prefix all metric names sent by the agent ramdisk with this +# value. The format of metric names is +# [global_prefix.][uuid.][host_name.]prefix.metric_name. 
+# (string value) +#agent_global_prefix = + # # From ironic_lib.metrics # @@ -1566,6 +1597,21 @@ [metrics_statsd] +# +# From ironic +# + +# Host for the agent ramdisk to use with the statsd backend. +# This must be accessible from networks the agent is booted +# on. (string value) +#agent_statsd_host = localhost + +# Port for the agent ramdisk to use with the statsd backend. +# (port value) +# Minimum value: 0 +# Maximum value: 65535 +#agent_statsd_port = 8125 + # # From ironic_lib.metrics_statsd # diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index a9dd7fc99..1ec5ee33c 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -33,6 +33,8 @@ from ironic.conf import inspector from ironic.conf import ipmi from ironic.conf import irmc from ironic.conf import keystone +from ironic.conf import metrics +from ironic.conf import metrics_statsd from ironic.conf import neutron from ironic.conf import oneview from ironic.conf import seamicro @@ -61,6 +63,8 @@ inspector.register_opts(CONF) ipmi.register_opts(CONF) irmc.register_opts(CONF) keystone.register_opts(CONF) +metrics.register_opts(CONF) +metrics_statsd.register_opts(CONF) neutron.register_opts(CONF) oneview.register_opts(CONF) seamicro.register_opts(CONF) diff --git a/ironic/conf/metrics.py b/ironic/conf/metrics.py new file mode 100644 index 000000000..8f64c00fa --- /dev/null +++ b/ironic/conf/metrics.py @@ -0,0 +1,55 @@ +# Copyright 2016 Intel Corporation +# Copyright 2014 Rackspace, Inc. +# Copyright 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + + +opts = [ + # IPA config options: used by IPA to configure how it reports metric data + cfg.StrOpt('agent_backend', + default='noop', + help=_('Backend for the agent ramdisk to use for metrics. ' + 'Default possible backends are "noop" and "statsd".')), + cfg.BoolOpt('agent_prepend_host', + default=False, + help=_('Prepend the hostname to all metric names sent by the ' + 'agent ramdisk. The format of metric names is ' + '[global_prefix.][uuid.][host_name.]prefix.' + 'metric_name.')), + cfg.BoolOpt('agent_prepend_uuid', + default=False, + help=_('Prepend the node\'s Ironic uuid to all metric names ' + 'sent by the agent ramdisk. The format of metric names ' + 'is [global_prefix.][uuid.][host_name.]prefix.' + 'metric_name.')), + cfg.BoolOpt('agent_prepend_host_reverse', + default=True, + help=_('Split the prepended host value by "." and reverse it ' + 'for metrics sent by the agent ramdisk (to better ' + 'match the reverse hierarchical form of domain ' + 'names).')), + cfg.StrOpt('agent_global_prefix', + help=_('Prefix all metric names sent by the agent ramdisk ' + 'with this value. The format of metric names is ' + '[global_prefix.][uuid.][host_name.]prefix.' + 'metric_name.')) +] + + +def register_opts(conf): + conf.register_opts(opts, group='metrics') diff --git a/ironic/conf/metrics_statsd.py b/ironic/conf/metrics_statsd.py new file mode 100644 index 000000000..c1d3fc11d --- /dev/null +++ b/ironic/conf/metrics_statsd.py @@ -0,0 +1,36 @@ +# Copyright 2016 Intel Corporation +# Copyright 2014 Rackspace, Inc. +# Copyright 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ + + +opts = [ + cfg.StrOpt('agent_statsd_host', + default='localhost', + help=_('Host for the agent ramdisk to use with the statsd ' + 'backend. This must be accessible from networks the ' + 'agent is booted on.')), + cfg.PortOpt('agent_statsd_port', + default=8125, + help=_('Port for the agent ramdisk to use with the statsd ' + 'backend.')), +] + + +def register_opts(conf): + conf.register_opts(opts, group='metrics_statsd') diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 4eae792bb..18d608791 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -54,6 +54,8 @@ _opts = [ ('iscsi', ironic.drivers.modules.iscsi_deploy.iscsi_opts), ('keystone', ironic.conf.keystone.opts), ('neutron', ironic.conf.neutron.opts), + ('metrics', ironic.conf.metrics.opts), + ('metrics_statsd', ironic.conf.metrics_statsd.opts), ('oneview', ironic.conf.oneview.opts), ('pxe', itertools.chain( ironic.drivers.modules.iscsi_deploy.pxe_opts, diff --git a/ironic/drivers/modules/agent_base_vendor.py b/ironic/drivers/modules/agent_base_vendor.py index ec2092573..51a6fc40e 100644 --- a/ironic/drivers/modules/agent_base_vendor.py +++ b/ironic/drivers/modules/agent_base_vendor.py @@ -729,6 +729,10 @@ class BaseAgentVendor(AgentDeployMixin, base.VendorInterface): Currently, we don't handle the instance where the agent doesn't have a matching node (i.e. a brand new, never been in Ironic node). 
+ Additionally, we may pass on useful configurations to the agent, which + it would then be responsible for applying if relevant. Today these are + limited to heartbeat_timeout and metrics configuration. + kwargs should have the following format:: { @@ -781,8 +785,27 @@ class BaseAgentVendor(AgentDeployMixin, base.VendorInterface): strutils.mask_password(ndict['driver_info'], "******")) return { + # heartbeat_timeout is a config, so moving it into the + # config namespace. Instead of a separate deprecation, + # this will die when the vendor_passthru version of + # lookup goes away. 'heartbeat_timeout': CONF.agent.heartbeat_timeout, 'node': ndict, + 'config': { + 'metrics': { + 'backend': CONF.metrics.agent_backend, + 'prepend_host': CONF.metrics.agent_prepend_host, + 'prepend_uuid': CONF.metrics.agent_prepend_uuid, + 'prepend_host_reverse': + CONF.metrics.agent_prepend_host_reverse, + 'global_prefix': CONF.metrics.agent_global_prefix + }, + 'metrics_statsd': { + 'statsd_host': CONF.metrics_statsd.agent_statsd_host, + 'statsd_port': CONF.metrics_statsd.agent_statsd_port + }, + 'heartbeat_timeout': CONF.agent.heartbeat_timeout + } } def _get_interfaces(self, inventory): diff --git a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py index 389b9f0bc..78791bc58 100644 --- a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py +++ b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py @@ -20,6 +20,7 @@ import time import types import mock +from oslo_config import cfg from ironic.common import boot_devices from ironic.common import exception @@ -37,6 +38,8 @@ from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils from ironic.tests.unit.objects import utils as object_utils +CONF = cfg.CONF + INSTANCE_INFO = db_utils.get_test_agent_instance_info() DRIVER_INFO = db_utils.get_test_agent_driver_info() DRIVER_INTERNAL_INFO = 
db_utils.get_test_agent_driver_internal_info() @@ -114,10 +117,29 @@ class TestBaseAgentVendor(db_base.DbTestCase): expected = copy.deepcopy(self.node.as_dict()) if not show_password: expected['driver_info']['ipmi_password'] = '******' + + self.config(agent_backend='statsd', group='metrics') + expected_metrics = { + 'metrics': { + 'backend': 'statsd', + 'prepend_host': CONF.metrics.agent_prepend_host, + 'prepend_uuid': CONF.metrics.agent_prepend_uuid, + 'prepend_host_reverse': + CONF.metrics.agent_prepend_host_reverse, + 'global_prefix': CONF.metrics.agent_global_prefix + }, + 'metrics_statsd': { + 'statsd_host': CONF.metrics_statsd.agent_statsd_host, + 'statsd_port': CONF.metrics_statsd.agent_statsd_port + }, + 'heartbeat_timeout': CONF.agent.heartbeat_timeout + } + find_mock.return_value = self.node with task_manager.acquire(self.context, self.node.uuid) as task: node = self.passthru.lookup(task.context, **kwargs) self.assertEqual(expected, node['node']) + self.assertEqual(expected_metrics, node['config']) def test_lookup_v2_show_password(self): self._test_lookup_v2(show_password=True) diff --git a/releasenotes/notes/pass-metrics-config-to-agent-on-lookup-6db9ae187c4e8151.yaml b/releasenotes/notes/pass-metrics-config-to-agent-on-lookup-6db9ae187c4e8151.yaml new file mode 100644 index 000000000..08b908c20 --- /dev/null +++ b/releasenotes/notes/pass-metrics-config-to-agent-on-lookup-6db9ae187c4e8151.yaml @@ -0,0 +1,7 @@ +--- +features: + - Adds the ability for ironic conductor to pass + configurations for agent metrics on lookup. + When paired with a sufficiently new ironic + python agent, this will configure the metrics + backends. From 92edcabcf888feb61f5b0f0e6b69b3c88c9175f8 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Tue, 2 Aug 2016 12:30:46 -0700 Subject: [PATCH 121/166] Timing metrics for agent deploy classes This change adds timing metrics for all agent deploy classes. 
The only methods we are not emitting metrics for are those that return static results. Change-Id: I96290be44a60635f3bd68e7a43549e2f67a19f12 Related-bug: #1526219 --- ironic/drivers/modules/agent.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py index 45222a3b0..bf93c4902 100644 --- a/ironic/drivers/modules/agent.py +++ b/ironic/drivers/modules/agent.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from ironic_lib import metrics_utils from oslo_log import log from oslo_utils import excutils from oslo_utils import units @@ -39,6 +40,7 @@ from ironic.drivers.modules import deploy_utils LOG = log.getLogger(__name__) +METRICS = metrics_utils.get_metrics_logger(__name__) REQUIRED_PROPERTIES = { 'deploy_kernel': _('UUID (from Glance) of the deployment kernel. ' @@ -69,6 +71,7 @@ PARTITION_IMAGE_LABELS = ('kernel', 'ramdisk', 'root_gb', 'root_mb', 'swap_mb', 'deploy_boot_mode') +@METRICS.timer('build_instance_info_for_deploy') def build_instance_info_for_deploy(task): """Build instance_info necessary for deploying to a node. @@ -118,6 +121,7 @@ def build_instance_info_for_deploy(task): return instance_info +@METRICS.timer('check_image_size') def check_image_size(task, image_source): """Check if the requested image is larger than the ram size. @@ -155,6 +159,7 @@ def check_image_size(task, image_source): raise exception.InvalidParameterValue(msg) +@METRICS.timer('validate_image_proxies') def validate_image_proxies(node): """Check that the provided proxy parameters are valid. 
@@ -193,6 +198,7 @@ def validate_image_proxies(node): class AgentDeployMixin(agent_base_vendor.AgentDeployMixin): + @METRICS.timer('AgentDeployMixin.deploy_has_started') def deploy_has_started(self, task): commands = self._client.get_commands_status(task.node) @@ -202,6 +208,7 @@ class AgentDeployMixin(agent_base_vendor.AgentDeployMixin): return True return False + @METRICS.timer('AgentDeployMixin.deploy_is_done') def deploy_is_done(self, task): commands = self._client.get_commands_status(task.node) if not commands: @@ -219,6 +226,7 @@ class AgentDeployMixin(agent_base_vendor.AgentDeployMixin): return False + @METRICS.timer('AgentDeployMixin.continue_deploy') @task_manager.require_exclusive_lock def continue_deploy(self, task): task.process_event('resume') @@ -295,6 +303,7 @@ class AgentDeployMixin(agent_base_vendor.AgentDeployMixin): return return result + @METRICS.timer('AgentDeployMixin.check_deploy_success') def check_deploy_success(self, node): # should only ever be called after we've validated that # the prepare_image command is complete @@ -302,6 +311,7 @@ class AgentDeployMixin(agent_base_vendor.AgentDeployMixin): if command['command_status'] == 'FAILED': return command['command_error'] + @METRICS.timer('AgentDeployMixin.reboot_to_instance') def reboot_to_instance(self, task): task.process_event('resume') node = task.node @@ -353,6 +363,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface): """ return COMMON_PROPERTIES + @METRICS.timer('AgentDeploy.validate') def validate(self, task): """Validate the driver-specific Node deployment info. @@ -393,6 +404,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface): validate_image_proxies(node) + @METRICS.timer('AgentDeploy.deploy') @task_manager.require_exclusive_lock def deploy(self, task): """Perform a deployment to a node. 
@@ -408,6 +420,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface): manager_utils.node_power_action(task, states.REBOOT) return states.DEPLOYWAIT + @METRICS.timer('AgentDeploy.tear_down') @task_manager.require_exclusive_lock def tear_down(self, task): """Tear down a previous deployment on the task's node. @@ -426,6 +439,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface): return states.DELETED + @METRICS.timer('AgentDeploy.prepare') @task_manager.require_exclusive_lock def prepare(self, task): """Prepare the deployment environment for this node. @@ -457,6 +471,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface): deploy_opts = deploy_utils.build_agent_options(node) task.driver.boot.prepare_ramdisk(task, deploy_opts) + @METRICS.timer('AgentDeploy.clean_up') @task_manager.require_exclusive_lock def clean_up(self, task): """Clean up the deployment environment for this node. @@ -490,6 +505,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface): """ pass + @METRICS.timer('AgentDeploy.get_clean_steps') def get_clean_steps(self, task): """Get the list of clean steps from the agent. @@ -506,6 +522,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface): task, interface='deploy', override_priorities=new_priorities) + @METRICS.timer('AgentDeploy.execute_clean_step') def execute_clean_step(self, task, step): """Execute a clean step asynchronously on the agent. @@ -517,6 +534,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface): """ return deploy_utils.agent_execute_clean_step(task, step) + @METRICS.timer('AgentDeploy.prepare_cleaning') def prepare_cleaning(self, task): """Boot into the agent to prepare for cleaning. @@ -530,6 +548,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface): return deploy_utils.prepare_inband_cleaning( task, manage_boot=CONF.agent.manage_agent_boot) + @METRICS.timer('AgentDeploy.tear_down_cleaning') def tear_down_cleaning(self, task): """Clean up the PXE and DHCP files after cleaning. 
@@ -556,6 +575,7 @@ class AgentRAID(base.RAIDInterface): """Return the properties of the interface.""" return {} + @METRICS.timer('AgentRAID.create_configuration') @base.clean_step(priority=0) def create_configuration(self, task, create_root_volume=True, @@ -652,6 +672,7 @@ class AgentRAID(base.RAIDInterface): raid.update_raid_info(task.node, clean_result) + @METRICS.timer('AgentRAID.delete_configuration') @base.clean_step(priority=0) def delete_configuration(self, task): """Deletes RAID configuration on the given node. From 204a6b2b9038d904b1cc583288e73b32cb98900d Mon Sep 17 00:00:00 2001 From: Aline Bousquet Date: Wed, 3 Aug 2016 11:09:45 +0100 Subject: [PATCH 122/166] Add metrics for the ipmitool driver Add timing metrics for the methods in the ipmitool driver. Change-Id: I5f0c9421fc186f6ecc927f96abc5bed90b87dfc1 --- ironic/drivers/modules/ipmitool.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/ironic/drivers/modules/ipmitool.py b/ironic/drivers/modules/ipmitool.py index 578ea3568..fbc031237 100644 --- a/ironic/drivers/modules/ipmitool.py +++ b/ironic/drivers/modules/ipmitool.py @@ -36,6 +36,7 @@ import subprocess import tempfile import time +from ironic_lib import metrics_utils from ironic_lib import utils as ironic_utils from oslo_concurrency import processutils from oslo_config import cfg @@ -69,6 +70,8 @@ CONF.import_opt('min_command_interval', LOG = logging.getLogger(__name__) +METRICS = metrics_utils.get_metrics_logger(__name__) + VALID_PRIV_LEVELS = ['ADMINISTRATOR', 'CALLBACK', 'OPERATOR', 'USER'] VALID_PROTO_VERSIONS = ('2.0', '1.5') @@ -650,6 +653,7 @@ def _parse_ipmi_sensors_data(node, sensors_data): return sensors_data_dict +@METRICS.timer('send_raw') @task_manager.require_exclusive_lock def send_raw(task, raw_bytes): """Send raw bytes to the BMC. Bytes should be a string of bytes. 
@@ -682,6 +686,7 @@ def send_raw(task, raw_bytes): return out, err +@METRICS.timer('dump_sdr') def dump_sdr(task, file_path): """Dump SDR data to a file. @@ -747,6 +752,7 @@ class IPMIPower(base.PowerInterface): def get_properties(self): return COMMON_PROPERTIES + @METRICS.timer('IPMIPower.validate') def validate(self, task): """Validate driver_info for ipmitool driver. @@ -763,6 +769,7 @@ class IPMIPower(base.PowerInterface): # This is a temporary measure to mitigate problems while # 1314954 and 1314961 are resolved. + @METRICS.timer('IPMIPower.get_power_state') def get_power_state(self, task): """Get the current power state of the task's node. @@ -777,6 +784,7 @@ class IPMIPower(base.PowerInterface): driver_info = _parse_driver_info(task.node) return _power_status(driver_info) + @METRICS.timer('IPMIPower.set_power_state') @task_manager.require_exclusive_lock def set_power_state(self, task, pstate): """Turn the power on or off. @@ -804,6 +812,7 @@ class IPMIPower(base.PowerInterface): if state != pstate: raise exception.PowerStateFailure(pstate=pstate) + @METRICS.timer('IPMIPower.reboot') @task_manager.require_exclusive_lock def reboot(self, task): """Cycles the power to the task's node. @@ -839,6 +848,7 @@ class IPMIManagement(base.ManagementInterface): "the system path when checking ipmitool version")) _check_temp_dir() + @METRICS.timer('IPMIManagement.validate') def validate(self, task): """Check that 'driver_info' contains IPMI credentials. @@ -864,6 +874,7 @@ class IPMIManagement(base.ManagementInterface): return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM, boot_devices.BIOS, boot_devices.SAFE] + @METRICS.timer('IPMIManagement.set_boot_device') @task_manager.require_exclusive_lock def set_boot_device(self, task, device, persistent=False): """Set the boot device for the task's node. 
@@ -915,6 +926,7 @@ class IPMIManagement(base.ManagementInterface): {'node': driver_info['uuid'], 'cmd': cmd, 'error': e}) raise exception.IPMIFailure(cmd=cmd) + @METRICS.timer('IPMIManagement.get_boot_device') def get_boot_device(self, task): """Get the current boot device for the task's node. @@ -976,6 +988,7 @@ class IPMIManagement(base.ManagementInterface): response['persistent'] = 'Options apply to all future boots' in out return response + @METRICS.timer('IPMIManagement.get_sensors_data') def get_sensors_data(self, task): """Get sensors data. @@ -1013,6 +1026,7 @@ class VendorPassthru(base.VendorInterface): "the system path when checking ipmitool version")) _check_temp_dir() + @METRICS.timer('VendorPassthru.send_raw') @base.passthru(['POST']) @task_manager.require_exclusive_lock def send_raw(self, task, http_method, raw_bytes): @@ -1028,6 +1042,7 @@ class VendorPassthru(base.VendorInterface): """ send_raw(task, raw_bytes) + @METRICS.timer('VendorPassthru.bmc_reset') @base.passthru(['POST']) @task_manager.require_exclusive_lock def bmc_reset(self, task, http_method, warm=True): @@ -1068,6 +1083,7 @@ class VendorPassthru(base.VendorInterface): def get_properties(self): return COMMON_PROPERTIES + @METRICS.timer('VendorPassthru.validate') def validate(self, task, method, **kwargs): """Validate vendor-specific actions. @@ -1112,6 +1128,7 @@ class IPMIConsole(base.ConsoleInterface): d.update(CONSOLE_PROPERTIES) return d + @METRICS.timer('IPMIConsole.validate') def validate(self, task): """Validate the Node console info. @@ -1174,6 +1191,7 @@ class IPMIConsole(base.ConsoleInterface): class IPMIShellinaboxConsole(IPMIConsole): """A ConsoleInterface that uses ipmitool and shellinabox.""" + @METRICS.timer('IPMIShellinaboxConsole.start_console') def start_console(self, task): """Start a remote console for the node. 
@@ -1189,6 +1207,7 @@ class IPMIShellinaboxConsole(IPMIConsole): self._start_console(driver_info, console_utils.start_shellinabox_console) + @METRICS.timer('IPMIShellinaboxConsole.stop_console') def stop_console(self, task): """Stop the remote console session for the node. @@ -1201,6 +1220,7 @@ class IPMIShellinaboxConsole(IPMIConsole): ironic_utils.unlink_without_raise( _console_pwfile_path(task.node.uuid)) + @METRICS.timer('IPMIShellinaboxConsole.get_console') def get_console(self, task): """Get the type and connection information about the console.""" driver_info = _parse_driver_info(task.node) @@ -1211,6 +1231,7 @@ class IPMIShellinaboxConsole(IPMIConsole): class IPMISocatConsole(IPMIConsole): """A ConsoleInterface that uses ipmitool and socat.""" + @METRICS.timer('IPMISocatConsole.start_console') def start_console(self, task): """Start a remote console for the node. @@ -1232,6 +1253,7 @@ class IPMISocatConsole(IPMIConsole): pass self._start_console(driver_info, console_utils.start_socat_console) + @METRICS.timer('IPMISocatConsole.stop_console') def stop_console(self, task): """Stop the remote console session for the node. @@ -1250,6 +1272,7 @@ class IPMISocatConsole(IPMIConsole): cmd = "sol deactivate" _exec_ipmitool(driver_info, cmd, check_exit_code=[0, 1]) + @METRICS.timer('IPMISocatConsole.get_console') def get_console(self, task): """Get the type and connection information about the console. From bacc872b72d18c344d20db1ead13fb6204a82ba7 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 3 Aug 2016 11:20:29 +0100 Subject: [PATCH 123/166] Extend the "configuring ironic-api behind mod_wsgi" guide The etc/apache2/ironic configuration have the logs file path pointing to /var/log/apache2/ which does not exist in the Red Hat systems (the equivalent is /var/log/httpd). This patch extend the documentation to point that out to the operator to look at these paths when setting up the ironic-api to run behing Apache mod_wsgi. 
Related-Bug: #1608252 Change-Id: I591748245af885eeb782df82eaa5f33e123f8e06 --- doc/source/deploy/install-guide.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/deploy/install-guide.rst b/doc/source/deploy/install-guide.rst index cf8804b23..4743f9de4 100644 --- a/doc/source/deploy/install-guide.rst +++ b/doc/source/deploy/install-guide.rst @@ -424,6 +424,9 @@ Bare Metal service comes with an example file for configuring the - Modify the ``Directory`` directive to set the path to the Ironic API code. + - Modify the ``ErrorLog`` and ``CustomLog`` to redirect the logs + to the right directory (on Red Hat systems this is usually under + /var/log/httpd). 4. Enable the apache ``ironic`` in site and reload:: From ced53d561f79a23a37042b4d4e3fb762cc3b1995 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Wed, 3 Aug 2016 08:05:06 -0400 Subject: [PATCH 124/166] Update nova scheduler_host_manager config docs Nova no longer allows the full python path to the host manager class. Update the docs to be correct. Change-Id: I54f203399dff5c9496516d9260e5f43bcc47b022 --- doc/source/deploy/install-guide.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/deploy/install-guide.rst b/doc/source/deploy/install-guide.rst index cf8804b23..c671a3bb2 100644 --- a/doc/source/deploy/install-guide.rst +++ b/doc/source/deploy/install-guide.rst @@ -466,8 +466,8 @@ Compute service's controller nodes and compute nodes.* firewall_driver=nova.virt.firewall.NoopFirewallDriver # The scheduler host manager class to use (string value) - #scheduler_host_manager=nova.scheduler.host_manager.HostManager - scheduler_host_manager=nova.scheduler.ironic_host_manager.IronicHostManager + #scheduler_host_manager=host_manager + scheduler_host_manager=ironic_host_manager # Virtual ram to physical ram allocation ratio which affects # all ram filters. 
This configuration specifies a global ratio From be8577b32c22e16ca055bb68dd8e0ca5ab73e3ca Mon Sep 17 00:00:00 2001 From: Akilan Pughazhendi Date: Mon, 25 Jul 2016 15:31:50 +0000 Subject: [PATCH 125/166] Updating dev quickstart to include compatiblity for newest distros Updates ironic dev quickstart to include compatability with the latest versions of python, ubuntu and fedora Change-Id: I14b0668e228c47db36970df14a774469f6d966d1 Closes-bug: 1603575 --- doc/source/dev/dev-quickstart.rst | 51 +++++++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/doc/source/dev/dev-quickstart.rst b/doc/source/dev/dev-quickstart.rst index 79fa7ece8..d2b6206fb 100644 --- a/doc/source/dev/dev-quickstart.rst +++ b/doc/source/dev/dev-quickstart.rst @@ -8,19 +8,22 @@ This is a quick walkthrough to get you started developing code for Ironic. This assumes you are already familiar with submitting code reviews to an OpenStack project. -The gate currently runs the unit tests under both -Python 2.7 and Python 3.4. It is strongly encouraged to run the unit tests -locally under one, the other, or both prior to submitting a patch. +The gate currently runs the unit tests under Python 2.7, Python 3.4 +and Python 3.5. It is strongly encouraged to run the unit tests locally prior +to submitting a patch. .. note:: Do not run unit tests on the same environment as devstack due to conflicting configuration with system dependencies. +.. note:: + This document is compatible with Python (3.5), Ubuntu (16.04) and Fedora (23). + .. seealso:: http://docs.openstack.org/infra/manual/developers.html#development-workflow -Install prerequisites (for python 2.7): +Install prerequisites for python 2.7: - Ubuntu/Debian:: @@ -50,13 +53,26 @@ Install prerequisites (for python 2.7): ``_. 
-To use Python 3.4, follow the instructions above to install prerequisites and +If you need Python 3.4, follow the instructions above to install prerequisites for 2.7 and additionally install the following packages: -- On Ubuntu/Debian:: +- On Ubuntu 14.x/Debian:: sudo apt-get install python3-dev +- On Ubuntu 16.04:: + + wget https://www.python.org/ftp/python/3.4.4/Python-3.4.4.tgz + sudo tar xzf Python-3.4.4.tgz + cd Python-3.4.4 + sudo ./configure + sudo make altinstall + + # This will install Python 3.4 without replacing 3.5. To check if 3.4 was installed properly + run this command: + + python3.4 -V + - On Fedora 21/RHEL7/CentOS7:: sudo yum install python3-devel @@ -65,6 +81,29 @@ additionally install the following packages: sudo dnf install python3-devel +If you need Python 3.5, follow the instructions for installing prerequisites for Python 2.7 and +run the following commands. + +- On Ubuntu 14.04:: + + wget https://www.python.org/ftp/python/3.5.2/Python-3.5.2.tgz + sudo tar xzf Python-3.5.2.tgz + cd Python-3.5.2 + sudo ./configure + sudo make altinstall + + # This will install Python 3.5 without replacing 3.4. To check if 3.5 was installed properly + run this command: + + python3.5 -V + +- On Fedora 23:: + + sudo dnf install -y dnf-plugins-core + sudo dnf copr enable -y mstuchli/Python3.5 + dnf install -y python35-python3 + + If your distro has at least tox 1.8, use similar command to install ``python-tox`` package. Otherwise install this on all distros:: From f9ea26ebf33118cfc179cc183588df2a829db4b6 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Wed, 23 Mar 2016 17:54:59 +0200 Subject: [PATCH 126/166] Migrate to using keystoneauth Sessions We currently construct Keystone client objects directly, which is no longer the preferred way. Instead, we should be using Sessions which allows use of different auth plugins. This change attempts to migrate our Keystone usage to this model. 
Additionally, we currently rely on the imported keystonemiddleware
auth_token's configuration for all of the Keystone credentials used by
the Ironic service user. This is bad, as that config is internal to that
library and may change at any time. Also, the service user may be using
different credentials than the token validator.

This refactors the keystone module to use Sessions. It attempts to
provide some backward compat for users who have not yet updated their
config, by falling back to the authtoken config section when required.

Operator impact:
- Authentication parameters for each service should now be specified
  in the corresponding config section for this service ([glance],
  [neutron], [swift], [inspector]). This includes providing both
  Keystone session-related options (timeout, SSL-related ones) and
  authentication options (`auth_type`, `auth_url` and proper options
  for the auth plugin).
- New config section `service_catalog` for Ironic service user
  credentials, used to resolve Ironic API URL from Keystone catalog.
- If loading from the service config section fails, an attempt is made
  to use respective options from [keystone_authtoken] section as a
  fall-back for backward compatibility.

Implementation details:
- using keystoneauth1 library instead of keystoneclient
- For each service the keystone session is created only once and is
  reused further. This lowers the number of authentication requests
  made to Keystone but implies that only auth plugins that can
  re-authenticate themselves can be used (so no *Token plugins).

This patch does not update the DevStack plugin, in order to test
backwards compatibility with old config options. DevStack plugin will
be modified in a subsequent patch.
Change-Id: I166eebefc1e1335a1a7b632149cf6441512e9d5e Closes-Bug: #1422632 Related-Bug: #1418341 Related-Bug: #1494776 Co-Authored-By: Adam Gandelman --- etc/ironic/ironic.conf.sample | 584 +++++++++++++++--- ironic/cmd/conductor.py | 30 + ironic/common/image_service.py | 14 +- ironic/common/keystone.py | 173 +++--- ironic/common/neutron.py | 56 +- ironic/common/service.py | 1 - ironic/common/swift.py | 81 +-- ironic/conf/__init__.py | 2 + ironic/conf/auth.py | 79 +++ ironic/conf/glance.py | 6 + ironic/conf/inspector.py | 6 + ironic/conf/neutron.py | 13 +- ironic/conf/opts.py | 9 +- ironic/conf/service_catalog.py | 33 + ironic/conf/swift.py | 6 + ironic/drivers/modules/deploy_utils.py | 36 +- ironic/drivers/modules/inspector.py | 12 +- ironic/drivers/modules/iscsi_deploy.py | 13 +- .../tests/unit/common/test_image_service.py | 30 +- ironic/tests/unit/common/test_keystone.py | 252 ++++---- ironic/tests/unit/common/test_neutron.py | 178 +++--- ironic/tests/unit/common/test_swift.py | 69 +-- ironic/tests/unit/conf/__init__.py | 0 ironic/tests/unit/conf/test_auth.py | 70 +++ .../unit/drivers/modules/test_deploy_utils.py | 46 +- .../unit/drivers/modules/test_inspector.py | 8 +- .../unit/drivers/modules/test_iscsi_deploy.py | 33 +- .../notes/keystone-auth-3155762c524e44df.yaml | 43 ++ requirements.txt | 2 +- 29 files changed, 1287 insertions(+), 598 deletions(-) create mode 100644 ironic/conf/auth.py create mode 100644 ironic/conf/service_catalog.py create mode 100644 ironic/tests/unit/conf/__init__.py create mode 100644 ironic/tests/unit/conf/test_auth.py create mode 100644 releasenotes/notes/keystone-auth-3155762c524e44df.yaml diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 998f29a7e..62724b44f 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -977,9 +977,141 @@ # value) #allowed_direct_url_schemes = -# The secret token given to Swift to allow temporary URL -# downloads. Required for temporary URLs. 
(string value) -#swift_temp_url_key = +# Authentication URL (string value) +#auth_url = + +# Authentication strategy to use when connecting to glance. +# (string value) +# Allowed values: keystone, noauth +#auth_strategy = keystone + +# Authentication type to load (string value) +# Deprecated group/name - [glance]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying +# HTTPs connections. (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# Optional domain ID to use with v3 and v2 parameters. It will +# be used for both the user and project domain in v3 and +# ignored in v2 authentication. (string value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. +# It will be used for both the user and project domain in v3 +# and ignored in v2 authentication. (string value) +#default_domain_name = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Allow to perform insecure SSL (https) requests to glance. +# (boolean value) +#glance_api_insecure = false + +# A list of the glance api servers available to ironic. Prefix +# with https:// for SSL-based glance API servers. Format is +# [hostname|IP]:port. (list value) +#glance_api_servers = + +# Optional path to a CA certificate bundle to be used to +# validate the SSL certificate served by glance. It is used +# when glance_api_insecure is set to False. (string value) +#glance_cafile = + +# Default glance hostname or IP address. (string value) +#glance_host = $my_ip + +# Number of retries when downloading an image from glance. +# (integer value) +#glance_num_retries = 0 + +# Default glance port. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#glance_port = 9292 + +# Default protocol to use when connecting to glance. Set to +# https for SSL. 
(string value) +# Allowed values: http, https +#glance_protocol = http + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# PEM encoded client certificate key file (string value) +#keyfile = + +# User's password (string value) +#password = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Project ID to scope to (string value) +# Deprecated group/name - [glance]/tenant-id +#project_id = + +# Project name to scope to (string value) +# Deprecated group/name - [glance]/tenant-name +#project_name = + +# The account that Glance uses to communicate with Swift. The +# format is "AUTH_uuid". "uuid" is the UUID for the account +# configured in the glance-api.conf. Required for temporary +# URLs when Glance backend is Swift. For example: +# "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". Swift temporary +# URL format: +# "endpoint_url/api_version/[account/]container/object_id" +# (string value) +#swift_account = + +# The Swift API version to create a temporary URL for. +# Defaults to "v1". Swift temporary URL format: +# "endpoint_url/api_version/[account/]container/object_id" +# (string value) +#swift_api_version = v1 + +# The Swift container Glance is configured to store its images +# in. Defaults to "glance", which is the default in glance- +# api.conf. Swift temporary URL format: +# "endpoint_url/api_version/[account/]container/object_id" +# (string value) +#swift_container = glance + +# The "endpoint" (scheme, hostname, optional port) for the +# Swift URL of the form +# "endpoint_url/api_version/[account/]container/object_id". Do +# not include trailing "/". For example, use +# "https://swift.example.com". If using RADOS Gateway, +# endpoint may also contain /swift path; if it does not, it +# will be appended. Required for temporary URLs. 
(string +# value) +#swift_endpoint_url = + +# This should match a config by the same name in the Glance +# configuration file. When set to 0, a single-tenant store +# will only use one container to store all images. When set to +# an integer value between 1 and 32, a single-tenant store +# will use multiple containers to store images, and this value +# will determine how many containers are created. (integer +# value) +#swift_store_multiple_containers_seed = 0 + +# Whether to cache generated Swift temporary URLs. Setting it +# to true is only useful when an image caching proxy is used. +# Defaults to False. (boolean value) +#swift_temp_url_cache_enabled = false # The length of time in seconds that the temporary URL will be # valid for. Defaults to 20 minutes. If some deploys get a 401 @@ -989,11 +1121,6 @@ # swift_temp_url_expected_download_start_delay (integer value) #swift_temp_url_duration = 1200 -# Whether to cache generated Swift temporary URLs. Setting it -# to true is only useful when an image caching proxy is used. -# Defaults to False. (boolean value) -#swift_temp_url_cache_enabled = false - # This is the delay (in seconds) from the time of the deploy # request (when the Swift temporary URL is generated) to when # the IPA ramdisk starts up and URL is used for the image @@ -1007,47 +1134,9 @@ # Minimum value: 0 #swift_temp_url_expected_download_start_delay = 0 -# The "endpoint" (scheme, hostname, optional port) for the -# Swift URL of the form -# "endpoint_url/api_version/[account/]container/object_id". Do -# not include trailing "/". For example, use -# "https://swift.example.com". If using RADOS Gateway, -# endpoint may also contain /swift path; if it does not, it -# will be appended. Required for temporary URLs. (string -# value) -#swift_endpoint_url = - -# The Swift API version to create a temporary URL for. -# Defaults to "v1". 
Swift temporary URL format: -# "endpoint_url/api_version/[account/]container/object_id" -# (string value) -#swift_api_version = v1 - -# The account that Glance uses to communicate with Swift. The -# format is "AUTH_uuid". "uuid" is the UUID for the account -# configured in the glance-api.conf. Required for temporary -# URLs when Glance backend is Swift. For example: -# "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". Swift temporary -# URL format: -# "endpoint_url/api_version/[account/]container/object_id" -# (string value) -#swift_account = - -# The Swift container Glance is configured to store its images -# in. Defaults to "glance", which is the default in glance- -# api.conf. Swift temporary URL format: -# "endpoint_url/api_version/[account/]container/object_id" -# (string value) -#swift_container = glance - -# This should match a config by the same name in the Glance -# configuration file. When set to 0, a single-tenant store -# will only use one container to store all images. When set to -# an integer value between 1 and 32, a single-tenant store -# will use multiple containers to store images, and this value -# will determine how many containers are created. (integer -# value) -#swift_store_multiple_containers_seed = 0 +# The secret token given to Swift to allow temporary URL +# downloads. Required for temporary URLs. (string value) +#swift_temp_url_key = # Type of endpoint to use for temporary URLs. If the Glance # backend is Swift, use "swift"; if it is CEPH with RADOS @@ -1055,41 +1144,30 @@ # Allowed values: swift, radosgw #temp_url_endpoint_type = swift -# Default glance hostname or IP address. (string value) -#glance_host = $my_ip +# Tenant ID (string value) +#tenant_id = -# Default glance port. (port value) -# Minimum value: 0 -# Maximum value: 65535 -#glance_port = 9292 +# Tenant Name (string value) +#tenant_name = -# Default protocol to use when connecting to glance. Set to -# https for SSL. 
(string value) -# Allowed values: http, https -#glance_protocol = http +# Timeout value for http requests (integer value) +#timeout = -# A list of the glance api servers available to ironic. Prefix -# with https:// for SSL-based glance API servers. Format is -# [hostname|IP]:port. (list value) -#glance_api_servers = +# Trust ID (string value) +#trust_id = -# Allow to perform insecure SSL (https) requests to glance. -# (boolean value) -#glance_api_insecure = false +# User's domain id (string value) +#user_domain_id = -# Number of retries when downloading an image from glance. -# (integer value) -#glance_num_retries = 0 +# User's domain name (string value) +#user_domain_name = -# Authentication strategy to use when connecting to glance. -# (string value) -# Allowed values: keystone, noauth -#auth_strategy = keystone +# User id (string value) +#user_id = -# Optional path to a CA certificate bundle to be used to -# validate the SSL certificate served by glance. It is used -# when glance_api_insecure is set to False. (string value) -#glance_cafile = +# Username (string value) +# Deprecated group/name - [glance]/user-name +#username = [iboot] @@ -1189,10 +1267,63 @@ # From ironic # +# Authentication URL (string value) +#auth_url = + +# Authentication type to load (string value) +# Deprecated group/name - [inspector]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying +# HTTPs connections. (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# Optional domain ID to use with v3 and v2 parameters. It will +# be used for both the user and project domain in v3 and +# ignored in v2 authentication. (string value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. +# It will be used for both the user and project domain in v3 +# and ignored in v2 authentication. 
(string value) +#default_domain_name = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + # whether to enable inspection using ironic-inspector (boolean # value) #enabled = false +# Verify HTTPS connections. (boolean value) +#insecure = false + +# PEM encoded client certificate key file (string value) +#keyfile = + +# User's password (string value) +#password = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Project ID to scope to (string value) +# Deprecated group/name - [inspector]/tenant-id +#project_id = + +# Project name to scope to (string value) +# Deprecated group/name - [inspector]/tenant-name +#project_name = + # ironic-inspector HTTP endpoint. If this is not set, the # ironic-inspector client default (http://127.0.0.1:5050) will # be used. (string value) @@ -1202,6 +1333,31 @@ # (integer value) #status_check_period = 60 +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (string value) +#trust_id = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User id (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [inspector]/user-name +#username = + [ipmi] @@ -1631,21 +1787,8 @@ # From ironic # -# URL for connecting to neutron. (string value) -#url = http://$my_ip:9696 - -# Timeout value for connecting to neutron in seconds. (integer -# value) -#url_timeout = 30 - -# Delay value to wait for Neutron agents to setup sufficient -# DHCP configuration for port. (integer value) -# Minimum value: 0 -#port_setup_delay = 0 - -# Client retries in the case of a failed request. 
(integer -# value) -#retries = 3 +# Authentication URL (string value) +#auth_url = # Authentication strategy to use when connecting to neutron. # Running neutron in noauth mode (related to but not affected @@ -1654,17 +1797,111 @@ # Allowed values: keystone, noauth #auth_strategy = keystone +# Authentication type to load (string value) +# Deprecated group/name - [neutron]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying +# HTTPs connections. (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + # Neutron network UUID for the ramdisk to be booted into for # cleaning nodes. Required for "neutron" network interface. It # is also required if cleaning nodes when using "flat" network # interface or "neutron" DHCP provider. (string value) #cleaning_network_uuid = +# Optional domain ID to use with v3 and v2 parameters. It will +# be used for both the user and project domain in v3 and +# ignored in v2 authentication. (string value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. +# It will be used for both the user and project domain in v3 +# and ignored in v2 authentication. (string value) +#default_domain_name = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# PEM encoded client certificate key file (string value) +#keyfile = + +# User's password (string value) +#password = + +# Delay value to wait for Neutron agents to setup sufficient +# DHCP configuration for port. 
(integer value) +# Minimum value: 0 +#port_setup_delay = 0 + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Project ID to scope to (string value) +# Deprecated group/name - [neutron]/tenant-id +#project_id = + +# Project name to scope to (string value) +# Deprecated group/name - [neutron]/tenant-name +#project_name = + # Neutron network UUID for the ramdisk to be booted into for # provisioning nodes. Required for "neutron" network # interface. (string value) #provisioning_network_uuid = +# Client retries in the case of a failed request. (integer +# value) +#retries = 3 + +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (string value) +#trust_id = + +# URL for connecting to neutron. Default value translates to +# 'http://$my_ip:9696' when auth_strategy is 'noauth', and to +# discovery from Keystone catalog when auth_strategy is +# 'keystone'. (string value) +#url = + +# Timeout value for connecting to neutron in seconds. (integer +# value) +#url_timeout = 30 + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User id (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [neutron]/user-name +#username = + [oneview] @@ -2213,6 +2450,91 @@ #action_timeout = 10 +[service_catalog] + +# +# From ironic +# + +# Authentication URL (string value) +#auth_url = + +# Authentication type to load (string value) +# Deprecated group/name - [service_catalog]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying +# HTTPs connections. (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# Optional domain ID to use with v3 and v2 parameters. 
It will +# be used for both the user and project domain in v3 and +# ignored in v2 authentication. (string value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. +# It will be used for both the user and project domain in v3 +# and ignored in v2 authentication. (string value) +#default_domain_name = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# PEM encoded client certificate key file (string value) +#keyfile = + +# User's password (string value) +#password = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Project ID to scope to (string value) +# Deprecated group/name - [service_catalog]/tenant-id +#project_id = + +# Project name to scope to (string value) +# Deprecated group/name - [service_catalog]/tenant-name +#project_name = + +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (string value) +#trust_id = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User id (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [service_catalog]/user-name +#username = + + [snmp] # @@ -2285,10 +2607,88 @@ # From ironic # +# Authentication URL (string value) +#auth_url = + +# Authentication type to load (string value) +# Deprecated group/name - [swift]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying +# HTTPs connections. (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# Optional domain ID to use with v3 and v2 parameters. 
It will +# be used for both the user and project domain in v3 and +# ignored in v2 authentication. (string value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. +# It will be used for both the user and project domain in v3 +# and ignored in v2 authentication. (string value) +#default_domain_name = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# PEM encoded client certificate key file (string value) +#keyfile = + +# User's password (string value) +#password = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Project ID to scope to (string value) +# Deprecated group/name - [swift]/tenant-id +#project_id = + +# Project name to scope to (string value) +# Deprecated group/name - [swift]/tenant-name +#project_name = + # Maximum number of times to retry a Swift request, before # failing. 
(integer value) #swift_max_retries = 2 +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (string value) +#trust_id = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User id (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [swift]/user-name +#username = + [virtualbox] diff --git a/ironic/cmd/conductor.py b/ironic/cmd/conductor.py index 39718e502..794b52ce3 100644 --- a/ironic/cmd/conductor.py +++ b/ironic/cmd/conductor.py @@ -22,12 +22,40 @@ The Ironic Management Service import sys from oslo_config import cfg +from oslo_log import log from oslo_service import service +from ironic.common.i18n import _LW from ironic.common import service as ironic_service +from ironic.conf import auth CONF = cfg.CONF +LOG = log.getLogger(__name__) + +SECTIONS_WITH_AUTH = ( + 'service_catalog', 'neutron', 'glance', 'swift', 'inspector') + + +# TODO(pas-ha) remove this check after deprecation period +def _check_auth_options(conf): + missing = [] + for section in SECTIONS_WITH_AUTH: + if not auth.load_auth(conf, section): + missing.append('[%s]' % section) + if missing: + link = "http://docs.openstack.org/releasenotes/ironic/newton.html" + LOG.warning(_LW("Failed to load authentification credentials from " + "%(missing)s config sections. " + "The corresponding service users' credentials " + "will be loaded from [%(old)s] config section, " + "which is deprecated for this purpose. " + "Please update the config file. 
" + "For more info see %(link)s."), + dict(missing=", ".join(missing), + old=auth.LEGACY_SECTION, + link=link)) + def main(): # Parse config file and command line options, then start logging @@ -37,6 +65,8 @@ def main(): 'ironic.conductor.manager', 'ConductorManager') + _check_auth_options(CONF) + launcher = service.launch(CONF, mgr) launcher.wait() diff --git a/ironic/common/image_service.py b/ironic/common/image_service.py index 4e219715e..6016ac11e 100644 --- a/ironic/common/image_service.py +++ b/ironic/common/image_service.py @@ -35,9 +35,14 @@ from ironic.conf import CONF IMAGE_CHUNK_SIZE = 1024 * 1024 # 1mb -# TODO(rama_y): This import should be removed, -# once https://review.openstack.org/#/c/309070 is merged. -CONF.import_opt('my_ip', 'ironic.netconf') +_GLANCE_SESSION = None + + +def _get_glance_session(): + global _GLANCE_SESSION + if not _GLANCE_SESSION: + _GLANCE_SESSION = keystone.get_session('glance') + return _GLANCE_SESSION def import_versioned_module(version, submodule=None): @@ -52,7 +57,8 @@ def GlanceImageService(client=None, version=1, context=None): service_class = getattr(module, 'GlanceImageService') if (context is not None and CONF.glance.auth_strategy == 'keystone' and not context.auth_token): - context.auth_token = keystone.get_admin_auth_token() + session = _get_glance_session() + context.auth_token = keystone.get_admin_auth_token(session) return service_class(client, version, context) diff --git a/ironic/common/keystone.py b/ironic/common/keystone.py index 8f62123b3..9d79ab5db 100644 --- a/ironic/common/keystone.py +++ b/ironic/common/keystone.py @@ -12,132 +12,125 @@ # License for the specific language governing permissions and limitations # under the License. 
-from keystoneclient import exceptions as ksexception -from oslo_concurrency import lockutils -from six.moves.urllib import parse +"""Central place for handling Keystone authorization and service lookup.""" + +from keystoneauth1 import exceptions as kaexception +from keystoneauth1 import loading as kaloading +from oslo_log import log as logging +import six +from six.moves.urllib import parse # for legacy options loading only from ironic.common import exception from ironic.common.i18n import _ +from ironic.common.i18n import _LE +from ironic.conf import auth as ironic_auth from ironic.conf import CONF -CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token') -_KS_CLIENT = None +LOG = logging.getLogger(__name__) +# FIXME(pas-ha): for backward compat with legacy options loading only def _is_apiv3(auth_url, auth_version): - """Checks if V3 version of API is being used or not. + """Check if V3 version of API is being used or not. This method inspects auth_url and auth_version, and checks whether V3 version of the API is being used or not. - + When no auth_version is specified and auth_url is not a versioned + endpoint, v2.0 is assumed. :param auth_url: a http or https url to be inspected (like 'http://127.0.0.1:9898/'). :param auth_version: a string containing the version (like 'v2', 'v3.0') + or None :returns: True if V3 of the API is being used. 
""" return auth_version == 'v3.0' or '/v3' in parse.urlparse(auth_url).path -def _get_ksclient(token=None): - auth_url = CONF.keystone_authtoken.auth_uri - if not auth_url: - raise exception.KeystoneFailure(_('Keystone API endpoint is missing')) - - auth_version = CONF.keystone_authtoken.auth_version - api_v3 = _is_apiv3(auth_url, auth_version) - - if api_v3: - from keystoneclient.v3 import client - else: - from keystoneclient.v2_0 import client - - auth_url = get_keystone_url(auth_url, auth_version) - try: - if token: - return client.Client(token=token, auth_url=auth_url) - else: - params = {'username': CONF.keystone_authtoken.admin_user, - 'password': CONF.keystone_authtoken.admin_password, - 'tenant_name': CONF.keystone_authtoken.admin_tenant_name, - 'region_name': CONF.keystone.region_name, - 'auth_url': auth_url} - return _get_ksclient_from_conf(client, **params) - except ksexception.Unauthorized: - raise exception.KeystoneUnauthorized() - except ksexception.AuthorizationFailure as err: - raise exception.KeystoneFailure(_('Could not authorize in Keystone:' - ' %s') % err) +def ks_exceptions(f): + """Wraps keystoneclient functions and centralizes exception handling.""" + @six.wraps(f) + def wrapper(*args, **kwargs): + try: + return f(*args, **kwargs) + except kaexception.EndpointNotFound: + service_type = kwargs.get('service_type', 'baremetal') + endpoint_type = kwargs.get('endpoint_type', 'internal') + raise exception.CatalogNotFound( + service_type=service_type, endpoint_type=endpoint_type) + except (kaexception.Unauthorized, kaexception.AuthorizationFailure): + raise exception.KeystoneUnauthorized() + except (kaexception.NoMatchingPlugin, + kaexception.MissingRequiredOptions) as e: + raise exception.ConfigInvalid(six.text_type(e)) + except Exception as e: + LOG.exception(_LE('Keystone request failed: %(msg)s'), + {'msg': six.text_type(e)}) + raise exception.KeystoneFailure(six.text_type(e)) + return wrapper -@lockutils.synchronized('keystone_client', 
'ironic-') -def _get_ksclient_from_conf(client, **params): - global _KS_CLIENT - # NOTE(yuriyz): use Keystone client default gap, to determine whether the - # given token is about to expire - if _KS_CLIENT is None or _KS_CLIENT.auth_ref.will_expire_soon(): - _KS_CLIENT = client.Client(**params) - return _KS_CLIENT +@ks_exceptions +def get_session(group): + auth = ironic_auth.load_auth(CONF, group) or _get_legacy_auth() + if not auth: + msg = _("Failed to load auth from either [%(new)s] or [%(old)s] " + "config sections.") + raise exception.ConfigInvalid(message=msg, new=group, + old=ironic_auth.LEGACY_SECTION) + session = kaloading.load_session_from_conf_options( + CONF, group, auth=auth) + return session -def get_keystone_url(auth_url, auth_version): - """Gives an http/https url to contact keystone. +# FIXME(pas-ha) remove legacy path after deprecation +def _get_legacy_auth(): + """Load auth from keystone_authtoken config section - Given an auth_url and auth_version, this method generates the url in - which keystone can be reached. - - :param auth_url: a http or https url to be inspected (like - 'http://127.0.0.1:9898/'). - :param auth_version: a string containing the version (like v2, v3.0, etc) - :returns: a string containing the keystone url + Used only to provide backward compatibility with old configs. 
""" - api_v3 = _is_apiv3(auth_url, auth_version) - api_version = 'v3' if api_v3 else 'v2.0' - # NOTE(lucasagomes): Get rid of the trailing '/' otherwise urljoin() - # fails to override the version in the URL - return parse.urljoin(auth_url.rstrip('/'), api_version) + conf = getattr(CONF, ironic_auth.LEGACY_SECTION) + legacy_loader = kaloading.get_plugin_loader('password') + auth_params = { + 'auth_url': conf.auth_uri, + 'username': conf.admin_user, + 'password': conf.admin_password, + 'tenant_name': conf.admin_tenant_name + } + api_v3 = _is_apiv3(conf.auth_uri, conf.auth_version) + if api_v3: + # NOTE(pas-ha): mimic defaults of keystoneclient + auth_params.update({ + 'project_domain_id': 'default', + 'user_domain_id': 'default', + }) + return legacy_loader.load_from_options(**auth_params) -def get_service_url(service_type='baremetal', endpoint_type='internal'): +@ks_exceptions +def get_service_url(session, service_type='baremetal', + endpoint_type='internal'): """Wrapper for get service url from keystone service catalog. - Given a service_type and an endpoint_type, this method queries keystone - service catalog and provides the url for the desired endpoint. + Given a service_type and an endpoint_type, this method queries + keystone service catalog and provides the url for the desired + endpoint. :param service_type: the keystone service for which url is required. :param endpoint_type: the type of endpoint for the service. :returns: an http/https url for the desired endpoint. 
""" - ksclient = _get_ksclient() - - if not ksclient.has_service_catalog(): - raise exception.KeystoneFailure(_('No Keystone service catalog ' - 'loaded')) - - try: - endpoint = ksclient.service_catalog.url_for( - service_type=service_type, - endpoint_type=endpoint_type, - region_name=CONF.keystone.region_name) - - except ksexception.EndpointNotFound: - raise exception.CatalogNotFound(service_type=service_type, - endpoint_type=endpoint_type) - - return endpoint + return session.get_endpoint(service_type=service_type, + interface_type=endpoint_type, + region=CONF.keystone.region_name) -def get_admin_auth_token(): - """Get an admin auth_token from the Keystone.""" - ksclient = _get_ksclient() - return ksclient.auth_token +@ks_exceptions +def get_admin_auth_token(session): + """Get admin token. - -def token_expires_soon(token, duration=None): - """Determines if token expiration is about to occur. - - :param duration: time interval in seconds - :returns: boolean : true if expiration is within the given duration + Currently used for inspector, glance and swift clients. + Only swift client does not actually support using sessions directly, + LP #1518938, others will be updated in ironic code. 
""" - ksclient = _get_ksclient(token=token) - return ksclient.auth_ref.will_expire_soon(stale_duration=duration) + return session.get_token() diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py index b79e96d3f..8d2ac6a9b 100644 --- a/ironic/common/neutron.py +++ b/ironic/common/neutron.py @@ -24,29 +24,49 @@ from ironic.conf import CONF LOG = log.getLogger(__name__) +DEFAULT_NEUTRON_URL = 'http://%s:9696' % CONF.my_ip + +_NEUTRON_SESSION = None + + +def _get_neutron_session(): + global _NEUTRON_SESSION + if not _NEUTRON_SESSION: + _NEUTRON_SESSION = keystone.get_session('neutron') + return _NEUTRON_SESSION + def get_client(token=None): - params = { - 'timeout': CONF.neutron.url_timeout, - 'retries': CONF.neutron.retries, - 'insecure': CONF.keystone_authtoken.insecure, - 'ca_cert': CONF.keystone_authtoken.certfile, - } - + params = {'retries': CONF.neutron.retries} + url = CONF.neutron.url if CONF.neutron.auth_strategy == 'noauth': - params['endpoint_url'] = CONF.neutron.url + params['endpoint_url'] = url or DEFAULT_NEUTRON_URL params['auth_strategy'] = 'noauth' + params.update({ + 'timeout': CONF.neutron.url_timeout or CONF.neutron.timeout, + 'insecure': CONF.neutron.insecure, + 'ca_cert': CONF.neutron.cafile}) else: - params['endpoint_url'] = ( - CONF.neutron.url or - keystone.get_service_url(service_type='network')) - params['username'] = CONF.keystone_authtoken.admin_user - params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name - params['password'] = CONF.keystone_authtoken.admin_password - params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '') - if CONF.keystone.region_name: - params['region_name'] = CONF.keystone.region_name - params['token'] = token + session = _get_neutron_session() + if token is None: + params['session'] = session + # NOTE(pas-ha) endpoint_override==None will auto-discover + # endpoint from Keystone catalog. + # Region is needed only in this case. 
+ # SSL related options are ignored as they are already embedded + # in keystoneauth Session object + if url: + params['endpoint_override'] = url + else: + params['region_name'] = CONF.keystone.region_name + else: + params['token'] = token + params['endpoint_url'] = url or keystone.get_service_url( + session, service_type='network') + params.update({ + 'timeout': CONF.neutron.url_timeout or CONF.neutron.timeout, + 'insecure': CONF.neutron.insecure, + 'ca_cert': CONF.neutron.cafile}) return clientv20.Client(**params) diff --git a/ironic/common/service.py b/ironic/common/service.py index e2d4d3c45..a64a2a3d0 100644 --- a/ironic/common/service.py +++ b/ironic/common/service.py @@ -108,7 +108,6 @@ def prepare_service(argv=None): 'qpid.messaging=INFO', 'oslo_messaging=INFO', 'sqlalchemy=WARNING', - 'keystoneclient=INFO', 'stevedore=INFO', 'eventlet.wsgi.server=INFO', 'iso8601=WARNING', diff --git a/ironic/common/swift.py b/ironic/common/swift.py index 5362571a9..c16cb3c77 100644 --- a/ironic/common/swift.py +++ b/ironic/common/swift.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import six from six.moves import http_client from six.moves.urllib import parse from swiftclient import client as swift_client @@ -25,60 +26,39 @@ from ironic.common.i18n import _ from ironic.common import keystone from ironic.conf import CONF -CONF.import_opt('admin_user', 'keystonemiddleware.auth_token', - group='keystone_authtoken') -CONF.import_opt('admin_tenant_name', 'keystonemiddleware.auth_token', - group='keystone_authtoken') -CONF.import_opt('admin_password', 'keystonemiddleware.auth_token', - group='keystone_authtoken') -CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token', - group='keystone_authtoken') -CONF.import_opt('auth_version', 'keystonemiddleware.auth_token', - group='keystone_authtoken') -CONF.import_opt('insecure', 'keystonemiddleware.auth_token', - group='keystone_authtoken') -CONF.import_opt('cafile', 'keystonemiddleware.auth_token', - group='keystone_authtoken') -CONF.import_opt('region_name', 'keystonemiddleware.auth_token', - group='keystone_authtoken') + +_SWIFT_SESSION = None + + +def _get_swift_session(): + global _SWIFT_SESSION + if not _SWIFT_SESSION: + _SWIFT_SESSION = keystone.get_session('swift') + return _SWIFT_SESSION class SwiftAPI(object): """API for communicating with Swift.""" - def __init__(self, - user=None, - tenant_name=None, - key=None, - auth_url=None, - auth_version=None, - region_name=None): - """Constructor for creating a SwiftAPI object. 
- - :param user: the name of the user for Swift account - :param tenant_name: the name of the tenant for Swift account - :param key: the 'password' or key to authenticate with - :param auth_url: the url for authentication - :param auth_version: the version of api to use for authentication - :param region_name: the region used for getting endpoints of swift - """ - user = user or CONF.keystone_authtoken.admin_user - tenant_name = tenant_name or CONF.keystone_authtoken.admin_tenant_name - key = key or CONF.keystone_authtoken.admin_password - auth_url = auth_url or CONF.keystone_authtoken.auth_uri - auth_version = auth_version or CONF.keystone_authtoken.auth_version - auth_url = keystone.get_keystone_url(auth_url, auth_version) - params = {'retries': CONF.swift.swift_max_retries, - 'insecure': CONF.keystone_authtoken.insecure, - 'cacert': CONF.keystone_authtoken.cafile, - 'user': user, - 'tenant_name': tenant_name, - 'key': key, - 'authurl': auth_url, - 'auth_version': auth_version} - region_name = region_name or CONF.keystone_authtoken.region_name - if region_name: - params['os_options'] = {'region_name': region_name} + def __init__(self): + # TODO(pas-ha): swiftclient does not support keystone sessions ATM. + # Must be reworked when LP bug #1518938 is fixed. 
+ session = _get_swift_session() + params = { + 'retries': CONF.swift.swift_max_retries, + 'preauthurl': keystone.get_service_url( + session, + service_type='object-store'), + 'preauthtoken': keystone.get_admin_auth_token(session) + } + # NOTE(pas-ha):session.verify is for HTTPS urls and can be + # - False (do not verify) + # - True (verify but try to locate system CA certificates) + # - Path (verify using specific CA certificate) + verify = session.verify + params['insecure'] = not verify + if verify and isinstance(verify, six.string_types): + params['cacert'] = verify self.connection = swift_client.Connection(**params) @@ -131,8 +111,7 @@ class SwiftAPI(object): raise exception.SwiftOperationError(operation=operation, error=e) - storage_url, token = self.connection.get_auth() - parse_result = parse.urlparse(storage_url) + parse_result = parse.urlparse(self.connection.url) swift_object_path = '/'.join((parse_result.path, container, object)) temp_url_key = account_info['x-account-meta-temp-url-key'] url_path = swift_utils.generate_temp_url(swift_object_path, timeout, diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py index 1ec5ee33c..048343c64 100644 --- a/ironic/conf/__init__.py +++ b/ironic/conf/__init__.py @@ -38,6 +38,7 @@ from ironic.conf import metrics_statsd from ironic.conf import neutron from ironic.conf import oneview from ironic.conf import seamicro +from ironic.conf import service_catalog from ironic.conf import snmp from ironic.conf import ssh from ironic.conf import swift @@ -68,6 +69,7 @@ metrics_statsd.register_opts(CONF) neutron.register_opts(CONF) oneview.register_opts(CONF) seamicro.register_opts(CONF) +service_catalog.register_opts(CONF) snmp.register_opts(CONF) ssh.register_opts(CONF) swift.register_opts(CONF) diff --git a/ironic/conf/auth.py b/ironic/conf/auth.py new file mode 100644 index 000000000..26dcdac1e --- /dev/null +++ b/ironic/conf/auth.py @@ -0,0 +1,79 @@ +# Copyright 2016 Mirantis Inc +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from keystoneauth1 import exceptions as kaexception +from keystoneauth1 import loading as kaloading +from oslo_config import cfg + + +LEGACY_SECTION = 'keystone_authtoken' +OLD_SESSION_OPTS = { + 'certfile': [cfg.DeprecatedOpt('certfile', LEGACY_SECTION)], + 'keyfile': [cfg.DeprecatedOpt('keyfile', LEGACY_SECTION)], + 'cafile': [cfg.DeprecatedOpt('cafile', LEGACY_SECTION)], + 'insecure': [cfg.DeprecatedOpt('insecure', LEGACY_SECTION)], + 'timeout': [cfg.DeprecatedOpt('timeout', LEGACY_SECTION)], +} + +# FIXME(pas-ha) remove import of auth_token section after deprecation period +cfg.CONF.import_group(LEGACY_SECTION, 'keystonemiddleware.auth_token') + + +def load_auth(conf, group): + try: + auth = kaloading.load_auth_from_conf_options(conf, group) + except kaexception.MissingRequiredOptions: + auth = None + return auth + + +def register_auth_opts(conf, group): + """Register session- and auth-related options + + Registers only basic auth options shared by all auth plugins. + The rest are registered at runtime depending on auth plugin used. + """ + kaloading.register_session_conf_options( + conf, group, deprecated_opts=OLD_SESSION_OPTS) + kaloading.register_auth_conf_options(conf, group) + + +def add_auth_opts(options): + """Add auth options to sample config + + As these are dynamically registered at runtime, + this adds options for most used auth_plugins + when generating sample config. 
+ """ + def add_options(opts, opts_to_add): + for new_opt in opts_to_add: + for opt in opts: + if opt.name == new_opt.name: + break + else: + opts.append(new_opt) + + opts = copy.deepcopy(options) + opts.insert(0, kaloading.get_auth_common_conf_options()[0]) + # NOTE(dims): There are a lot of auth plugins, we just generate + # the config options for a few common ones + plugins = ['password', 'v2password', 'v3password'] + for name in plugins: + plugin = kaloading.get_plugin_loader(name) + add_options(opts, kaloading.get_auth_plugin_conf_options(plugin)) + add_options(opts, kaloading.get_session_conf_options()) + opts.sort(key=lambda x: x.name) + return opts diff --git a/ironic/conf/glance.py b/ironic/conf/glance.py index a6312de4a..9c46a8181 100644 --- a/ironic/conf/glance.py +++ b/ironic/conf/glance.py @@ -18,6 +18,7 @@ from oslo_config import cfg from ironic.common.i18n import _ +from ironic.conf import auth opts = [ cfg.ListOpt('allowed_direct_url_schemes', @@ -145,3 +146,8 @@ opts = [ def register_opts(conf): conf.register_opts(opts, group='glance') + auth.register_auth_opts(conf, 'glance') + + +def list_opts(): + return auth.add_auth_opts(opts) diff --git a/ironic/conf/inspector.py b/ironic/conf/inspector.py index 05eeb7533..50613e9f0 100644 --- a/ironic/conf/inspector.py +++ b/ironic/conf/inspector.py @@ -15,6 +15,7 @@ from oslo_config import cfg from ironic.common.i18n import _ +from ironic.conf import auth opts = [ cfg.BoolOpt('enabled', default=False, @@ -31,3 +32,8 @@ opts = [ def register_opts(conf): conf.register_opts(opts, group='inspector') + auth.register_auth_opts(conf, 'inspector') + + +def list_opts(): + return auth.add_auth_opts(opts) diff --git a/ironic/conf/neutron.py b/ironic/conf/neutron.py index 03869d594..4e02f4725 100644 --- a/ironic/conf/neutron.py +++ b/ironic/conf/neutron.py @@ -17,11 +17,15 @@ from oslo_config import cfg from ironic.common.i18n import _ +from ironic.conf import auth opts = [ cfg.StrOpt('url', - 
default='http://$my_ip:9696', - help=_('URL for connecting to neutron.')), + help=_("URL for connecting to neutron. " + "Default value translates to 'http://$my_ip:9696' " + "when auth_strategy is 'noauth', " + "and to discovery from Keystone catalog " + "when auth_strategy is 'keystone'.")), cfg.IntOpt('url_timeout', default=30, help=_('Timeout value for connecting to neutron in seconds.')), @@ -55,3 +59,8 @@ opts = [ def register_opts(conf): conf.register_opts(opts, group='neutron') + auth.register_auth_opts(conf, 'neutron') + + +def list_opts(): + return auth.add_auth_opts(opts) diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py index 18d608791..6e7a258dd 100644 --- a/ironic/conf/opts.py +++ b/ironic/conf/opts.py @@ -45,25 +45,26 @@ _opts = [ ('database', ironic.conf.database.opts), ('deploy', ironic.conf.deploy.opts), ('dhcp', ironic.conf.dhcp.opts), - ('glance', ironic.conf.glance.opts), + ('glance', ironic.conf.glance.list_opts()), ('iboot', ironic.conf.iboot.opts), ('ilo', ironic.conf.ilo.opts), - ('inspector', ironic.conf.inspector.opts), + ('inspector', ironic.conf.inspector.list_opts()), ('ipmi', ironic.conf.ipmi.opts), ('irmc', ironic.conf.irmc.opts), ('iscsi', ironic.drivers.modules.iscsi_deploy.iscsi_opts), ('keystone', ironic.conf.keystone.opts), - ('neutron', ironic.conf.neutron.opts), ('metrics', ironic.conf.metrics.opts), ('metrics_statsd', ironic.conf.metrics_statsd.opts), + ('neutron', ironic.conf.neutron.list_opts()), ('oneview', ironic.conf.oneview.opts), ('pxe', itertools.chain( ironic.drivers.modules.iscsi_deploy.pxe_opts, ironic.drivers.modules.pxe.pxe_opts)), ('seamicro', ironic.conf.seamicro.opts), + ('service_catalog', ironic.conf.service_catalog.list_opts()), ('snmp', ironic.conf.snmp.opts), ('ssh', ironic.conf.ssh.opts), - ('swift', ironic.conf.swift.opts), + ('swift', ironic.conf.swift.list_opts()), ('virtualbox', ironic.conf.virtualbox.opts), ] diff --git a/ironic/conf/service_catalog.py b/ironic/conf/service_catalog.py new file 
mode 100644 index 000000000..610d20e1d --- /dev/null +++ b/ironic/conf/service_catalog.py @@ -0,0 +1,33 @@ +# Copyright 2016 Mirantis Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ +from ironic.conf import auth + +SERVCIE_CATALOG_GROUP = cfg.OptGroup( + 'service_catalog', + title='Access info for Ironic service user', + help=_('Holds credentials and session options to access ' + 'Keystone catalog for Ironic API endpoint resolution.')) + + +def register_opts(conf): + auth.register_auth_opts(conf, SERVCIE_CATALOG_GROUP.name) + + +def list_opts(): + return auth.add_auth_opts([]) diff --git a/ironic/conf/swift.py b/ironic/conf/swift.py index 66ba9daf3..66a0b1f5c 100644 --- a/ironic/conf/swift.py +++ b/ironic/conf/swift.py @@ -17,6 +17,7 @@ from oslo_config import cfg from ironic.common.i18n import _ +from ironic.conf import auth opts = [ cfg.IntOpt('swift_max_retries', @@ -28,3 +29,8 @@ opts = [ def register_opts(conf): conf.register_opts(opts, group='swift') + auth.register_auth_opts(conf, 'swift') + + +def list_opts(): + return auth.add_auth_opts(opts) diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py index 2d7a65c41..61da5b12a 100644 --- a/ironic/drivers/modules/deploy_utils.py +++ b/ironic/drivers/modules/deploy_utils.py @@ -86,6 +86,38 @@ warn_about_unsafe_shred_parameters() # All functions are called from deploy() directly or 
indirectly.
 # They are split for stub-out.
 
+_IRONIC_SESSION = None
+
+
+def _get_ironic_session():
+    global _IRONIC_SESSION
+    if not _IRONIC_SESSION:
+        _IRONIC_SESSION = keystone.get_session('service_catalog')
+    return _IRONIC_SESSION
+
+
+def get_ironic_api_url():
+    """Resolve Ironic API endpoint
+
+    either from config or from Keystone catalog.
+    """
+    ironic_api = CONF.conductor.api_url
+    if not ironic_api:
+        try:
+            ironic_session = _get_ironic_session()
+            ironic_api = keystone.get_service_url(ironic_session)
+        except (exception.KeystoneFailure,
+                exception.CatalogNotFound,
+                exception.KeystoneUnauthorized) as e:
+            raise exception.InvalidParameterValue(_(
+                "Couldn't get the URL of the Ironic API service from the "
+                "configuration file or keystone catalog. Keystone error: "
+                "%s") % six.text_type(e))
+    # NOTE: we should strip '/' from the end because it might be used in
+    # hardcoded ramdisk script
+    ironic_api = ironic_api.rstrip('/')
+    return ironic_api
+
 
 def discovery(portal_address, portal_port):
     """Do iSCSI discovery on portal."""
@@ -998,10 +1030,8 @@ def build_agent_options(node):
     :returns: a dictionary containing the parameters to be passed to
         agent ramdisk. 
""" - ironic_api = (CONF.conductor.api_url or - keystone.get_service_url()).rstrip('/') agent_config_opts = { - 'ipa-api-url': ironic_api, + 'ipa-api-url': get_ironic_api_url(), 'ipa-driver-name': node.driver, # NOTE: The below entry is a temporary workaround for bug/1433812 'coreos.configdrive': 0, diff --git a/ironic/drivers/modules/inspector.py b/ironic/drivers/modules/inspector.py index 907ad2196..59c5a3157 100644 --- a/ironic/drivers/modules/inspector.py +++ b/ironic/drivers/modules/inspector.py @@ -40,6 +40,15 @@ client = importutils.try_import('ironic_inspector_client') INSPECTOR_API_VERSION = (1, 0) +_INSPECTOR_SESSION = None + + +def _get_inspector_session(): + global _INSPECTOR_SESSION + if not _INSPECTOR_SESSION: + _INSPECTOR_SESSION = keystone.get_session('inspector') + return _INSPECTOR_SESSION + class Inspector(base.InspectInterface): """In-band inspection via ironic-inspector project.""" @@ -165,7 +174,8 @@ def _check_status(task): # NOTE(dtantsur): periodic tasks do not have proper tokens in context if CONF.auth_strategy == 'keystone': - task.context.auth_token = keystone.get_admin_auth_token() + session = _get_inspector_session() + task.context.auth_token = keystone.get_admin_auth_token(session) try: status = _call_inspector(client.get_status, node.uuid, task.context) diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py index 3369b2586..e43afad81 100644 --- a/ironic/drivers/modules/iscsi_deploy.py +++ b/ironic/drivers/modules/iscsi_deploy.py @@ -25,7 +25,6 @@ from six.moves.urllib import parse from ironic.common import dhcp_factory from ironic.common import exception from ironic.common.i18n import _ -from ironic.common import keystone from ironic.common import states from ironic.common import utils from ironic.conductor import task_manager @@ -388,16 +387,8 @@ def validate(task): catalog. :raises: MissingParameterValue if no ports are enrolled for the given node. 
""" - try: - # TODO(lucasagomes): Validate the format of the URL - CONF.conductor.api_url or keystone.get_service_url() - except (exception.KeystoneFailure, - exception.CatalogNotFound, - exception.KeystoneUnauthorized) as e: - raise exception.InvalidParameterValue(_( - "Couldn't get the URL of the Ironic API service from the " - "configuration file or keystone catalog. Keystone error: %s") % e) - + # TODO(lucasagomes): Validate the format of the URL + deploy_utils.get_ironic_api_url() # Validate the root device hints deploy_utils.parse_root_device_hints(task.node) deploy_utils.parse_instance_info(task.node) diff --git a/ironic/tests/unit/common/test_image_service.py b/ironic/tests/unit/common/test_image_service.py index 079f07afd..57ac3be83 100644 --- a/ironic/tests/unit/common/test_image_service.py +++ b/ironic/tests/unit/common/test_image_service.py @@ -25,7 +25,6 @@ from six.moves import http_client from ironic.common import exception from ironic.common.glance_service.v1 import image_service as glance_v1_service from ironic.common import image_service -from ironic.common import keystone from ironic.tests import base if six.PY3: @@ -254,56 +253,59 @@ class FileImageServiceTestCase(base.TestCase): class ServiceGetterTestCase(base.TestCase): - @mock.patch.object(keystone, 'get_admin_auth_token', autospec=True) + @mock.patch.object(image_service, '_get_glance_session') @mock.patch.object(glance_v1_service.GlanceImageService, '__init__', return_value=None, autospec=True) - def test_get_glance_image_service(self, glance_service_mock, token_mock): + def test_get_glance_image_service(self, glance_service_mock, + session_mock): image_href = 'image-uuid' self.context.auth_token = 'fake' image_service.get_image_service(image_href, context=self.context) glance_service_mock.assert_called_once_with(mock.ANY, None, 1, self.context) - self.assertFalse(token_mock.called) + self.assertFalse(session_mock.called) - @mock.patch.object(keystone, 'get_admin_auth_token', 
autospec=True) + @mock.patch.object(image_service, '_get_glance_session') @mock.patch.object(glance_v1_service.GlanceImageService, '__init__', return_value=None, autospec=True) def test_get_glance_image_service_url(self, glance_service_mock, - token_mock): + session_mock): image_href = 'glance://image-uuid' self.context.auth_token = 'fake' image_service.get_image_service(image_href, context=self.context) glance_service_mock.assert_called_once_with(mock.ANY, None, 1, self.context) - self.assertFalse(token_mock.called) + self.assertFalse(session_mock.called) - @mock.patch.object(keystone, 'get_admin_auth_token', autospec=True) + @mock.patch.object(image_service, '_get_glance_session') @mock.patch.object(glance_v1_service.GlanceImageService, '__init__', return_value=None, autospec=True) def test_get_glance_image_service_no_token(self, glance_service_mock, - token_mock): + session_mock): image_href = 'image-uuid' self.context.auth_token = None - token_mock.return_value = 'admin-token' + sess = mock.Mock() + sess.get_token.return_value = 'admin-token' + session_mock.return_value = sess image_service.get_image_service(image_href, context=self.context) glance_service_mock.assert_called_once_with(mock.ANY, None, 1, self.context) - token_mock.assert_called_once_with() + sess.get_token.assert_called_once_with() self.assertEqual('admin-token', self.context.auth_token) - @mock.patch.object(keystone, 'get_admin_auth_token', autospec=True) + @mock.patch.object(image_service, '_get_glance_session') @mock.patch.object(glance_v1_service.GlanceImageService, '__init__', return_value=None, autospec=True) def test_get_glance_image_service_token_not_needed(self, glance_service_mock, - token_mock): + session_mock): image_href = 'image-uuid' self.context.auth_token = None self.config(auth_strategy='noauth', group='glance') image_service.get_image_service(image_href, context=self.context) glance_service_mock.assert_called_once_with(mock.ANY, None, 1, self.context) - 
self.assertFalse(token_mock.called) + self.assertFalse(session_mock.called) self.assertIsNone(self.context.auth_token) @mock.patch.object(image_service.HttpImageService, '__init__', diff --git a/ironic/tests/unit/common/test_keystone.py b/ironic/tests/unit/common/test_keystone.py index f3e3b4cbb..be5f5a81b 100644 --- a/ironic/tests/unit/common/test_keystone.py +++ b/ironic/tests/unit/common/test_keystone.py @@ -12,174 +12,138 @@ # License for the specific language governing permissions and limitations # under the License. -from keystoneclient import exceptions as ksexception +from keystoneauth1 import exceptions as ksexception +from keystoneauth1 import loading as kaloading import mock +from oslo_config import cfg +from oslo_config import fixture from ironic.common import exception from ironic.common import keystone +from ironic.conf import auth as ironic_auth from ironic.tests import base -class FakeCatalog(object): - def url_for(self, **kwargs): - return 'fake-url' - - -class FakeAccessInfo(object): - def will_expire_soon(self): - pass - - -class FakeClient(object): - def __init__(self, **kwargs): - self.service_catalog = FakeCatalog() - self.auth_ref = FakeAccessInfo() - - def has_service_catalog(self): - return True - - class KeystoneTestCase(base.TestCase): def setUp(self): super(KeystoneTestCase, self).setUp() - self.config(group='keystone_authtoken', - auth_uri='http://127.0.0.1:9898/', - admin_user='fake', admin_password='fake', - admin_tenant_name='fake') - self.config(group='keystone', region_name='fake') - keystone._KS_CLIENT = None + self.config(region_name='fake_region', + group='keystone') + self.test_group = 'test_group' + self.cfg_fixture.conf.register_group(cfg.OptGroup(self.test_group)) + ironic_auth.register_auth_opts(self.cfg_fixture.conf, self.test_group) + self.config(auth_type='password', + group=self.test_group) + # NOTE(pas-ha) this is due to auth_plugin options + # being dynamically registered on first load, + # but we need to set the 
config before + plugin = kaloading.get_plugin_loader('password') + opts = kaloading.get_auth_plugin_conf_options(plugin) + self.cfg_fixture.register_opts(opts, group=self.test_group) + self.config(auth_url='http://127.0.0.1:9898', + username='fake_user', + password='fake_pass', + project_name='fake_tenant', + group=self.test_group) - def test_failure_authorization(self): - self.assertRaises(exception.KeystoneFailure, keystone.get_service_url) + def _set_config(self): + self.cfg_fixture = self.useFixture(fixture.Config()) + self.addCleanup(cfg.CONF.reset) - @mock.patch.object(FakeCatalog, 'url_for', autospec=True) - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_get_url(self, mock_ks, mock_uf): + def test_get_url(self): fake_url = 'http://127.0.0.1:6385' - mock_uf.return_value = fake_url - mock_ks.return_value = FakeClient() - res = keystone.get_service_url() + mock_sess = mock.Mock() + mock_sess.get_endpoint.return_value = fake_url + res = keystone.get_service_url(mock_sess) self.assertEqual(fake_url, res) - @mock.patch.object(FakeCatalog, 'url_for', autospec=True) - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_url_not_found(self, mock_ks, mock_uf): - mock_uf.side_effect = ksexception.EndpointNotFound - mock_ks.return_value = FakeClient() - self.assertRaises(exception.CatalogNotFound, keystone.get_service_url) + def test_get_url_failure(self): + exc_map = ( + (ksexception.Unauthorized, exception.KeystoneUnauthorized), + (ksexception.EndpointNotFound, exception.CatalogNotFound), + (ksexception.EmptyCatalog, exception.CatalogNotFound), + (ksexception.Unauthorized, exception.KeystoneUnauthorized), + ) + for kexc, irexc in exc_map: + mock_sess = mock.Mock() + mock_sess.get_endpoint.side_effect = kexc + self.assertRaises(irexc, keystone.get_service_url, mock_sess) - @mock.patch.object(FakeClient, 'has_service_catalog', autospec=True) - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def 
test_no_catalog(self, mock_ks, mock_hsc): - mock_hsc.return_value = False - mock_ks.return_value = FakeClient() - self.assertRaises(exception.KeystoneFailure, keystone.get_service_url) + def test_get_admin_auth_token(self): + mock_sess = mock.Mock() + mock_sess.get_token.return_value = 'fake_token' + self.assertEqual('fake_token', + keystone.get_admin_auth_token(mock_sess)) - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_unauthorized(self, mock_ks): - mock_ks.side_effect = ksexception.Unauthorized + def test_get_admin_auth_token_failure(self): + mock_sess = mock.Mock() + mock_sess.get_token.side_effect = ksexception.Unauthorized self.assertRaises(exception.KeystoneUnauthorized, - keystone.get_service_url) + keystone.get_admin_auth_token, mock_sess) - def test_get_service_url_fail_missing_auth_uri(self): - self.config(group='keystone_authtoken', auth_uri=None) - self.assertRaises(exception.KeystoneFailure, - keystone.get_service_url) + @mock.patch.object(ironic_auth, 'load_auth') + def test_get_session(self, auth_get_mock): + auth_mock = mock.Mock() + auth_get_mock.return_value = auth_mock + session = keystone.get_session(self.test_group) + self.assertEqual(auth_mock, session.auth) - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_get_service_url_versionless_v2(self, mock_ks): - mock_ks.return_value = FakeClient() - self.config(group='keystone_authtoken', auth_uri='http://127.0.0.1') - expected_url = 'http://127.0.0.1/v2.0' - keystone.get_service_url() - mock_ks.assert_called_once_with(username='fake', password='fake', - tenant_name='fake', - region_name='fake', - auth_url=expected_url) + @mock.patch.object(keystone, '_get_legacy_auth', return_value=None) + @mock.patch.object(ironic_auth, 'load_auth', return_value=None) + def test_get_session_fail(self, auth_get_mock, legacy_get_mock): + self.assertRaisesRegexp( + exception.KeystoneFailure, + "Failed to load auth from either", + keystone.get_session, 
self.test_group) - @mock.patch('keystoneclient.v3.client.Client', autospec=True) - def test_get_service_url_versionless_v3(self, mock_ks): - mock_ks.return_value = FakeClient() - self.config(group='keystone_authtoken', auth_version='v3.0', - auth_uri='http://127.0.0.1') - expected_url = 'http://127.0.0.1/v3' - keystone.get_service_url() - mock_ks.assert_called_once_with(username='fake', password='fake', - tenant_name='fake', - region_name='fake', - auth_url=expected_url) + @mock.patch('keystoneauth1.loading.load_auth_from_conf_options') + @mock.patch('ironic.common.keystone._get_legacy_auth') + def test_get_session_failed_new_auth(self, legacy_get_mock, load_mock): + legacy_mock = mock.Mock() + legacy_get_mock.return_value = legacy_mock + load_mock.side_effect = [None, ksexception.MissingRequiredOptions] + self.assertEqual(legacy_mock, + keystone.get_session(self.test_group).auth) - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_get_service_url_version_override(self, mock_ks): - mock_ks.return_value = FakeClient() - self.config(group='keystone_authtoken', - auth_uri='http://127.0.0.1/v2.0/') - expected_url = 'http://127.0.0.1/v2.0' - keystone.get_service_url() - mock_ks.assert_called_once_with(username='fake', password='fake', - tenant_name='fake', - region_name='fake', - auth_url=expected_url) - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_get_admin_auth_token(self, mock_ks): - fake_client = FakeClient() - fake_client.auth_token = '123456' - mock_ks.return_value = fake_client - self.assertEqual('123456', keystone.get_admin_auth_token()) +@mock.patch('keystoneauth1.loading._plugins.identity.generic.Password.' 
+ 'load_from_options') +class KeystoneLegacyTestCase(base.TestCase): + def setUp(self): + super(KeystoneLegacyTestCase, self).setUp() + self.test_group = 'test_group' + self.cfg_fixture.conf.register_group(cfg.OptGroup(self.test_group)) + self.config(group=ironic_auth.LEGACY_SECTION, + auth_uri='http://127.0.0.1:9898', + admin_user='fake_user', + admin_password='fake_pass', + admin_tenant_name='fake_tenant') + ironic_auth.register_auth_opts(self.cfg_fixture.conf, self.test_group) + self.config(group=self.test_group, + auth_type=None) + self.expected = dict( + auth_url='http://127.0.0.1:9898', + username='fake_user', + password='fake_pass', + tenant_name='fake_tenant') - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_get_region_name_v2(self, mock_ks): - mock_ks.return_value = FakeClient() - self.config(group='keystone', region_name='fake_region') - expected_url = 'http://127.0.0.1:9898/v2.0' - expected_region = 'fake_region' - keystone.get_service_url() - mock_ks.assert_called_once_with(username='fake', password='fake', - tenant_name='fake', - region_name=expected_region, - auth_url=expected_url) + def _set_config(self): + self.cfg_fixture = self.useFixture(fixture.Config()) + self.addCleanup(cfg.CONF.reset) - @mock.patch('keystoneclient.v3.client.Client', autospec=True) - def test_get_region_name_v3(self, mock_ks): - mock_ks.return_value = FakeClient() - self.config(group='keystone', region_name='fake_region') - self.config(group='keystone_authtoken', auth_version='v3.0') - expected_url = 'http://127.0.0.1:9898/v3' - expected_region = 'fake_region' - keystone.get_service_url() - mock_ks.assert_called_once_with(username='fake', password='fake', - tenant_name='fake', - region_name=expected_region, - auth_url=expected_url) + @mock.patch.object(ironic_auth, 'load_auth', return_value=None) + def test_legacy_loading_v2(self, load_auth_mock, load_mock): + keystone.get_session(self.test_group) + load_mock.assert_called_once_with(**self.expected) 
- @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_cache_client_init(self, mock_ks): - fake_client = FakeClient() - mock_ks.return_value = fake_client - self.assertEqual(fake_client, keystone._get_ksclient()) - self.assertEqual(fake_client, keystone._KS_CLIENT) - self.assertEqual(1, mock_ks.call_count) - - @mock.patch.object(FakeAccessInfo, 'will_expire_soon', autospec=True) - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_cache_client_cached(self, mock_ks, mock_expire): - mock_expire.return_value = False - fake_client = FakeClient() - keystone._KS_CLIENT = fake_client - self.assertEqual(fake_client, keystone._get_ksclient()) - self.assertEqual(fake_client, keystone._KS_CLIENT) - self.assertFalse(mock_ks.called) - - @mock.patch.object(FakeAccessInfo, 'will_expire_soon', autospec=True) - @mock.patch('keystoneclient.v2_0.client.Client', autospec=True) - def test_cache_client_expired(self, mock_ks, mock_expire): - mock_expire.return_value = True - fake_client = FakeClient() - keystone._KS_CLIENT = fake_client - new_client = FakeClient() - mock_ks.return_value = new_client - self.assertEqual(new_client, keystone._get_ksclient()) - self.assertEqual(new_client, keystone._KS_CLIENT) - self.assertEqual(1, mock_ks.call_count) + @mock.patch.object(ironic_auth, 'load_auth', return_value=None) + def test_legacy_loading_v3(self, load_auth_mock, load_mock): + self.config( + auth_version='v3.0', + group=ironic_auth.LEGACY_SECTION) + self.expected.update(dict( + project_domain_id='default', + user_domain_id='default')) + keystone.get_session(self.test_group) + load_mock.assert_called_once_with(**self.expected) diff --git a/ironic/tests/unit/common/test_neutron.py b/ironic/tests/unit/common/test_neutron.py index 0076d741a..54c2f967f 100644 --- a/ironic/tests/unit/common/test_neutron.py +++ b/ironic/tests/unit/common/test_neutron.py @@ -19,86 +19,80 @@ from oslo_utils import uuidutils from ironic.common import exception from 
ironic.common import neutron from ironic.conductor import task_manager +# from ironic.conf import auth as ironic_auth from ironic.tests import base from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.objects import utils as object_utils +@mock.patch.object(neutron, '_get_neutron_session') +@mock.patch.object(client.Client, "__init__") class TestNeutronClient(base.TestCase): def setUp(self): super(TestNeutronClient, self).setUp() - self.config(url='test-url', - url_timeout=30, + self.config(url_timeout=30, retries=2, group='neutron') - self.config(insecure=False, - certfile='test-file', - admin_user='test-admin-user', + self.config(admin_user='test-admin-user', admin_tenant_name='test-admin-tenant', admin_password='test-admin-password', auth_uri='test-auth-uri', group='keystone_authtoken') + # TODO(pas-ha) register session options to test legacy path + self.config(insecure=False, + cafile='test-file', + group='neutron') - @mock.patch.object(client.Client, "__init__") - def test_get_neutron_client_with_token(self, mock_client_init): + def test_get_neutron_client_with_token(self, mock_client_init, + mock_session): token = 'test-token-123' + sess = mock.Mock() + sess.get_endpoint.return_value = 'fake-url' + mock_session.return_value = sess expected = {'timeout': 30, 'retries': 2, 'insecure': False, 'ca_cert': 'test-file', 'token': token, - 'endpoint_url': 'test-url', - 'username': 'test-admin-user', - 'tenant_name': 'test-admin-tenant', - 'password': 'test-admin-password', - 'auth_url': 'test-auth-uri'} + 'endpoint_url': 'fake-url'} mock_client_init.return_value = None neutron.get_client(token=token) mock_client_init.assert_called_once_with(**expected) - @mock.patch.object(client.Client, "__init__") - def test_get_neutron_client_without_token(self, mock_client_init): - expected = {'timeout': 30, - 'retries': 2, - 'insecure': False, - 'ca_cert': 'test-file', - 'token': None, - 'endpoint_url': 
'test-url', - 'username': 'test-admin-user', - 'tenant_name': 'test-admin-tenant', - 'password': 'test-admin-password', - 'auth_url': 'test-auth-uri'} - + def test_get_neutron_client_without_token(self, mock_client_init, + mock_session): + self.config(url='test-url', + group='neutron') + sess = mock.Mock() + mock_session.return_value = sess + expected = {'retries': 2, + 'endpoint_override': 'test-url', + 'session': sess} mock_client_init.return_value = None neutron.get_client(token=None) mock_client_init.assert_called_once_with(**expected) - @mock.patch.object(client.Client, "__init__") - def test_get_neutron_client_with_region(self, mock_client_init): - expected = {'timeout': 30, - 'retries': 2, - 'insecure': False, - 'ca_cert': 'test-file', - 'token': None, - 'endpoint_url': 'test-url', - 'username': 'test-admin-user', - 'tenant_name': 'test-admin-tenant', - 'password': 'test-admin-password', - 'auth_url': 'test-auth-uri', - 'region_name': 'test-region'} - - self.config(region_name='test-region', + def test_get_neutron_client_with_region(self, mock_client_init, + mock_session): + self.config(region_name='fake_region', group='keystone') + sess = mock.Mock() + mock_session.return_value = sess + expected = {'retries': 2, + 'region_name': 'fake_region', + 'session': sess} + mock_client_init.return_value = None neutron.get_client(token=None) mock_client_init.assert_called_once_with(**expected) - @mock.patch.object(client.Client, "__init__") - def test_get_neutron_client_noauth(self, mock_client_init): - self.config(auth_strategy='noauth', group='neutron') + def test_get_neutron_client_noauth(self, mock_client_init, mock_session): + self.config(auth_strategy='noauth', + url='test-url', + group='neutron') expected = {'ca_cert': 'test-file', 'insecure': False, 'endpoint_url': 'test-url', @@ -110,7 +104,7 @@ class TestNeutronClient(base.TestCase): neutron.get_client(token=None) mock_client_init.assert_called_once_with(**expected) - def test_out_range_auth_strategy(self): 
+ def test_out_range_auth_strategy(self, mock_client_init, mock_session): self.assertRaises(ValueError, cfg.CONF.set_override, 'auth_strategy', 'fake', 'neutron', enforce_type=True) @@ -133,9 +127,13 @@ class TestNeutronNetworkActions(db_base.DbTestCase): self.neutron_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f00', 'mac_address': '52:54:00:cf:2d:32'} self.network_uuid = uuidutils.generate_uuid() + self.client_mock = mock.Mock() + patcher = mock.patch('ironic.common.neutron.get_client', + return_value=self.client_mock) + patcher.start() + self.addCleanup(patcher.stop) - @mock.patch.object(client.Client, 'create_port') - def test_add_ports_to_vlan_network(self, create_mock): + def test_add_ports_to_vlan_network(self): # Ports will be created only if pxe_enabled is True object_utils.create_test_port( self.context, node_id=self.node.id, @@ -159,15 +157,16 @@ class TestNeutronNetworkActions(db_base.DbTestCase): } } # Ensure we can create ports - create_mock.return_value = {'port': self.neutron_port} + self.client_mock.create_port.return_value = { + 'port': self.neutron_port} expected = {port.uuid: self.neutron_port['id']} with task_manager.acquire(self.context, self.node.uuid) as task: ports = neutron.add_ports_to_network(task, self.network_uuid) self.assertEqual(expected, ports) - create_mock.assert_called_once_with(expected_body) + self.client_mock.create_port.assert_called_once_with( + expected_body) - @mock.patch.object(client.Client, 'create_port') - def test_add_ports_to_flat_network(self, create_mock): + def test_add_ports_to_flat_network(self): port = self.ports[0] expected_body = { 'port': { @@ -183,16 +182,17 @@ class TestNeutronNetworkActions(db_base.DbTestCase): } } # Ensure we can create ports - create_mock.return_value = {'port': self.neutron_port} + self.client_mock.create_port.return_value = { + 'port': self.neutron_port} expected = {port.uuid: self.neutron_port['id']} with task_manager.acquire(self.context, self.node.uuid) as task: ports = 
neutron.add_ports_to_network(task, self.network_uuid, is_flat=True) self.assertEqual(expected, ports) - create_mock.assert_called_once_with(expected_body) + self.client_mock.create_port.assert_called_once_with( + expected_body) - @mock.patch.object(client.Client, 'create_port') - def test_add_ports_to_flat_network_no_neutron_port_id(self, create_mock): + def test_add_ports_to_flat_network_no_neutron_port_id(self): port = self.ports[0] expected_body = { 'port': { @@ -208,15 +208,16 @@ class TestNeutronNetworkActions(db_base.DbTestCase): } } del self.neutron_port['id'] - create_mock.return_value = {'port': self.neutron_port} + self.client_mock.create_port.return_value = { + 'port': self.neutron_port} with task_manager.acquire(self.context, self.node.uuid) as task: self.assertRaises(exception.NetworkError, neutron.add_ports_to_network, task, self.network_uuid, is_flat=True) - create_mock.assert_called_once_with(expected_body) + self.client_mock.create_port.assert_called_once_with( + expected_body) - @mock.patch.object(client.Client, 'create_port') - def test_add_ports_to_vlan_network_instance_uuid(self, create_mock): + def test_add_ports_to_vlan_network_instance_uuid(self): self.node.instance_uuid = uuidutils.generate_uuid() self.node.save() port = self.ports[0] @@ -235,18 +236,18 @@ class TestNeutronNetworkActions(db_base.DbTestCase): } } # Ensure we can create ports - create_mock.return_value = {'port': self.neutron_port} + self.client_mock.create_port.return_value = {'port': self.neutron_port} expected = {port.uuid: self.neutron_port['id']} with task_manager.acquire(self.context, self.node.uuid) as task: ports = neutron.add_ports_to_network(task, self.network_uuid) self.assertEqual(expected, ports) - create_mock.assert_called_once_with(expected_body) + self.client_mock.create_port.assert_called_once_with(expected_body) @mock.patch.object(neutron, 'rollback_ports') - @mock.patch.object(client.Client, 'create_port') - def test_add_network_fail(self, create_mock, 
rollback_mock): + def test_add_network_fail(self, rollback_mock): # Check that if creating a port fails, the ports are cleaned up - create_mock.side_effect = neutron_client_exc.ConnectionFailed + self.client_mock.create_port.side_effect = \ + neutron_client_exc.ConnectionFailed with task_manager.acquire(self.context, self.node.uuid) as task: self.assertRaisesRegex( @@ -255,9 +256,8 @@ class TestNeutronNetworkActions(db_base.DbTestCase): rollback_mock.assert_called_once_with(task, self.network_uuid) @mock.patch.object(neutron, 'rollback_ports') - @mock.patch.object(client.Client, 'create_port', return_value={}) - def test_add_network_fail_create_any_port_empty(self, create_mock, - rollback_mock): + def test_add_network_fail_create_any_port_empty(self, rollback_mock): + self.client_mock.create_port.return_value = {} with task_manager.acquire(self.context, self.node.uuid) as task: self.assertRaisesRegex( exception.NetworkError, 'any PXE enabled port', @@ -266,16 +266,16 @@ class TestNeutronNetworkActions(db_base.DbTestCase): @mock.patch.object(neutron, 'LOG') @mock.patch.object(neutron, 'rollback_ports') - @mock.patch.object(client.Client, 'create_port') - def test_add_network_fail_create_some_ports_empty(self, create_mock, - rollback_mock, log_mock): + def test_add_network_fail_create_some_ports_empty(self, rollback_mock, + log_mock): port2 = object_utils.create_test_port( self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='52:54:55:cf:2d:32', extra={'vif_port_id': uuidutils.generate_uuid()} ) - create_mock.side_effect = [{'port': self.neutron_port}, {}] + self.client_mock.create_port.side_effect = [ + {'port': self.neutron_port}, {}] with task_manager.acquire(self.context, self.node.uuid) as task: neutron.add_ports_to_network(task, self.network_uuid) self.assertIn(str(port2.uuid), @@ -309,35 +309,39 @@ class TestNeutronNetworkActions(db_base.DbTestCase): 'mac_address': [self.ports[0].address]} ) - @mock.patch.object(client.Client, 
'delete_port') - @mock.patch.object(client.Client, 'list_ports') - def test_remove_neutron_ports(self, list_mock, delete_mock): + def test_remove_neutron_ports(self): with task_manager.acquire(self.context, self.node.uuid) as task: - list_mock.return_value = {'ports': [self.neutron_port]} + self.client_mock.list_ports.return_value = { + 'ports': [self.neutron_port]} neutron.remove_neutron_ports(task, {'param': 'value'}) - list_mock.assert_called_once_with(**{'param': 'value'}) - delete_mock.assert_called_once_with(self.neutron_port['id']) + self.client_mock.list_ports.assert_called_once_with( + **{'param': 'value'}) + self.client_mock.delete_port.assert_called_once_with( + self.neutron_port['id']) - @mock.patch.object(client.Client, 'list_ports') - def test_remove_neutron_ports_list_fail(self, list_mock): + def test_remove_neutron_ports_list_fail(self): with task_manager.acquire(self.context, self.node.uuid) as task: - list_mock.side_effect = neutron_client_exc.ConnectionFailed + self.client_mock.list_ports.side_effect = \ + neutron_client_exc.ConnectionFailed self.assertRaisesRegex( exception.NetworkError, 'Could not get given network VIF', neutron.remove_neutron_ports, task, {'param': 'value'}) - list_mock.assert_called_once_with(**{'param': 'value'}) + self.client_mock.list_ports.assert_called_once_with( + **{'param': 'value'}) - @mock.patch.object(client.Client, 'delete_port') - @mock.patch.object(client.Client, 'list_ports') - def test_remove_neutron_ports_delete_fail(self, list_mock, delete_mock): + def test_remove_neutron_ports_delete_fail(self): with task_manager.acquire(self.context, self.node.uuid) as task: - delete_mock.side_effect = neutron_client_exc.ConnectionFailed - list_mock.return_value = {'ports': [self.neutron_port]} + self.client_mock.delete_port.side_effect = \ + neutron_client_exc.ConnectionFailed + self.client_mock.list_ports.return_value = { + 'ports': [self.neutron_port]} self.assertRaisesRegex( exception.NetworkError, 'Could not remove 
VIF', neutron.remove_neutron_ports, task, {'param': 'value'}) - list_mock.assert_called_once_with(**{'param': 'value'}) - delete_mock.assert_called_once_with(self.neutron_port['id']) + self.client_mock.list_ports.assert_called_once_with( + **{'param': 'value'}) + self.client_mock.delete_port.assert_called_once_with( + self.neutron_port['id']) def test_get_node_portmap(self): with task_manager.acquire(self.context, self.node.uuid) as task: diff --git a/ironic/tests/unit/common/test_swift.py b/ironic/tests/unit/common/test_swift.py index e5e91fec7..e5bc306fd 100644 --- a/ironic/tests/unit/common/test_swift.py +++ b/ironic/tests/unit/common/test_swift.py @@ -30,6 +30,7 @@ if six.PY3: file = io.BytesIO +@mock.patch.object(swift, '_get_swift_session') @mock.patch.object(swift_client, 'Connection', autospec=True) class SwiftTestCase(base.TestCase): @@ -37,42 +38,22 @@ class SwiftTestCase(base.TestCase): super(SwiftTestCase, self).setUp() self.swift_exception = swift_exception.ClientException('', '') - self.config(admin_user='admin', group='keystone_authtoken') - self.config(admin_tenant_name='tenant', group='keystone_authtoken') - self.config(admin_password='password', group='keystone_authtoken') - self.config(auth_uri='http://authurl', group='keystone_authtoken') - self.config(auth_version='2', group='keystone_authtoken') - self.config(swift_max_retries=2, group='swift') - self.config(insecure=0, group='keystone_authtoken') - self.config(cafile='/path/to/ca/file', group='keystone_authtoken') - self.expected_params = {'retries': 2, - 'insecure': 0, - 'user': 'admin', - 'tenant_name': 'tenant', - 'key': 'password', - 'authurl': 'http://authurl/v2.0', - 'cacert': '/path/to/ca/file', - 'auth_version': '2'} - - def test___init__(self, connection_mock): + def test___init__(self, connection_mock, keystone_mock): + sess = mock.Mock() + sess.get_endpoint.return_value = 'http://swift:8080' + sess.get_token.return_value = 'fake_token' + sess.verify = '/path/to/ca/file' + 
keystone_mock.return_value = sess swift.SwiftAPI() - connection_mock.assert_called_once_with(**self.expected_params) - - def test__init__with_region_from_config(self, connection_mock): - self.config(region_name='region1', group='keystone_authtoken') - swift.SwiftAPI() - params = self.expected_params.copy() - params['os_options'] = {'region_name': 'region1'} - connection_mock.assert_called_once_with(**params) - - def test__init__with_region_from_constructor(self, connection_mock): - swift.SwiftAPI(region_name='region1') - params = self.expected_params.copy() - params['os_options'] = {'region_name': 'region1'} + params = {'retries': 2, + 'preauthurl': 'http://swift:8080', + 'preauthtoken': 'fake_token', + 'insecure': False, + 'cacert': '/path/to/ca/file'} connection_mock.assert_called_once_with(**params) @mock.patch.object(__builtin__, 'open', autospec=True) - def test_create_object(self, open_mock, connection_mock): + def test_create_object(self, open_mock, connection_mock, keystone_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value mock_file_handle = mock.MagicMock(spec=file) @@ -91,7 +72,8 @@ class SwiftTestCase(base.TestCase): @mock.patch.object(__builtin__, 'open', autospec=True) def test_create_object_create_container_fails(self, open_mock, - connection_mock): + connection_mock, + keystone_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value connection_obj_mock.put_container.side_effect = self.swift_exception @@ -102,7 +84,8 @@ class SwiftTestCase(base.TestCase): self.assertFalse(connection_obj_mock.put_object.called) @mock.patch.object(__builtin__, 'open', autospec=True) - def test_create_object_put_object_fails(self, open_mock, connection_mock): + def test_create_object_put_object_fails(self, open_mock, connection_mock, + keystone_mock): swiftapi = swift.SwiftAPI() mock_file_handle = mock.MagicMock(spec=file) mock_file_handle.__enter__.return_value = 'file-object' @@ -118,30 +101,30 @@ class 
SwiftTestCase(base.TestCase): 'container', 'object', 'file-object', headers=None) @mock.patch.object(swift_utils, 'generate_temp_url', autospec=True) - def test_get_temp_url(self, gen_temp_url_mock, connection_mock): + def test_get_temp_url(self, gen_temp_url_mock, connection_mock, + keystone_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value - auth = ['http://host/v1/AUTH_tenant_id', 'token'] - connection_obj_mock.get_auth.return_value = auth + connection_obj_mock.url = 'http://host/v1/AUTH_tenant_id' head_ret_val = {'x-account-meta-temp-url-key': 'secretkey'} connection_obj_mock.head_account.return_value = head_ret_val gen_temp_url_mock.return_value = 'temp-url-path' temp_url_returned = swiftapi.get_temp_url('container', 'object', 10) - connection_obj_mock.get_auth.assert_called_once_with() connection_obj_mock.head_account.assert_called_once_with() object_path_expected = '/v1/AUTH_tenant_id/container/object' gen_temp_url_mock.assert_called_once_with(object_path_expected, 10, 'secretkey', 'GET') self.assertEqual('http://host/temp-url-path', temp_url_returned) - def test_delete_object(self, connection_mock): + def test_delete_object(self, connection_mock, keystone_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value swiftapi.delete_object('container', 'object') connection_obj_mock.delete_object.assert_called_once_with('container', 'object') - def test_delete_object_exc_resource_not_found(self, connection_mock): + def test_delete_object_exc_resource_not_found(self, connection_mock, + keystone_mock): swiftapi = swift.SwiftAPI() exc = swift_exception.ClientException( "Resource not found", http_status=http_client.NOT_FOUND) @@ -152,7 +135,7 @@ class SwiftTestCase(base.TestCase): connection_obj_mock.delete_object.assert_called_once_with('container', 'object') - def test_delete_object_exc(self, connection_mock): + def test_delete_object_exc(self, connection_mock, keystone_mock): swiftapi = 
swift.SwiftAPI() exc = swift_exception.ClientException("Operation error") connection_obj_mock = connection_mock.return_value @@ -162,7 +145,7 @@ class SwiftTestCase(base.TestCase): connection_obj_mock.delete_object.assert_called_once_with('container', 'object') - def test_head_object(self, connection_mock): + def test_head_object(self, connection_mock, keystone_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value expected_head_result = {'a': 'b'} @@ -172,7 +155,7 @@ class SwiftTestCase(base.TestCase): 'object') self.assertEqual(expected_head_result, actual_head_result) - def test_update_object_meta(self, connection_mock): + def test_update_object_meta(self, connection_mock, keystone_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value headers = {'a': 'b'} diff --git a/ironic/tests/unit/conf/__init__.py b/ironic/tests/unit/conf/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ironic/tests/unit/conf/test_auth.py b/ironic/tests/unit/conf/test_auth.py new file mode 100644 index 000000000..369e5d4d3 --- /dev/null +++ b/ironic/tests/unit/conf/test_auth.py @@ -0,0 +1,70 @@ +# Copyright 2016 Mirantis Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from keystoneauth1 import identity as kaidentity +from keystoneauth1 import loading as kaloading +from oslo_config import cfg + +from ironic.conf import auth as ironic_auth +from ironic.tests import base + + +class AuthConfTestCase(base.TestCase): + + def setUp(self): + super(AuthConfTestCase, self).setUp() + self.config(region_name='fake_region', + group='keystone') + self.test_group = 'test_group' + self.cfg_fixture.conf.register_group(cfg.OptGroup(self.test_group)) + ironic_auth.register_auth_opts(self.cfg_fixture.conf, self.test_group) + self.config(auth_type='password', + group=self.test_group) + # NOTE(pas-ha) this is due to auth_plugin options + # being dynamically registered on first load, + # but we need to set the config before + plugin = kaloading.get_plugin_loader('password') + opts = kaloading.get_auth_plugin_conf_options(plugin) + self.cfg_fixture.register_opts(opts, group=self.test_group) + self.config(auth_url='http://127.0.0.1:9898', + username='fake_user', + password='fake_pass', + project_name='fake_tenant', + group=self.test_group) + + def test_add_auth_opts(self): + opts = ironic_auth.add_auth_opts([]) + # check that there is no duplicates + names = {o.dest for o in opts} + self.assertEqual(len(names), len(opts)) + # NOTE(pas-ha) checking for most standard auth and session ones only + expected = {'timeout', 'insecure', 'cafile', 'certfile', 'keyfile', + 'auth_type', 'auth_url', 'username', 'password', + 'tenant_name', 'project_name', 'trust_id', + 'domain_id', 'user_domain_id', 'project_domain_id'} + self.assertTrue(expected.issubset(names)) + + def test_load_auth(self): + auth = ironic_auth.load_auth(self.cfg_fixture.conf, self.test_group) + # NOTE(pas-ha) 'password' auth_plugin is used + self.assertIsInstance(auth, kaidentity.generic.password.Password) + self.assertEqual('http://127.0.0.1:9898', auth.auth_url) + + def test_load_auth_missing_options(self): + # NOTE(pas-ha) 'password' auth_plugin is used, + # so when we set the required 
auth_url to None, + # MissingOption is raised + self.config(auth_url=None, group=self.test_group) + self.assertIsNone(ironic_auth.load_auth( + self.cfg_fixture.conf, self.test_group)) diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index 44626a1a7..9a26c3680 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -31,7 +31,6 @@ from ironic.common import boot_devices from ironic.common import dhcp_factory from ironic.common import exception from ironic.common import image_service -from ironic.common import keystone from ironic.common import states from ironic.common import utils as common_utils from ironic.conductor import task_manager @@ -1381,6 +1380,42 @@ class OtherFunctionTestCase(db_base.DbTestCase): utils.warn_about_unsafe_shred_parameters() self.assertTrue(log_mock.warning.called) + @mock.patch.object(utils, '_get_ironic_session') + @mock.patch('ironic.common.keystone.get_service_url') + def test_get_ironic_api_url_from_config(self, mock_get_url, mock_ks): + mock_sess = mock.Mock() + mock_ks.return_value = mock_sess + fake_api_url = 'http://foo/' + mock_get_url.side_effect = exception.KeystoneFailure + self.config(api_url=fake_api_url, group='conductor') + url = utils.get_ironic_api_url() + # also checking for stripped trailing slash + self.assertEqual(fake_api_url[:-1], url) + self.assertFalse(mock_get_url.called) + + @mock.patch.object(utils, '_get_ironic_session') + @mock.patch('ironic.common.keystone.get_service_url') + def test_get_ironic_api_url_from_keystone(self, mock_get_url, mock_ks): + mock_sess = mock.Mock() + mock_ks.return_value = mock_sess + fake_api_url = 'http://foo/' + mock_get_url.return_value = fake_api_url + self.config(api_url=None, group='conductor') + url = utils.get_ironic_api_url() + # also checking for stripped trailing slash + self.assertEqual(fake_api_url[:-1], url) + 
mock_get_url.assert_called_with(mock_sess) + + @mock.patch.object(utils, '_get_ironic_session') + @mock.patch('ironic.common.keystone.get_service_url') + def test_get_ironic_api_url_fail(self, mock_get_url, mock_ks): + mock_sess = mock.Mock() + mock_ks.return_value = mock_sess + mock_get_url.side_effect = exception.KeystoneFailure() + self.config(api_url=None, group='conductor') + self.assertRaises(exception.InvalidParameterValue, + utils.get_ironic_api_url) + class VirtualMediaDeployUtilsTestCase(db_base.DbTestCase): @@ -1923,11 +1958,12 @@ class AgentMethodsTestCase(db_base.DbTestCase): self.assertEqual('fake_agent', options['ipa-driver-name']) self.assertEqual(0, options['coreos.configdrive']) - @mock.patch.object(keystone, 'get_service_url', autospec=True) - def test_build_agent_options_keystone(self, get_url_mock): - + @mock.patch.object(utils, '_get_ironic_session') + def test_build_agent_options_keystone(self, session_mock): self.config(api_url=None, group='conductor') - get_url_mock.return_value = 'api-url' + sess = mock.Mock() + sess.get_endpoint.return_value = 'api-url' + session_mock.return_value = sess options = utils.build_agent_options(self.node) self.assertEqual('api-url', options['ipa-api-url']) self.assertEqual('fake_agent', options['ipa-driver-name']) diff --git a/ironic/tests/unit/drivers/modules/test_inspector.py b/ironic/tests/unit/drivers/modules/test_inspector.py index 132a2e4a1..7124cf4bb 100644 --- a/ironic/tests/unit/drivers/modules/test_inspector.py +++ b/ironic/tests/unit/drivers/modules/test_inspector.py @@ -16,7 +16,6 @@ import mock from ironic.common import driver_factory from ironic.common import exception -from ironic.common import keystone from ironic.common import states from ironic.conductor import task_manager from ironic.drivers.modules import inspector @@ -128,12 +127,17 @@ class InspectHardwareTestCase(BaseTestCase): task.process_event.assert_called_once_with('fail') -@mock.patch.object(keystone, 'get_admin_auth_token', 
lambda: 'the token') @mock.patch.object(client, 'get_status') class CheckStatusTestCase(BaseTestCase): def setUp(self): super(CheckStatusTestCase, self).setUp() self.node.provision_state = states.INSPECTING + mock_session = mock.Mock() + mock_session.get_token.return_value = 'the token' + sess_patch = mock.patch.object(inspector, '_get_inspector_session', + return_value=mock_session) + sess_patch.start() + self.addCleanup(sess_patch.stop) def test_not_inspecting(self, mock_get): self.node.provision_state = states.MANAGEABLE diff --git a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py index 20df419a1..56ca5d46b 100644 --- a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py +++ b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py @@ -27,7 +27,6 @@ from oslo_utils import fileutils from ironic.common import dhcp_factory from ironic.common import driver_factory from ironic.common import exception -from ironic.common import keystone from ironic.common import pxe_utils from ironic.common import states from ironic.common import utils @@ -446,38 +445,22 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase): self.assertEqual(states.ACTIVE, self.node.target_provision_state) self.assertIsNotNone(self.node.last_error) - @mock.patch.object(keystone, 'get_service_url', autospec=True) - def test_validate_good_api_url_from_config_file(self, mock_ks): - # not present in the keystone catalog - mock_ks.side_effect = exception.KeystoneFailure - self.config(group='conductor', api_url='http://foo') + @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url') + def test_validate_good_api_url(self, mock_get_url): + mock_get_url.return_value = 'http://127.0.0.1:1234' with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: iscsi_deploy.validate(task) - self.assertFalse(mock_ks.called) + mock_get_url.assert_called_once_with() - @mock.patch.object(keystone, 'get_service_url', 
autospec=True) - def test_validate_good_api_url_from_keystone(self, mock_ks): - # present in the keystone catalog - mock_ks.return_value = 'http://127.0.0.1:1234' - # not present in the config file - self.config(group='conductor', api_url=None) - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - iscsi_deploy.validate(task) - mock_ks.assert_called_once_with() - - @mock.patch.object(keystone, 'get_service_url', autospec=True) - def test_validate_fail_no_api_url(self, mock_ks): - # not present in the keystone catalog - mock_ks.side_effect = exception.KeystoneFailure - # not present in the config file - self.config(group='conductor', api_url=None) + @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url') + def test_validate_fail_no_api_url(self, mock_get_url): + mock_get_url.side_effect = exception.InvalidParameterValue('Ham!') with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: self.assertRaises(exception.InvalidParameterValue, iscsi_deploy.validate, task) - mock_ks.assert_called_once_with() + mock_get_url.assert_called_once_with() def test_validate_invalid_root_device_hints(self): with task_manager.acquire(self.context, self.node.uuid, diff --git a/releasenotes/notes/keystone-auth-3155762c524e44df.yaml b/releasenotes/notes/keystone-auth-3155762c524e44df.yaml new file mode 100644 index 000000000..0dfaf818a --- /dev/null +++ b/releasenotes/notes/keystone-auth-3155762c524e44df.yaml @@ -0,0 +1,43 @@ +--- +upgrade: + - | + New way of configuring access credentials for OpenStack services clients. + For each service both Keystone session options + (timeout, SSL-related ones) and Keystone auth_plugin options + (auth_url, auth_type and correspondig auth_plugin options) + should be specified in the config section for this service. 
+ Config section affected are + + * ``[neutron]`` for Neutron service user + * ``[glance]`` for Glance service user + * ``[swift]`` for Swift service user + * ``[inspector]`` for Ironic Inspector service user + * ``[service_catalog]`` *new section* for Ironic service user, + used to discover Ironic endpoint from Keystone Catalog + + This enables fine tuning of authentification for each service. + + Backward-compatible options handling is provided + using values from ``[keystone_authtoken]`` config section, + but operators are advised to switch to the new config options. + For more information on sessions, auth plugins and their settings, + please refer to _http://docs.openstack.org/developer/keystoneauth/ + + - | + Small change in semantics of default for ``[neutron]url`` option + + * default is changed to None. + * In case when [neutron]auth_strategy is ``noauth``, + default means use ``http://$my_ip:9696``. + * In case when [neutron]auth_strategy is ``keystone``, + default means to resolve the endpoint from Keystone Catalog. + + - New config section ``[service_catalog]`` for access credentials used + to discover Ironic API URL from Keystone Catalog. + Previousely credentials from ``[keystone_authtoken]`` section were used, + which is now deprecated for such purpose. +fixes: + - Do not rely on keystonemiddleware config options for instantiating + clients for other OpenStack services. + This allows changing keystonemiddleware options from legacy ones + and thus support Keystone V3 for token validation. 
diff --git a/requirements.txt b/requirements.txt index 5a555fd92..7958bd976 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ netaddr!=0.7.16,>=0.7.12 # BSD paramiko>=2.0 # LGPLv2.1+ python-neutronclient>=4.2.0 # Apache-2.0 python-glanceclient>=2.0.0 # Apache-2.0 -python-keystoneclient!=1.8.0,!=2.1.0,>=1.7.0 # Apache-2.0 +keystoneauth1>=2.10.0 # Apache-2.0 ironic-lib>=2.0.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 pytz>=2013.6 # MIT From ff5347b3df56a8bcd93dc7c56318c3910730fce3 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Wed, 23 Mar 2016 18:06:25 +0200 Subject: [PATCH 127/166] Update devstack plugin with new auth options Change-Id: I0a12f010d5b4325ee707bf674adb4813a8721c05 --- devstack/lib/ironic | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index c22f8516d..64cc041b1 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -561,19 +561,9 @@ function configure_ironic { # API specific configuration. function configure_ironic_api { iniset $IRONIC_CONF_FILE DEFAULT auth_strategy $IRONIC_AUTH_STRATEGY + configure_auth_token_middleware $IRONIC_CONF_FILE ironic $IRONIC_AUTH_CACHE_DIR/api iniset $IRONIC_CONF_FILE oslo_policy policy_file $IRONIC_POLICY_JSON - # TODO(Yuki Nishiwaki): This is a temporary work-around until Ironic is fixed(bug#1422632). - # These codes need to be changed to use the function of configure_auth_token_middleware - # after Ironic conforms to the new auth plugin. 
- iniset $IRONIC_CONF_FILE keystone_authtoken identity_uri $KEYSTONE_AUTH_URI - iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0 - iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic - iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_PROJECT_NAME - iniset $IRONIC_CONF_FILE keystone_authtoken cafile $SSL_BUNDLE_FILE - iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api - iniset_rpc_backend ironic $IRONIC_CONF_FILE iniset $IRONIC_CONF_FILE api port $IRONIC_SERVICE_PORT @@ -582,9 +572,35 @@ function configure_ironic_api { cp -p $IRONIC_DIR/etc/ironic/policy.json $IRONIC_POLICY_JSON } +function configure_auth_for { + local service_config_section + service_config_section=$1 + iniset $IRONIC_CONF_FILE $service_config_section auth_type password + iniset $IRONIC_CONF_FILE $service_config_section auth_url $KEYSTONE_SERVICE_URI + iniset $IRONIC_CONF_FILE $service_config_section username ironic + iniset $IRONIC_CONF_FILE $service_config_section password $SERVICE_PASSWORD + iniset $IRONIC_CONF_FILE $service_config_section project_name $SERVICE_PROJECT_NAME + iniset $IRONIC_CONF_FILE $service_config_section user_domain_id default + iniset $IRONIC_CONF_FILE $service_config_section project_domain_id default + iniset $IRONIC_CONF_FILE $service_config_section cafile $SSL_BUNDLE_FILE + +} + # configure_ironic_conductor() - Is used by configure_ironic(). # Sets conductor specific settings. 
function configure_ironic_conductor { + + # set keystone region for all services + iniset $IRONIC_CONF_FILE keystone region_name $REGION_NAME + + # set keystone auth plugin options for services + configure_auth_for neutron + configure_auth_for swift + configure_auth_for glance + configure_auth_for inspector + # this one is needed for lookup of Ironic API endpoint via Keystone + configure_auth_for service_catalog + cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR local ironic_rootwrap From 288f4e617b21a18dd29eaeb8f47b07191bade2b0 Mon Sep 17 00:00:00 2001 From: Varun Gadiraju Date: Tue, 2 Aug 2016 22:45:13 +0000 Subject: [PATCH 128/166] Fix for "db type could not be determined" error message If tests are run under py27 then run under py34 or py35, user may get `db type could not be determined` error. This patch adds a note to the dev-quickstart guide telling user to remove the file `.testrepository/times.dbm` and then run the py34 or py35 test to work around this error. Change-Id: I57b583eaa8586d14cee730eefc515ad064ac8b32 Closes-Bug: #1229445 (regarding Ironic) --- doc/source/dev/dev-quickstart.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/source/dev/dev-quickstart.rst b/doc/source/dev/dev-quickstart.rst index 79fa7ece8..fabf86d37 100644 --- a/doc/source/dev/dev-quickstart.rst +++ b/doc/source/dev/dev-quickstart.rst @@ -105,6 +105,15 @@ To run the unit tests under py34 and also run the pep8 tests:: # run all tests (unit under py34 and pep8) tox -epy34 -epep8 +.. note:: + If tests are run under py27 and then run under py34 or py35 the following error may occur:: + + db type could not be determined + ERROR: InvocationError: '/home/ubuntu/ironic/.tox/py35/bin/ostestr' + + To overcome this error remove the file `.testrepository/times.dbm` + and then run the py34 or py35 test. + You may pass options to the test programs using positional arguments. 
To run a specific unit test, this passes the -r option and desired test (regex string) to `os-testr `_:: From 30fc692e3a8a10166a0745b84ff8affb73f75e9a Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Wed, 3 Aug 2016 21:33:27 +0000 Subject: [PATCH 129/166] Revert "Devstack should use a prebuilt ramdisk by default" This reverts commit 5b752d258a0402abfd7f2511874f42bdf7c77dc1. Due to devstack-gate only setting IRONIC_BUILD_DEPLOY_RAMDISK=false, this will prevent IPA source jobs from ever building a new ramdisk. Change-Id: I3e43c08b7f8e9d41f420b12475c9ce74cb860df9 --- devstack/lib/ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 12d036b4c..c22f8516d 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -131,7 +131,7 @@ IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/} IRONIC_VM_LOG_ROTATE=$(trueorfalse True IRONIC_VM_LOG_ROTATE) # Whether to build the ramdisk or download a prebuilt one. -IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse False IRONIC_BUILD_DEPLOY_RAMDISK) +IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse True IRONIC_BUILD_DEPLOY_RAMDISK) # Ironic IPA ramdisk type, supported types are: IRONIC_SUPPORTED_RAMDISK_TYPES_RE="^(coreos|tinyipa|dib)$" From e9ea064b5fbc18ad42e95fbca4dd2677da010b5f Mon Sep 17 00:00:00 2001 From: Sam Betts Date: Wed, 3 Aug 2016 17:11:54 +0100 Subject: [PATCH 130/166] Create a custom StringField that can process functions This patch enables a custom StringField that can receive functions as values so that they can be dynamically calculated during runtime. It also ensures that hashing the fields remains consistant, so the hashing of the VersionObjects that use function based defaults remains consistant. 
Change-Id: Idb8fb5d2e2cec4c36fafeb18701397cc4443be8c Closes-Bug: #1609455 --- ironic/objects/fields.py | 33 +++++++++++++++++ ironic/objects/node.py | 9 ++--- ironic/tests/unit/objects/test_fields.py | 44 +++++++++++++++++++++++ ironic/tests/unit/objects/test_objects.py | 2 +- 4 files changed, 80 insertions(+), 8 deletions(-) diff --git a/ironic/objects/fields.py b/ironic/objects/fields.py index 5c2565dc2..c1add2c63 100644 --- a/ironic/objects/fields.py +++ b/ironic/objects/fields.py @@ -14,6 +14,8 @@ # under the License. import ast +import hashlib +import inspect import six from oslo_versionedobjects import fields as object_fields @@ -33,6 +35,37 @@ class StringField(object_fields.StringField): pass +class StringAcceptsCallable(object_fields.String): + @staticmethod + def coerce(obj, attr, value): + if callable(value): + value = value() + return super(StringAcceptsCallable, StringAcceptsCallable).coerce( + obj, attr, value) + + +class StringFieldThatAcceptsCallable(object_fields.StringField): + """Custom StringField object that allows for functions as default + + In some cases we need to allow for dynamic defaults based on configuration + options, this StringField object allows for a function to be passed as a + default, and will only process it at the point the field is coerced + """ + + AUTO_TYPE = StringAcceptsCallable() + + def __repr__(self): + default = self._default + if (self._default != object_fields.UnspecifiedDefault and + callable(self._default)): + default = "%s-%s" % ( + self._default.__name__, + hashlib.md5(inspect.getsource( + self._default).encode()).hexdigest()) + return '%s(default=%s,nullable=%s)' % (self._type.__class__.__name__, + default, self._nullable) + + class DateTimeField(object_fields.DateTimeField): pass diff --git a/ironic/objects/node.py b/ironic/objects/node.py index 6bb03d5c8..b73eed879 100644 --- a/ironic/objects/node.py +++ b/ironic/objects/node.py @@ -117,15 +117,10 @@ class Node(base.IronicObject, 
object_base.VersionedObjectDictCompat): 'extra': object_fields.FlexibleDictField(nullable=True), - 'network_interface': object_fields.StringField( - nullable=False, default=_default_network_interface()), + 'network_interface': object_fields.StringFieldThatAcceptsCallable( + nullable=False, default=_default_network_interface), } - def __init__(self, context=None, **kwargs): - self.fields['network_interface']._default = ( - _default_network_interface()) - super(Node, self).__init__(context, **kwargs) - def _validate_property_values(self, properties): """Check if the input of local_gb, cpus and memory_mb are valid. diff --git a/ironic/tests/unit/objects/test_fields.py b/ironic/tests/unit/objects/test_fields.py index a9d736ac9..1291c619c 100644 --- a/ironic/tests/unit/objects/test_fields.py +++ b/ironic/tests/unit/objects/test_fields.py @@ -13,6 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. +import hashlib +import inspect from ironic.common import exception from ironic.objects import fields @@ -61,3 +63,45 @@ class TestFlexibleDictField(test_base.TestCase): # nullable self.field = fields.FlexibleDictField(nullable=True) self.assertEqual({}, self.field.coerce('obj', 'attr', None)) + + +class TestStringFieldThatAcceptsCallable(test_base.TestCase): + + def setUp(self): + super(TestStringFieldThatAcceptsCallable, self).setUp() + + def test_default_function(): + return "default value" + + self.test_default_function_hash = hashlib.md5( + inspect.getsource(test_default_function).encode()).hexdigest() + self.field = fields.StringFieldThatAcceptsCallable( + default=test_default_function) + + def test_coerce_string(self): + self.assertEqual("value", self.field.coerce('obj', 'attr', "value")) + + def test_coerce_function(self): + def test_function(): + return "value" + self.assertEqual("value", + self.field.coerce('obj', 'attr', test_function)) + + def test_coerce_invalid_type(self): + self.assertRaises(ValueError, 
self.field.coerce, + 'obj', 'attr', ('invalid', 'tuple')) + + def test_coerce_function_invalid_type(self): + def test_function(): + return ('invalid', 'tuple',) + self.assertRaises(ValueError, + self.field.coerce, 'obj', 'attr', test_function) + + def test_coerce_default_as_function(self): + self.assertEqual("default value", + self.field.coerce('obj', 'attr', None)) + + def test__repr__includes_default_function_name_and_source_hash(self): + expected = ('StringAcceptsCallable(default=test_default_function-%s,' + 'nullable=False)' % self.test_default_function_hash) + self.assertEqual(expected, repr(self.field)) diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py index bd21bd950..5202d80fe 100644 --- a/ironic/tests/unit/objects/test_objects.py +++ b/ironic/tests/unit/objects/test_objects.py @@ -404,7 +404,7 @@ class TestObject(_LocalTest, _TestObject): # version bump. It is md5 hash of object fields and remotable methods. # The fingerprint values should only be changed if there is a version bump. 
expected_object_fingerprints = { - 'Node': '1.18-8cdb6010014b29f17ca636bef72b7800', + 'Node': '1.18-37a1d39ba8a4957f505dda936ac9146b', 'MyObj': '1.5-4f5efe8f0fcaf182bbe1c7fe3ba858db', 'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905', 'Port': '1.6-609504503d68982a10f495659990084b', From bedfbf311bc6ab7b7e442681748809f706ccace6 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 4 Aug 2016 02:35:37 +0000 Subject: [PATCH 131/166] Updated from global requirements Change-Id: I886421684ea169cd7a45dbe4441df5ffdd1272a1 --- requirements.txt | 6 +++--- test-requirements.txt | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index 7958bd976..7a253909d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ netaddr!=0.7.16,>=0.7.12 # BSD paramiko>=2.0 # LGPLv2.1+ python-neutronclient>=4.2.0 # Apache-2.0 python-glanceclient>=2.0.0 # Apache-2.0 -keystoneauth1>=2.10.0 # Apache-2.0 +keystoneauth1>=2.10.0 # Apache-2.0 ironic-lib>=2.0.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 pytz>=2013.6 # MIT @@ -20,10 +20,10 @@ stevedore>=1.16.0 # Apache-2.0 pysendfile>=2.0.0 # MIT websockify>=0.8.0 # LGPLv3 oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config>=3.12.0 # Apache-2.0 +oslo.config>=3.14.0 # Apache-2.0 oslo.context!=2.6.0,>=2.4.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 -oslo.rootwrap>=2.0.0 # Apache-2.0 +oslo.rootwrap>=5.0.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 diff --git a/test-requirements.txt b/test-requirements.txt index 2972b5e9f..4eb4e4048 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,7 +11,7 @@ PyMySQL>=0.6.2 # MIT License iso8601>=0.1.11 # MIT oslotest>=1.10.0 # Apache-2.0 psycopg2>=2.5 # LGPL/ZPL -python-ironicclient>=1.1.0 # Apache-2.0 +python-ironicclient>=1.6.0 # Apache-2.0 python-subunit>=0.0.18 # Apache-2.0/BSD testtools>=1.4.0 # MIT os-testr>=0.7.0 # Apache-2.0 From 
906af9e101c73c52b190a217f4d0d19bb4496af9 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Mon, 1 Aug 2016 14:02:08 +0000 Subject: [PATCH 132/166] Test that network_interface is explicitly set on POST/PATCH This patch adds unit tests to ensure that node POST and PATCH requests always end up with the network interface explicitly set, based on the previous patch that changes the node object to always set this attribute. Change-Id: I934946892fc1f43a899be8506e1bd779a1429a68 Partial-Bug: #1608511 --- ironic/tests/unit/api/v1/test_nodes.py | 66 ++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 4 deletions(-) diff --git a/ironic/tests/unit/api/v1/test_nodes.py b/ironic/tests/unit/api/v1/test_nodes.py index 6250e7193..476377d00 100644 --- a/ironic/tests/unit/api/v1/test_nodes.py +++ b/ironic/tests/unit/api/v1/test_nodes.py @@ -36,6 +36,7 @@ from ironic.common import boot_devices from ironic.common import exception from ironic.common import states from ironic.conductor import rpcapi +from ironic.conf import CONF from ironic import objects from ironic.tests import base from ironic.tests.unit.api import base as test_api_base @@ -1519,6 +1520,43 @@ class TestPatch(test_api_base.BaseApiTest): self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) + def test_update_network_interface_null_sets_default(self): + CONF.set_override('default_network_interface', 'neutron') + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + network_interface='flat') + self.mock_update_node.return_value = node + network_interface = 'neutron' + headers = {api_base.Version.string: str(api_v1.MAX_VER)} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/network_interface', + 'value': None, + 'op': 'add'}], + headers=headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + # check the node we pass to updated_node 
+ node_arg = self.mock_update_node.call_args[0][1] + self.assertEqual(network_interface, node_arg['network_interface']) + + def test_update_network_interface_remove_sets_default(self): + CONF.set_override('default_network_interface', 'neutron') + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + network_interface='flat') + self.mock_update_node.return_value = node + network_interface = 'neutron' + headers = {api_base.Version.string: str(api_v1.MAX_VER)} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/network_interface', + 'op': 'remove'}], + headers=headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + # check the node we pass to updated_node + node_arg = self.mock_update_node.call_args[0][1] + self.assertEqual(network_interface, node_arg['network_interface']) + def test_update_network_interface_old_api(self): node = obj_utils.create_test_node(self.context, uuid=uuidutils.generate_uuid()) @@ -1604,13 +1642,16 @@ class TestPost(test_api_base.BaseApiTest): self.addCleanup(p.stop) @mock.patch.object(timeutils, 'utcnow') - def test_create_node(self, mock_utcnow): - ndict = test_api_utils.post_get_test_node() + def _test_create_node(self, mock_utcnow, headers=None, **kwargs): + headers = headers or {} + ndict = test_api_utils.post_get_test_node(**kwargs) test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time - response = self.post_json('/nodes', ndict) + response = self.post_json('/nodes', ndict, + headers=headers) self.assertEqual(http_client.CREATED, response.status_int) - result = self.get_json('/nodes/%s' % ndict['uuid']) + result = self.get_json('/nodes/%s' % ndict['uuid'], + headers=headers) self.assertEqual(ndict['uuid'], result['uuid']) self.assertFalse(result['updated_at']) return_created_at = timeutils.parse_isotime( @@ -1621,6 +1662,23 @@ class TestPost(test_api_base.BaseApiTest): expected_location = 
'/v1/nodes/%s' % ndict['uuid'] self.assertEqual(urlparse.urlparse(response.location).path, expected_location) + return result + + def test_create_node(self): + self._test_create_node() + + def test_create_node_explicit_network_interface(self): + headers = {api_base.Version.string: '1.20'} + result = self._test_create_node(headers=headers, + network_interface='neutron') + self.assertEqual('neutron', result['network_interface']) + + def test_create_node_default_network_interface(self): + CONF.set_override('default_network_interface', 'neutron') + CONF.set_override('enabled_network_interfaces', 'flat,noop,neutron') + headers = {api_base.Version.string: '1.20'} + result = self._test_create_node(headers=headers) + self.assertEqual('neutron', result['network_interface']) def test_create_node_name_empty_invalid(self): ndict = test_api_utils.post_get_test_node(name='') From 1514b2a2f68453dc8da2ea87eacd97f6f7c68ca4 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Mon, 1 Aug 2016 14:23:25 +0000 Subject: [PATCH 133/166] Add a data migration to fill node.network_interface This fills node.network_interface based on the default logic that the node update/create calls follow, so that we know network_interface is populated when this code is deployed. Also adds to existing release note about how network_interface is set, to remind people to check the logic and configs to make sure they get the expected result. 
Change-Id: I09a42c8e54d7782c591415e53fccade972ae8bdb Closes-Bug: #1608511 --- ...f6dfedf_populate_node_network_interface.py | 44 +++++++++++++++++++ .../unit/db/sqlalchemy/test_migrations.py | 32 ++++++++++++++ ...d-network-interfaces-0a13c4aba252573e.yaml | 10 +++++ 3 files changed, 86 insertions(+) create mode 100644 ironic/db/sqlalchemy/alembic/versions/c14cef6dfedf_populate_node_network_interface.py diff --git a/ironic/db/sqlalchemy/alembic/versions/c14cef6dfedf_populate_node_network_interface.py b/ironic/db/sqlalchemy/alembic/versions/c14cef6dfedf_populate_node_network_interface.py new file mode 100644 index 000000000..ac49571f7 --- /dev/null +++ b/ironic/db/sqlalchemy/alembic/versions/c14cef6dfedf_populate_node_network_interface.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Populate node.network_interface + +Revision ID: c14cef6dfedf +Revises: dd34e1f1303b +Create Date: 2016-08-01 14:05:24.197314 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'c14cef6dfedf' +down_revision = 'dd34e1f1303b' + +from alembic import op +from sqlalchemy import String +from sqlalchemy.sql import table, column, null + +from ironic.conf import CONF + + +node = table('nodes', + column('uuid', String(36)), + column('network_interface', String(255))) + + +def upgrade(): + network_iface = (CONF.default_network_interface or + ('flat' if CONF.dhcp.dhcp_provider == 'neutron' + else 'noop')) + op.execute( + node.update().where( + node.c.network_interface == null()).values( + {'network_interface': network_iface})) diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py index 8b1acf158..b456ddc38 100644 --- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py +++ b/ironic/tests/unit/db/sqlalchemy/test_migrations.py @@ -34,6 +34,7 @@ For postgres on Ubuntu this can be done with the following commands: """ +import collections import contextlib from alembic import script @@ -440,6 +441,37 @@ class MigrationCheckersMixin(object): self.assertIsInstance(nodes.c.resource_class.type, sqlalchemy.types.String) + def _pre_upgrade_c14cef6dfedf(self, engine): + # add some nodes. 
+ nodes = db_utils.get_table(engine, 'nodes') + data = [{'uuid': uuidutils.generate_uuid(), + 'network_interface': None}, + {'uuid': uuidutils.generate_uuid(), + 'network_interface': None}, + {'uuid': uuidutils.generate_uuid(), + 'network_interface': 'neutron'}] + nodes.insert().values(data).execute() + return data + + def _check_c14cef6dfedf(self, engine, data): + nodes = db_utils.get_table(engine, 'nodes') + result = engine.execute(nodes.select()) + counts = collections.defaultdict(int) + + def _was_inserted(uuid): + for row in data: + if row['uuid'] == uuid: + return True + + for row in result: + if _was_inserted(row['uuid']): + counts[row['network_interface']] += 1 + + # using default config values, we should have 2 flat and one neutron + self.assertEqual(2, counts['flat']) + self.assertEqual(1, counts['neutron']) + self.assertEqual(0, counts[None]) + def test_upgrade_and_version(self): with patch_with_engine(self.engine): self.migration_api.upgrade('head') diff --git a/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml b/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml index d77c5ade4..7522fa1c1 100644 --- a/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml +++ b/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml @@ -27,3 +27,13 @@ upgrade: set. If it is not set, the network interface is determined by looking at the ``[dhcp]dhcp_provider`` value. If it is ``neutron`` - ``flat`` network interface is the default, ``noop`` otherwise. + The network interface will be set for all nodes without network_interface + already set via a database migration. This will be set following the logic + above. When running database migrations for an existing deployment, it's + important to check the above configuration options to ensure the existing + nodes will have the expected network_interface. If + ``[DEFAULT]default_network_interface`` is not set, everything should go as + expected. 
If it is set, ensure that it is set to the value that you wish + existing nodes to use. + - Note that if ``[DEFAULT]default_network_interface`` is set, it must be set + in the configuration file for both the API and conductor hosts. From e481aab844b3e79b02ee25865567361fa86309c1 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Wed, 3 Aug 2016 12:03:43 +0300 Subject: [PATCH 134/166] Follow up to keystoneauth patch - fix typos in release notes - fix typos and style in code Change-Id: Ic5222c54c6338f9492b8d6c1f463e6526bd73449 --- ironic/cmd/conductor.py | 4 ++-- ironic/conf/service_catalog.py | 4 ++-- releasenotes/notes/keystone-auth-3155762c524e44df.yaml | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ironic/cmd/conductor.py b/ironic/cmd/conductor.py index 794b52ce3..14728a5a2 100644 --- a/ironic/cmd/conductor.py +++ b/ironic/cmd/conductor.py @@ -41,8 +41,8 @@ SECTIONS_WITH_AUTH = ( def _check_auth_options(conf): missing = [] for section in SECTIONS_WITH_AUTH: - if not auth.load_auth(conf, section): - missing.append('[%s]' % section) + if not auth.load_auth(conf, section): + missing.append('[%s]' % section) if missing: link = "http://docs.openstack.org/releasenotes/ironic/newton.html" LOG.warning(_LW("Failed to load authentification credentials from " diff --git a/ironic/conf/service_catalog.py b/ironic/conf/service_catalog.py index 610d20e1d..56b9d0a41 100644 --- a/ironic/conf/service_catalog.py +++ b/ironic/conf/service_catalog.py @@ -18,7 +18,7 @@ from oslo_config import cfg from ironic.common.i18n import _ from ironic.conf import auth -SERVCIE_CATALOG_GROUP = cfg.OptGroup( +SERVICE_CATALOG_GROUP = cfg.OptGroup( 'service_catalog', title='Access info for Ironic service user', help=_('Holds credentials and session options to access ' @@ -26,7 +26,7 @@ SERVCIE_CATALOG_GROUP = cfg.OptGroup( def register_opts(conf): - auth.register_auth_opts(conf, SERVCIE_CATALOG_GROUP.name) + auth.register_auth_opts(conf, SERVICE_CATALOG_GROUP.name) 
def list_opts(): diff --git a/releasenotes/notes/keystone-auth-3155762c524e44df.yaml b/releasenotes/notes/keystone-auth-3155762c524e44df.yaml index 0dfaf818a..36e4ad614 100644 --- a/releasenotes/notes/keystone-auth-3155762c524e44df.yaml +++ b/releasenotes/notes/keystone-auth-3155762c524e44df.yaml @@ -4,7 +4,7 @@ upgrade: New way of configuring access credentials for OpenStack services clients. For each service both Keystone session options (timeout, SSL-related ones) and Keystone auth_plugin options - (auth_url, auth_type and correspondig auth_plugin options) + (auth_url, auth_type and corresponding auth_plugin options) should be specified in the config section for this service. Config section affected are @@ -15,7 +15,7 @@ upgrade: * ``[service_catalog]`` *new section* for Ironic service user, used to discover Ironic endpoint from Keystone Catalog - This enables fine tuning of authentification for each service. + This enables fine tuning of authentication for each service. Backward-compatible options handling is provided using values from ``[keystone_authtoken]`` config section, @@ -34,7 +34,7 @@ upgrade: - New config section ``[service_catalog]`` for access credentials used to discover Ironic API URL from Keystone Catalog. - Previousely credentials from ``[keystone_authtoken]`` section were used, + Previously credentials from ``[keystone_authtoken]`` section were used, which is now deprecated for such purpose. fixes: - Do not rely on keystonemiddleware config options for instantiating From c7da7a69250fcffb6841d36710770608d603bb6a Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Fri, 3 Jun 2016 15:43:12 -0700 Subject: [PATCH 135/166] Add keystone policy support to Ironic Implements more fine-grained policy support within our API service, following the oslo policy-in-code spec, while maintaining compatibility with the previous default policy.json file. 
An empty policy.json file is included, along with a sample file listing all supported policy settings and their default values. A new tox target "genpolicy" has been added to ease automation of sample policy file generation. All calls to policy.enforce() have been replaced with policy.authorize() to avoid silent failures when a rule is undefined, because enforce() does not raise() if the target rule does not exist. NOTE: policy.enforce() is not removed by this patch, but a deprecation warning will be logged if this method is invoked. Updates unit test coverage for the new authorize() method, as well as more general unit test updates for some of the new rules. Partial-bug: #1526752 Change-Id: Ie4398f840601d027e2fe209c17d854421687c7b7 --- devstack/lib/ironic | 27 ++- etc/ironic/policy.json | 6 +- etc/ironic/policy.json.sample | 72 ++++++ ironic/api/acl.py | 34 --- ironic/api/app.py | 12 +- ironic/api/config.py | 1 - ironic/api/controllers/v1/chassis.py | 19 ++ ironic/api/controllers/v1/driver.py | 22 ++ ironic/api/controllers/v1/node.py | 64 ++++++ ironic/api/controllers/v1/port.py | 19 ++ ironic/api/hooks.py | 29 +-- ironic/common/exception.py | 4 +- ironic/common/policy.py | 210 +++++++++++++++++- ironic/tests/unit/api/base.py | 7 +- ironic/tests/unit/api/test_acl.py | 3 +- ironic/tests/unit/api/test_audit.py | 6 +- ironic/tests/unit/api/test_hooks.py | 40 +--- ironic/tests/unit/common/test_policy.py | 99 ++++++--- ironic/tests/unit/fake_policy.py | 42 ---- ironic/tests/unit/policy_fixture.py | 15 +- ...ement-policy-in-code-cbb0216ef5f8224f.yaml | 22 ++ setup.cfg | 3 + tox.ini | 6 + 23 files changed, 563 insertions(+), 199 deletions(-) create mode 100644 etc/ironic/policy.json.sample delete mode 100644 ironic/api/acl.py delete mode 100644 ironic/tests/unit/fake_policy.py create mode 100644 releasenotes/notes/implement-policy-in-code-cbb0216ef5f8224f.yaml diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 64cc041b1..a6df583cf 100644 --- 
a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -722,24 +722,31 @@ function create_ironic_cache_dir { # create_ironic_accounts() - Set up common required ironic accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service ironic admin # if enabled +# service ironic admin +# service nova baremetal_admin +# demo demo baremetal_observer function create_ironic_accounts { - - # Ironic - if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then - # Get ironic user if exists - - # NOTE(Shrews): This user MUST have admin level privileges! - create_service_user "ironic" "admin" - + if [[ "$ENABLED_SERVICES" =~ "ir-api" && "$ENABLED_SERVICES" =~ "key" ]]; then + # Define service and endpoints in Keystone get_or_create_service "ironic" "baremetal" "Ironic baremetal provisioning service" get_or_create_endpoint "baremetal" \ "$REGION_NAME" \ "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" + + # Create ironic service user + # TODO(deva): make this work with the 'service' role + # https://bugs.launchpad.net/ironic/+bug/1605398 + create_service_user "ironic" "admin" + + # Create additional bare metal tenant and roles + get_or_create_role baremetal_admin + get_or_create_role baremetal_observer + get_or_add_user_project_role baremetal_admin nova $SERVICE_PROJECT_NAME + get_or_add_user_project_role baremetal_observer demo demo fi } diff --git a/etc/ironic/policy.json b/etc/ironic/policy.json index f7726778e..1ae73ec93 100644 --- a/etc/ironic/policy.json +++ b/etc/ironic/policy.json @@ -1,5 +1,5 @@ +# Beginning with the Newton release, you may leave this file empty +# to use default policy defined in code. 
{ - "admin_api": "role:admin or role:administrator", - "show_password": "!", - "default": "rule:admin_api" + } diff --git a/etc/ironic/policy.json.sample b/etc/ironic/policy.json.sample new file mode 100644 index 000000000..888c0f0c0 --- /dev/null +++ b/etc/ironic/policy.json.sample @@ -0,0 +1,72 @@ +# Legacy rule for cloud admin access +"admin_api": "role:admin or role:administrator" +# Internal flag for public API routes +"public_api": "is_public_api:True" +# Show or mask passwords in API responses +"show_password": "!" +# May be used to restrict access to specific tenants +"is_member": "tenant:demo or tenant:baremetal" +# Read-only API access +"is_observer": "rule:is_member and (role:observer or role:baremetal_observer)" +# Full read/write API access +"is_admin": "rule:admin_api or (rule:is_member and role:baremetal_admin)" +# Retrieve Node records +"baremetal:node:get": "rule:is_admin or rule:is_observer" +# Retrieve Node boot device metadata +"baremetal:node:get_boot_device": "rule:is_admin or rule:is_observer" +# View Node power and provision state +"baremetal:node:get_states": "rule:is_admin or rule:is_observer" +# Create Node records +"baremetal:node:create": "rule:is_admin" +# Delete Node records +"baremetal:node:delete": "rule:is_admin" +# Update Node records +"baremetal:node:update": "rule:is_admin" +# Request active validation of Nodes +"baremetal:node:validate": "rule:is_admin" +# Set maintenance flag, taking a Node out of service +"baremetal:node:set_maintenance": "rule:is_admin" +# Clear maintenance flag, placing the Node into service again +"baremetal:node:clear_maintenance": "rule:is_admin" +# Change Node boot device +"baremetal:node:set_boot_device": "rule:is_admin" +# Change Node power status +"baremetal:node:set_power_state": "rule:is_admin" +# Change Node provision status +"baremetal:node:set_provision_state": "rule:is_admin" +# Change Node RAID status +"baremetal:node:set_raid_state": "rule:is_admin" +# Get Node console connection information 
+"baremetal:node:get_console": "rule:is_admin" +# Change Node console status +"baremetal:node:set_console_state": "rule:is_admin" +# Retrieve Port records +"baremetal:port:get": "rule:is_admin or rule:is_observer" +# Create Port records +"baremetal:port:create": "rule:is_admin" +# Delete Port records +"baremetal:port:delete": "rule:is_admin" +# Update Port records +"baremetal:port:update": "rule:is_admin" +# Retrieve Chassis records +"baremetal:chassis:get": "rule:is_admin or rule:is_observer" +# Create Chassis records +"baremetal:chassis:create": "rule:is_admin" +# Delete Chassis records +"baremetal:chassis:delete": "rule:is_admin" +# Update Chassis records +"baremetal:chassis:update": "rule:is_admin" +# View list of available drivers +"baremetal:driver:get": "rule:is_admin or rule:is_observer" +# View driver-specific properties +"baremetal:driver:get_properties": "rule:is_admin or rule:is_observer" +# View driver-specific RAID metadata +"baremetal:driver:get_raid_logical_disk_properties": "rule:is_admin or rule:is_observer" +# Access vendor-specific Node functions +"baremetal:node:vendor_passthru": "rule:is_admin" +# Access vendor-specific Driver functions +"baremetal:driver:vendor_passthru": "rule:is_admin" +# Send heartbeats from IPA ramdisk +"baremetal:node:ipa_heartbeat": "rule:public_api" +# Access IPA ramdisk functions +"baremetal:driver:ipa_lookup": "rule:public_api" diff --git a/ironic/api/acl.py b/ironic/api/acl.py deleted file mode 100644 index ef5532b0c..000000000 --- a/ironic/api/acl.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Access Control Lists (ACL's) control access the API server.""" - -from ironic.api.middleware import auth_token - - -def install(app, conf, public_routes): - """Install ACL check on application. - - :param app: A WSGI application. - :param conf: Settings. Dict'ified and passed to keystonemiddleware - :param public_routes: The list of the routes which will be allowed to - access without authentication. - :return: The same WSGI application with ACL installed. - - """ - return auth_token.AuthTokenMiddleware(app, - conf=dict(conf), - public_api_routes=public_routes) diff --git a/ironic/api/app.py b/ironic/api/app.py index 5621d9759..7c2c401b7 100644 --- a/ironic/api/app.py +++ b/ironic/api/app.py @@ -21,11 +21,11 @@ from oslo_config import cfg import oslo_middleware.cors as cors_middleware import pecan -from ironic.api import acl from ironic.api import config from ironic.api.controllers.base import Version from ironic.api import hooks from ironic.api import middleware +from ironic.api.middleware import auth_token from ironic.common import exception from ironic.conf import CONF @@ -49,9 +49,6 @@ def setup_app(pecan_config=None, extra_hooks=None): if not pecan_config: pecan_config = get_pecan_config() - if pecan_config.app.enable_acl: - app_hooks.append(hooks.TrustedCallHook()) - pecan.configuration.set_config(dict(pecan_config), overwrite=True) app = pecan.make_app( @@ -76,8 +73,10 @@ def setup_app(pecan_config=None, extra_hooks=None): reason=e ) - if pecan_config.app.enable_acl: - app = acl.install(app, cfg.CONF, pecan_config.app.acl_public_routes) + if 
CONF.auth_strategy == "keystone": + app = auth_token.AuthTokenMiddleware( + app, dict(cfg.CONF), + public_api_routes=pecan_config.app.acl_public_routes) # Create a CORS wrapper, and attach ironic-specific defaults that must be # included in all CORS responses. @@ -94,7 +93,6 @@ def setup_app(pecan_config=None, extra_hooks=None): class VersionSelectorApplication(object): def __init__(self): pc = get_pecan_config() - pc.app.enable_acl = (CONF.auth_strategy == 'keystone') self.v1 = setup_app(pecan_config=pc) def __call__(self, environ, start_response): diff --git a/ironic/api/config.py b/ironic/api/config.py index 9a1ff1129..f707f5b4a 100644 --- a/ironic/api/config.py +++ b/ironic/api/config.py @@ -26,7 +26,6 @@ app = { 'modules': ['ironic.api'], 'static_root': '%(confdir)s/public', 'debug': False, - 'enable_acl': True, 'acl_public_routes': [ '/', '/v1', diff --git a/ironic/api/controllers/v1/chassis.py b/ironic/api/controllers/v1/chassis.py index e43841b5f..f5ebab2d3 100644 --- a/ironic/api/controllers/v1/chassis.py +++ b/ironic/api/controllers/v1/chassis.py @@ -31,6 +31,7 @@ from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception from ironic.common.i18n import _ +from ironic.common import policy from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) @@ -207,6 +208,9 @@ class ChassisController(rest.RestController): :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:chassis:get', cdict, cdict) + api_utils.check_allow_specify_fields(fields) if fields is None: fields = _DEFAULT_RETURN_FIELDS @@ -224,6 +228,9 @@ class ChassisController(rest.RestController): :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
""" + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:chassis:get', cdict, cdict) + # /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "chassis": @@ -242,6 +249,9 @@ class ChassisController(rest.RestController): :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:chassis:get', cdict, cdict) + api_utils.check_allow_specify_fields(fields) rpc_chassis = objects.Chassis.get_by_uuid(pecan.request.context, chassis_uuid) @@ -254,6 +264,9 @@ class ChassisController(rest.RestController): :param chassis: a chassis within the request body. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:chassis:create', cdict, cdict) + new_chassis = objects.Chassis(pecan.request.context, **chassis.as_dict()) new_chassis.create() @@ -270,6 +283,9 @@ class ChassisController(rest.RestController): :param chassis_uuid: UUID of a chassis. :param patch: a json PATCH document to apply to this chassis. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:chassis:update', cdict, cdict) + rpc_chassis = objects.Chassis.get_by_uuid(pecan.request.context, chassis_uuid) try: @@ -301,6 +317,9 @@ class ChassisController(rest.RestController): :param chassis_uuid: UUID of a chassis. 
""" + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:chassis:delete', cdict, cdict) + rpc_chassis = objects.Chassis.get_by_uuid(pecan.request.context, chassis_uuid) rpc_chassis.destroy() diff --git a/ironic/api/controllers/v1/driver.py b/ironic/api/controllers/v1/driver.py index 4d327e9b2..fed3f9593 100644 --- a/ironic/api/controllers/v1/driver.py +++ b/ironic/api/controllers/v1/driver.py @@ -26,6 +26,7 @@ from ironic.api.controllers.v1 import types from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception +from ironic.common import policy METRICS = metrics_utils.get_metrics_logger(__name__) @@ -153,6 +154,9 @@ class DriverPassthruController(rest.RestController): :raises: DriverNotFound if the driver name is invalid or the driver cannot be loaded. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:driver:vendor_passthru', cdict, cdict) + if driver_name not in _VENDOR_METHODS: topic = pecan.request.rpcapi.get_topic_for_driver(driver_name) ret = pecan.request.rpcapi.get_driver_vendor_passthru_methods( @@ -172,6 +176,12 @@ class DriverPassthruController(rest.RestController): implementation. :param data: body of data to supply to the specified method. """ + cdict = pecan.request.context.to_dict() + if method == "lookup": + policy.authorize('baremetal:driver:ipa_lookup', cdict, cdict) + else: + policy.authorize('baremetal:driver:vendor_passthru', cdict, cdict) + topic = pecan.request.rpcapi.get_topic_for_driver(driver_name) return api_utils.vendor_passthru(driver_name, method, topic, data=data, driver_passthru=True) @@ -198,6 +208,10 @@ class DriverRaidController(rest.RestController): :raises: DriverNotFound, if driver is not loaded on any of the conductors. 
""" + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:driver:get_raid_logical_disk_properties', + cdict, cdict) + if not api_utils.allow_raid_config(): raise exception.NotAcceptable() @@ -236,6 +250,9 @@ class DriversController(rest.RestController): # will break from a single-line doc string. # This is a result of a bug in sphinxcontrib-pecanwsme # https://github.com/dreamhost/sphinxcontrib-pecanwsme/issues/8 + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:driver:get', cdict, cdict) + driver_list = pecan.request.dbapi.get_active_driver_dict() return DriverList.convert_with_links(driver_list) @@ -247,6 +264,8 @@ class DriversController(rest.RestController): # retrieving a list of drivers using the current sqlalchemy schema, but # this path must be exposed for Pecan to route any paths we might # choose to expose below it. + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:driver:get', cdict, cdict) driver_dict = pecan.request.dbapi.get_active_driver_dict() for name, hosts in driver_dict.items(): @@ -266,6 +285,9 @@ class DriversController(rest.RestController): :raises: DriverNotFound (HTTP 404) if the driver name is invalid or the driver cannot be loaded. 
""" + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:driver:get_properties', cdict, cdict) + if driver_name not in _DRIVER_PROPERTIES: topic = pecan.request.rpcapi.get_topic_for_driver(driver_name) properties = pecan.request.rpcapi.get_driver_properties( diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py index d714ae839..679234009 100644 --- a/ironic/api/controllers/v1/node.py +++ b/ironic/api/controllers/v1/node.py @@ -38,6 +38,7 @@ from ironic.api.controllers.v1 import versions from ironic.api import expose from ironic.common import exception from ironic.common.i18n import _ +from ironic.common import policy from ironic.common import states as ir_states from ironic.conductor import utils as conductor_utils from ironic import objects @@ -198,6 +199,9 @@ class BootDeviceController(rest.RestController): Default: False. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:set_boot_device', cdict, cdict) + rpc_node = api_utils.get_rpc_node(node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) pecan.request.rpcapi.set_boot_device(pecan.request.context, @@ -220,6 +224,9 @@ class BootDeviceController(rest.RestController): future boots or not, None if it is unknown. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:get_boot_device', cdict, cdict) + return self._get_boot_device(node_ident) @METRICS.timer('BootDeviceController.supported') @@ -232,6 +239,9 @@ class BootDeviceController(rest.RestController): devices. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:get_boot_device', cdict, cdict) + boot_devices = self._get_boot_device(node_ident, supported=True) return {'supported_boot_devices': boot_devices} @@ -267,6 +277,9 @@ class NodeConsoleController(rest.RestController): :param node_ident: UUID or logical name of a node. 
""" + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:get_console', cdict, cdict) + rpc_node = api_utils.get_rpc_node(node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) try: @@ -289,6 +302,9 @@ class NodeConsoleController(rest.RestController): :param enabled: Boolean value; whether to enable or disable the console. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:set_console_state', cdict, cdict) + rpc_node = api_utils.get_rpc_node(node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) pecan.request.rpcapi.set_console_mode(pecan.request.context, @@ -377,6 +393,9 @@ class NodeStatesController(rest.RestController): :param node_ident: the UUID or logical_name of a node. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:get_states', cdict, cdict) + # NOTE(lucasagomes): All these state values come from the # DB. Ironic counts with a periodic task that verify the current # power states of the nodes and update the DB accordingly. @@ -398,6 +417,9 @@ class NodeStatesController(rest.RestController): :raises: NotAcceptable, if requested version of the API is less than 1.12. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:set_raid_state', cdict, cdict) + if not api_utils.allow_raid_config(): raise exception.NotAcceptable() rpc_node = api_utils.get_rpc_node(node_ident) @@ -426,6 +448,9 @@ class NodeStatesController(rest.RestController): state is not valid or if the node is in CLEANING state. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:set_power_state', cdict, cdict) + # TODO(lucasagomes): Test if it's able to transition to the # target state from the current one rpc_node = api_utils.get_rpc_node(node_ident) @@ -503,6 +528,9 @@ class NodeStatesController(rest.RestController): :raises: NotAcceptable (HTTP 406) if the API version specified does not allow the requested state transition. 
""" + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:set_provision_state', cdict, cdict) + api_utils.check_allow_management_verbs(target) rpc_node = api_utils.get_rpc_node(node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) @@ -903,6 +931,9 @@ class NodeVendorPassthruController(rest.RestController): entries. :raises: NodeNotFound if the node is not found. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:vendor_passthru', cdict, cdict) + # Raise an exception if node is not found rpc_node = api_utils.get_rpc_node(node_ident) @@ -924,6 +955,12 @@ class NodeVendorPassthruController(rest.RestController): :param method: name of the method in vendor driver. :param data: body of data to supply to the specified method. """ + cdict = pecan.request.context.to_dict() + if method == 'heartbeat': + policy.authorize('baremetal:node:ipa_heartbeat', cdict, cdict) + else: + policy.authorize('baremetal:node:vendor_passthru', cdict, cdict) + # Raise an exception if node is not found rpc_node = api_utils.get_rpc_node(node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) @@ -956,6 +993,9 @@ class NodeMaintenanceController(rest.RestController): :param reason: Optional, the reason why it's in maintenance. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:set_maintenance', cdict, cdict) + self._set_maintenance(node_ident, True, reason=reason) @METRICS.timer('NodeMaintenanceController.delete') @@ -966,6 +1006,9 @@ class NodeMaintenanceController(rest.RestController): :param node_ident: the UUID or logical name of a node. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:clear_maintenance', cdict, cdict) + self._set_maintenance(node_ident, False) @@ -1169,6 +1212,9 @@ class NodesController(rest.RestController): :param fields: Optional, a list with a specified set of fields of the resource to be returned. 
""" + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:get', cdict, cdict) + api_utils.check_allow_specify_fields(fields) api_utils.check_allowed_fields(fields) api_utils.check_for_invalid_state_and_allow_filter(provision_state) @@ -1215,6 +1261,9 @@ class NodesController(rest.RestController): :param resource_class: Optional string value to get only nodes with that resource_class. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:get', cdict, cdict) + api_utils.check_for_invalid_state_and_allow_filter(provision_state) api_utils.check_allow_specify_driver(driver) api_utils.check_allow_specify_resource_class(resource_class) @@ -1243,6 +1292,9 @@ class NodesController(rest.RestController): :param node: UUID or name of a node. :param node_uuid: UUID of a node. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:validate', cdict, cdict) + if node is not None: # We're invoking this interface using positional notation, or # explicitly using 'node'. Try and determine which one. @@ -1265,6 +1317,9 @@ class NodesController(rest.RestController): :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:get', cdict, cdict) + if self.from_chassis: raise exception.OperationNotPermitted() @@ -1281,6 +1336,9 @@ class NodesController(rest.RestController): :param node: a node within the request body. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:create', cdict, cdict) + if self.from_chassis: raise exception.OperationNotPermitted() @@ -1345,6 +1403,9 @@ class NodesController(rest.RestController): :param node_ident: UUID or logical name of a node. :param patch: a json PATCH document to apply to this node. 
""" + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:update', cdict, cdict) + if self.from_chassis: raise exception.OperationNotPermitted() @@ -1426,6 +1487,9 @@ class NodesController(rest.RestController): :param node_ident: UUID or logical name of a node. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:delete', cdict, cdict) + if self.from_chassis: raise exception.OperationNotPermitted() diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py index 90d549cdc..e2899815d 100644 --- a/ironic/api/controllers/v1/port.py +++ b/ironic/api/controllers/v1/port.py @@ -31,6 +31,7 @@ from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception from ironic.common.i18n import _ +from ironic.common import policy from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) @@ -308,6 +309,9 @@ class PortsController(rest.RestController): of the resource to be returned. :raises: NotAcceptable """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:port:get', cdict, cdict) + api_utils.check_allow_specify_fields(fields) if (fields and not api_utils.allow_port_advanced_net_fields() and set(fields).intersection(self.advanced_net_fields)): @@ -351,6 +355,9 @@ class PortsController(rest.RestController): :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :raises: NotAcceptable, HTTPNotFound """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:port:get', cdict, cdict) + if not node_uuid and node: # We're invoking this interface using positional notation, or # explicitly using 'node'. Try and determine which one. @@ -379,6 +386,9 @@ class PortsController(rest.RestController): of the resource to be returned. 
:raises: NotAcceptable """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:port:get', cdict, cdict) + if self.from_nodes: raise exception.OperationNotPermitted() @@ -395,6 +405,9 @@ class PortsController(rest.RestController): :param port: a port within the request body. :raises: NotAcceptable """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:port:create', cdict, cdict) + if self.from_nodes: raise exception.OperationNotPermitted() @@ -421,6 +434,9 @@ class PortsController(rest.RestController): :param patch: a json PATCH document to apply to this port. :raises: NotAcceptable """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:port:update', cdict, cdict) + if self.from_nodes: raise exception.OperationNotPermitted() if not api_utils.allow_port_advanced_net_fields(): @@ -470,6 +486,9 @@ class PortsController(rest.RestController): :param port_uuid: UUID of a port. """ + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:port:delete', cdict, cdict) + if self.from_nodes: raise exception.OperationNotPermitted() rpc_port = objects.Port.get_by_uuid(pecan.request.context, diff --git a/ironic/api/hooks.py b/ironic/api/hooks.py index 764b0a434..f25a05be9 100644 --- a/ironic/api/hooks.py +++ b/ironic/api/hooks.py @@ -17,7 +17,6 @@ from oslo_config import cfg from pecan import hooks from six.moves import http_client -from webob import exc from ironic.common import context from ironic.common import policy @@ -69,6 +68,7 @@ class ContextHook(hooks.PecanHook): # Do not pass any token with context for noauth mode auth_token = (None if cfg.CONF.auth_strategy == 'noauth' else headers.get('X-Auth-Token')) + is_public_api = state.request.environ.get('is_public_api', False) creds = { 'user': headers.get('X-User') or headers.get('X-User-Id'), @@ -77,16 +77,17 @@ class ContextHook(hooks.PecanHook): 'domain_name': headers.get('X-User-Domain-Name'), 'auth_token': auth_token, 'roles': 
headers.get('X-Roles', '').split(','), + 'is_public_api': is_public_api, } - is_admin = policy.enforce('admin_api', creds, creds) - is_public_api = state.request.environ.get('is_public_api', False) - show_password = policy.enforce('show_password', creds, creds) + # TODO(deva): refactor this so enforce is called directly at relevant + # places in code, not globally and for every request + show_password = policy.check('show_password', creds, creds) + is_admin = policy.check('is_admin', creds, creds) state.request.context = context.RequestContext( - is_admin=is_admin, - is_public_api=is_public_api, show_password=show_password, + is_admin=is_admin, **creds) def after(self, state): @@ -106,22 +107,6 @@ class RPCHook(hooks.PecanHook): state.request.rpcapi = rpcapi.ConductorAPI() -class TrustedCallHook(hooks.PecanHook): - """Verify that the user has admin rights. - - Checks whether the API call is performed against a public - resource or the user has admin privileges in the appropriate - tenant, domain or other administrative unit. - - """ - def before(self, state): - ctx = state.request.context - if ctx.is_public_api: - return - policy.enforce('admin_api', ctx.to_dict(), ctx.to_dict(), - do_raise=True, exc=exc.HTTPForbidden) - - class NoExceptionTracebackHook(hooks.PecanHook): """Workaround rpc.common: deserialize_remote_exception. 
diff --git a/ironic/common/exception.py b/ironic/common/exception.py index ee82c3294..bdb308daa 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -430,8 +430,8 @@ class CommunicationError(IronicException): _msg_fmt = _("Unable to communicate with the server.") -class HTTPForbidden(Forbidden): - pass +class HTTPForbidden(NotAuthorized): + _msg_fmt = _("Access was denied to the following resource: %(resource)s") class Unauthorized(IronicException): diff --git a/ironic/common/policy.py b/ironic/common/policy.py index a9a79d9bc..237b78f85 100644 --- a/ironic/common/policy.py +++ b/ironic/common/policy.py @@ -17,10 +17,165 @@ from oslo_concurrency import lockutils from oslo_config import cfg +from oslo_log import log from oslo_policy import policy +from ironic.common import exception +from ironic.common.i18n import _LW + _ENFORCER = None CONF = cfg.CONF +LOG = log.getLogger(__name__) + +default_policies = [ + # Legacy setting, don't remove. Likely to be overridden by operators who + # forget to update their policy.json configuration file. + # This gets rolled into the new "is_admin" rule below. 
+ policy.RuleDefault('admin_api', + 'role:admin or role:administrator', + description='Legacy rule for cloud admin access'), + # is_public_api is set in the environment from AuthTokenMiddleware + policy.RuleDefault('public_api', + 'is_public_api:True', + description='Internal flag for public API routes'), + # Generic default to hide passwords + policy.RuleDefault('show_password', + '!', + description='Show or mask passwords in API responses'), + # Roles likely to be overridden by operator + policy.RuleDefault('is_member', + 'tenant:demo or tenant:baremetal', + description='May be used to restrict access to specific tenants'), # noqa + policy.RuleDefault('is_observer', + 'rule:is_member and (role:observer or role:baremetal_observer)', # noqa + description='Read-only API access'), + policy.RuleDefault('is_admin', + 'rule:admin_api or (rule:is_member and role:baremetal_admin)', # noqa + description='Full read/write API access'), +] + +# NOTE(deva): to follow policy-in-code spec, we define defaults for +# the granular policies in code, rather than in policy.json. +# All of these may be overridden by configuration, but we can +# depend on their existence throughout the code. 
+ +node_policies = [ + policy.RuleDefault('baremetal:node:get', + 'rule:is_admin or rule:is_observer', + description='Retrieve Node records'), + policy.RuleDefault('baremetal:node:get_boot_device', + 'rule:is_admin or rule:is_observer', + description='Retrieve Node boot device metadata'), + policy.RuleDefault('baremetal:node:get_states', + 'rule:is_admin or rule:is_observer', + description='View Node power and provision state'), + policy.RuleDefault('baremetal:node:create', + 'rule:is_admin', + description='Create Node records'), + policy.RuleDefault('baremetal:node:delete', + 'rule:is_admin', + description='Delete Node records'), + policy.RuleDefault('baremetal:node:update', + 'rule:is_admin', + description='Update Node records'), + policy.RuleDefault('baremetal:node:validate', + 'rule:is_admin', + description='Request active validation of Nodes'), + policy.RuleDefault('baremetal:node:set_maintenance', + 'rule:is_admin', + description='Set maintenance flag, taking a Node ' + 'out of service'), + policy.RuleDefault('baremetal:node:clear_maintenance', + 'rule:is_admin', + description='Clear maintenance flag, placing the Node ' + 'into service again'), + policy.RuleDefault('baremetal:node:set_boot_device', + 'rule:is_admin', + description='Change Node boot device'), + policy.RuleDefault('baremetal:node:set_power_state', + 'rule:is_admin', + description='Change Node power status'), + policy.RuleDefault('baremetal:node:set_provision_state', + 'rule:is_admin', + description='Change Node provision status'), + policy.RuleDefault('baremetal:node:set_raid_state', + 'rule:is_admin', + description='Change Node RAID status'), + policy.RuleDefault('baremetal:node:get_console', + 'rule:is_admin', + description='Get Node console connection information'), + policy.RuleDefault('baremetal:node:set_console_state', + 'rule:is_admin', + description='Change Node console status'), +] + +port_policies = [ + policy.RuleDefault('baremetal:port:get', + 'rule:is_admin or rule:is_observer', + 
description='Retrieve Port records'), + policy.RuleDefault('baremetal:port:create', + 'rule:is_admin', + description='Create Port records'), + policy.RuleDefault('baremetal:port:delete', + 'rule:is_admin', + description='Delete Port records'), + policy.RuleDefault('baremetal:port:update', + 'rule:is_admin', + description='Update Port records'), +] + +chassis_policies = [ + policy.RuleDefault('baremetal:chassis:get', + 'rule:is_admin or rule:is_observer', + description='Retrieve Chassis records'), + policy.RuleDefault('baremetal:chassis:create', + 'rule:is_admin', + description='Create Chassis records'), + policy.RuleDefault('baremetal:chassis:delete', + 'rule:is_admin', + description='Delete Chassis records'), + policy.RuleDefault('baremetal:chassis:update', + 'rule:is_admin', + description='Update Chassis records'), +] + +driver_policies = [ + policy.RuleDefault('baremetal:driver:get', + 'rule:is_admin or rule:is_observer', + description='View list of available drivers'), + policy.RuleDefault('baremetal:driver:get_properties', + 'rule:is_admin or rule:is_observer', + description='View driver-specific properties'), + policy.RuleDefault('baremetal:driver:get_raid_logical_disk_properties', + 'rule:is_admin or rule:is_observer', + description='View driver-specific RAID metadata'), + +] + +extra_policies = [ + policy.RuleDefault('baremetal:node:vendor_passthru', + 'rule:is_admin', + description='Access vendor-specific Node functions'), + policy.RuleDefault('baremetal:driver:vendor_passthru', + 'rule:is_admin', + description='Access vendor-specific Driver functions'), + policy.RuleDefault('baremetal:node:ipa_heartbeat', + 'rule:public_api', + description='Send heartbeats from IPA ramdisk'), + policy.RuleDefault('baremetal:driver:ipa_lookup', + 'rule:public_api', + description='Access IPA ramdisk functions'), +] + + +def list_policies(): + policies = (default_policies + + node_policies + + port_policies + + chassis_policies + + driver_policies + + extra_policies) + 
return policies @lockutils.synchronized('policy_enforcer', 'ironic-') @@ -29,10 +184,11 @@ def init_enforcer(policy_file=None, rules=None, """Synchronously initializes the policy enforcer :param policy_file: Custom policy file to use, if none is specified, - `CONF.policy_file` will be used. + `CONF.oslo_policy.policy_file` will be used. :param rules: Default dictionary / Rules to use. It will be considered just in the first instantiation. - :param default_rule: Default rule to use, CONF.default_rule will + :param default_rule: Default rule to use, + CONF.oslo_policy.policy_default_rule will be used if none is specified. :param use_conf: Whether to load rules from config file. @@ -42,10 +198,15 @@ def init_enforcer(policy_file=None, rules=None, if _ENFORCER: return + # NOTE(deva): Register defaults for policy-in-code here so that they are + # loaded exactly once - when this module-global is initialized. + # Defining these in the relevant API modules won't work + # because API classes lack singletons and don't use globals. _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file, rules=rules, default_rule=default_rule, use_conf=use_conf) + _ENFORCER.register_defaults(list_policies()) def get_enforcer(): @@ -57,12 +218,57 @@ def get_enforcer(): return _ENFORCER +# NOTE(deva): We can't call these methods from within decorators because the +# 'target' and 'creds' parameter must be fetched from the call time +# context-local pecan.request magic variable, but decorators are compiled +# at module-load time. + + +def authorize(rule, target, creds, *args, **kwargs): + """A shortcut for policy.Enforcer.authorize() + + Checks authorization of a rule against the target and credentials, and + raises an exception if the rule is not defined. + Always returns true if CONF.auth_strategy == noauth. + + Beginning with the Newton cycle, this should be used in place of 'enforce'. 
+ """ + if CONF.auth_strategy == 'noauth': + return True + enforcer = get_enforcer() + try: + return enforcer.authorize(rule, target, creds, do_raise=True, + *args, **kwargs) + except policy.PolicyNotAuthorized: + raise exception.HTTPForbidden(resource=rule) + + +def check(rule, target, creds, *args, **kwargs): + """A shortcut for policy.Enforcer.enforce() + + Checks authorization of a rule against the target and credentials + and returns True or False. + """ + enforcer = get_enforcer() + return enforcer.enforce(rule, target, creds, *args, **kwargs) + + def enforce(rule, target, creds, do_raise=False, exc=None, *args, **kwargs): """A shortcut for policy.Enforcer.enforce() Checks authorization of a rule against the target and credentials. + Always returns true if CONF.auth_strategy == noauth. """ + # NOTE(deva): this method is obsoleted by authorize(), but retained for + # backwards compatibility in case it has been used downstream. + # It may be removed in the 'P' cycle. + LOG.warning(_LW( + "Deprecation warning: calls to ironic.common.policy.enforce() " + "should be replaced with authorize(). 
This method may be removed " + "in a future release.")) + if CONF.auth_strategy == 'noauth': + return True enforcer = get_enforcer() return enforcer.enforce(rule, target, creds, do_raise=do_raise, exc=exc, *args, **kwargs) diff --git a/ironic/tests/unit/api/base.py b/ironic/tests/unit/api/base.py index b81b2326b..4d7452486 100644 --- a/ironic/tests/unit/api/base.py +++ b/ironic/tests/unit/api/base.py @@ -45,10 +45,11 @@ class BaseApiTest(base.DbTestCase): def setUp(self): super(BaseApiTest, self).setUp() - cfg.CONF.set_override("auth_version", "v2.0", + cfg.CONF.set_override("auth_version", "v3", group='keystone_authtoken') cfg.CONF.set_override("admin_user", "admin", group='keystone_authtoken') + cfg.CONF.set_override("auth_strategy", "noauth") self.app = self._make_app() def reset_pecan(): @@ -60,7 +61,7 @@ class BaseApiTest(base.DbTestCase): self._check_version = p.start() self.addCleanup(p.stop) - def _make_app(self, enable_acl=False): + def _make_app(self): # Determine where we are so we can set up paths in the config root_dir = self.path_get() @@ -70,11 +71,9 @@ class BaseApiTest(base.DbTestCase): 'modules': ['ironic.api'], 'static_root': '%s/public' % root_dir, 'template_path': '%s/api/templates' % root_dir, - 'enable_acl': enable_acl, 'acl_public_routes': ['/', '/v1'], }, } - return pecan.testing.load_test_app(self.config) def _request_json(self, path, params, expect_errors=False, headers=None, diff --git a/ironic/tests/unit/api/test_acl.py b/ironic/tests/unit/api/test_acl.py index 65479c1b0..fb1125009 100644 --- a/ironic/tests/unit/api/test_acl.py +++ b/ironic/tests/unit/api/test_acl.py @@ -50,7 +50,8 @@ class TestACL(base.BaseApiTest): def _make_app(self): cfg.CONF.set_override('cache', 'fake.cache', group='keystone_authtoken') - return super(TestACL, self)._make_app(enable_acl=True) + cfg.CONF.set_override('auth_strategy', 'keystone') + return super(TestACL, self)._make_app() def test_non_authenticated(self): response = self.get_json(self.node_path, 
expect_errors=True) diff --git a/ironic/tests/unit/api/test_audit.py b/ironic/tests/unit/api/test_audit.py index 6e53fbfb1..bd1565c1c 100644 --- a/ironic/tests/unit/api/test_audit.py +++ b/ironic/tests/unit/api/test_audit.py @@ -38,7 +38,7 @@ class TestAuditMiddleware(base.BaseApiTest): @mock.patch.object(audit, 'AuditMiddleware') def test_enable_audit_request(self, mock_audit): CONF.audit.enabled = True - self._make_app(enable_acl=True) + self._make_app() mock_audit.assert_called_once_with( mock.ANY, audit_map_file=CONF.audit.audit_map_file, @@ -50,10 +50,10 @@ class TestAuditMiddleware(base.BaseApiTest): mock_audit.side_effect = IOError("file access error") self.assertRaises(exception.InputFileError, - self._make_app, enable_acl=True) + self._make_app) @mock.patch.object(audit, 'AuditMiddleware') def test_disable_audit_request(self, mock_audit): CONF.audit.enabled = False - self._make_app(enable_acl=True) + self._make_app() self.assertFalse(mock_audit.called) diff --git a/ironic/tests/unit/api/test_hooks.py b/ironic/tests/unit/api/test_hooks.py index e29ea298f..7c9a58938 100644 --- a/ironic/tests/unit/api/test_hooks.py +++ b/ironic/tests/unit/api/test_hooks.py @@ -21,13 +21,11 @@ from oslo_config import cfg import oslo_messaging as messaging import six from six.moves import http_client -from webob import exc as webob_exc from ironic.api.controllers import root from ironic.api import hooks from ironic.common import context from ironic.tests.unit.api import base -from ironic.tests.unit import policy_fixture class FakeRequest(object): @@ -217,6 +215,7 @@ class TestNoExceptionTracebackHook(base.BaseApiTest): class TestContextHook(base.BaseApiTest): @mock.patch.object(context, 'RequestContext') def test_context_hook_not_admin(self, mock_ctx): + cfg.CONF.set_override('auth_strategy', 'keystone') headers = fake_headers(admin=False) reqstate = FakeRequestState(headers=headers) context_hook = hooks.ContextHook(None) @@ -234,6 +233,7 @@ class 
TestContextHook(base.BaseApiTest): @mock.patch.object(context, 'RequestContext') def test_context_hook_admin(self, mock_ctx): + cfg.CONF.set_override('auth_strategy', 'keystone') headers = fake_headers(admin=True) reqstate = FakeRequestState(headers=headers) context_hook = hooks.ContextHook(None) @@ -251,6 +251,7 @@ class TestContextHook(base.BaseApiTest): @mock.patch.object(context, 'RequestContext') def test_context_hook_public_api(self, mock_ctx): + cfg.CONF.set_override('auth_strategy', 'keystone') headers = fake_headers(admin=True) env = {'is_public_api': True} reqstate = FakeRequestState(headers=headers, environ=env) @@ -306,41 +307,6 @@ class TestContextHook(base.BaseApiTest): response.headers) -class TestTrustedCallHook(base.BaseApiTest): - def test_trusted_call_hook_not_admin(self): - headers = fake_headers(admin=False) - reqstate = FakeRequestState(headers=headers) - reqstate.set_context() - trusted_call_hook = hooks.TrustedCallHook() - self.assertRaises(webob_exc.HTTPForbidden, - trusted_call_hook.before, reqstate) - - def test_trusted_call_hook_admin(self): - headers = fake_headers(admin=True) - reqstate = FakeRequestState(headers=headers) - reqstate.set_context() - trusted_call_hook = hooks.TrustedCallHook() - trusted_call_hook.before(reqstate) - - def test_trusted_call_hook_public_api(self): - headers = fake_headers(admin=False) - env = {'is_public_api': True} - reqstate = FakeRequestState(headers=headers, environ=env) - reqstate.set_context() - trusted_call_hook = hooks.TrustedCallHook() - trusted_call_hook.before(reqstate) - - -class TestTrustedCallHookCompatJuno(TestTrustedCallHook): - def setUp(self): - super(TestTrustedCallHookCompatJuno, self).setUp() - self.policy = self.useFixture( - policy_fixture.PolicyFixture(compat='juno')) - - def test_trusted_call_hook_public_api(self): - self.skipTest('no public_api trusted call policy in juno') - - class TestPublicUrlHook(base.BaseApiTest): def test_before_host_url(self): diff --git 
a/ironic/tests/unit/common/test_policy.py b/ironic/tests/unit/common/test_policy.py index 231541468..97d337558 100644 --- a/ironic/tests/unit/common/test_policy.py +++ b/ironic/tests/unit/common/test_policy.py @@ -15,60 +15,107 @@ # License for the specific language governing permissions and limitations # under the License. +from oslo_policy import policy as oslo_policy + +from ironic.common import exception from ironic.common import policy from ironic.tests import base -class PolicyTestCase(base.TestCase): +class PolicyInCodeTestCase(base.TestCase): """Tests whether the configuration of the policy engine is corect.""" def test_admin_api(self): - creds = ({'roles': [u'admin']}, + creds = ({'roles': ['admin']}, {'roles': ['administrator']}, {'roles': ['admin', 'administrator']}) for c in creds: - self.assertTrue(policy.enforce('admin_api', c, c)) + self.assertTrue(policy.check('admin_api', c, c)) def test_public_api(self): creds = {'is_public_api': 'True'} - self.assertTrue(policy.enforce('public_api', creds, creds)) - - def test_trusted_call(self): - creds = ({'roles': ['admin']}, - {'is_public_api': 'True'}, - {'roles': ['admin'], 'is_public_api': 'True'}, - {'roles': ['Member'], 'is_public_api': 'True'}) - - for c in creds: - self.assertTrue(policy.enforce('trusted_call', c, c)) + self.assertTrue(policy.check('public_api', creds, creds)) def test_show_password(self): creds = {'roles': [u'admin'], 'tenant': 'admin'} - self.assertTrue(policy.enforce('show_password', creds, creds)) + self.assertTrue(policy.check('show_password', creds, creds)) + + def test_node_get(self): + creds = {'roles': ['baremetal_observer'], 'tenant': 'demo'} + self.assertTrue(policy.check('baremetal:node:get', creds, creds)) + + def test_node_create(self): + creds = {'roles': ['baremetal_admin'], 'tenant': 'demo'} + self.assertTrue(policy.check('baremetal:node:create', creds, creds)) -class PolicyTestCaseNegative(base.TestCase): +class PolicyInCodeTestCaseNegative(base.TestCase): """Tests 
whether the configuration of the policy engine is corect.""" def test_admin_api(self): creds = {'roles': ['Member']} - self.assertFalse(policy.enforce('admin_api', creds, creds)) + self.assertFalse(policy.check('admin_api', creds, creds)) def test_public_api(self): creds = ({'is_public_api': 'False'}, {}) for c in creds: - self.assertFalse(policy.enforce('public_api', c, c)) - - def test_trusted_call(self): - creds = ({'roles': ['Member']}, - {'is_public_api': 'False'}, - {'roles': ['Member'], 'is_public_api': 'False'}) - - for c in creds: - self.assertFalse(policy.enforce('trusted_call', c, c)) + self.assertFalse(policy.check('public_api', c, c)) def test_show_password(self): creds = {'roles': [u'admin'], 'tenant': 'demo'} - self.assertFalse(policy.enforce('show_password', creds, creds)) + self.assertFalse(policy.check('show_password', creds, creds)) + + def test_node_get(self): + creds = {'roles': ['generic_user'], 'tenant': 'demo'} + self.assertFalse(policy.check('baremetal:node:get', creds, creds)) + + def test_node_create(self): + creds = {'roles': ['baremetal_observer'], 'tenant': 'demo'} + self.assertFalse(policy.check('baremetal:node:create', creds, creds)) + + +class PolicyTestCase(base.TestCase): + """Tests whether ironic.common.policy behaves as expected.""" + + def setUp(self): + super(PolicyTestCase, self).setUp() + rule = oslo_policy.RuleDefault('has_foo_role', "role:foo") + enforcer = policy.get_enforcer() + enforcer.register_default(rule) + + def test_authorize_passes(self): + creds = {'roles': ['foo']} + policy.authorize('has_foo_role', creds, creds) + + def test_authorize_access_forbidden(self): + creds = {'roles': ['bar']} + self.assertRaises( + exception.HTTPForbidden, + policy.authorize, 'has_foo_role', creds, creds) + + def test_authorize_policy_not_registered(self): + creds = {'roles': ['foo']} + self.assertRaises( + oslo_policy.PolicyNotRegistered, + policy.authorize, 'has_bar_role', creds, creds) + + def 
test_enforce_existing_rule_passes(self): + creds = {'roles': ['foo']} + self.assertTrue(policy.enforce('has_foo_role', creds, creds)) + + def test_enforce_missing_rule_fails(self): + creds = {'roles': ['foo']} + self.assertFalse(policy.enforce('has_bar_role', creds, creds)) + + def test_enforce_existing_rule_fails(self): + creds = {'roles': ['bar']} + self.assertFalse(policy.enforce('has_foo_role', creds, creds)) + + def test_enforce_existing_rule_raises(self): + creds = {'roles': ['bar']} + self.assertRaises( + exception.IronicException, + policy.enforce, 'has_foo_role', creds, creds, True, + exception.IronicException) diff --git a/ironic/tests/unit/fake_policy.py b/ironic/tests/unit/fake_policy.py deleted file mode 100644 index 66f600845..000000000 --- a/ironic/tests/unit/fake_policy.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -policy_data = """ -{ - "admin_api": "role:admin or role:administrator", - "public_api": "is_public_api:True", - "trusted_call": "rule:admin_api or rule:public_api", - "default": "rule:trusted_call", - "show_password": "tenant:admin" -} -""" - - -policy_data_compat_juno = """ -{ - "admin": "role:admin or role:administrator", - "admin_api": "is_admin:True", - "default": "rule:admin_api" -} -""" - - -def get_policy_data(compat): - if not compat: - return policy_data - elif compat == 'juno': - return policy_data_compat_juno - else: - raise Exception('Policy data for %s not available' % compat) diff --git a/ironic/tests/unit/policy_fixture.py b/ironic/tests/unit/policy_fixture.py index 7f3f48ac9..4fe3b5faa 100644 --- a/ironic/tests/unit/policy_fixture.py +++ b/ironic/tests/unit/policy_fixture.py @@ -19,22 +19,27 @@ from oslo_config import cfg from oslo_policy import opts as policy_opts from ironic.common import policy as ironic_policy -from ironic.tests.unit import fake_policy CONF = cfg.CONF +# NOTE(deva): We ship a default that always masks passwords, but for testing +# we need to override that default to ensure passwords can be +# made visible by operators that choose to do so. 
+policy_data = """ +{ + "show_password": "tenant:admin" +} +""" + class PolicyFixture(fixtures.Fixture): - def __init__(self, compat=None): - self.compat = compat - def setUp(self): super(PolicyFixture, self).setUp() self.policy_dir = self.useFixture(fixtures.TempDir()) self.policy_file_name = os.path.join(self.policy_dir.path, 'policy.json') with open(self.policy_file_name, 'w') as policy_file: - policy_file.write(fake_policy.get_policy_data(self.compat)) + policy_file.write(policy_data) policy_opts.set_defaults(CONF) CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy') ironic_policy._ENFORCER = None diff --git a/releasenotes/notes/implement-policy-in-code-cbb0216ef5f8224f.yaml b/releasenotes/notes/implement-policy-in-code-cbb0216ef5f8224f.yaml new file mode 100644 index 000000000..6755307f9 --- /dev/null +++ b/releasenotes/notes/implement-policy-in-code-cbb0216ef5f8224f.yaml @@ -0,0 +1,22 @@ +--- +features: + - | + RESTful access to every API resource may now be controlled by adjusting + policy settings. Defaults are set in code, and remain backwards compatible + with the previously-included policy.json file. Two new roles are checked + by default, "baremetal_admin" and "baremetal_observer", though these may be + replaced or overridden by configuration. The "baremetal_observer" role + grants read-only access to Ironic's API. +security: + - | + Previously, access to Ironic's REST API was "all or nothing". With this + release, it is now possible to restrict read and write access to API + resources to specific cloud roles. +upgrade: + - | + During an upgrade, it is recommended that all deployers re-evaluate the + settings in their /etc/ironic/policy.json file. This file should now be + used only to override default configuration, such as by limiting access to + the Bare Metal service to specific tenants or restricting access to + specific API endpoints. A policy.json.sample file is provided that lists + all supported policies. 
diff --git a/setup.cfg b/setup.cfg index defced996..576df9a67 100644 --- a/setup.cfg +++ b/setup.cfg @@ -25,6 +25,9 @@ packages = oslo.config.opts = ironic = ironic.conf.opts:list_opts +oslo.policy.policies = + ironic.api = ironic.common.policy:list_policies + console_scripts = ironic-api = ironic.cmd.api:main ironic-dbsync = ironic.cmd.dbsync:main diff --git a/tox.ini b/tox.ini index ef60d136e..89b0bf2bb 100644 --- a/tox.ini +++ b/tox.ini @@ -58,6 +58,12 @@ envdir = {toxworkdir}/venv commands = oslo-config-generator --config-file=tools/config/ironic-config-generator.conf +[testenv:genpolicy] +sitepackages = False +envdir = {toxworkdir}/venv +commands = + oslopolicy-sample-generator --namespace=ironic.api --output-file=etc/ironic/policy.json.sample + [testenv:debug] commands = oslo_debug_helper -t ironic/tests/unit {posargs} From 24e02f99b216bf0d24766d313ccacd3ef6cc5405 Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Thu, 4 Aug 2016 17:59:38 +0300 Subject: [PATCH 136/166] Fix tempest related exceptions during docs build We don't have tempest in our {test-}requirements.txt, which causes docs build to complain about missing tempest.* modules, as we have autodoc enabled, which imports all the modules it can find to get their docstrings. This change adds ironic_tempest_plugin modules to autodoc_exclude_modules, and also adds it to exclude_patterns, so that sphinx ignores ironic_tempest_plugin/* files too. Also fixes a couple of warnings because of the incorrect formatting of the LocalLinkConnectionType.validate docstring. 
Closes-bug: #1609850 Change-Id: Id24e587b690b423e65dad55e70224426873e8d5d --- doc/source/conf.py | 5 +++++ ironic/api/controllers/v1/types.py | 10 +++++----- setup.cfg | 1 + 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index f88619427..aaa12bb8c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -64,6 +64,11 @@ MOCK_MODULES = ['nova', 'nova.compute', 'nova.context'] for module in MOCK_MODULES: sys.modules[module] = mock.Mock() +# A list of glob-style patterns that should be excluded when looking for +# source files. They are matched against the source file names relative to the +# source directory, using slashes as directory separators on all platforms. +exclude_patterns = ['api/ironic_tempest_plugin.*'] + # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with diff --git a/ironic/api/controllers/v1/types.py b/ironic/api/controllers/v1/types.py index 9cfe206b7..482e99326 100644 --- a/ironic/api/controllers/v1/types.py +++ b/ironic/api/controllers/v1/types.py @@ -272,20 +272,20 @@ class LocalLinkConnectionType(wtypes.UserType): """Validate and convert the input to a LocalLinkConnectionType. :param value: A dictionary of values to validate, switch_id is a MAC - address or an OpenFlow based datapath_id, switch_info is an optional - field. + address or an OpenFlow based datapath_id, switch_info is an + optional field. For example:: - { + + { 'switch_id': mac_or_datapath_id(), 'port_id': 'Ethernet3/1', 'switch_info': 'switch1' - } + } :returns: A dictionary. :raises: Invalid if some of the keys in the dictionary being validated are unknown, invalid, or some required ones are missing. 
- """ wtypes.DictType(wtypes.text, wtypes.text).validate(value) diff --git a/setup.cfg b/setup.cfg index defced996..f8ad81532 100644 --- a/setup.cfg +++ b/setup.cfg @@ -106,6 +106,7 @@ autodoc_index_modules = True autodoc_exclude_modules = ironic.db.sqlalchemy.alembic.env ironic.db.sqlalchemy.alembic.versions.* + ironic_tempest_plugin.* warnerrors = True [build_sphinx] From fd9614dbb2d4b97826e8c3ae022a105bd7073b08 Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Thu, 4 Aug 2016 18:19:14 +0300 Subject: [PATCH 137/166] Fix __all__ module attributes They should be iterables containing strings. Closes-bug: #1609853 Change-Id: I1d0e044851323a9899b347b893f95a436e2a45aa --- ironic/api/controllers/v1/__init__.py | 2 +- ironic/api/middleware/__init__.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ironic/api/controllers/v1/__init__.py b/ironic/api/controllers/v1/__init__.py index 493be13c7..cda8e41b8 100644 --- a/ironic/api/controllers/v1/__init__.py +++ b/ironic/api/controllers/v1/__init__.py @@ -179,4 +179,4 @@ class Controller(rest.RestController): return super(Controller, self)._route(args) -__all__ = (Controller) +__all__ = ('Controller',) diff --git a/ironic/api/middleware/__init__.py b/ironic/api/middleware/__init__.py index 609b4ed98..385547d70 100644 --- a/ironic/api/middleware/__init__.py +++ b/ironic/api/middleware/__init__.py @@ -19,5 +19,5 @@ from ironic.api.middleware import parsable_error ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware AuthTokenMiddleware = auth_token.AuthTokenMiddleware -__all__ = (ParsableErrorMiddleware, - AuthTokenMiddleware) +__all__ = ('ParsableErrorMiddleware', + 'AuthTokenMiddleware') From 4483de30ba28e60d0f742441d928c3e28a291f90 Mon Sep 17 00:00:00 2001 From: Sinval Vieira Date: Mon, 29 Feb 2016 18:09:55 +0000 Subject: [PATCH 138/166] Add Dynamic Allocation feature for the OneView drivers This change is about adding the ability to the OneView drivers of dynamically allocate OneView 
resources to Ironic. The current version of the drivers consider what we call "pre-allocation" of nodes, meaning that when a node is registered in Ironic, even if it is not in use, this resource is still reserved in OneView. This change will prevent such situations by allocating OneView resources only at boot time, allowing both systems to really share the same pool of hardware. Change-Id: I43d1db490b4834080562946b8a6ca584ea36864d Co-Authored-By: Lilia Sampaio Co-Authored-By: Xavier Co-Authored-By: Hugo Nicodemos Co-Authored-By: Thiago Paiva Brito Co-Authored-By: Caio Oliveira Partial-Bug: #1541096 --- devstack/lib/ironic | 5 +- etc/ironic/ironic.conf.sample | 23 +- ironic/common/exception.py | 5 + ironic/conf/oneview.py | 24 +- ironic/drivers/modules/oneview/common.py | 79 +++- ironic/drivers/modules/oneview/deploy.py | 264 +++++++++++++ .../drivers/modules/oneview/deploy_utils.py | 335 +++++++++++++++++ ironic/drivers/modules/oneview/management.py | 4 - ironic/drivers/modules/oneview/power.py | 4 +- ironic/drivers/oneview.py | 31 +- .../drivers/modules/oneview/test_common.py | 142 ++++++- .../drivers/modules/oneview/test_deploy.py | 144 ++++++++ .../modules/oneview/test_deploy_utils.py | 349 ++++++++++++++++++ .../drivers/third_party_driver_mock_specs.py | 2 + .../unit/drivers/third_party_driver_mocks.py | 2 + ...c-allocation-feature-2fd6b4df7943f178.yaml | 5 + 16 files changed, 1359 insertions(+), 59 deletions(-) create mode 100644 ironic/drivers/modules/oneview/deploy.py create mode 100644 ironic/drivers/modules/oneview/deploy_utils.py create mode 100644 ironic/tests/unit/drivers/modules/oneview/test_deploy.py create mode 100644 ironic/tests/unit/drivers/modules/oneview/test_deploy_utils.py create mode 100644 releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml diff --git a/devstack/lib/ironic b/devstack/lib/ironic index a6df583cf..88953d067 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -83,7 +83,7 @@ 
IRONIC_HW_ARCH=${IRONIC_HW_ARCH:-x86_64} # # # *_oneview: -# +# # # IRONIC_IPMIINFO_FILE is deprecated, please use IRONIC_HWINFO_FILE. IRONIC_IPMIINFO_FILE will be removed in Ocata. IRONIC_IPMIINFO_FILE=${IRONIC_IPMIINFO_FILE:-""} @@ -1053,8 +1053,11 @@ function enroll_nodes { local server_profile_template_uri server_profile_template_uri=$(echo $hardware_info |awk '{print $4}') mac_address=$(echo $hardware_info |awk '{print $5}') + local applied_server_profile_uri + applied_server_profile_uri=$(echo $hardware_info |awk '{print $6}') node_options+=" -i server_hardware_uri=$server_hardware_uri" + node_options+=" -i applied_server_profile_uri=$applied_server_profile_uri" node_options+=" -p capabilities=" node_options+="server_hardware_type_uri:$server_hardware_type_uri," node_options+="enclosure_group_uri:$enclosure_group_uri," diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 62724b44f..0ada7a9ff 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -1909,26 +1909,37 @@ # From ironic # -# URL where OneView is available (string value) +# URL where OneView is available. (string value) #manager_url = -# OneView username to be used (string value) +# OneView username to be used. (string value) #username = -# OneView password to be used (string value) +# OneView password to be used. (string value) #password = -# Option to allow insecure connection with OneView (boolean +# Option to allow insecure connection with OneView. (boolean # value) #allow_insecure_connections = false -# Path to CA certificate (string value) +# Path to CA certificate. (string value) #tls_cacert_file = -# Max connection retries to check changes on OneView (integer +# Max connection retries to check changes on OneView. (integer # value) #max_polling_attempts = 12 +# Period (in seconds) for periodic tasks to be executed. 
+# (integer value) +#periodic_check_interval = 300 + +# Whether to enable the periodic tasks for OneView driver be +# aware when OneView hardware resources are taken and released +# by Ironic or OneView users and proactively manage nodes in +# clean fail state according to Dynamic Allocation model of +# hardware resources allocation in OneView. (boolean value) +#enable_periodic_tasks = true + [oslo_concurrency] diff --git a/ironic/common/exception.py b/ironic/common/exception.py index bdb308daa..7d8341c15 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -595,6 +595,11 @@ class OneViewError(IronicException): _msg_fmt = _("OneView exception occurred. Error: %(error)s") +class OneViewInvalidNodeParameter(OneViewError): + _msg_fmt = _("Error while obtaining OneView info from node %(node_uuid)s. " + "Error: %(error)s") + + class NodeTagNotFound(IronicException): _msg_fmt = _("Node %(node_id)s doesn't have a tag '%(tag)s'") diff --git a/ironic/conf/oneview.py b/ironic/conf/oneview.py index 47f5bd786..6dc5da6ba 100644 --- a/ironic/conf/oneview.py +++ b/ironic/conf/oneview.py @@ -20,20 +20,32 @@ from ironic.common.i18n import _ opts = [ cfg.StrOpt('manager_url', - help=_('URL where OneView is available')), + help=_('URL where OneView is available.')), cfg.StrOpt('username', - help=_('OneView username to be used')), + help=_('OneView username to be used.')), cfg.StrOpt('password', secret=True, - help=_('OneView password to be used')), + help=_('OneView password to be used.')), cfg.BoolOpt('allow_insecure_connections', default=False, - help=_('Option to allow insecure connection with OneView')), + help=_('Option to allow insecure connection with OneView.')), cfg.StrOpt('tls_cacert_file', - help=_('Path to CA certificate')), + help=_('Path to CA certificate.')), cfg.IntOpt('max_polling_attempts', default=12, - help=_('Max connection retries to check changes on OneView')), + help=_('Max connection retries to check changes on OneView.')), + 
cfg.BoolOpt('enable_periodic_tasks', + default=True, + help=_('Whether to enable the periodic tasks for OneView ' + 'driver be aware when OneView hardware resources are ' + 'taken and released by Ironic or OneView users ' + 'and proactively manage nodes in clean fail state ' + 'according to Dynamic Allocation model of hardware ' + 'resources allocation in OneView.')), + cfg.IntOpt('periodic_check_interval', + default=300, + help=_('Period (in seconds) for periodic tasks to be ' + 'executed when enable_periodic_tasks=True.')), ] diff --git a/ironic/drivers/modules/oneview/common.py b/ironic/drivers/modules/oneview/common.py index 275c65fcd..a40d4d549 100644 --- a/ironic/drivers/modules/oneview/common.py +++ b/ironic/drivers/modules/oneview/common.py @@ -1,4 +1,3 @@ -# # Copyright 2015 Hewlett Packard Development Company, LP # Copyright 2015 Universidade Federal de Campina Grande # @@ -61,6 +60,15 @@ COMMON_PROPERTIES.update(REQUIRED_ON_DRIVER_INFO) COMMON_PROPERTIES.update(REQUIRED_ON_PROPERTIES) COMMON_PROPERTIES.update(OPTIONAL_ON_PROPERTIES) +ISCSI_PXE_ONEVIEW = 'iscsi_pxe_oneview' +AGENT_PXE_ONEVIEW = 'agent_pxe_oneview' + +# NOTE(xavierr): We don't want to translate NODE_IN_USE_BY_ONEVIEW and +# SERVER_HARDWARE_ALLOCATION_ERROR to avoid inconsistency in the nodes +# caused by updates on translation in upgrades of ironic. +NODE_IN_USE_BY_ONEVIEW = 'node in use by OneView' +SERVER_HARDWARE_ALLOCATION_ERROR = 'server hardware allocation error' + def get_oneview_client(): """Generates an instance of the OneView client. 
@@ -70,7 +78,6 @@ def get_oneview_client(): :returns: an instance of the OneView client """ - oneview_client = client.Client( manager_url=CONF.oneview.manager_url, username=CONF.oneview.username, @@ -140,12 +147,16 @@ def get_oneview_info(node): :enclosure_group_uri: the uri of the enclosure group in OneView :server_profile_template_uri: the uri of the server profile template in OneView - :raises InvalidParameterValue if node capabilities are malformed + :raises OneViewInvalidNodeParameter if node capabilities are malformed """ - capabilities_dict = utils.capabilities_to_dict( - node.properties.get('capabilities', '') - ) + try: + capabilities_dict = utils.capabilities_to_dict( + node.properties.get('capabilities', '') + ) + except exception.InvalidParameterValue as e: + raise exception.OneViewInvalidNodeParameter(node_uuid=node.uuid, + error=e) driver_info = node.driver_info @@ -159,6 +170,8 @@ def get_oneview_info(node): 'server_profile_template_uri': capabilities_dict.get('server_profile_template_uri') or driver_info.get('server_profile_template_uri'), + 'applied_server_profile_uri': + driver_info.get('applied_server_profile_uri'), } return oneview_info @@ -180,25 +193,41 @@ def validate_oneview_resources_compatibility(task): node = task.node node_ports = task.ports + + try: + oneview_info = get_oneview_info(task.node) + except exception.InvalidParameterValue as e: + msg = (_("Error while obtaining OneView info from node " + "%(node_uuid)s. 
Error: %(error)s") % + {'node_uuid': node.uuid, 'error': e}) + raise exception.OneViewError(error=msg) + try: oneview_client = get_oneview_client() - oneview_info = get_oneview_info(node) oneview_client.validate_node_server_hardware( oneview_info, node.properties.get('memory_mb'), node.properties.get('cpus') ) oneview_client.validate_node_server_hardware_type(oneview_info) - oneview_client.check_server_profile_is_applied(oneview_info) - oneview_client.is_node_port_mac_compatible_with_server_profile( - oneview_info, node_ports - ) oneview_client.validate_node_enclosure_group(oneview_info) oneview_client.validate_node_server_profile_template(oneview_info) + + # NOTE(thiagop): Support to pre-allocation will be dropped in 'P' + # release + if is_dynamic_allocation_enabled(task.node): + oneview_client.is_node_port_mac_compatible_with_server_hardware( + oneview_info, node_ports + ) + oneview_client.validate_node_server_profile_template(oneview_info) + else: + oneview_client.check_server_profile_is_applied(oneview_info) + oneview_client.is_node_port_mac_compatible_with_server_profile( + oneview_info, node_ports + ) except oneview_exceptions.OneViewException as oneview_exc: - msg = (_("Error validating node resources with OneView: %s") - % oneview_exc) - LOG.error(msg) + msg = (_("Error validating node resources with OneView: %s") % + oneview_exc) raise exception.OneViewError(error=msg) @@ -252,7 +281,13 @@ def node_has_server_profile(func): """ def inner(*args, **kwargs): task = args[1] - oneview_info = get_oneview_info(task.node) + try: + oneview_info = get_oneview_info(task.node) + except exception.InvalidParameterValue as e: + msg = (_("Error while obtaining OneView info from node " + "%(node_uuid)s. 
Error: %(error)s") % + {'node_uuid': task.node.uuid, 'error': e}) + raise exception.OneViewError(error=msg) oneview_client = get_oneview_client() try: node_has_server_profile = ( @@ -272,3 +307,17 @@ def node_has_server_profile(func): ) return func(*args, **kwargs) return inner + + +def is_dynamic_allocation_enabled(node): + flag = node.driver_info.get('dynamic_allocation') + if flag: + if isinstance(flag, bool): + return flag is True + else: + msg = (_LE("Invalid dynamic_allocation parameter value in " + "node's %(node_uuid)s driver_info. Valid values " + "are booleans true or false.") % + {"node_uuid": node.uuid}) + raise exception.InvalidParameterValue(msg) + return False diff --git a/ironic/drivers/modules/oneview/deploy.py b/ironic/drivers/modules/oneview/deploy.py new file mode 100644 index 000000000..455e6e27e --- /dev/null +++ b/ironic/drivers/modules/oneview/deploy.py @@ -0,0 +1,264 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP. +# Copyright 2016 Universidade Federal de Campina Grande +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +from futurist import periodics +from oslo_log import log as logging +import six + +from ironic.common import exception +from ironic.common.i18n import _LE +from ironic.common.i18n import _LI +from ironic.common import states +from ironic.drivers.modules import agent +from ironic.drivers.modules import iscsi_deploy +from ironic.drivers.modules.oneview import common +from ironic.drivers.modules.oneview import deploy_utils +from ironic import objects + +LOG = logging.getLogger(__name__) + +CONF = common.CONF + + +@six.add_metaclass(abc.ABCMeta) +class OneViewPeriodicTasks(object): + + @abc.abstractproperty + def oneview_driver(self): + pass + + @periodics.periodic(spacing=CONF.oneview.periodic_check_interval, + enabled=CONF.oneview.enable_periodic_tasks) + def _periodic_check_nodes_taken_by_oneview(self, manager, context): + """Checks if nodes in Ironic were taken by OneView users. + + This driver periodic task will check for nodes that were taken by + OneView users while the node is in available state, set the node to + maintenance mode with an appropriate maintenance reason message and + move the node to manageable state. + + :param manager: a ConductorManager instance + :param context: request context + :returns: None. + """ + + filters = { + 'provision_state': states.AVAILABLE, + 'maintenance': False, + 'driver': self.oneview_driver + } + node_iter = manager.iter_nodes(filters=filters) + + for node_uuid, driver in node_iter: + + node = objects.Node.get(context, node_uuid) + + try: + oneview_using = deploy_utils.is_node_in_use_by_oneview(node) + except exception.OneViewError as e: + LOG.error(_LE("Error while determining if node " + "%(node_uuid)s is in use by OneView. 
" + "Error: %(error)s"), + {'node_uuid': node.uuid, 'error': e}) + + if oneview_using: + purpose = (_LI('Updating node %(node_uuid)s in use ' + 'by OneView from %(provision_state)s state ' + 'to %(target_state)s state and maintenance ' + 'mode %(maintenance)s.'), + {'node_uuid': node_uuid, + 'provision_state': states.AVAILABLE, + 'target_state': states.MANAGEABLE, + 'maintenance': True}) + + LOG.info(purpose) + + node.maintenance = True + node.maintenance_reason = common.NODE_IN_USE_BY_ONEVIEW + manager.update_node(context, node) + manager.do_provisioning_action(context, node.uuid, 'manage') + + @periodics.periodic(spacing=CONF.oneview.periodic_check_interval, + enabled=CONF.oneview.enable_periodic_tasks) + def _periodic_check_nodes_freed_by_oneview(self, manager, context): + """Checks if nodes taken by OneView users were freed. + + This driver periodic task will be responsible to poll the nodes that + are in maintenance mode and on manageable state to check if the Server + Profile was removed, indicating that the node was freed by the OneView + user. If so, it'll provide the node, that will pass through the + cleaning process and become available to be provisioned. + + :param manager: a ConductorManager instance + :param context: request context + :returns: None. + """ + + filters = { + 'provision_state': states.MANAGEABLE, + 'maintenance': True, + 'driver': self.oneview_driver + } + node_iter = manager.iter_nodes(fields=['maintenance_reason'], + filters=filters) + for node_uuid, driver, maintenance_reason in node_iter: + + if maintenance_reason == common.NODE_IN_USE_BY_ONEVIEW: + + node = objects.Node.get(context, node_uuid) + + try: + oneview_using = deploy_utils.is_node_in_use_by_oneview( + node + ) + except exception.OneViewError as e: + LOG.error(_LE("Error while determining if node " + "%(node_uuid)s is in use by OneView. 
" + "Error: %(error)s"), + {'node_uuid': node.uuid, 'error': e}) + + if not oneview_using: + purpose = (_LI('Bringing node %(node_uuid)s back from ' + 'use by OneView from %(provision_state)s ' + 'state to %(target_state)s state and ' + 'maintenance mode %(maintenance)s.'), + {'node_uuid': node_uuid, + 'provision_state': states.MANAGEABLE, + 'target_state': states.AVAILABLE, + 'maintenance': False}) + + LOG.info(purpose) + + node.maintenance = False + node.maintenance_reason = None + manager.update_node(context, node) + manager.do_provisioning_action( + context, node.uuid, 'provide' + ) + + @periodics.periodic(spacing=CONF.oneview.periodic_check_interval, + enabled=CONF.oneview.enable_periodic_tasks) + def _periodic_check_nodes_taken_on_cleanfail(self, manager, context): + """Checks failed deploys due to Oneview users taking Server Hardware. + + This last driver periodic task will take care of nodes that would be + caught on a race condition between OneView and a deploy by Ironic. In + such cases, the validation will fail, throwing the node on deploy fail + and, afterwards on clean fail. + + This task will set the node to maintenance mode with a proper reason + message and move it to manageable state, from where the second task + can rescue the node as soon as the Server Profile is removed. + + :param manager: a ConductorManager instance + :param context: request context + :returns: None. 
+ """ + + filters = { + 'provision_state': states.CLEANFAIL, + 'driver': self.oneview_driver + } + node_iter = manager.iter_nodes(fields=['driver_internal_info'], + filters=filters) + + for node_uuid, driver, driver_internal_info in node_iter: + + node_oneview_error = driver_internal_info.get('oneview_error') + if node_oneview_error == common.SERVER_HARDWARE_ALLOCATION_ERROR: + + node = objects.Node.get(context, node_uuid) + + purpose = (_LI('Bringing node %(node_uuid)s back from use ' + 'by OneView from %(provision_state)s state ' + 'to %(target_state)s state and ' + 'maintenance mode %(maintenance)s.'), + {'node_uuid': node_uuid, + 'provision_state': states.CLEANFAIL, + 'target_state': states.MANAGEABLE, + 'maintenance': False}) + + LOG.info(purpose) + + node.maintenance = True + node.maintenance_reason = common.NODE_IN_USE_BY_ONEVIEW + driver_internal_info = node.driver_internal_info + driver_internal_info.pop('oneview_error', None) + node.driver_internal_info = driver_internal_info + manager.update_node(context, node) + manager.do_provisioning_action(context, node.uuid, 'manage') + + +class OneViewIscsiDeploy(iscsi_deploy.ISCSIDeploy, OneViewPeriodicTasks): + """Class for OneView ISCSI deployment driver.""" + + oneview_driver = common.ISCSI_PXE_ONEVIEW + + def get_properties(self): + deploy_utils.get_properties() + + def prepare(self, task): + if common.is_dynamic_allocation_enabled(task.node): + deploy_utils.prepare(task) + super(OneViewIscsiDeploy, self).prepare(task) + + def tear_down(self, task): + if (common.is_dynamic_allocation_enabled(task.node) and + not CONF.conductor.automated_clean): + deploy_utils.tear_down(task) + super(OneViewIscsiDeploy, self).tear_down(task) + + def prepare_cleaning(self, task): + if common.is_dynamic_allocation_enabled(task.node): + deploy_utils.prepare_cleaning(task) + return super(OneViewIscsiDeploy, self).prepare_cleaning(task) + + def tear_down_cleaning(self, task): + if common.is_dynamic_allocation_enabled(task.node): + 
deploy_utils.tear_down_cleaning(task) + return super(OneViewIscsiDeploy, self).tear_down_cleaning(task) + + +class OneViewAgentDeploy(agent.AgentDeploy, OneViewPeriodicTasks): + """Class for OneView Agent deployment driver.""" + + oneview_driver = common.AGENT_PXE_ONEVIEW + + def get_properties(self): + deploy_utils.get_properties() + + def prepare(self, task): + if common.is_dynamic_allocation_enabled(task.node): + deploy_utils.prepare(task) + super(OneViewAgentDeploy, self).prepare(task) + + def tear_down(self, task): + if (common.is_dynamic_allocation_enabled(task.node) and + not CONF.conductor.automated_clean): + deploy_utils.tear_down(task) + super(OneViewAgentDeploy, self).tear_down(task) + + def prepare_cleaning(self, task): + if common.is_dynamic_allocation_enabled(task.node): + deploy_utils.prepare_cleaning(task) + return super(OneViewAgentDeploy, self).prepare_cleaning(task) + + def tear_down_cleaning(self, task): + if common.is_dynamic_allocation_enabled(task.node): + deploy_utils.tear_down_cleaning(task) + return super(OneViewAgentDeploy, self).tear_down_cleaning(task) diff --git a/ironic/drivers/modules/oneview/deploy_utils.py b/ironic/drivers/modules/oneview/deploy_utils.py new file mode 100644 index 000000000..7bb00727e --- /dev/null +++ b/ironic/drivers/modules/oneview/deploy_utils.py @@ -0,0 +1,335 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP. +# Copyright 2016 Universidade Federal de Campina Grande +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +from oslo_utils import importutils + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common.i18n import _LE +from ironic.common.i18n import _LI +from ironic.common.i18n import _LW +from ironic.common import states +from ironic.drivers.modules.oneview import common + +LOG = logging.getLogger(__name__) + +oneview_exception = importutils.try_import('oneview_client.exceptions') +oneview_utils = importutils.try_import('oneview_client.utils') + + +def get_properties(): + return common.COMMON_PROPERTIES + + +def prepare(task): + """Applies Server Profile and update the node when preparing. + + This method is responsible for applying a Server Profile to the Server + Hardware and add the uri of the applied Server Profile in the node's + 'applied_server_profile_uri' field on properties/capabilities. + + :param task: A TaskManager object + :raises InstanceDeployFailure: If the node doesn't have the needed OneView + informations, if Server Hardware is in use by an OneView user, or + if the Server Profile can't be applied. + + """ + if task.node.provision_state == states.DEPLOYING: + try: + instance_display_name = task.node.instance_info.get('display_name') + instance_uuid = task.node.instance_uuid + server_profile_name = ( + "%(instance_name)s [%(instance_uuid)s]" % + {"instance_name": instance_display_name, + "instance_uuid": instance_uuid} + ) + _allocate_server_hardware_to_ironic(task.node, server_profile_name) + except exception.OneViewError as e: + raise exception.InstanceDeployFailure(node=task.node.uuid, + reason=e) + + +def tear_down(task): + """Remove Server profile and update the node when tear down. 
+ + This method is responsible for power a Server Hardware off, remove a Server + Profile from the Server Hardware and remove the uri of the applied Server + Profile from the node's 'applied_server_profile_uri' in + properties/capabilities. + + :param task: A TaskManager object + :raises InstanceDeployFailure: If node has no uri of applied Server + Profile, or if some error occur while deleting Server Profile. + + """ + try: + _deallocate_server_hardware_from_ironic(task.node) + except exception.OneViewError as e: + raise exception.InstanceDeployFailure(node=task.node.uuid, reason=e) + + +def prepare_cleaning(task): + """Applies Server Profile and update the node when preparing cleaning. + + This method is responsible for applying a Server Profile to the Server + Hardware and add the uri of the applied Server Profile in the node's + 'applied_server_profile_uri' field on properties/capabilities. + + :param task: A TaskManager object + :raises NodeCleaningFailure: If the node doesn't have the needed OneView + informations, if Server Hardware is in use by an OneView user, or + if the Server Profile can't be applied. + + """ + try: + server_profile_name = "Ironic Cleaning [%s]" % task.node.uuid + _allocate_server_hardware_to_ironic(task.node, server_profile_name) + except exception.OneViewError as e: + oneview_error = common.SERVER_HARDWARE_ALLOCATION_ERROR + driver_internal_info = task.node.driver_internal_info + driver_internal_info['oneview_error'] = oneview_error + task.node.driver_internal_info = driver_internal_info + task.node.save() + raise exception.NodeCleaningFailure(node=task.node.uuid, + reason=e) + + +def tear_down_cleaning(task): + """Remove Server profile and update the node when tear down cleaning. + + This method is responsible for power a Server Hardware off, remove a Server + Profile from the Server Hardware and remove the uri of the applied Server + Profile from the node's 'applied_server_profile_uri' in + properties/capabilities. 
+ + :param task: A TaskManager object + :raises NodeCleaningFailure: If node has no uri of applied Server Profile, + or if some error occur while deleting Server Profile. + + """ + try: + _deallocate_server_hardware_from_ironic(task.node) + except exception.OneViewError as e: + raise exception.NodeCleaningFailure(node=task.node.uuid, reason=e) + + +def is_node_in_use_by_oneview(node): + """Check if node is in use by OneView user. + + :param node: an ironic node object + :returns: Boolean value. True if node is in use by OneView, + False otherwise. + :raises OneViewError: if not possible to get OneView's informations + for the given node, if not possible to retrieve Server Hardware + from OneView. + + """ + try: + oneview_info = common.get_oneview_info(node) + except exception.InvalidParameterValue as e: + msg = (_("Error while obtaining OneView info from node " + "%(node_uuid)s. Error: %(error)s") % + {'node_uuid': node.uuid, 'error': e}) + raise exception.OneViewError(error=msg) + + oneview_client = common.get_oneview_client() + + sh_uuid = oneview_utils.get_uuid_from_uri( + oneview_info.get("server_hardware_uri") + ) + + try: + server_hardware = oneview_client.get_server_hardware_by_uuid( + sh_uuid + ) + except oneview_exception.OneViewResourceNotFoundError as e: + msg = (_("Error while obtaining Server Hardware from node " + "%(node_uuid)s. 
Error: %(error)s") % + {'node_uuid': node.uuid, 'error': e}) + raise exception.OneViewError(error=msg) + + applied_sp_uri = ( + node.driver_info.get('applied_server_profile_uri') + ) + + # Check if Profile exists in Oneview and it is different of the one + # applied by ironic + if (server_hardware.server_profile_uri not in (None, '') and + applied_sp_uri != server_hardware.server_profile_uri): + + LOG.warning(_LW("Node %s is already in use by OneView."), + node.uuid) + + return True + + else: + LOG.debug(_( + "Hardware %(hardware_uri)s is free for use by " + "ironic on node %(node_uuid)s."), + {"hardware_uri": server_hardware.uri, + "node_uuid": node.uuid}) + + return False + + +def _add_applied_server_profile_uri_field(node, applied_profile): + """Adds the applied Server Profile uri to a node. + + :param node: an ironic node object + + """ + driver_info = node.driver_info + driver_info['applied_server_profile_uri'] = applied_profile.uri + node.driver_info = driver_info + node.save() + + +def _del_applied_server_profile_uri_field(node): + """Delete the applied Server Profile uri from a node if it exists. + + :param node: an ironic node object + + """ + driver_info = node.driver_info + driver_info.pop('applied_server_profile_uri', None) + node.driver_info = driver_info + node.save() + + +def _allocate_server_hardware_to_ironic(node, server_profile_name): + """Allocate Server Hardware to ironic. + + :param node: an ironic node object + :param server_profile_name: a formatted string with the Server Profile + name + :raises OneViewError: if an error occurs while allocating the Server + Hardware to ironic + + """ + node_in_use_by_oneview = is_node_in_use_by_oneview(node) + + if not node_in_use_by_oneview: + + try: + oneview_info = common.get_oneview_info(node) + except exception.InvalidParameterValue as e: + msg = (_("Error while obtaining OneView info from node " + "%(node_uuid)s. 
Error: %(error)s") % + {'node_uuid': node.uuid, 'error': e}) + raise exception.OneViewError(error=msg) + + applied_sp_uri = node.driver_info.get('applied_server_profile_uri') + + sh_uuid = oneview_utils.get_uuid_from_uri( + oneview_info.get("server_hardware_uri") + ) + spt_uuid = oneview_utils.get_uuid_from_uri( + oneview_info.get("server_profile_template_uri") + ) + oneview_client = common.get_oneview_client() + server_hardware = oneview_client.get_server_hardware_by_uuid(sh_uuid) + + # Don't have Server Profile on OneView but has + # `applied_server_profile_uri` on driver_info + if (server_hardware.server_profile_uri in (None, '') and + applied_sp_uri is not (None, '')): + + _del_applied_server_profile_uri_field(node) + LOG.info(_LI( + "Inconsistent 'applied_server_profile_uri' parameter " + "value in driver_info. There is no Server Profile " + "applied to node %(node_uuid)s. Value deleted."), + {"node_uuid": node.uuid} + ) + + # applied_server_profile_uri exists and is equal to Server profile + # applied on Hardware. Do not apply again. + if (applied_sp_uri and server_hardware.server_profile_uri and + server_hardware.server_profile_uri == applied_sp_uri): + LOG.info(_LI( + "The Server Profile %(applied_sp_uri)s was already applied " + "by ironic on node %(node_uuid)s. 
Reusing."), + {"node_uuid": node.uuid, "applied_sp_uri": applied_sp_uri} + ) + return + + try: + applied_profile = oneview_client.clone_template_and_apply( + server_profile_name, sh_uuid, spt_uuid + ) + _add_applied_server_profile_uri_field(node, applied_profile) + + LOG.info( + _LI("Server Profile %(server_profile_uuid)s was successfully" + " applied to node %(node_uuid)s."), + {"node_uuid": node.uuid, + "server_profile_uuid": applied_profile.uri} + ) + + except oneview_exception.OneViewServerProfileAssignmentError as e: + LOG.error(_LE("An error occurred during allocating server " + "hardware to ironic during prepare: %s"), e) + raise exception.OneViewError(error=e) + else: + msg = (_("Node %s is already in use by OneView.") % + node.uuid) + + raise exception.OneViewError(error=msg) + + +def _deallocate_server_hardware_from_ironic(node): + """Deallocate Server Hardware from ironic. + + :param node: an ironic node object + :raises OneViewError: if an error occurs while deallocating the Server + Hardware to ironic + + """ + try: + oneview_info = common.get_oneview_info(node) + except exception.InvalidParameterValue as e: + msg = (_("Error while obtaining OneView info from node " + "%(node_uuid)s. Error: %(error)s") % + {'node_uuid': node.uuid, 'error': e}) + raise exception.OneViewError(error=msg) + + oneview_client = common.get_oneview_client() + oneview_client.power_off(oneview_info) + + applied_sp_uuid = oneview_utils.get_uuid_from_uri( + oneview_info.get('applied_server_profile_uri') + ) + + try: + oneview_client.delete_server_profile(applied_sp_uuid) + _del_applied_server_profile_uri_field(node) + + LOG.info( + _LI("Server Profile %(server_profile_uuid)s was successfully" + " deleted from node %(node_uuid)s." + ), + {"node_uuid": node.uuid, "server_profile_uuid": applied_sp_uuid} + ) + except oneview_exception.OneViewException as e: + + msg = (_("Error while deleting applied Server Profile from node " + "%(node_uuid)s. 
Error: %(error)s") % + {'node_uuid': node.uuid, 'error': e}) + + raise exception.OneViewError( + node=node.uuid, reason=msg + ) diff --git a/ironic/drivers/modules/oneview/management.py b/ironic/drivers/modules/oneview/management.py index 790246fa9..2f4832340 100644 --- a/ironic/drivers/modules/oneview/management.py +++ b/ironic/drivers/modules/oneview/management.py @@ -97,7 +97,6 @@ class OneViewManagement(base.ManagementInterface): if the server is already powered on. :raises: OneViewError if the communication with OneView fails """ - oneview_info = common.get_oneview_info(task.node) if device not in self.get_supported_boot_devices(task): @@ -115,7 +114,6 @@ class OneViewManagement(base.ManagementInterface): "Error setting boot device on OneView. Error: %s") % oneview_exc ) - LOG.error(msg) raise exception.OneViewError(error=msg) @common.node_has_server_profile @@ -135,7 +133,6 @@ class OneViewManagement(base.ManagementInterface): :raises: InvalidParameterValue if the boot device is unknown :raises: OneViewError if the communication with OneView fails """ - oneview_info = common.get_oneview_info(task.node) try: @@ -146,7 +143,6 @@ class OneViewManagement(base.ManagementInterface): "Error getting boot device from OneView. 
Error: %s") % oneview_exc ) - LOG.error(msg) raise exception.OneViewError(msg) primary_device = boot_order[0] diff --git a/ironic/drivers/modules/oneview/power.py b/ironic/drivers/modules/oneview/power.py index f53fbfdbf..17801e182 100644 --- a/ironic/drivers/modules/oneview/power.py +++ b/ironic/drivers/modules/oneview/power.py @@ -69,8 +69,8 @@ class OneViewPower(base.PowerInterface): :raises: OneViewError if fails to retrieve power state of OneView resource """ - oneview_info = common.get_oneview_info(task.node) + oneview_client = common.get_oneview_client() try: power_state = oneview_client.get_node_power_state(oneview_info) @@ -95,8 +95,8 @@ class OneViewPower(base.PowerInterface): :raises: PowerStateFailure if the power couldn't be set to power_state. :raises: OneViewError if OneView fails setting the power state. """ - oneview_info = common.get_oneview_info(task.node) + oneview_client = common.get_oneview_client() LOG.debug('Setting power state of node %(node_uuid)s to ' diff --git a/ironic/drivers/oneview.py b/ironic/drivers/oneview.py index 8da4537b1..4d5992d2b 100644 --- a/ironic/drivers/oneview.py +++ b/ironic/drivers/oneview.py @@ -1,4 +1,3 @@ -# # Copyright 2015 Hewlett Packard Development Company, LP # Copyright 2015 Universidade Federal de Campina Grande # @@ -22,9 +21,9 @@ from oslo_utils import importutils from ironic.common import exception from ironic.common.i18n import _ from ironic.drivers import base -from ironic.drivers.modules import agent from ironic.drivers.modules import iscsi_deploy from ironic.drivers.modules.oneview import common +from ironic.drivers.modules.oneview import deploy from ironic.drivers.modules.oneview import management from ironic.drivers.modules.oneview import power from ironic.drivers.modules.oneview import vendor @@ -32,14 +31,12 @@ from ironic.drivers.modules import pxe class AgentPXEOneViewDriver(base.BaseDriver): - """Agent + OneView driver. + """OneViewDriver using OneViewClient interface. 
- This driver implements the `core` functionality, combining - :class:`ironic.drivers.ov.OVPower` for power on/off and reboot of virtual - machines, with :class:`ironic.driver.pxe.PXEBoot` for booting deploy kernel - and ramdisk and :class:`ironic.driver.iscsi_deploy.ISCSIDeploy` for image - deployment. Implementations are in those respective classes; this class is - merely the glue between them. + This driver implements the `core` functionality using + :class:ironic.drivers.modules.oneview.power.OneViewPower for power + management. And + :class:ironic.drivers.modules.oneview.deploy.OneViewAgentDeploy for deploy. """ def __init__(self): @@ -56,19 +53,17 @@ class AgentPXEOneViewDriver(base.BaseDriver): self.power = power.OneViewPower() self.management = management.OneViewManagement() self.boot = pxe.PXEBoot() - self.deploy = agent.AgentDeploy() + self.deploy = deploy.OneViewAgentDeploy() self.vendor = vendor.AgentVendorInterface() class ISCSIPXEOneViewDriver(base.BaseDriver): - """PXE + OneView driver. + """OneViewDriver using OneViewClient interface. - This driver implements the `core` functionality, combining - :class:`ironic.drivers.ov.OVPower` for power on/off and reboot of virtual - machines, with :class:`ironic.driver.pxe.PXEBoot` for booting deploy kernel - and ramdisk and :class:`ironic.driver.iscsi_deploy.ISCSIDeploy` for image - deployment. Implementations are in those respective classes; this class is - merely the glue between them. + This driver implements the `core` functionality using + :class:ironic.drivers.modules.oneview.power.OneViewPower for power + management. And + :class:ironic.drivers.modules.oneview.deploy.OneViewIscsiDeploy for deploy. 
""" def __init__(self): @@ -85,5 +80,5 @@ class ISCSIPXEOneViewDriver(base.BaseDriver): self.power = power.OneViewPower() self.management = management.OneViewManagement() self.boot = pxe.PXEBoot() - self.deploy = iscsi_deploy.ISCSIDeploy() + self.deploy = deploy.OneViewIscsiDeploy() self.vendor = iscsi_deploy.VendorPassthru() diff --git a/ironic/tests/unit/drivers/modules/oneview/test_common.py b/ironic/tests/unit/drivers/modules/oneview/test_common.py index 81d1fa111..928f3a574 100644 --- a/ironic/tests/unit/drivers/modules/oneview/test_common.py +++ b/ironic/tests/unit/drivers/modules/oneview/test_common.py @@ -1,5 +1,3 @@ -# -*- encoding: utf-8 -*- -# # Copyright 2015 Hewlett Packard Development Company, LP # Copyright 2015 Universidade Federal de Campina Grande # @@ -115,6 +113,7 @@ class OneViewCommonTestCase(db_base.DbTestCase): 'server_hardware_type_uri': 'fake_sht_uri', 'enclosure_group_uri': 'fake_eg_uri', 'server_profile_template_uri': 'fake_spt_uri', + 'applied_server_profile_uri': None, } self.assertEqual( @@ -124,7 +123,6 @@ class OneViewCommonTestCase(db_base.DbTestCase): def test_get_oneview_info_missing_spt(self): driver_info = db_utils.get_test_oneview_driver_info() - properties = db_utils.get_test_oneview_properties() properties["capabilities"] = ("server_hardware_type_uri:fake_sht_uri," "enclosure_group_uri:fake_eg_uri") @@ -138,6 +136,7 @@ class OneViewCommonTestCase(db_base.DbTestCase): 'server_hardware_type_uri': 'fake_sht_uri', 'enclosure_group_uri': 'fake_eg_uri', 'server_profile_template_uri': None, + 'applied_server_profile_uri': None, } self.assertEqual( @@ -165,6 +164,7 @@ class OneViewCommonTestCase(db_base.DbTestCase): 'server_hardware_type_uri': 'fake_sht_uri', 'enclosure_group_uri': 'fake_eg_uri', 'server_profile_template_uri': 'fake_spt_uri', + 'applied_server_profile_uri': None, } self.assertEqual( @@ -172,6 +172,20 @@ class OneViewCommonTestCase(db_base.DbTestCase): common.get_oneview_info(incomplete_node) ) + def 
test_get_oneview_info_malformed_capabilities(self): + driver_info = db_utils.get_test_oneview_driver_info() + + del driver_info["server_hardware_uri"] + properties = db_utils.get_test_oneview_properties() + properties["capabilities"] = "anything,000" + + self.node.driver_info = driver_info + self.node.properties = properties + + self.assertRaises(exception.OneViewInvalidNodeParameter, + common.get_oneview_info, + self.node) + # TODO(gabriel-bezerra): Remove this after Mitaka @mock.patch.object(common, 'LOG', autospec=True) def test_deprecated_spt_in_driver_info(self, log_mock): @@ -194,6 +208,7 @@ class OneViewCommonTestCase(db_base.DbTestCase): 'server_hardware_type_uri': 'fake_sht_uri', 'enclosure_group_uri': 'fake_eg_uri', 'server_profile_template_uri': 'fake_spt_uri', + 'applied_server_profile_uri': None, } self.assertEqual( @@ -226,6 +241,7 @@ class OneViewCommonTestCase(db_base.DbTestCase): 'server_hardware_type_uri': 'fake_sht_uri', 'enclosure_group_uri': 'fake_eg_uri', 'server_profile_template_uri': 'fake_spt_uri', + 'applied_server_profile_uri': None, } self.assertEqual( @@ -281,8 +297,9 @@ class OneViewCommonTestCase(db_base.DbTestCase): @mock.patch.object(common, 'get_oneview_client', spec_set=True, autospec=True) - def test_validate_oneview_resources_compatibility(self, - mock_get_ov_client): + def test_validate_oneview_resources_compatibility( + self, mock_get_ov_client + ): oneview_client = mock_get_ov_client() with task_manager.acquire(self.context, self.node.uuid) as task: common.validate_oneview_resources_compatibility(task) @@ -290,12 +307,123 @@ class OneViewCommonTestCase(db_base.DbTestCase): oneview_client.validate_node_server_hardware.called) self.assertTrue( oneview_client.validate_node_server_hardware_type.called) + self.assertTrue( + oneview_client.validate_node_enclosure_group.called) + self.assertTrue( + oneview_client.validate_node_server_profile_template.called) self.assertTrue( oneview_client.check_server_profile_is_applied.called) 
self.assertTrue( - oneview_client.is_node_port_mac_compatible_with_server_profile. - called) + oneview_client. + is_node_port_mac_compatible_with_server_profile.called) + self.assertFalse( + oneview_client. + is_node_port_mac_compatible_with_server_hardware.called) + self.assertFalse( + oneview_client.validate_spt_primary_boot_connection.called) + + @mock.patch.object(common, 'get_oneview_client', spec_set=True, + autospec=True) + def test_validate_oneview_resources_compatibility_dynamic_allocation( + self, mock_get_ov_client + ): + """Validate compatibility of resources for Dynamic Allocation model. + + 1) Set 'dynamic_allocation' flag as True on node's driver_info + 2) Check validate_node_server_hardware method is called + 3) Check validate_node_server_hardware_type method is called + 4) Check validate_node_enclosure_group method is called + 5) Check validate_node_server_profile_template method is called + 6) Check is_node_port_mac_compatible_with_server_hardware method + is called + 7) Check validate_node_server_profile_template method is called + 8) Check check_server_profile_is_applied method is not called + 9) Check is_node_port_mac_compatible_with_server_profile method is + not called + + """ + oneview_client = mock_get_ov_client() + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['dynamic_allocation'] = True + task.node.driver_info = driver_info + + common.validate_oneview_resources_compatibility(task) + self.assertTrue( + oneview_client.validate_node_server_hardware.called) + self.assertTrue( + oneview_client.validate_node_server_hardware_type.called) self.assertTrue( oneview_client.validate_node_enclosure_group.called) self.assertTrue( oneview_client.validate_node_server_profile_template.called) + self.assertTrue( + oneview_client. 
+ is_node_port_mac_compatible_with_server_hardware.called) + self.assertTrue( + oneview_client.validate_node_server_profile_template.called) + self.assertFalse( + oneview_client.check_server_profile_is_applied.called) + self.assertFalse( + oneview_client. + is_node_port_mac_compatible_with_server_profile.called) + + def test_is_dynamic_allocation_enabled(self): + """Ensure Dynamic Allocation is enabled when flag is True. + + 1) Set 'dynamic_allocation' flag as True on node's driver_info + 2) Check Dynamic Allocation is enabled for the given node + + """ + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['dynamic_allocation'] = True + task.node.driver_info = driver_info + + self.assertTrue( + common.is_dynamic_allocation_enabled(task.node) + ) + + def test_is_dynamic_allocation_enabled_false(self): + """Ensure Dynamic Allocation is disabled when flag is False. + + 1) Set 'dynamic_allocation' flag as False on node's driver_info + 2) Check Dynamic Allocation is disabled for the given node + + """ + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['dynamic_allocation'] = False + task.node.driver_info = driver_info + + self.assertFalse( + common.is_dynamic_allocation_enabled(task.node) + ) + + def test_is_dynamic_allocation_enabled_none(self): + """Ensure Dynamic Allocation is disabled when flag is None. + + 1) Set 'dynamic_allocation' flag as None on node's driver_info + 2) Check Dynamic Allocation is disabled for the given node + + """ + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['dynamic_allocation'] = None + task.node.driver_info = driver_info + + self.assertFalse( + common.is_dynamic_allocation_enabled(task.node) + ) + + def test_is_dynamic_allocation_enabled_without_flag(self): + """Ensure Dynamic Allocation is disabled when node doesn't have flag. 
+ + 1) Create a node without 'dynamic_allocation' flag + 2) Check Dynamic Allocation is disabled for the given node + + """ + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertFalse( + common.is_dynamic_allocation_enabled(task.node) + ) diff --git a/ironic/tests/unit/drivers/modules/oneview/test_deploy.py b/ironic/tests/unit/drivers/modules/oneview/test_deploy.py new file mode 100644 index 000000000..e4df3b44b --- /dev/null +++ b/ironic/tests/unit/drivers/modules/oneview/test_deploy.py @@ -0,0 +1,144 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP. +# Copyright 2016 Universidade Federal de Campina Grande +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from oslo_utils import importutils + +from ironic.common import driver_factory +from ironic.drivers.modules.oneview import common +from ironic.drivers.modules.oneview import deploy +from ironic.drivers.modules.oneview import deploy_utils +from ironic import objects +from ironic.tests.unit.conductor import mgr_utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.objects import utils as obj_utils + +oneview_models = importutils.try_import('oneview_client.models') + + +@mock.patch.object(common, 'get_oneview_client', spec_set=True, autospec=True) +class OneViewPeriodicTasks(db_base.DbTestCase): + + def setUp(self): + super(OneViewPeriodicTasks, self).setUp() + self.config(manager_url='https://1.2.3.4', group='oneview') + self.config(username='user', group='oneview') + self.config(password='password', group='oneview') + + mgr_utils.mock_the_extension_manager(driver='fake_oneview') + self.driver = driver_factory.get_driver('fake_oneview') + + self.node = obj_utils.create_test_node( + self.context, driver='fake_oneview', + properties=db_utils.get_test_oneview_properties(), + driver_info=db_utils.get_test_oneview_driver_info(), + ) + self.info = common.get_oneview_info(self.node) + + @mock.patch.object(objects.Node, 'get') + @mock.patch.object(deploy_utils, 'is_node_in_use_by_oneview') + def test__periodic_check_nodes_taken_by_oneview( + self, mock_is_node_in_use_by_oneview, mock_get_node, + mock_get_ov_client + ): + + manager = mock.MagicMock( + spec=['iter_nodes', 'update_node', 'do_provisioning_action'] + ) + + manager.iter_nodes.return_value = [ + (self.node.uuid, 'fake_oneview') + ] + + mock_get_node.return_value = self.node + mock_is_node_in_use_by_oneview.return_value = True + + class OneViewDriverDeploy(deploy.OneViewPeriodicTasks): + oneview_driver = 'fake_oneview' + + oneview_driver_deploy = OneViewDriverDeploy() + 
oneview_driver_deploy._periodic_check_nodes_taken_by_oneview( + manager, self.context + ) + self.assertTrue(manager.update_node.called) + self.assertTrue(manager.do_provisioning_action.called) + self.assertTrue(self.node.maintenance) + self.assertEqual(common.NODE_IN_USE_BY_ONEVIEW, + self.node.maintenance_reason) + + @mock.patch.object(deploy_utils, 'is_node_in_use_by_oneview') + def test__periodic_check_nodes_freed_by_oneview( + self, mock_is_node_in_use_by_oneview, mock_get_ov_client + ): + + manager = mock.MagicMock( + spec=['iter_nodes', 'update_node', 'do_provisioning_action'] + ) + + manager.iter_nodes.return_value = [ + (self.node.uuid, 'fake_oneview', + common.NODE_IN_USE_BY_ONEVIEW) + ] + + mock_is_node_in_use_by_oneview.return_value = False + + class OneViewDriverDeploy(deploy.OneViewPeriodicTasks): + oneview_driver = 'fake_oneview' + + oneview_driver_deploy = OneViewDriverDeploy() + oneview_driver_deploy._periodic_check_nodes_freed_by_oneview( + manager, self.context + ) + self.assertTrue(manager.update_node.called) + self.assertTrue(manager.do_provisioning_action.called) + self.assertFalse(self.node.maintenance) + self.assertIsNone(self.node.maintenance_reason) + + @mock.patch.object(objects.Node, 'get') + def test__periodic_check_nodes_taken_on_cleanfail( + self, mock_get_node, mock_get_ov_client + ): + + driver_internal_info = { + 'oneview_error': common.SERVER_HARDWARE_ALLOCATION_ERROR + } + + manager = mock.MagicMock( + spec=['iter_nodes', 'update_node', 'do_provisioning_action'] + ) + + manager.iter_nodes.return_value = [ + (self.node.uuid, 'fake_oneview', driver_internal_info) + ] + + self.node.driver_internal_info = driver_internal_info + mock_get_node.return_value = self.node + + class OneViewDriverDeploy(deploy.OneViewPeriodicTasks): + oneview_driver = 'fake_oneview' + + oneview_driver_deploy = OneViewDriverDeploy() + oneview_driver_deploy._periodic_check_nodes_taken_on_cleanfail( + manager, self.context + ) + 
self.assertTrue(manager.update_node.called) + self.assertTrue(manager.do_provisioning_action.called) + self.assertTrue(self.node.maintenance) + self.assertEqual(common.NODE_IN_USE_BY_ONEVIEW, + self.node.maintenance_reason) + self.assertDictEqual({}, self.node.driver_internal_info) diff --git a/ironic/tests/unit/drivers/modules/oneview/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/oneview/test_deploy_utils.py new file mode 100644 index 000000000..695ed7fb1 --- /dev/null +++ b/ironic/tests/unit/drivers/modules/oneview/test_deploy_utils.py @@ -0,0 +1,349 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP. +# Copyright 2016 Universidade Federal de Campina Grande +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from oslo_utils import importutils + +from ironic.common import driver_factory +from ironic.common import exception +from ironic.common import states +from ironic.conductor import task_manager +from ironic.drivers.modules.oneview import common +from ironic.drivers.modules.oneview import deploy_utils +from ironic import objects +from ironic.tests.unit.conductor import mgr_utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.objects import utils as obj_utils + +oneview_models = importutils.try_import('oneview_client.models') + + +@mock.patch.object(common, 'get_oneview_client', spec_set=True, autospec=True) +class OneViewDeployUtilsTestCase(db_base.DbTestCase): + + def setUp(self): + super(OneViewDeployUtilsTestCase, self).setUp() + self.config(manager_url='https://1.2.3.4', group='oneview') + self.config(username='user', group='oneview') + self.config(password='password', group='oneview') + + mgr_utils.mock_the_extension_manager(driver='fake_oneview') + self.driver = driver_factory.get_driver('fake_oneview') + + self.node = obj_utils.create_test_node( + self.context, driver='fake_oneview', + properties=db_utils.get_test_oneview_properties(), + driver_info=db_utils.get_test_oneview_driver_info(), + ) + self.info = common.get_oneview_info(self.node) + + # Tests for prepare + def test_prepare_node_is_in_use_by_oneview(self, mock_get_ov_client): + """`prepare` behavior when the node already has a Profile on OneView. 
+ + """ + oneview_client = mock_get_ov_client() + fake_server_hardware = oneview_models.ServerHardware() + fake_server_hardware.server_profile_uri = "/any/sp_uri" + oneview_client.get_server_hardware.return_value = fake_server_hardware + + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['dynamic_allocation'] = True + task.node.driver_info = driver_info + task.node.provision_state = states.DEPLOYING + self.assertRaises( + exception.InstanceDeployFailure, + deploy_utils.prepare, + task + ) + + @mock.patch.object(objects.Node, 'save') + def test_prepare_node_is_successfuly_allocated_to_ironic( + self, mock_node_save, mock_get_ov_client + ): + """`prepare` behavior when the node is free from OneView standpoint. + + """ + ov_client = mock_get_ov_client() + fake_sh = oneview_models.ServerHardware() + fake_sh.server_profile_uri = None + ov_client.get_server_hardware_by_uuid.return_value = fake_sh + with task_manager.acquire(self.context, self.node.uuid) as task: + task.node.provision_state = states.DEPLOYING + deploy_utils.prepare(task) + self.assertTrue(ov_client.clone_template_and_apply.called) + self.assertTrue(ov_client.get_server_profile_from_hardware) + + # Tests for tear_down + def test_tear_down(self, mock_get_ov_client): + """`tear_down` behavior when node already has Profile applied + + """ + ov_client = mock_get_ov_client() + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['applied_server_profile_uri'] = \ + '/rest/server-profiles/1234556789' + task.node.driver_info = driver_info + + self.assertTrue( + 'applied_server_profile_uri' in task.node.driver_info + ) + deploy_utils.tear_down(task) + self.assertFalse( + 'applied_server_profile_uri' in task.node.driver_info + ) + self.assertTrue( + ov_client.delete_server_profile.called + ) + + # Tests for prepare_cleaning + @mock.patch.object(objects.Node, 'save') + def 
test_prepare_cleaning_when_node_does_not_have_sp_applied( + self, mock_node_save, mock_get_ov_client + ): + """`prepare_cleaning` behavior when node is free + + """ + ov_client = mock_get_ov_client() + fake_sh = oneview_models.ServerHardware() + fake_sh.server_profile_uri = None + ov_client.get_server_hardware_by_uuid.return_value = fake_sh + with task_manager.acquire(self.context, self.node.uuid) as task: + deploy_utils.prepare_cleaning(task) + self.assertTrue(ov_client.clone_template_and_apply.called) + + @mock.patch.object(objects.Node, 'save') + def test_prepare_cleaning_when_node_has_sp_applied( + self, mock_node_save, mock_get_ov_client + ): + """`prepare_cleaning` behavior when node already has Profile applied + + """ + ov_client = mock_get_ov_client() + fake_sh = oneview_models.ServerHardware() + fake_sh.server_profile_uri = 'same/sp_applied' + ov_client.get_server_hardware_by_uuid.return_value = fake_sh + + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['applied_server_profile_uri'] = 'same/sp_applied' + task.node.driver_info = driver_info + + deploy_utils.prepare_cleaning(task) + self.assertFalse(ov_client.clone_template_and_apply.called) + + def test_prepare_cleaning_node_is_in_use_by_oneview( + self, mock_get_ov_client + ): + """`prepare_cleaning` behavior when node has Server Profile on OneView + + """ + oneview_client = mock_get_ov_client() + fake_server_hardware = oneview_models.ServerHardware() + fake_server_hardware.server_profile_uri = "/any/sp_uri" + oneview_client.get_server_hardware.return_value = fake_server_hardware + + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['dynamic_allocation'] = True + task.node.driver_info = driver_info + task.node.provision_state = states.DEPLOYING + self.assertRaises( + exception.NodeCleaningFailure, + deploy_utils.prepare_cleaning, + task + ) + + # Tests for 
tear_down_cleaning + def test_tear_down_cleaning(self, mock_get_ov_client): + """Checks if Server Profile was deleted and its uri removed + + """ + ov_client = mock_get_ov_client() + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['applied_server_profile_uri'] = \ + '/rest/server-profiles/1234556789' + task.node.driver_info = driver_info + + self.assertIn('applied_server_profile_uri', task.node.driver_info) + deploy_utils.tear_down_cleaning(task) + self.assertNotIn('applied_server_profile_uri', + task.node.driver_info) + self.assertTrue(ov_client.delete_server_profile.called) + + # Tests for is_node_in_use_by_oneview + def test_is_node_in_use_by_oneview(self, mock_get_ov_client): + """Node has a Server Profile applied by a third party user. + + """ + fake_server_hardware = oneview_models.ServerHardware() + fake_server_hardware.server_profile_uri = "/any/sp_uri" + + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['dynamic_allocation'] = True + task.node.driver_info = driver_info + self.assertTrue( + deploy_utils.is_node_in_use_by_oneview(task.node) + ) + + def test_is_node_in_use_by_oneview_no_server_profile( + self, mock_get_ov_client + ): + """Node has no Server Profile. + + """ + fake_sh = oneview_models.ServerHardware() + fake_sh.server_profile_uri = None + + ov_client = mock_get_ov_client.return_value + ov_client.get_server_hardware_by_uuid.return_value = fake_sh + + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertFalse( + deploy_utils.is_node_in_use_by_oneview(task.node) + ) + + def test_is_node_in_use_by_oneview_same_server_profile_applied( + self, mock_get_ov_client + ): + """Node's Server Profile uri is the same applied by ironic. 
+ + """ + fake_sh = oneview_models.ServerHardware() + fake_sh.server_profile_uri = 'same/applied_sp_uri/' + + ov_client = mock_get_ov_client.return_value + ov_client.get_server_hardware_by_uuid.return_value = fake_sh + + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['applied_server_profile_uri'] = 'same/applied_sp_uri/' + task.node.driver_info = driver_info + self.assertFalse( + deploy_utils.is_node_in_use_by_oneview(task.node) + ) + + # Tests for _add_applied_server_profile_uri_field + def test__add_applied_server_profile_uri_field(self, mock_get_ov_client): + """Checks if applied_server_profile_uri was added to driver_info. + + """ + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + task.node.driver_info = driver_info + fake_server_profile = oneview_models.ServerProfile() + fake_server_profile.uri = 'any/applied_sp_uri/' + + self.assertNotIn('applied_server_profile_uri', + task.node.driver_info) + deploy_utils._add_applied_server_profile_uri_field( + task.node, + fake_server_profile + ) + self.assertIn('applied_server_profile_uri', task.node.driver_info) + + # Tests for _del_applied_server_profile_uri_field + def test__del_applied_server_profile_uri_field(self, mock_get_ov_client): + """Checks if applied_server_profile_uri was removed from driver_info. 
+ + """ + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['applied_server_profile_uri'] = 'any/applied_sp_uri/' + task.node.driver_info = driver_info + + self.assertIn('applied_server_profile_uri', task.node.driver_info) + deploy_utils._del_applied_server_profile_uri_field(task.node) + self.assertNotIn('applied_server_profile_uri', + task.node.driver_info) + + # Tests for _allocate_server_hardware_to_ironic + @mock.patch.object(objects.Node, 'save') + def test__allocate_server_hardware_to_ironic( + self, mock_node_save, mock_get_ov_client + ): + """Checks if a Server Profile was created and its uri is in driver_info. + + """ + ov_client = mock_get_ov_client.return_value + fake_sh = oneview_models.ServerHardware() + fake_sh.server_profile_uri = None + ov_client.get_server_hardware_by_uuid.return_value = fake_sh + mock_get_ov_client.return_value = ov_client + + with task_manager.acquire(self.context, self.node.uuid) as task: + deploy_utils._allocate_server_hardware_to_ironic( + task.node, 'serverProfileName' + ) + self.assertTrue(ov_client.clone_template_and_apply.called) + self.assertIn('applied_server_profile_uri', task.node.driver_info) + + @mock.patch.object(objects.Node, 'save') + @mock.patch.object(deploy_utils, + '_del_applied_server_profile_uri_field') + def test__allocate_server_hardware_to_ironic_node_has_server_profile( + self, mock_delete_applied_sp, mock_node_save, mock_get_ov_client + ): + """Tests server profile allocation when applied_server_profile_uri exists. + + This test considers that no Server Profile is applied on the Server + Hardware but the applied_server_profile_uri remained on the node. Thus, + the conductor should remove the value and apply a new server profile to + use the node. 
+ """ + ov_client = mock_get_ov_client.return_value + fake_sh = oneview_models.ServerHardware() + fake_sh.server_profile_uri = None + ov_client.get_server_hardware_by_uuid.return_value = fake_sh + mock_get_ov_client.return_value = ov_client + + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['applied_server_profile_uri'] = 'any/applied_sp_uri/' + task.node.driver_info = driver_info + + deploy_utils._allocate_server_hardware_to_ironic( + task.node, 'serverProfileName' + ) + self.assertTrue(mock_delete_applied_sp.called) + + # Tests for _deallocate_server_hardware_from_ironic + @mock.patch.object(objects.Node, 'save') + def test__deallocate_server_hardware_from_ironic( + self, mock_node_save, mock_get_ov_client + ): + ov_client = mock_get_ov_client.return_value + fake_sh = oneview_models.ServerHardware() + fake_sh.server_profile_uri = 'any/applied_sp_uri/' + ov_client.get_server_hardware_by_uuid.return_value = fake_sh + mock_get_ov_client.return_value = ov_client + + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['applied_server_profile_uri'] = 'any/applied_sp_uri/' + task.node.driver_info = driver_info + + deploy_utils._deallocate_server_hardware_from_ironic(task.node) + self.assertTrue(ov_client.delete_server_profile.called) + self.assertTrue( + 'applied_server_profile_uri' not in task.node.driver_info + ) diff --git a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py index 1d2b6cea0..d84f9e895 100644 --- a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py +++ b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py @@ -126,6 +126,8 @@ ONEVIEWCLIENT_SPEC = ( 'client', 'states', 'exceptions', + 'models', + 'utils', ) ONEVIEWCLIENT_CLIENT_CLS_SPEC = ( diff --git a/ironic/tests/unit/drivers/third_party_driver_mocks.py 
b/ironic/tests/unit/drivers/third_party_driver_mocks.py index 065dd67b4..8fb57c971 100644 --- a/ironic/tests/unit/drivers/third_party_driver_mocks.py +++ b/ironic/tests/unit/drivers/third_party_driver_mocks.py @@ -126,8 +126,10 @@ if not oneview_client: ONEVIEW_ERROR='error') sys.modules['oneview_client.states'] = states sys.modules['oneview_client.exceptions'] = oneview_client.exceptions + sys.modules['oneview_client.utils'] = oneview_client.utils oneview_client.exceptions.OneViewException = type('OneViewException', (Exception,), {}) + sys.modules['oneview_client.models'] = oneview_client.models if 'ironic.drivers.oneview' in sys.modules: six.moves.reload_module(sys.modules['ironic.drivers.modules.oneview']) diff --git a/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml b/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml new file mode 100644 index 000000000..119a15593 --- /dev/null +++ b/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml @@ -0,0 +1,5 @@ +--- +features: + - Add Dynamic Allocation feature for the OneView drivers. +deprecations: + - Deprecates pre-allocation feature for the OneView drivers. From bfdf369a5e0dacf8ee2457e46d937eeae35efdb4 Mon Sep 17 00:00:00 2001 From: Thiago Paiva Date: Thu, 4 Aug 2016 18:20:22 -0300 Subject: [PATCH 139/166] Replacing generic OneViewError w/ InvalidNodeParameter On commit 4483de30ba28e60d0f742441d928c3e28a291f90 we inserted a new OneViewInvalidNodeParameter exception and refactored get_oneview_info to remove duplicated code on exception treatment, but some other modules needed to be refactored too to remove the generic OneViewError. This patch fixes it. 
Change-Id: Ie9eadcebb744ead75cb9cd9cd732c392b559114c --- ironic/drivers/modules/oneview/common.py | 16 ++----------- .../drivers/modules/oneview/deploy_utils.py | 24 +++---------------- 2 files changed, 5 insertions(+), 35 deletions(-) diff --git a/ironic/drivers/modules/oneview/common.py b/ironic/drivers/modules/oneview/common.py index a40d4d549..216ec952b 100644 --- a/ironic/drivers/modules/oneview/common.py +++ b/ironic/drivers/modules/oneview/common.py @@ -194,13 +194,7 @@ def validate_oneview_resources_compatibility(task): node = task.node node_ports = task.ports - try: - oneview_info = get_oneview_info(task.node) - except exception.InvalidParameterValue as e: - msg = (_("Error while obtaining OneView info from node " - "%(node_uuid)s. Error: %(error)s") % - {'node_uuid': node.uuid, 'error': e}) - raise exception.OneViewError(error=msg) + oneview_info = get_oneview_info(task.node) try: oneview_client = get_oneview_client() @@ -281,13 +275,7 @@ def node_has_server_profile(func): """ def inner(*args, **kwargs): task = args[1] - try: - oneview_info = get_oneview_info(task.node) - except exception.InvalidParameterValue as e: - msg = (_("Error while obtaining OneView info from node " - "%(node_uuid)s. Error: %(error)s") % - {'node_uuid': task.node.uuid, 'error': e}) - raise exception.OneViewError(error=msg) + oneview_info = get_oneview_info(task.node) oneview_client = get_oneview_client() try: node_has_server_profile = ( diff --git a/ironic/drivers/modules/oneview/deploy_utils.py b/ironic/drivers/modules/oneview/deploy_utils.py index 7bb00727e..6138fb6de 100644 --- a/ironic/drivers/modules/oneview/deploy_utils.py +++ b/ironic/drivers/modules/oneview/deploy_utils.py @@ -138,13 +138,7 @@ def is_node_in_use_by_oneview(node): from OneView. """ - try: - oneview_info = common.get_oneview_info(node) - except exception.InvalidParameterValue as e: - msg = (_("Error while obtaining OneView info from node " - "%(node_uuid)s. 
Error: %(error)s") % - {'node_uuid': node.uuid, 'error': e}) - raise exception.OneViewError(error=msg) + oneview_info = common.get_oneview_info(node) oneview_client = common.get_oneview_client() @@ -224,13 +218,7 @@ def _allocate_server_hardware_to_ironic(node, server_profile_name): if not node_in_use_by_oneview: - try: - oneview_info = common.get_oneview_info(node) - except exception.InvalidParameterValue as e: - msg = (_("Error while obtaining OneView info from node " - "%(node_uuid)s. Error: %(error)s") % - {'node_uuid': node.uuid, 'error': e}) - raise exception.OneViewError(error=msg) + oneview_info = common.get_oneview_info(node) applied_sp_uri = node.driver_info.get('applied_server_profile_uri') @@ -299,13 +287,7 @@ def _deallocate_server_hardware_from_ironic(node): Hardware to ironic """ - try: - oneview_info = common.get_oneview_info(node) - except exception.InvalidParameterValue as e: - msg = (_("Error while obtaining OneView info from node " - "%(node_uuid)s. Error: %(error)s") % - {'node_uuid': node.uuid, 'error': e}) - raise exception.OneViewError(error=msg) + oneview_info = common.get_oneview_info(node) oneview_client = common.get_oneview_client() oneview_client.power_off(oneview_info) From 15ab3638b3fa725e06e907b1cc672c1da8534f3f Mon Sep 17 00:00:00 2001 From: zhangyanxian Date: Fri, 5 Aug 2016 02:00:24 +0000 Subject: [PATCH 140/166] Fix typo in Install-guide.rst file Change-Id: I0e33ad57c8bf192d5511efd115dd3a6b79bed849 --- doc/source/deploy/install-guide.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/deploy/install-guide.rst b/doc/source/deploy/install-guide.rst index d922ef83c..f415d8f20 100644 --- a/doc/source/deploy/install-guide.rst +++ b/doc/source/deploy/install-guide.rst @@ -1446,7 +1446,7 @@ reboots won't happen via PXE or Virtual Media. Instead, it will boot from a local boot loader installed on the disk. 
It's important to note that in order for this to work the image being -deployed with Bare Metal serivce **must** contain ``grub2`` installed within it. +deployed with Bare Metal service **must** contain ``grub2`` installed within it. Enabling the local boot is different when Bare Metal service is used with Compute service and without it. From fc92d5ce4359a8f095683888ea5c01e494320c6f Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Fri, 5 Aug 2016 09:39:39 +0300 Subject: [PATCH 141/166] Update add nova user to baremetal_admin behaviour Some jobs/installations install ironic without nova. We need to check that nova service is enabled/installed before adding nova user to baremetal_admin. Otherwise it leads to deployment errors. Change-Id: I7d753085d5c8ff6633fba3947752aed0e99c72b2 Closes-Bug: #1610011 --- devstack/lib/ironic | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 88953d067..c87d43f44 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -745,7 +745,9 @@ function create_ironic_accounts { # Create additional bare metal tenant and roles get_or_create_role baremetal_admin get_or_create_role baremetal_observer - get_or_add_user_project_role baremetal_admin nova $SERVICE_PROJECT_NAME + if is_service_enabled nova; then + get_or_add_user_project_role baremetal_admin nova $SERVICE_PROJECT_NAME + fi get_or_add_user_project_role baremetal_observer demo demo fi } From 8bdd538c0c387c8c2633c8615f2a15a31918a548 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Wed, 15 Jun 2016 17:30:33 +0200 Subject: [PATCH 142/166] Promote agent vendor passthru to core API Introduces new /v1/lookup and /v1/heartbeat/ endpoints (and associated controllers). This change does not deprecate the old passthru endpoints, it should be done after IPA switches to using the new ones. 
Change-Id: I9080c07b03103cd7a323e2fc01be821733b07eea Partial-Bug: #1570841 --- devstack/lib/ironic | 2 +- doc/source/webapi/v1.rst | 4 + etc/ironic/ironic.conf.sample | 13 +- ironic/api/config.py | 3 + ironic/api/controllers/v1/__init__.py | 26 +++ ironic/api/controllers/v1/ramdisk.py | 150 +++++++++++++++ ironic/api/controllers/v1/types.py | 21 +++ ironic/api/controllers/v1/utils.py | 8 + ironic/api/controllers/v1/versions.py | 4 +- ironic/common/exception.py | 5 + ironic/conf/agent.py | 3 - ironic/conf/api.py | 8 + ironic/drivers/modules/agent_base_vendor.py | 19 +- ironic/tests/unit/api/test_root.py | 18 ++ ironic/tests/unit/api/v1/test_ramdisk.py | 172 ++++++++++++++++++ ironic/tests/unit/api/v1/test_types.py | 21 +++ .../drivers/modules/test_agent_base_vendor.py | 2 +- .../lookup-heartbeat-f9772521d12a0549.yaml | 17 ++ 18 files changed, 470 insertions(+), 26 deletions(-) create mode 100644 ironic/api/controllers/v1/ramdisk.py create mode 100644 ironic/tests/unit/api/v1/test_ramdisk.py create mode 100644 releasenotes/notes/lookup-heartbeat-f9772521d12a0549.yaml diff --git a/devstack/lib/ironic b/devstack/lib/ironic index 88953d067..bbbcf7949 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -682,7 +682,7 @@ function configure_ironic_conductor { fi if is_deployed_by_agent; then - iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30 + iniset $IRONIC_CONF_FILE api ramdisk_heartbeat_timeout 30 fi # FIXME: this really needs to be tested in the gate. For now, any diff --git a/doc/source/webapi/v1.rst b/doc/source/webapi/v1.rst index d60f8d46b..64cadeca8 100644 --- a/doc/source/webapi/v1.rst +++ b/doc/source/webapi/v1.rst @@ -32,6 +32,10 @@ always requests the newest supported API version. API Versions History -------------------- +**1.22** + + Added endpoints for deployment ramdisks. + **1.21** Add node ``resource_class`` field. 
diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 0ada7a9ff..8e2e3eaf0 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -400,10 +400,6 @@ # be set to True. Defaults to True. (boolean value) #stream_raw_images = true -# Maximum interval (in seconds) for agent heartbeats. (integer -# value) -#heartbeat_timeout = 300 - # Number of times to retry getting power state to check if # bare metal node has been powered off after a soft power off. # (integer value) @@ -486,6 +482,15 @@ # 'public_endpoint' option. (boolean value) #enable_ssl_api = false +# Whether to restrict the lookup API to only nodes in certain +# states. (boolean value) +#restrict_lookup = true + +# Maximum interval (in seconds) for agent heartbeats. (integer +# value) +# Deprecated group/name - [agent]/heartbeat_timeout +#ramdisk_heartbeat_timeout = 300 + [audit] diff --git a/ironic/api/config.py b/ironic/api/config.py index f707f5b4a..abf7d24c8 100644 --- a/ironic/api/config.py +++ b/ironic/api/config.py @@ -30,6 +30,9 @@ app = { '/', '/v1', # IPA ramdisk methods + '/v1/lookup', + '/v1/heartbeat/[a-z0-9\-]+', + # Old IPA ramdisk methods - will be removed in the Ocata release '/v1/drivers/[a-z0-9_]*/vendor_passthru/lookup', '/v1/nodes/[a-z0-9\-]+/vendor_passthru/heartbeat', ], diff --git a/ironic/api/controllers/v1/__init__.py b/ironic/api/controllers/v1/__init__.py index cda8e41b8..5d285fdd2 100644 --- a/ironic/api/controllers/v1/__init__.py +++ b/ironic/api/controllers/v1/__init__.py @@ -29,6 +29,8 @@ from ironic.api.controllers.v1 import chassis from ironic.api.controllers.v1 import driver from ironic.api.controllers.v1 import node from ironic.api.controllers.v1 import port +from ironic.api.controllers.v1 import ramdisk +from ironic.api.controllers.v1 import utils from ironic.api.controllers.v1 import versions from ironic.api import expose from ironic.common.i18n import _ @@ -78,6 +80,12 @@ class V1(base.APIBase): drivers = [link.Link] 
"""Links to the drivers resource""" + lookup = [link.Link] + """Links to the lookup resource""" + + heartbeat = [link.Link] + """Links to the heartbeat resource""" + @staticmethod def convert(): v1 = V1() @@ -120,6 +128,22 @@ class V1(base.APIBase): 'drivers', '', bookmark=True) ] + if utils.allow_ramdisk_endpoints(): + v1.lookup = [link.Link.make_link('self', pecan.request.public_url, + 'lookup', ''), + link.Link.make_link('bookmark', + pecan.request.public_url, + 'lookup', '', + bookmark=True) + ] + v1.heartbeat = [link.Link.make_link('self', + pecan.request.public_url, + 'heartbeat', ''), + link.Link.make_link('bookmark', + pecan.request.public_url, + 'heartbeat', '', + bookmark=True) + ] return v1 @@ -130,6 +154,8 @@ class Controller(rest.RestController): ports = port.PortsController() chassis = chassis.ChassisController() drivers = driver.DriversController() + lookup = ramdisk.LookupController() + heartbeat = ramdisk.HeartbeatController() @expose.expose(V1) def get(self): diff --git a/ironic/api/controllers/v1/ramdisk.py b/ironic/api/controllers/v1/ramdisk.py new file mode 100644 index 000000000..ed9c77b29 --- /dev/null +++ b/ironic/api/controllers/v1/ramdisk.py @@ -0,0 +1,150 @@ +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg +import pecan +from pecan import rest +from six.moves import http_client +from wsme import types as wtypes + +from ironic.api.controllers import base +from ironic.api.controllers.v1 import node as node_ctl +from ironic.api.controllers.v1 import types +from ironic.api.controllers.v1 import utils as api_utils +from ironic.api import expose +from ironic.common import exception +from ironic.common import policy +from ironic.common import states +from ironic import objects + + +CONF = cfg.CONF + +_LOOKUP_RETURN_FIELDS = ('uuid', 'properties', 'instance_info', + 'driver_internal_info') +_LOOKUP_ALLOWED_STATES = {states.DEPLOYING, states.DEPLOYWAIT, + states.CLEANING, states.CLEANWAIT, + states.INSPECTING} + + +def config(): + return { + 'metrics': { + 'backend': CONF.metrics.agent_backend, + 'prepend_host': CONF.metrics.agent_prepend_host, + 'prepend_uuid': CONF.metrics.agent_prepend_uuid, + 'prepend_host_reverse': CONF.metrics.agent_prepend_host_reverse, + 'global_prefix': CONF.metrics.agent_global_prefix + }, + 'metrics_statsd': { + 'statsd_host': CONF.metrics_statsd.agent_statsd_host, + 'statsd_port': CONF.metrics_statsd.agent_statsd_port + }, + 'heartbeat_timeout': CONF.api.ramdisk_heartbeat_timeout + } + + +class LookupResult(base.APIBase): + """API representation of the node lookup result.""" + + node = node_ctl.Node + """The short node representation.""" + + config = {wtypes.text: types.jsontype} + """The configuration to pass to the ramdisk.""" + + @classmethod + def sample(cls): + return cls(node=node_ctl.Node.sample(), + config={'heartbeat_timeout': 600}) + + @classmethod + def convert_with_links(cls, node): + node = node_ctl.Node.convert_with_links(node, _LOOKUP_RETURN_FIELDS) + return cls(node=node, config=config()) + + +class LookupController(rest.RestController): + """Controller handling node lookup for a deploy ramdisk.""" + + @expose.expose(LookupResult, types.list_of_macaddress, types.uuid) + def get_all(self, 
addresses=None, node_uuid=None): + """Look up a node by its MAC addresses and optionally UUID. + + If the "restrict_lookup" option is set to True (the default), limit + the search to nodes in certain transient states (e.g. deploy wait). + + :param addresses: list of MAC addresses for a node. + :param node_uuid: UUID of a node. + :raises: NotFound if requested API version does not allow this + endpoint. + :raises: NotFound if suitable node was not found. + """ + if not api_utils.allow_ramdisk_endpoints(): + raise exception.NotFound() + + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:driver:ipa_lookup', cdict, cdict) + + if not addresses and not node_uuid: + raise exception.IncompleteLookup() + + try: + if node_uuid: + node = objects.Node.get_by_uuid( + pecan.request.context, node_uuid) + else: + node = objects.Node.get_by_port_addresses( + pecan.request.context, addresses) + except exception.NotFound: + # NOTE(dtantsur): we are reraising the same exception to make sure + # we don't disclose the difference between nodes that are not found + # at all and nodes in a wrong state by different error messages. + raise exception.NotFound() + + if (CONF.api.restrict_lookup and + node.provision_state not in _LOOKUP_ALLOWED_STATES): + raise exception.NotFound() + + return LookupResult.convert_with_links(node) + + +class HeartbeatController(rest.RestController): + """Controller handling heartbeats from deploy ramdisk.""" + + @expose.expose(None, types.uuid_or_name, wtypes.text, + status_code=http_client.ACCEPTED) + def post(self, node_ident, callback_url): + """Process a heartbeat from the deploy ramdisk. + + :param node_ident: the UUID or logical name of a node. + :param callback_url: the URL to reach back to the ramdisk. 
+ """ + if not api_utils.allow_ramdisk_endpoints(): + raise exception.NotFound() + + cdict = pecan.request.context.to_dict() + policy.authorize('baremetal:node:ipa_heartbeat', cdict, cdict) + + rpc_node = api_utils.get_rpc_node(node_ident) + + try: + topic = pecan.request.rpcapi.get_topic_for(rpc_node) + except exception.NoValidHost as e: + e.code = http_client.BAD_REQUEST + raise + + pecan.request.rpcapi.heartbeat(pecan.request.context, + rpc_node.uuid, callback_url, + topic=topic) diff --git a/ironic/api/controllers/v1/types.py b/ironic/api/controllers/v1/types.py index 9cfe206b7..5979ace4a 100644 --- a/ironic/api/controllers/v1/types.py +++ b/ironic/api/controllers/v1/types.py @@ -176,6 +176,26 @@ class ListType(wtypes.UserType): return ListType.validate(value) +class ListOfMacAddressesType(ListType): + """List of MAC addresses.""" + + @staticmethod + def validate(value): + """Validate and convert the input to a ListOfMacAddressesType. + + :param value: A comma separated string of MAC addresses. + :returns: A list of unique MACs, whose order is not guaranteed. 
+ """ + items = ListType.validate(value) + return [MacAddressType.validate(item) for item in items] + + @staticmethod + def frombasetype(value): + if value is None: + return None + return ListOfMacAddressesType.validate(value) + + macaddress = MacAddressType() uuid_or_name = UuidOrNameType() name = NameType() @@ -184,6 +204,7 @@ boolean = BooleanType() listtype = ListType() # Can't call it 'json' because that's the name of the stdlib module jsontype = JsonType() +list_of_macaddress = ListOfMacAddressesType() class JsonPatchType(wtypes.Base): diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py index 00b6a17d9..6c0cba767 100644 --- a/ironic/api/controllers/v1/utils.py +++ b/ironic/api/controllers/v1/utils.py @@ -383,6 +383,14 @@ def allow_resource_class(): versions.MINOR_21_RESOURCE_CLASS) +def allow_ramdisk_endpoints(): + """Check if heartbeat and lookup endpoints are allowed. + + Version 1.22 of the API introduced them. + """ + return pecan.request.version.minor >= versions.MINOR_22_LOOKUP_HEARTBEAT + + def get_controller_reserved_names(cls): """Get reserved names for a given controller. diff --git a/ironic/api/controllers/v1/versions.py b/ironic/api/controllers/v1/versions.py index 152f5e64a..aa02fb930 100644 --- a/ironic/api/controllers/v1/versions.py +++ b/ironic/api/controllers/v1/versions.py @@ -51,6 +51,7 @@ BASE_VERSION = 1 # v1.19: Add port.local_link_connection and port.pxe_enabled. # v1.20: Add node.network_interface # v1.21: Add node.resource_class +# v1.22: Ramdisk lookup and heartbeat endpoints. MINOR_0_JUNO = 0 MINOR_1_INITIAL_VERSION = 1 @@ -74,11 +75,12 @@ MINOR_18_PORT_INTERNAL_INFO = 18 MINOR_19_PORT_ADVANCED_NET_FIELDS = 19 MINOR_20_NETWORK_INTERFACE = 20 MINOR_21_RESOURCE_CLASS = 21 +MINOR_22_LOOKUP_HEARTBEAT = 22 # When adding another version, update MINOR_MAX_VERSION and also update # doc/source/webapi/v1.rst with a detailed explanation of what the version has # changed. 
-MINOR_MAX_VERSION = MINOR_21_RESOURCE_CLASS +MINOR_MAX_VERSION = MINOR_22_LOOKUP_HEARTBEAT # String representations of the minor and maximum versions MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION) diff --git a/ironic/common/exception.py b/ironic/common/exception.py index 7d8341c15..b483b8d7d 100644 --- a/ironic/common/exception.py +++ b/ironic/common/exception.py @@ -606,3 +606,8 @@ class NodeTagNotFound(IronicException): class NetworkError(IronicException): _msg_fmt = _("Network operation failure.") + + +class IncompleteLookup(Invalid): + _msg_fmt = _("At least one of 'addresses' and 'node_uuid' parameters " + "is required") diff --git a/ironic/conf/agent.py b/ironic/conf/agent.py index 9555ca973..899e8afb8 100644 --- a/ironic/conf/agent.py +++ b/ironic/conf/agent.py @@ -44,9 +44,6 @@ opts = [ 'to the disk. Unless the disk where the image will be ' 'copied to is really slow, this option should be set ' 'to True. Defaults to True.')), - cfg.IntOpt('heartbeat_timeout', - default=300, - help=_('Maximum interval (in seconds) for agent heartbeats.')), cfg.IntOpt('post_deploy_get_power_state_retries', default=6, help=_('Number of times to retry getting power state to check ' diff --git a/ironic/conf/api.py b/ironic/conf/api.py index 7ec6f36c2..d9b74414f 100644 --- a/ironic/conf/api.py +++ b/ironic/conf/api.py @@ -49,6 +49,14 @@ opts = [ "the service, this option should be False; note, you " "will want to change public API endpoint to represent " "SSL termination URL with 'public_endpoint' option.")), + cfg.BoolOpt('restrict_lookup', + default=True, + help=_('Whether to restrict the lookup API to only nodes ' + 'in certain states.')), + cfg.IntOpt('ramdisk_heartbeat_timeout', + default=300, + deprecated_group='agent', deprecated_name='heartbeat_timeout', + help=_('Maximum interval (in seconds) for agent heartbeats.')), ] opt_group = cfg.OptGroup(name='api', diff --git a/ironic/drivers/modules/agent_base_vendor.py 
b/ironic/drivers/modules/agent_base_vendor.py index 51a6fc40e..d7929596c 100644 --- a/ironic/drivers/modules/agent_base_vendor.py +++ b/ironic/drivers/modules/agent_base_vendor.py @@ -26,6 +26,7 @@ from oslo_utils import strutils from oslo_utils import timeutils import retrying +from ironic.api.controllers.v1 import ramdisk from ironic.common import boot_devices from ironic.common import exception from ironic.common.i18n import _ @@ -789,23 +790,9 @@ class BaseAgentVendor(AgentDeployMixin, base.VendorInterface): # config namespace. Instead of a separate deprecation, # this will die when the vendor_passthru version of # lookup goes away. - 'heartbeat_timeout': CONF.agent.heartbeat_timeout, + 'heartbeat_timeout': CONF.api.ramdisk_heartbeat_timeout, 'node': ndict, - 'config': { - 'metrics': { - 'backend': CONF.metrics.agent_backend, - 'prepend_host': CONF.metrics.agent_prepend_host, - 'prepend_uuid': CONF.metrics.agent_prepend_uuid, - 'prepend_host_reverse': - CONF.metrics.agent_prepend_host_reverse, - 'global_prefix': CONF.metrics.agent_global_prefix - }, - 'metrics_statsd': { - 'statsd_host': CONF.metrics_statsd.agent_statsd_host, - 'statsd_port': CONF.metrics_statsd.agent_statsd_port - }, - 'heartbeat_timeout': CONF.agent.heartbeat_timeout - } + 'config': ramdisk.config(), } def _get_interfaces(self, inventory): diff --git a/ironic/tests/unit/api/test_root.py b/ironic/tests/unit/api/test_root.py index 3f41242f9..ed5e9676b 100644 --- a/ironic/tests/unit/api/test_root.py +++ b/ironic/tests/unit/api/test_root.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from ironic.api.controllers import base as api_base from ironic.api.controllers.v1 import versions from ironic.tests.unit.api import base @@ -51,3 +52,20 @@ class TestV1Root(base.BaseApiTest): self.assertIn({'type': 'application/vnd.openstack.ironic.v1+json', 'base': 'application/json'}, data['media_types']) + + def test_get_v1_root_version_1_22(self): + headers = {api_base.Version.string: '1.22'} + data = self.get_json('/', headers=headers) + self.assertEqual('v1', data['id']) + # Check fields are not empty + for f in data: + self.assertNotIn(f, ['', []]) + # Check if all known resources are present and there are no extra ones. + not_resources = ('id', 'links', 'media_types') + actual_resources = tuple(set(data.keys()) - set(not_resources)) + expected_resources = ('chassis', 'drivers', 'heartbeat', + 'lookup', 'nodes', 'ports') + self.assertEqual(sorted(expected_resources), sorted(actual_resources)) + + self.assertIn({'type': 'application/vnd.openstack.ironic.v1+json', + 'base': 'application/json'}, data['media_types']) diff --git a/ironic/tests/unit/api/v1/test_ramdisk.py b/ironic/tests/unit/api/v1/test_ramdisk.py new file mode 100644 index 000000000..601747ec9 --- /dev/null +++ b/ironic/tests/unit/api/v1/test_ramdisk.py @@ -0,0 +1,172 @@ +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for the API /lookup/ methods. 
+""" + +import mock +from oslo_config import cfg +from oslo_utils import uuidutils +from six.moves import http_client + +from ironic.api.controllers import base as api_base +from ironic.api.controllers import v1 as api_v1 +from ironic.api.controllers.v1 import ramdisk +from ironic.conductor import rpcapi +from ironic.tests.unit.api import base as test_api_base +from ironic.tests.unit.objects import utils as obj_utils + + +CONF = cfg.CONF + + +class TestLookup(test_api_base.BaseApiTest): + addresses = ['11:22:33:44:55:66', '66:55:44:33:22:11'] + + def setUp(self): + super(TestLookup, self).setUp() + self.node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + provision_state='deploying') + self.node2 = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + provision_state='available') + CONF.set_override('agent_backend', 'statsd', 'metrics') + + def _check_config(self, data): + expected_metrics = { + 'metrics': { + 'backend': 'statsd', + 'prepend_host': CONF.metrics.agent_prepend_host, + 'prepend_uuid': CONF.metrics.agent_prepend_uuid, + 'prepend_host_reverse': + CONF.metrics.agent_prepend_host_reverse, + 'global_prefix': CONF.metrics.agent_global_prefix + }, + 'metrics_statsd': { + 'statsd_host': CONF.metrics_statsd.agent_statsd_host, + 'statsd_port': CONF.metrics_statsd.agent_statsd_port + }, + 'heartbeat_timeout': CONF.api.ramdisk_heartbeat_timeout + } + self.assertEqual(expected_metrics, data['config']) + + def test_nothing_provided(self): + response = self.get_json( + '/lookup', + headers={api_base.Version.string: str(api_v1.MAX_VER)}, + expect_errors=True) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + + def test_not_found(self): + response = self.get_json( + '/lookup?addresses=%s' % ','.join(self.addresses), + headers={api_base.Version.string: str(api_v1.MAX_VER)}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def test_old_api_version(self): + 
obj_utils.create_test_port(self.context, + node_id=self.node.id, + address=self.addresses[1]) + + response = self.get_json( + '/lookup?addresses=%s' % ','.join(self.addresses), + headers={api_base.Version.string: str(api_v1.MIN_VER)}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def test_found_by_addresses(self): + obj_utils.create_test_port(self.context, + node_id=self.node.id, + address=self.addresses[1]) + + data = self.get_json( + '/lookup?addresses=%s' % ','.join(self.addresses), + headers={api_base.Version.string: str(api_v1.MAX_VER)}) + self.assertEqual(self.node.uuid, data['node']['uuid']) + self.assertEqual(set(ramdisk._LOOKUP_RETURN_FIELDS) | {'links'}, + set(data['node'])) + self._check_config(data) + + def test_found_by_uuid(self): + data = self.get_json( + '/lookup?addresses=%s&node_uuid=%s' % + (','.join(self.addresses), self.node.uuid), + headers={api_base.Version.string: str(api_v1.MAX_VER)}) + self.assertEqual(self.node.uuid, data['node']['uuid']) + self.assertEqual(set(ramdisk._LOOKUP_RETURN_FIELDS) | {'links'}, + set(data['node'])) + self._check_config(data) + + def test_found_by_only_uuid(self): + data = self.get_json( + '/lookup?node_uuid=%s' % self.node.uuid, + headers={api_base.Version.string: str(api_v1.MAX_VER)}) + self.assertEqual(self.node.uuid, data['node']['uuid']) + self.assertEqual(set(ramdisk._LOOKUP_RETURN_FIELDS) | {'links'}, + set(data['node'])) + self._check_config(data) + + def test_restrict_lookup(self): + response = self.get_json( + '/lookup?addresses=%s&node_uuid=%s' % + (','.join(self.addresses), self.node2.uuid), + headers={api_base.Version.string: str(api_v1.MAX_VER)}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def test_no_restrict_lookup(self): + CONF.set_override('restrict_lookup', False, 'api') + data = self.get_json( + '/lookup?addresses=%s&node_uuid=%s' % + (','.join(self.addresses), self.node2.uuid), + 
headers={api_base.Version.string: str(api_v1.MAX_VER)}) + self.assertEqual(self.node2.uuid, data['node']['uuid']) + self.assertEqual(set(ramdisk._LOOKUP_RETURN_FIELDS) | {'links'}, + set(data['node'])) + self._check_config(data) + + +@mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for', + lambda *n: 'test-topic') +class TestHeartbeat(test_api_base.BaseApiTest): + def test_old_api_version(self): + response = self.post_json( + '/heartbeat/%s' % uuidutils.generate_uuid(), + {'callback_url': 'url'}, + headers={api_base.Version.string: str(api_v1.MIN_VER)}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def test_node_not_found(self): + response = self.post_json( + '/heartbeat/%s' % uuidutils.generate_uuid(), + {'callback_url': 'url'}, + headers={api_base.Version.string: str(api_v1.MAX_VER)}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + @mock.patch.object(rpcapi.ConductorAPI, 'heartbeat', autospec=True) + def test_ok(self, mock_heartbeat): + node = obj_utils.create_test_node(self.context) + response = self.post_json( + '/heartbeat/%s' % node.uuid, + {'callback_url': 'url'}, + headers={api_base.Version.string: str(api_v1.MAX_VER)}) + self.assertEqual(http_client.ACCEPTED, response.status_int) + self.assertEqual(b'', response.body) + mock_heartbeat.assert_called_once_with(mock.ANY, mock.ANY, + node.uuid, 'url', + topic='test-topic') diff --git a/ironic/tests/unit/api/v1/test_types.py b/ironic/tests/unit/api/v1/test_types.py index 4c3035d91..0c7e5ce45 100644 --- a/ironic/tests/unit/api/v1/test_types.py +++ b/ironic/tests/unit/api/v1/test_types.py @@ -41,6 +41,27 @@ class TestMacAddressType(base.TestCase): types.MacAddressType.validate, 'invalid-mac') +class TestListOfMacAddressesType(base.TestCase): + + def test_valid_mac_addr(self): + test_mac = 'aa:bb:cc:11:22:33' + self.assertEqual([test_mac], + types.ListOfMacAddressesType.validate(test_mac)) + + def test_valid_list(self): + 
test_mac = 'aa:bb:cc:11:22:33,11:22:33:44:55:66' + self.assertEqual( + sorted(test_mac.split(',')), + sorted(types.ListOfMacAddressesType.validate(test_mac))) + + def test_invalid_mac_addr(self): + self.assertRaises(exception.InvalidMAC, + types.ListOfMacAddressesType.validate, 'invalid-mac') + self.assertRaises(exception.InvalidMAC, + types.ListOfMacAddressesType.validate, + 'aa:bb:cc:11:22:33,invalid-mac') + + class TestUuidType(base.TestCase): def test_valid_uuid(self): diff --git a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py index 78791bc58..7cbc02956 100644 --- a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py +++ b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py @@ -132,7 +132,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): 'statsd_host': CONF.metrics_statsd.agent_statsd_host, 'statsd_port': CONF.metrics_statsd.agent_statsd_port }, - 'heartbeat_timeout': CONF.agent.heartbeat_timeout + 'heartbeat_timeout': CONF.api.ramdisk_heartbeat_timeout } find_mock.return_value = self.node diff --git a/releasenotes/notes/lookup-heartbeat-f9772521d12a0549.yaml b/releasenotes/notes/lookup-heartbeat-f9772521d12a0549.yaml new file mode 100644 index 000000000..f4ec0cd6f --- /dev/null +++ b/releasenotes/notes/lookup-heartbeat-f9772521d12a0549.yaml @@ -0,0 +1,17 @@ +--- +features: + - New API endpoint for deploy ramdisk lookup ``/v1/lookup``. + This endpoint is not authenticated to allow ramdisks to access it without + passing the credentials to them. + - New API endpoint for deploy ramdisk heartbeat ``/v1/heartbeat/``. + This endpoint is not authenticated to allow ramdisks to access it without + passing the credentials to them. +deprecations: + - The configuration option ``[agent]heartbeat_timeout`` was renamed to + ``[api]ramdisk_heartbeat_timeout``. The old variant is deprecated. 
+upgrade: + - A new configuration option ``[api]restrict_lookup`` is added, which + restricts the lookup API (normally only used by ramdisks) to only work when + the node is in specific states used by the ramdisk, and defaults to True. + Operators that need this endpoint to work in any state may set this to + False, though this is insecure and should not be used in normal operation. From 7eb2b4aa0ff06ebe29da433b3ddd847761f0c98a Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Fri, 5 Aug 2016 07:55:57 -0400 Subject: [PATCH 143/166] Remove oslo-incubator references This drops compatibility for icehouse-era RPC backend configuration, as well as references to openstack.common in a docstring and an example traceback. Change-Id: I9603fcf0f78066f188d4852d2a815c0abe12744d --- ironic/common/context.py | 2 +- ironic/common/rpc.py | 5 ----- ironic/tests/unit/api/test_hooks.py | 4 ++-- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/ironic/common/context.py b/ironic/common/context.py index ffe11bd66..131a94232 100644 --- a/ironic/common/context.py +++ b/ironic/common/context.py @@ -16,7 +16,7 @@ from oslo_context import context class RequestContext(context.RequestContext): - """Extends security contexts from the OpenStack common library.""" + """Extends security contexts from the oslo.context library.""" def __init__(self, auth_token=None, domain_id=None, domain_name=None, user=None, tenant=None, is_admin=False, is_public_api=False, diff --git a/ironic/common/rpc.py b/ironic/common/rpc.py index dbf702bb9..7ea30e11b 100644 --- a/ironic/common/rpc.py +++ b/ironic/common/rpc.py @@ -30,12 +30,7 @@ ALLOWED_EXMODS = [ ] EXTRA_EXMODS = [] -# NOTE(lucasagomes): The ironic.openstack.common.rpc entries are for -# backwards compat with IceHouse rpc_backend configuration values. 
TRANSPORT_ALIASES = { - 'ironic.openstack.common.rpc.impl_kombu': 'rabbit', - 'ironic.openstack.common.rpc.impl_qpid': 'qpid', - 'ironic.openstack.common.rpc.impl_zmq': 'zmq', 'ironic.rpc.impl_kombu': 'rabbit', 'ironic.rpc.impl_qpid': 'qpid', 'ironic.rpc.impl_zmq': 'zmq', diff --git a/ironic/tests/unit/api/test_hooks.py b/ironic/tests/unit/api/test_hooks.py index 7c9a58938..c1a939215 100644 --- a/ironic/tests/unit/api/test_hooks.py +++ b/ironic/tests/unit/api/test_hooks.py @@ -103,9 +103,9 @@ def fake_headers(admin=False): class TestNoExceptionTracebackHook(base.BaseApiTest): TRACE = [u'Traceback (most recent call last):', - u' File "/opt/stack/ironic/ironic/openstack/common/rpc/amqp.py",' + u' File "/opt/stack/ironic/ironic/common/rpc/amqp.py",' ' line 434, in _process_data\\n **args)', - u' File "/opt/stack/ironic/ironic/openstack/common/rpc/' + u' File "/opt/stack/ironic/ironic/common/rpc/' 'dispatcher.py", line 172, in dispatch\\n result =' ' getattr(proxyobj, method)(ctxt, **kwargs)'] MSG_WITHOUT_TRACE = "Test exception message." From a58059304ffbdcf47b18e3cd1ed8c8154d253420 Mon Sep 17 00:00:00 2001 From: Gleb Stepanov Date: Fri, 5 Aug 2016 15:49:54 +0300 Subject: [PATCH 144/166] Fix typo Fix typo in word from deactive to deactivated. Change-Id: Ia682245445849ee8dffb2e2756b9503ca0f47f51 --- ironic/drivers/modules/ipmitool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ironic/drivers/modules/ipmitool.py b/ironic/drivers/modules/ipmitool.py index fbc031237..5fe3b63ef 100644 --- a/ironic/drivers/modules/ipmitool.py +++ b/ironic/drivers/modules/ipmitool.py @@ -1248,8 +1248,8 @@ class IPMISocatConsole(IPMIConsole): self._exec_stop_console(driver_info) except OSError: # We need to drop any existing sol sessions with sol deactivate. - # OSError is raised when sol session is deactive, so we can - # ignore it. + # OSError is raised when sol session is already deactivated, + # so we can ignore it. 
pass self._start_console(driver_info, console_utils.start_socat_console) From cd7507f04b309383629ef83b7fec478128918ec1 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 29 Jun 2016 16:47:16 +0100 Subject: [PATCH 145/166] Collect deployment logs from IPA This patch adds the code to collect the deployment logs from the IPA ramdisk. The logs can be collected for every deployment, upon a failure or never. By default, logs are collected upon a failure. After collection, logs can be stored either in the local filesystem (default) or in Swift. If an error occurs when the logs are being collected, stored or if the ramdisk does not support the collect_system_logs command Ironic will log an error message, but the deployment will proceed. Documentation on how to enable and other configuration will be done on a subsequent patch. Partial-Bug: #1587143 Change-Id: I6da1110daa94ea25670f71f9862e51cc9bbc6f93 --- etc/ironic/ironic.conf.sample | 35 ++++- ironic/conf/agent.py | 29 +++- ironic/drivers/modules/agent_base_vendor.py | 5 + ironic/drivers/modules/agent_client.py | 7 + ironic/drivers/modules/deploy_utils.py | 4 + ironic/drivers/utils.py | 109 ++++++++++++++ .../drivers/modules/oneview/test_vendor.py | 5 +- .../tests/unit/drivers/modules/test_agent.py | 13 +- .../drivers/modules/test_agent_base_vendor.py | 46 ++++-- .../unit/drivers/modules/test_deploy_utils.py | 23 ++- .../unit/drivers/modules/test_iscsi_deploy.py | 23 ++- ironic/tests/unit/drivers/test_utils.py | 141 ++++++++++++++++++ ...lect-deployment-logs-2ec1634847c3f6a5.yaml | 12 ++ 13 files changed, 422 insertions(+), 30 deletions(-) create mode 100644 releasenotes/notes/collect-deployment-logs-2ec1634847c3f6a5.yaml diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 0ada7a9ff..bcf2f3448 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -417,6 +417,33 @@ # (string value) #agent_api_version = v1 +# Whether Ironic should collect the deployment
logs on +# deployment failure (on_failure), always or never. (string +# value) +# Allowed values: always, on_failure, never +#deploy_logs_collect = on_failure + +# The name of the storage backend where the logs will be +# stored. (string value) +# Allowed values: local, swift +#deploy_logs_storage_backend = local + +# The path to the directory where the logs should be stored, +# used when the deploy_logs_storage_backend is configured to +# "local". (string value) +#deploy_logs_local_path = /var/log/ironic/deploy + +# The name of the Swift container to store the logs, used when +# the deploy_logs_storage_backend is configured to "swift". +# (string value) +#deploy_logs_swift_container = ironic_deploy_logs_container + +# Number of days before a log object is marked as expired in +# Swift. If None, the logs will be kept forever or until +# manually deleted. Used when the deploy_logs_storage_backend +# is configured to "swift". (integer value) +#deploy_logs_swift_days_to_expire = 30 + [amt] @@ -1929,10 +1956,6 @@ # value) #max_polling_attempts = 12 -# Period (in seconds) for periodic tasks to be executed. -# (integer value) -#periodic_check_interval = 300 - # Whether to enable the periodic tasks for OneView driver be # aware when OneView hardware resources are taken and released # by Ironic or OneView users and proactively manage nodes in @@ -1940,6 +1963,10 @@ # hardware resources allocation in OneView. (boolean value) #enable_periodic_tasks = true +# Period (in seconds) for periodic tasks to be executed when +# enable_periodic_tasks=True. 
(integer value) +#periodic_check_interval = 300 + [oslo_concurrency] diff --git a/ironic/conf/agent.py b/ironic/conf/agent.py index 9555ca973..88dd1873a 100644 --- a/ironic/conf/agent.py +++ b/ironic/conf/agent.py @@ -59,7 +59,34 @@ opts = [ cfg.StrOpt('agent_api_version', default='v1', help=_('API version to use for communicating with the ramdisk ' - 'agent.')) + 'agent.')), + cfg.StrOpt('deploy_logs_collect', + choices=['always', 'on_failure', 'never'], + default='on_failure', + help=_('Whether Ironic should collect the deployment logs on ' + 'deployment failure (on_failure), always or never.')), + cfg.StrOpt('deploy_logs_storage_backend', + choices=['local', 'swift'], + default='local', + help=_('The name of the storage backend where the logs ' + 'will be stored.')), + cfg.StrOpt('deploy_logs_local_path', + default='/var/log/ironic/deploy', + help=_('The path to the directory where the logs should be ' + 'stored, used when the deploy_logs_storage_backend ' + 'is configured to "local".')), + cfg.StrOpt('deploy_logs_swift_container', + default='ironic_deploy_logs_container', + help=_('The name of the Swift container to store the logs, ' + 'used when the deploy_logs_storage_backend is ' + 'configured to "swift".')), + cfg.IntOpt('deploy_logs_swift_days_to_expire', + default=30, + help=_('Number of days before a log object is marked as ' + 'expired in Swift. If None, the logs will be kept ' + 'forever or until manually deleted. 
Used when the ' + 'deploy_logs_storage_backend is configured to ' + '"swift".')), ] diff --git a/ironic/drivers/modules/agent_base_vendor.py b/ironic/drivers/modules/agent_base_vendor.py index 51a6fc40e..dfdcb5600 100644 --- a/ironic/drivers/modules/agent_base_vendor.py +++ b/ironic/drivers/modules/agent_base_vendor.py @@ -41,6 +41,7 @@ from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules import agent_client from ironic.drivers.modules import deploy_utils +from ironic.drivers import utils as driver_utils from ironic import objects LOG = log.getLogger(__name__) @@ -533,6 +534,10 @@ class AgentDeployMixin(object): return task.driver.power.get_power_state(task) node = task.node + + if CONF.agent.deploy_logs_collect == 'always': + driver_utils.collect_ramdisk_logs(node) + # Whether ironic should power off the node via out-of-band or # in-band methods oob_power_off = strutils.bool_from_string( diff --git a/ironic/drivers/modules/agent_client.py b/ironic/drivers/modules/agent_client.py index 06ffe6569..6f3b4fba7 100644 --- a/ironic/drivers/modules/agent_client.py +++ b/ironic/drivers/modules/agent_client.py @@ -229,3 +229,10 @@ class AgentClient(object): method='standby.sync', params={}, wait=True) + + def collect_system_logs(self, node): + """Collect and package diagnostic and support data from the ramdisk.""" + return self._command(node=node, + method='log.collect_system_logs', + params={}, + wait=True) diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py index 61da5b12a..9b5dc0c71 100644 --- a/ironic/drivers/modules/deploy_utils.py +++ b/ironic/drivers/modules/deploy_utils.py @@ -474,6 +474,10 @@ def set_failed_state(task, msg): :param msg: the message to set in last_error of the node. 
""" node = task.node + + if CONF.agent.deploy_logs_collect in ('on_failure', 'always'): + driver_utils.collect_ramdisk_logs(node) + try: task.process_event('fail') except exception.InvalidState: diff --git a/ironic/drivers/utils.py b/ironic/drivers/utils.py index c9a447b27..60fe13fc1 100644 --- a/ironic/drivers/utils.py +++ b/ironic/drivers/utils.py @@ -12,18 +12,29 @@ # License for the specific language governing permissions and limitations # under the License. +import base64 +import os +import tempfile + +from oslo_config import cfg from oslo_log import log as logging +from oslo_utils import timeutils import six from ironic.common import exception from ironic.common.i18n import _ +from ironic.common.i18n import _LE from ironic.common.i18n import _LW +from ironic.common import swift from ironic.conductor import utils from ironic.drivers import base +from ironic.drivers.modules import agent_client LOG = logging.getLogger(__name__) +CONF = cfg.CONF + class MixinVendorInterface(base.VendorInterface): """Wrapper around multiple VendorInterfaces.""" @@ -251,3 +262,101 @@ def normalize_mac(mac): :return: Normalized MAC address string. """ return mac.replace('-', '').replace(':', '').lower() + + +def get_ramdisk_logs_file_name(node): + """Construct the log file name. + + :param node: A node object. + :returns: The log file name. + """ + timestamp = timeutils.utcnow().strftime('%Y-%m-%d-%H:%M:%S') + file_name_fields = [node.uuid] + if node.instance_uuid: + file_name_fields.append(node.instance_uuid) + + file_name_fields.append(timestamp) + return '_'.join(file_name_fields) + '.tar.gz' + + +def store_ramdisk_logs(node, logs): + """Store the ramdisk logs. + + This method stores the ramdisk logs according to the configured + storage backend. + + :param node: A node object. + :param logs: A gzipped and base64 encoded string containing the + logs archive. + :raises: OSError if the directory to save the logs cannot be created. 
+ :raises: IOError when the logs can't be saved to the local file system. + :raises: SwiftOperationError, if any operation with Swift fails. + + """ + logs_file_name = get_ramdisk_logs_file_name(node) + data = base64.b64decode(logs) + + if CONF.agent.deploy_logs_storage_backend == 'local': + if not os.path.exists(CONF.agent.deploy_logs_local_path): + os.makedirs(CONF.agent.deploy_logs_local_path) + + log_path = os.path.join(CONF.agent.deploy_logs_local_path, + logs_file_name) + with open(log_path, 'wb') as f: + f.write(data) + + elif CONF.agent.deploy_logs_storage_backend == 'swift': + with tempfile.NamedTemporaryFile(dir=CONF.tempdir) as f: + f.write(data) + f.flush() + + # convert days to seconds + timeout = CONF.agent.deploy_logs_swift_days_to_expire * 86400 + object_headers = {'X-Delete-After': timeout} + swift_api = swift.SwiftAPI() + swift_api.create_object( + CONF.agent.deploy_logs_swift_container, logs_file_name, + f.name, object_headers=object_headers) + + +def collect_ramdisk_logs(node): + """Collect and store the system logs from the IPA ramdisk. + + Collect and store the system logs from the IPA ramdisk. This method + makes a call to the IPA ramdisk to collect the logs and store it + according to the configured storage backend. + + :param node: A node object. + + """ + client = agent_client.AgentClient() + try: + result = client.collect_system_logs(node) + except exception.IronicException as e: + LOG.error(_LE('Failed to invoke collect_system_logs agent command ' + 'for node %(node)s. Error: %(error)s'), + {'node': node.uuid, 'error': e}) + return + + error = result.get('faultstring') + if error is not None: + LOG.error(_LE('Failed to collect logs from the node %(node)s ' + 'deployment. 
Error: %(error)s'), + {'node': node.uuid, 'error': error}) + return + + try: + store_ramdisk_logs(node, result['command_result']['system_logs']) + except exception.SwiftOperationError as e: + LOG.error(_LE('Failed to store the logs from the node %(node)s ' + 'deployment in Swift. Error: %(error)s'), + {'node': node.uuid, 'error': e}) + except EnvironmentError as e: + LOG.exception(_LE('Failed to store the logs from the node %(node)s ' + 'deployment due a file-system related error. ' + 'Error: %(error)s'), + {'node': node.uuid, 'error': e}) + except Exception as e: + LOG.exception(_LE('Unknown error when storing logs from the node ' + '%(node)s deployment. Error: %(error)s'), + {'node': node.uuid, 'error': e}) diff --git a/ironic/tests/unit/drivers/modules/oneview/test_vendor.py b/ironic/tests/unit/drivers/modules/oneview/test_vendor.py index 8c2da9353..ace029e21 100644 --- a/ironic/tests/unit/drivers/modules/oneview/test_vendor.py +++ b/ironic/tests/unit/drivers/modules/oneview/test_vendor.py @@ -28,6 +28,7 @@ from ironic.drivers.modules import agent_client from ironic.drivers.modules.oneview import power from ironic.drivers.modules.oneview import vendor from ironic.drivers.modules import pxe +from ironic.drivers import utils as driver_utils from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils @@ -152,6 +153,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(power.OneViewPower, 'get_power_state', @@ -160,7 +162,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): spec=types.FunctionType) def 
test_reboot_and_finish_deploy_power_action_fails( self, power_off_mock, get_power_state_mock, - node_power_action_mock): + node_power_action_mock, collect_ramdisk_logs_mock): self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() @@ -179,6 +181,7 @@ class TestBaseAgentVendor(db_base.DbTestCase): mock.call(task, states.POWER_OFF)]) self.assertEqual(states.DEPLOYFAIL, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) + collect_ramdisk_logs_mock.assert_called_once_with(task.node) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(power.OneViewPower, 'get_power_state', diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py index 1ad62d52b..8b639f3b8 100644 --- a/ironic/tests/unit/drivers/modules/test_agent.py +++ b/ironic/tests/unit/drivers/modules/test_agent.py @@ -31,6 +31,7 @@ from ironic.drivers.modules import agent_client from ironic.drivers.modules import deploy_utils from ironic.drivers.modules import fake from ironic.drivers.modules import pxe +from ironic.drivers import utils as driver_utils from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils @@ -900,6 +901,7 @@ class TestAgentVendor(db_base.DbTestCase): self.assertEqual(states.NOSTATE, task.node.target_provision_state) self.assertFalse(uuid_mock.called) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @@ -912,12 +914,10 @@ class TestAgentVendor(db_base.DbTestCase): @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface' '.check_deploy_success', autospec=True) @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True) - def 
test_reboot_to_instance_boot_error(self, clean_pxe_mock, - check_deploy_mock, - prepare_mock, power_off_mock, - get_power_state_mock, - node_power_action_mock, - uuid_mock): + def test_reboot_to_instance_boot_error( + self, clean_pxe_mock, check_deploy_mock, prepare_mock, + power_off_mock, get_power_state_mock, node_power_action_mock, + uuid_mock, collect_ramdisk_logs_mock): check_deploy_mock.return_value = "Error" uuid_mock.return_value = None self.node.provision_state = states.DEPLOYWAIT @@ -936,6 +936,7 @@ class TestAgentVendor(db_base.DbTestCase): check_deploy_mock.assert_called_once_with(mock.ANY, task.node) self.assertEqual(states.DEPLOYFAIL, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) + collect_ramdisk_logs_mock.assert_called_once_with(task.node) @mock.patch.object(agent_base_vendor.BaseAgentVendor, 'configure_local_boot', autospec=True) diff --git a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py index 78791bc58..5e7f3dd69 100644 --- a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py +++ b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py @@ -32,6 +32,7 @@ from ironic.drivers.modules import agent_client from ironic.drivers.modules import deploy_utils from ironic.drivers.modules import fake from ironic.drivers.modules import pxe +from ironic.drivers import utils as driver_utils from ironic import objects from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base @@ -582,15 +583,17 @@ class TestHeartbeat(AgentDeployMixinBaseTest): mock_touch.assert_called_once_with(mock.ANY) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(fake.FakePower, 'get_power_state', spec=types.FunctionType) 
@mock.patch.object(agent_client.AgentClient, 'power_off', spec=types.FunctionType) - def test_reboot_and_finish_deploy(self, power_off_mock, - get_power_state_mock, - node_power_action_mock): + def test_reboot_and_finish_deploy( + self, power_off_mock, get_power_state_mock, + node_power_action_mock, mock_collect): + cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent') self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() @@ -605,7 +608,9 @@ class TestHeartbeat(AgentDeployMixinBaseTest): task, states.POWER_ON) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) + mock_collect.assert_called_once_with(task.node) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(fake.FakePower, 'get_power_state', @@ -619,7 +624,7 @@ class TestHeartbeat(AgentDeployMixinBaseTest): def test_reboot_and_finish_deploy_soft_poweroff_doesnt_complete( self, configure_tenant_net_mock, remove_provisioning_net_mock, power_off_mock, get_power_state_mock, - node_power_action_mock): + node_power_action_mock, mock_collect): self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() @@ -637,7 +642,9 @@ class TestHeartbeat(AgentDeployMixinBaseTest): configure_tenant_net_mock.assert_called_once_with(mock.ANY, task) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) + self.assertFalse(mock_collect.called) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(agent_client.AgentClient, 'power_off', spec=types.FunctionType) @@ -647,7 +654,7 @@ class 
TestHeartbeat(AgentDeployMixinBaseTest): 'configure_tenant_networks', spec_set=True, autospec=True) def test_reboot_and_finish_deploy_soft_poweroff_fails( self, configure_tenant_net_mock, remove_provisioning_net_mock, - power_off_mock, node_power_action_mock): + power_off_mock, node_power_action_mock, mock_collect): power_off_mock.side_effect = RuntimeError("boom") self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE @@ -664,7 +671,9 @@ class TestHeartbeat(AgentDeployMixinBaseTest): configure_tenant_net_mock.assert_called_once_with(mock.ANY, task) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) + self.assertFalse(mock_collect.called) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(fake.FakePower, 'get_power_state', @@ -677,7 +686,8 @@ class TestHeartbeat(AgentDeployMixinBaseTest): 'configure_tenant_networks', spec_set=True, autospec=True) def test_reboot_and_finish_deploy_get_power_state_fails( self, configure_tenant_net_mock, remove_provisioning_net_mock, - power_off_mock, get_power_state_mock, node_power_action_mock): + power_off_mock, get_power_state_mock, node_power_action_mock, + mock_collect): self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() @@ -695,7 +705,9 @@ class TestHeartbeat(AgentDeployMixinBaseTest): configure_tenant_net_mock.assert_called_once_with(mock.ANY, task) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) + self.assertFalse(mock_collect.called) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 
'node_power_action', autospec=True) @mock.patch.object(fake.FakePower, 'get_power_state', @@ -704,7 +716,7 @@ class TestHeartbeat(AgentDeployMixinBaseTest): spec=types.FunctionType) def test_reboot_and_finish_deploy_power_action_fails( self, power_off_mock, get_power_state_mock, - node_power_action_mock): + node_power_action_mock, mock_collect): self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() @@ -721,12 +733,14 @@ class TestHeartbeat(AgentDeployMixinBaseTest): mock.call(task, states.POWER_OFF)]) self.assertEqual(states.DEPLOYFAIL, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) + mock_collect.assert_called_once_with(task.node) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(agent_client.AgentClient, 'sync', spec=types.FunctionType) def test_reboot_and_finish_deploy_power_action_oob_power_off( - self, sync_mock, node_power_action_mock): + self, sync_mock, node_power_action_mock, mock_collect): # Enable force power off driver_info = self.node.driver_info driver_info['deploy_forces_oob_reboot'] = True @@ -746,13 +760,15 @@ class TestHeartbeat(AgentDeployMixinBaseTest): ]) self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) + self.assertFalse(mock_collect.called) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(agent_base_vendor.LOG, 'warning', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(agent_client.AgentClient, 'sync', spec=types.FunctionType) def test_reboot_and_finish_deploy_power_action_oob_power_off_failed( - self, sync_mock, node_power_action_mock, log_mock): + self, sync_mock, node_power_action_mock, log_mock, mock_collect): # Enable force power off driver_info = 
self.node.driver_info driver_info['deploy_forces_oob_reboot'] = True @@ -779,6 +795,7 @@ class TestHeartbeat(AgentDeployMixinBaseTest): 'Failed to flush the file system prior to hard rebooting the ' 'node %(node)s. Error: %(error)s', {'node': task.node.uuid, 'error': log_error}) + self.assertFalse(mock_collect.called) @mock.patch.object(agent_client.AgentClient, 'install_bootloader', autospec=True) @@ -841,10 +858,12 @@ class TestHeartbeat(AgentDeployMixinBaseTest): try_set_boot_device_mock.assert_called_once_with( task, boot_devices.DISK) + @mock.patch.object(agent_client.AgentClient, 'collect_system_logs', + autospec=True) @mock.patch.object(agent_client.AgentClient, 'install_bootloader', autospec=True) def test_configure_local_boot_boot_loader_install_fail( - self, install_bootloader_mock): + self, install_bootloader_mock, collect_logs_mock): install_bootloader_mock.return_value = { 'command_status': 'FAILED', 'command_error': 'boom'} self.node.provision_state = states.DEPLOYING @@ -859,14 +878,18 @@ class TestHeartbeat(AgentDeployMixinBaseTest): install_bootloader_mock.assert_called_once_with( mock.ANY, task.node, root_uuid='some-root-uuid', efi_system_part_uuid=None) + collect_logs_mock.assert_called_once_with(mock.ANY, task.node) self.assertEqual(states.DEPLOYFAIL, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) + @mock.patch.object(agent_client.AgentClient, 'collect_system_logs', + autospec=True) @mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True) @mock.patch.object(agent_client.AgentClient, 'install_bootloader', autospec=True) def test_configure_local_boot_set_boot_device_fail( - self, install_bootloader_mock, try_set_boot_device_mock): + self, install_bootloader_mock, try_set_boot_device_mock, + collect_logs_mock): install_bootloader_mock.return_value = { 'command_status': 'SUCCESS', 'command_error': None} try_set_boot_device_mock.side_effect = RuntimeError('error') @@ -884,6 +907,7 @@ class 
TestHeartbeat(AgentDeployMixinBaseTest): efi_system_part_uuid=None) try_set_boot_device_mock.assert_called_once_with( task, boot_devices.DISK) + collect_logs_mock.assert_called_once_with(mock.ANY, task.node) self.assertEqual(states.DEPLOYFAIL, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index 9a26c3680..db2f74a72 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -39,6 +39,7 @@ from ironic.drivers.modules import agent_client from ironic.drivers.modules import deploy_utils as utils from ironic.drivers.modules import image_cache from ironic.drivers.modules import pxe +from ironic.drivers import utils as driver_utils from ironic.tests import base as tests_base from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base @@ -1275,7 +1276,8 @@ class OtherFunctionTestCase(db_base.DbTestCase): else: self.assertFalse(mock_log.called) - def test_set_failed_state(self): + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) + def test_set_failed_state(self, mock_collect): exc_state = exception.InvalidState('invalid state') exc_param = exception.InvalidParameterValue('invalid parameter') mock_call = mock.call(mock.ANY) @@ -1290,8 +1292,10 @@ class OtherFunctionTestCase(db_base.DbTestCase): self._test_set_failed_state(event_value=iter([exc_state] * len(calls)), power_value=iter([exc_param] * len(calls)), log_calls=calls) + self.assertEqual(4, mock_collect.call_count) - def test_set_failed_state_no_poweroff(self): + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) + def test_set_failed_state_no_poweroff(self, mock_collect): cfg.CONF.set_override('power_off_after_deploy_failure', False, 'deploy') exc_state = exception.InvalidState('invalid state') @@ 
-1308,6 +1312,21 @@ class OtherFunctionTestCase(db_base.DbTestCase): self._test_set_failed_state(event_value=iter([exc_state] * len(calls)), power_value=iter([exc_param] * len(calls)), log_calls=calls, poweroff=False) + self.assertEqual(4, mock_collect.call_count) + + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) + def test_set_failed_state_collect_deploy_logs(self, mock_collect): + for opt in ('always', 'on_failure'): + cfg.CONF.set_override('deploy_logs_collect', opt, 'agent') + self._test_set_failed_state() + mock_collect.assert_called_once_with(mock.ANY) + mock_collect.reset_mock() + + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) + def test_set_failed_state_collect_deploy_logs_never(self, mock_collect): + cfg.CONF.set_override('deploy_logs_collect', 'never', 'agent') + self._test_set_failed_state() + self.assertFalse(mock_collect.called) def test_get_boot_option(self): self.node.instance_info = {'capabilities': '{"boot_option": "local"}'} diff --git a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py index 56ca5d46b..4defcb9ba 100644 --- a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py +++ b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py @@ -37,6 +37,7 @@ from ironic.drivers.modules import agent_client from ironic.drivers.modules import deploy_utils from ironic.drivers.modules import iscsi_deploy from ironic.drivers.modules import pxe +from ironic.drivers import utils as driver_utils from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils @@ -157,12 +158,14 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase): mock_unlink.assert_called_once_with('/path/uuid/disk') mock_rmtree.assert_called_once_with('/path/uuid') + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(iscsi_deploy, 
'_save_disk_layout', autospec=True) @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True) - def test_continue_deploy_fail(self, deploy_mock, power_mock, - mock_image_cache, mock_disk_layout): + def test_continue_deploy_fail( + self, deploy_mock, power_mock, mock_image_cache, mock_disk_layout, + mock_collect_logs): kwargs = {'address': '123456', 'iqn': 'aaa-bbb'} deploy_mock.side_effect = exception.InstanceDeployFailure( "test deploy error") @@ -184,13 +187,16 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase): mock_image_cache.assert_called_once_with() mock_image_cache.return_value.clean_up.assert_called_once_with() self.assertFalse(mock_disk_layout.called) + mock_collect_logs.assert_called_once_with(task.node) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True) @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True) def test_continue_deploy_fail_no_root_uuid_or_disk_id( - self, deploy_mock, power_mock, mock_image_cache, mock_disk_layout): + self, deploy_mock, power_mock, mock_image_cache, mock_disk_layout, + mock_collect_logs): kwargs = {'address': '123456', 'iqn': 'aaa-bbb'} deploy_mock.return_value = {} self.node.provision_state = states.DEPLOYWAIT @@ -211,13 +217,16 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase): mock_image_cache.assert_called_once_with() mock_image_cache.return_value.clean_up.assert_called_once_with() self.assertFalse(mock_disk_layout.called) + mock_collect_logs.assert_called_once_with(task.node) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(iscsi_deploy, '_save_disk_layout', 
autospec=True) @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True) def test_continue_deploy_fail_empty_root_uuid( - self, deploy_mock, power_mock, mock_image_cache, mock_disk_layout): + self, deploy_mock, power_mock, mock_image_cache, + mock_disk_layout, mock_collect_logs): kwargs = {'address': '123456', 'iqn': 'aaa-bbb'} deploy_mock.return_value = {'root uuid': ''} self.node.provision_state = states.DEPLOYWAIT @@ -238,6 +247,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase): mock_image_cache.assert_called_once_with() mock_image_cache.return_value.clean_up.assert_called_once_with() self.assertFalse(mock_disk_layout.called) + mock_collect_logs.assert_called_once_with(task.node) @mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True) @mock.patch.object(iscsi_deploy, 'LOG', autospec=True) @@ -424,7 +434,9 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase): agent_client_mock.start_iscsi_target.assert_called_once_with( task.node, expected_iqn, 3260, wipe_disk_metadata=False) - def test_do_agent_iscsi_deploy_start_iscsi_failure(self): + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) + def test_do_agent_iscsi_deploy_start_iscsi_failure( + self, mock_collect_logs): agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient) agent_client_mock.start_iscsi_target.return_value = { 'command_status': 'FAILED', 'command_error': 'booom'} @@ -444,6 +456,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase): self.assertEqual(states.DEPLOYFAIL, self.node.provision_state) self.assertEqual(states.ACTIVE, self.node.target_provision_state) self.assertIsNotNone(self.node.last_error) + mock_collect_logs.assert_called_once_with(task.node) @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url') def test_validate_good_api_url(self, mock_get_url): diff 
--git a/ironic/tests/unit/drivers/test_utils.py b/ironic/tests/unit/drivers/test_utils.py index 9f43efe60..7cb5619ab 100644 --- a/ironic/tests/unit/drivers/test_utils.py +++ b/ironic/tests/unit/drivers/test_utils.py @@ -13,14 +13,22 @@ # License for the specific language governing permissions and limitations # under the License. +import datetime +import os + import mock +from oslo_config import cfg +from oslo_utils import timeutils from ironic.common import driver_factory from ironic.common import exception +from ironic.common import swift from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils +from ironic.drivers.modules import agent_client from ironic.drivers.modules import fake from ironic.drivers import utils as driver_utils +from ironic.tests import base as tests_base from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.objects import utils as obj_utils @@ -237,3 +245,136 @@ class UtilsTestCase(db_base.DbTestCase): mac_raw = u"0A:1B-2C-3D:4F" mac_clean = driver_utils.normalize_mac(mac_raw) self.assertEqual("0a1b2c3d4f", mac_clean) + + +class UtilsRamdiskLogsTestCase(tests_base.TestCase): + + def setUp(self): + super(UtilsRamdiskLogsTestCase, self).setUp() + self.node = obj_utils.get_test_node(self.context) + + @mock.patch.object(timeutils, 'utcnow', autospec=True) + def test_get_ramdisk_logs_file_name(self, mock_utcnow): + mock_utcnow.return_value = datetime.datetime(2000, 1, 1, 0, 0) + name = driver_utils.get_ramdisk_logs_file_name(self.node) + expected_name = ('1be26c0b-03f2-4d2e-ae87-c02d7f33c123_' + '2000-01-01-00:00:00.tar.gz') + self.assertEqual(expected_name, name) + + # with instance_info + instance_uuid = '7a5641ba-d264-424a-a9d7-e2a293ca482b' + node2 = obj_utils.get_test_node( + self.context, instance_uuid=instance_uuid) + name = driver_utils.get_ramdisk_logs_file_name(node2) + expected_name = ('1be26c0b-03f2-4d2e-ae87-c02d7f33c123_' + + 
instance_uuid + '_2000-01-01-00:00:00.tar.gz') + self.assertEqual(expected_name, name) + + @mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True) + @mock.patch.object(agent_client.AgentClient, + 'collect_system_logs', autospec=True) + def test_collect_ramdisk_logs(self, mock_collect, mock_store): + logs = 'Gary the Snail' + mock_collect.return_value = {'command_result': {'system_logs': logs}} + driver_utils.collect_ramdisk_logs(self.node) + mock_store.assert_called_once_with(self.node, logs) + + @mock.patch.object(driver_utils.LOG, 'error', autospec=True) + @mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True) + @mock.patch.object(agent_client.AgentClient, + 'collect_system_logs', autospec=True) + def test_collect_ramdisk_logs_IPA_command_fail( + self, mock_collect, mock_store, mock_log): + error_str = 'MR. KRABS! I WANNA GO TO BED!' + mock_collect.return_value = {'faultstring': error_str} + driver_utils.collect_ramdisk_logs(self.node) + # assert store was never invoked + self.assertFalse(mock_store.called) + mock_log.assert_called_once_with( + mock.ANY, {'node': self.node.uuid, 'error': error_str}) + + @mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True) + @mock.patch.object(agent_client.AgentClient, + 'collect_system_logs', autospec=True) + def test_collect_ramdisk_logs_storage_command_fail( + self, mock_collect, mock_store): + mock_collect.side_effect = exception.IronicException('boom') + self.assertIsNone(driver_utils.collect_ramdisk_logs(self.node)) + self.assertFalse(mock_store.called) + + @mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True) + @mock.patch.object(agent_client.AgentClient, + 'collect_system_logs', autospec=True) + def _collect_ramdisk_logs_storage_fail( + self, expected_exception, mock_collect, mock_store): + mock_store.side_effect = expected_exception + logs = 'Gary the Snail' + mock_collect.return_value = {'command_result': {'system_logs': logs}} + 
driver_utils.collect_ramdisk_logs(self.node) + mock_store.assert_called_once_with(self.node, logs) + + @mock.patch.object(driver_utils.LOG, 'exception', autospec=True) + def test_collect_ramdisk_logs_storage_fail_fs(self, mock_log): + error = IOError('boom') + self._collect_ramdisk_logs_storage_fail(error) + mock_log.assert_called_once_with( + mock.ANY, {'node': self.node.uuid, 'error': error}) + self.assertIn('file-system', mock_log.call_args[0][0]) + + @mock.patch.object(driver_utils.LOG, 'error', autospec=True) + def test_collect_ramdisk_logs_storage_fail_swift(self, mock_log): + error = exception.SwiftOperationError('boom') + self._collect_ramdisk_logs_storage_fail(error) + mock_log.assert_called_once_with( + mock.ANY, {'node': self.node.uuid, 'error': error}) + self.assertIn('Swift', mock_log.call_args[0][0]) + + @mock.patch.object(driver_utils.LOG, 'exception', autospec=True) + def test_collect_ramdisk_logs_storage_fail_unkown(self, mock_log): + error = Exception('boom') + self._collect_ramdisk_logs_storage_fail(error) + mock_log.assert_called_once_with( + mock.ANY, {'node': self.node.uuid, 'error': error}) + self.assertIn('Unknown error', mock_log.call_args[0][0]) + + @mock.patch.object(swift, 'SwiftAPI', autospec=True) + @mock.patch.object(driver_utils, + 'get_ramdisk_logs_file_name', autospec=True) + def test_store_ramdisk_logs_swift(self, mock_logs_name, mock_swift): + container_name = 'ironic_test_container' + file_name = 'ironic_test_file.tar.gz' + b64str = 'ZW5jb2RlZHN0cmluZw==\n' + + cfg.CONF.set_override('deploy_logs_storage_backend', 'swift', 'agent') + cfg.CONF.set_override( + 'deploy_logs_swift_container', container_name, 'agent') + cfg.CONF.set_override('deploy_logs_swift_days_to_expire', 1, 'agent') + + mock_logs_name.return_value = file_name + driver_utils.store_ramdisk_logs(self.node, b64str) + + mock_swift.return_value.create_object.assert_called_once_with( + container_name, file_name, mock.ANY, + object_headers={'X-Delete-After': 86400}) + 
mock_logs_name.assert_called_once_with(self.node) + + @mock.patch.object(os, 'makedirs', autospec=True) + @mock.patch.object(driver_utils, + 'get_ramdisk_logs_file_name', autospec=True) + def test_store_ramdisk_logs_local(self, mock_logs_name, mock_makedirs): + file_name = 'ironic_test_file.tar.gz' + b64str = 'ZW5jb2RlZHN0cmluZw==\n' + log_path = '/foo/bar' + + cfg.CONF.set_override('deploy_logs_local_path', log_path, 'agent') + mock_logs_name.return_value = file_name + + with mock.patch.object(driver_utils, 'open', new=mock.mock_open(), + create=True) as mock_open: + driver_utils.store_ramdisk_logs(self.node, b64str) + + expected_path = os.path.join(log_path, file_name) + mock_open.assert_called_once_with(expected_path, 'wb') + + mock_makedirs.assert_called_once_with(log_path) + mock_logs_name.assert_called_once_with(self.node) diff --git a/releasenotes/notes/collect-deployment-logs-2ec1634847c3f6a5.yaml b/releasenotes/notes/collect-deployment-logs-2ec1634847c3f6a5.yaml new file mode 100644 index 000000000..f2795b5d6 --- /dev/null +++ b/releasenotes/notes/collect-deployment-logs-2ec1634847c3f6a5.yaml @@ -0,0 +1,12 @@ +--- +features: + - Adds support for collecting deployment logs from the IPA + ramdisk. Five new configuration options were added, [agent]/group + deploy_logs_collect, [agent]deploy_logs_storage_backend, + [agent]/deploy_logs_local_path, [agent]/deploy_logs_swift_container and + [agent]/deploy_logs_swift_days_to_expire. +upgrades: + - Collecting logs on deploy failure is enabled by default and the logs + will be saved to the local disk. Operators upgrading may want to disable + this feature, enable some form of rotation for the logs or change the + configuration to store the logs in Swift to avoid disk space problems. From 6cbea412cc6f2f2927880c1ef4be1cb3cd50a967 Mon Sep 17 00:00:00 2001 From: "John L. 
Villalovos" Date: Fri, 5 Aug 2016 09:09:21 -0700 Subject: [PATCH 146/166] Docs: Run py34 tox test before py27 Since running the py27 tox test before the py34 tox test can cause errors about 'db type could not be determined'. Change the docs to show the py34 test first, so hopefully people will run that test first. Change-Id: I59409edc5cab7f58112ebe28e280f0eeda3becd0 --- doc/source/dev/dev-quickstart.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/dev/dev-quickstart.rst b/doc/source/dev/dev-quickstart.rst index d8287c691..6df5953ef 100644 --- a/doc/source/dev/dev-quickstart.rst +++ b/doc/source/dev/dev-quickstart.rst @@ -134,16 +134,16 @@ All unit tests should be run using tox. To run Ironic's entire test suite:: # run all tests (unit under both py27 and py34, and pep8) tox -To run the unit tests under py27 and also run the pep8 tests:: - - # run all tests (unit under py27 and pep8) - tox -epy27 -epep8 - To run the unit tests under py34 and also run the pep8 tests:: # run all tests (unit under py34 and pep8) tox -epy34 -epep8 +To run the unit tests under py27 and also run the pep8 tests:: + + # run all tests (unit under py27 and pep8) + tox -epy27 -epep8 + .. note:: If tests are run under py27 and then run under py34 or py35 the following error may occur:: From fd8192bf3e07d828fa8eeaa7f9602c2a2a3eb16c Mon Sep 17 00:00:00 2001 From: Thiago Paiva Date: Wed, 13 Apr 2016 13:21:38 -0300 Subject: [PATCH 147/166] OneView driver docs explaining Dynamic Allocation Once patch 286192 (Add Dynamic Allocation feature for the OneView drivers) lands, we need to explain in the OneView driver docs what the Dynamic Allocation feature is and how to use it properly. This patch changes the OneView driver docs explaining what changes in the new allocation paradigm and details about how to use it. 
Change-Id: I19c69baa6b81f330c37f15c2fa279f93bd65e372 Co-Authored-By: Lilia Sampaio Co-Authored-By: Sinval Vieira Co-Authored-By: Gabriel Bezerra Closes-Bug: #1541096 --- doc/source/drivers/oneview.rst | 180 ++++++++++++++++++++++++++------- 1 file changed, 141 insertions(+), 39 deletions(-) diff --git a/doc/source/drivers/oneview.rst b/doc/source/drivers/oneview.rst index 1760b43da..7f706df0b 100644 --- a/doc/source/drivers/oneview.rst +++ b/doc/source/drivers/oneview.rst @@ -10,8 +10,8 @@ Overview HP OneView [1]_ is a single integrated platform, packaged as an appliance that implements a software-defined approach to managing physical infrastructure. The appliance supports scenarios such as deploying bare metal servers, for -instance. In this context, the ``HP OneView driver`` for Ironic enables the -users of OneView to use Ironic as a bare metal provider to their managed +instance. In this context, the ``HP OneView driver`` for ironic enables the +users of OneView to use ironic as a bare metal provider to their managed physical hardware. Currently there are two OneView drivers: @@ -20,25 +20,42 @@ Currently there are two OneView drivers: * ``agent_pxe_oneview`` The ``iscsi_pxe_oneview`` and ``agent_pxe_oneview`` drivers implement the -core interfaces of an Ironic Driver [2]_, and use the ``python-oneviewclient`` -[3]_ to provide communication between Ironic and OneView through OneView's -Rest API. +core interfaces of an ironic Driver [2]_, and use the ``python-oneviewclient`` +[3]_ to provide communication between ironic and OneView through OneView's +REST API. 
To provide a bare metal instance there are four components involved in the process: -* Ironic service -* python-oneviewclient -* OneView appliance -* iscsi_pxe_oneview/agent_pxe_oneview driver +* The ironic service +* The ironic driver for OneView, which can be: + * `iscsi_pxe_oneview` or + * `agent_pxe_oneview` +* The python-oneviewclient library +* The OneView appliance -The role of Ironic is to serve as a bare metal provider to OneView's managed +The role of ironic is to serve as a bare metal provider to OneView's managed physical hardware and to provide communication with other necessary OpenStack -services such as Nova and Glance. When Ironic receives a boot request, it -works together with the Ironic OneView driver to access a machine in OneView, +services such as Nova and Glance. When ironic receives a boot request, it +works together with the ironic OneView driver to access a machine in OneView, the ``python-oneviewclient`` being responsible for the communication with the OneView appliance. +The Mitaka version of the ironic OneView drivers only supported what we call +**pre-allocation** of nodes, meaning that resources in OneView are allocated +prior to the node being made available in ironic. This model is deprecated and +will be supported until OpenStack's `P` release. From the Newton release on, +OneView drivers enables a new feature called **dynamic allocation** of nodes +[6]_. In this model, the driver allocates resources in OneView only at boot +time, allowing idle resources in ironic to be used by OneView users, enabling +actual resource sharing among ironic and OneView users. + +Since OneView can claim nodes in ``available`` state at any time, a set of +tasks runs periodically to detect nodes in use by OneView. A node in use by +OneView is placed in ``manageable`` state and has maintenance mode set. Once +the node is no longer in use, these tasks will make place them back in +``available`` state and clear maintenance mode. 
+ Prerequisites ============= @@ -51,13 +68,13 @@ The following requirements apply for both ``iscsi_pxe_oneview`` and Minimum version supported is 2.0. * ``python-oneviewclient`` is a python package containing a client to manage - the communication between Ironic and OneView. + the communication between ironic and OneView. Install the ``python-oneviewclient`` module to enable the communication. - Minimum version required is 2.0.2 but it is recommended to install the most + Minimum version required is 2.4.0 but it is recommended to install the most up-to-date version.:: - $ pip install "python-oneviewclient<3.0.0,>=2.0.2" + $ pip install "python-oneviewclient<3.0.0,>=2.4.0" Tested platforms ================ @@ -71,11 +88,13 @@ Tested platforms OneView's ServerProfile. It has been tested with the following servers: - Proliant BL460c Gen8 + - Proliant BL460c Gen9 - Proliant BL465c Gen8 - Proliant DL360 Gen9 (starting with python-oneviewclient 2.1.0) - Notice here that to the driver work correctly with Gen8 and Gen9 DL servers - in general, the hardware also needs to run version 4.2.3 of iLO, with Redfish. + Notice that for the driver to work correctly with Gen8 and Gen9 DL servers + in general, the hardware also needs to run version 4.2.3 of iLO, with + Redfish enabled. Drivers ======= @@ -91,15 +110,28 @@ Overview Configuring and enabling the driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -1. Add ``iscsi_pxe_oneview`` to the list of ``enabled_drivers`` in - ``/etc/ironic/ironic.conf``. For example:: +1. Add ``iscsi_pxe_oneview`` to the list of ``enabled_drivers`` in your + ``ironic.conf`` file. For example:: enabled_drivers = iscsi_pxe_oneview 2. Update the [oneview] section of your ``ironic.conf`` file with your OneView credentials and CA certificate files information. -3. Restart the Ironic conductor service. For Ubuntu users, do:: +.. 
note:: + If you are using the deprecated ``pre-allocation`` feature (i.e.: + ``dynamic_allocation`` is set to False on all nodes), you can disable the + driver periodic tasks by setting ``enable_periodic_tasks=false`` on the + [oneview] section of ``ironic.conf`` + +.. note:: + An operator can set the ``periodic_check_interval`` option in the [oneview] + section to set the interval between running the periodic check. The default + value is 300 seconds (5 minutes). A lower value will reduce the likelyhood + of races between ironic and OneView at the cost of being more resource + intensive. + +3. Restart the ironic conductor service. For Ubuntu users, do:: $ sudo service ironic-conductor restart @@ -112,10 +144,10 @@ Here is an overview of the deploy process for this driver: 1. Admin configures the Proliant baremetal node to use ``iscsi_pxe_oneview`` driver. -2. Ironic gets a request to deploy a Glance image on the baremetal node. +2. ironic gets a request to deploy a Glance image on the baremetal node. 3. Driver sets the boot device to PXE. 4. Driver powers on the baremetal node. -5. Ironic downloads the deploy and user images from a TFTP server. +5. ironic downloads the deploy and user images from a TFTP server. 6. Driver reboots the baremetal node. 7. User image is now deployed. 8. Driver powers off the machine. @@ -134,15 +166,28 @@ Overview Configuring and enabling the driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -1. Add ``agent_pxe_oneview`` to the list of ``enabled_drivers`` in - ``/etc/ironic/ironic.conf``. For example:: +1. Add ``agent_pxe_oneview`` to the list of ``enabled_drivers`` in your + ``ironic.conf``. For example:: enabled_drivers = fake,pxe_ssh,pxe_ipmitool,agent_pxe_oneview 2. Update the [oneview] section of your ``ironic.conf`` file with your OneView credentials and CA certificate files information. -3. Restart the Ironic conductor service. For Ubuntu users, do:: +.. 
note:: + If you are using the deprecated ``pre-allocation`` feature (i.e.: + ``dynamic_allocation`` is set to False on all nodes), you can disable the + driver periodic tasks by setting ``enable_periodic_tasks=false`` on the + [oneview] section of ``ironic.conf`` + +.. note:: + An operator can set the ``periodic_check_interval`` option in the [oneview] + section to set the interval between running the periodic check. The default + value is 300 seconds (5 minutes). A lower value will reduce the likelyhood + of races between ironic and OneView at the cost of being more resource + intensive. + +3. Restart the ironic conductor service. For Ubuntu users, do:: $ service ironic-conductor restart @@ -155,7 +200,7 @@ Here is an overview of the deploy process for this driver: 1. Admin configures the Proliant baremetal node to use ``agent_pxe_oneview`` driver. -2. Ironic gets a request to deploy a Glance image on the baremetal node. +2. ironic gets a request to deploy a Glance image on the baremetal node. 3. Driver sets the boot device to PXE. 4. Driver powers on the baremetal node. 5. Node downloads the agent deploy images. @@ -167,7 +212,7 @@ Here is an overview of the deploy process for this driver: 11. Driver powers on the machine. 12. Baremetal node is active and ready to be used. -Registering a OneView node in Ironic +Registering a OneView node in ironic ==================================== Nodes configured to use any of the OneView drivers should have the ``driver`` @@ -181,6 +226,12 @@ etc. In this case, to be enrolled, the node must have the following parameters: - ``server_hardware_uri``: URI of the Server Hardware on OneView. + - ``dynamic_allocation``: Boolean value to enable or disable (True/False) + ``dynamic allocation`` for the given node. If this parameter is not set, + the driver will consider the ``pre-allocation`` model to maintain + compatibility on ironic upgrade. 
The support for this key will be dropped + in P, where only dynamic allocation will be used. + * In ``properties/capabilities`` - ``server_hardware_type_uri``: URI of the Server Hardware Type of the @@ -205,26 +256,77 @@ OneView node, do:: $ ironic node-update $NODE_UUID add \ properties/capabilities=server_hardware_type_uri:$SHT_URI,enclosure_group_uri:$EG_URI,server_profile_template_uri=$SPT_URI -In order to deploy, a Server Profile consistent with the Server Profile -Template of the node MUST be applied to the Server Hardware it represents. -Server Profile Templates and Server Profiles to be utilized for deployments -MUST have configuration such that its **first Network Interface** ``boot`` -property is set to "Primary" and connected to Ironic's provisioning network. +In order to deploy, ironic will create and apply, at boot time, a Server +Profile based on the Server Profile Template specified on the node to the +Server Hardware it represents on OneView. The URI of such Server Profile will +be stored in ``driver_info.applied_server_profile_uri`` field while the Server +is allocated to ironic. -To tell Ironic which NIC should be connected to the provisioning network, do:: +The Server Profile Templates and, therefore, the Server Profiles derived from +them MUST comply with the following requirements: + +* The option `MAC Address` in the `Advanced` section of Server Profile/Server + Profile Template should be set to `Physical` option; +* Their first `Connection` interface should be: + + * Connected to ironic's provisioning network and; + * The `Boot` option should be set to primary. + +Node ports should be created considering the **MAC address of the first +Interface** of the given Server Hardware. + +.. 
note:: + Old versions of ironic using ``pre-allocation`` model (before Newton + release) and nodes with `dynamic_allocation` flag disabled shall have their + Server Profiles applied during node enrollment and can have their ports + created using the `Virtual` MAC addresses provided on Server Profile + application. + +To tell ironic which NIC should be connected to the provisioning network, do:: $ ironic port-create -n $NODE_UUID -a $MAC_ADDRESS -For more information on the enrollment process of an Ironic node, see [4]_. +For more information on the enrollment process of an ironic node, see [4]_. -For more information on the definitions of ``Server Hardware``, -``Server Profile``, ``Server Profile Template`` and many other OneView -entities, see [1]_ or browse Help in your OneView appliance menu. +For more information on the definitions of ``Server Hardware``, ``Server +Profile``, ``Server Profile Template`` and other OneView entities, refer to +[1]_ or browse Help in your OneView appliance menu. + +3rd Party Tools +=============== + +In order to ease user manual tasks, which are often time-consuming, we provide +useful tools that work nicely with the OneView drivers. + +ironic-oneview-cli +^^^^^^^^^^^^^^^^^^ + +The ``ironic-oneView`` CLI is a command line interface for management tasks +involving OneView nodes. Its features include a facility to create of ironic +nodes with all required parameters for OneView nodes, creation of Nova flavors +for OneView nodes and, starting from version 0.3.0, the migration of nodes from +``pre-allocation`` to the ``dynamic allocation`` model. + +For more details on how Ironic-OneView CLI works and how to set it up, see +[8]_. + +ironic-oneviewd +^^^^^^^^^^^^^^^ + +The ``ironic-oneviewd`` daemon monitors the ironic inventory of resources and +providing facilities to operators managing OneView driver deployments. The +daemon supports both allocation models (dynamic and pre-allocation) as of +version 0.1.0. 
+ +For more details on how Ironic-OneViewd works and how to set it up, see [7]_. References ========== -.. [1] HP OneView - http://www8.hp.com/us/en/business-solutions/converged-systems/oneview.html +.. [1] HP OneView - https://www.hpe.com/us/en/integrated-systems/software.html .. [2] Driver interfaces - http://docs.openstack.org/developer/ironic/dev/architecture.html#drivers .. [3] python-oneviewclient - https://pypi.python.org/pypi/python-oneviewclient .. [4] Enrollment process of a node - http://docs.openstack.org/developer/ironic/deploy/install-guide.html#enrollment-process -.. [5] Ironic install guide - http://docs.openstack.org/developer/ironic/deploy/install-guide.html#installation-guide +.. [5] ironic install guide - http://docs.openstack.org/developer/ironic/deploy/install-guide.html#installation-guide +.. [6] Dynamic Allocation in OneView drivers - http://specs.openstack.org/openstack/ironic-specs/specs/not-implemented/oneview-drivers-dynamic-allocation.html +.. [7] ironic-oneviewd - https://pypi.python.org/pypi/ironic-oneviewd/ +.. [8] ironic-oneview-cli - https://pypi.python.org/pypi/ironic-oneview-cli/ From 6ad85298c4b33648340f0987256a07d8d650f4aa Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Fri, 5 Aug 2016 14:36:21 -0700 Subject: [PATCH 148/166] Simplify code by using mask_dict_password A patch to oslo.utils was done to make mask_dict_password() work like mask_password(). Using mask_dict_password() makes the code simpler and easier to understand. Change-Id: I3c34234eca11ff9d58ccdbbdc2d0114bdb6efebb --- ironic/api/controllers/v1/node.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py index 679234009..ca595f792 100644 --- a/ironic/api/controllers/v1/node.py +++ b/ironic/api/controllers/v1/node.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import ast import datetime from ironic_lib import metrics_utils @@ -802,9 +801,8 @@ class Node(base.APIBase): bookmark=True)] if not show_password and node.driver_info != wtypes.Unset: - node.driver_info = ast.literal_eval(strutils.mask_password( - node.driver_info, - "******")) + node.driver_info = strutils.mask_dict_password(node.driver_info, + "******") # NOTE(lucasagomes): The numeric ID should not be exposed to # the user, it's internal only. From 93d2ba46ef6bd2d4e677bbd43c066aa5da880f5e Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Sun, 7 Aug 2016 16:01:29 -0700 Subject: [PATCH 149/166] Metrics for agent client These are timing metrics for client calls to the agent. Change-Id: I397cb5902e5d8503d924fe6a6693d8a31dcd2dd6 Related-bug: #1526219 --- ironic/drivers/modules/agent_client.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ironic/drivers/modules/agent_client.py b/ironic/drivers/modules/agent_client.py index 6f3b4fba7..c8a82f19f 100644 --- a/ironic/drivers/modules/agent_client.py +++ b/ironic/drivers/modules/agent_client.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from ironic_lib import metrics_utils from oslo_log import log from oslo_serialization import jsonutils import requests @@ -24,11 +25,14 @@ from ironic.conf import CONF LOG = log.getLogger(__name__) +METRICS = metrics_utils.get_metrics_logger(__name__) + DEFAULT_IPA_PORTAL_PORT = 3260 class AgentClient(object): """Client for interacting with nodes via a REST API.""" + @METRICS.timer('AgentClient.__init__') def __init__(self): self.session = requests.Session() self.session.headers.update({'Content-Type': 'application/json'}) @@ -49,6 +53,7 @@ class AgentClient(object): 'params': params, }) + @METRICS.timer('AgentClient._command') def _command(self, node, method, params, wait=False): url = self._get_command_url(node) body = self._get_command_body(method, params) @@ -89,6 +94,7 @@ class AgentClient(object): 'code': response.status_code}) return result + @METRICS.timer('AgentClient.get_commands_status') def get_commands_status(self, node): url = self._get_command_url(node) LOG.debug('Fetching status of agent commands for node %s', node.uuid) @@ -103,6 +109,7 @@ class AgentClient(object): {'node': node.uuid, 'status': status}) return result + @METRICS.timer('AgentClient.prepare_image') def prepare_image(self, node, image_info, wait=False): """Call the `prepare_image` method on the node.""" LOG.debug('Preparing image %(image)s on node %(node)s.', @@ -120,6 +127,7 @@ class AgentClient(object): params=params, wait=wait) + @METRICS.timer('AgentClient.start_iscsi_target') def start_iscsi_target(self, node, iqn, portal_port=DEFAULT_IPA_PORTAL_PORT, wipe_disk_metadata=False): @@ -186,6 +194,7 @@ class AgentClient(object): # required, break from the loop returning the result return result + @METRICS.timer('AgentClient.install_bootloader') def install_bootloader(self, node, root_uuid, efi_system_part_uuid=None): """Install a boot loader on the image.""" params = {'root_uuid': root_uuid, @@ -195,6 +204,7 @@ class AgentClient(object): params=params, wait=True) + 
@METRICS.timer('AgentClient.get_clean_steps') def get_clean_steps(self, node, ports): params = { 'node': node.as_dict(), @@ -205,6 +215,7 @@ class AgentClient(object): params=params, wait=True) + @METRICS.timer('AgentClient.execute_clean_step') def execute_clean_step(self, step, node, ports): params = { 'step': step, @@ -217,12 +228,14 @@ class AgentClient(object): method='clean.execute_clean_step', params=params) + @METRICS.timer('AgentClient.power_off') def power_off(self, node): """Soft powers off the bare metal node by shutting down ramdisk OS.""" return self._command(node=node, method='standby.power_off', params={}) + @METRICS.timer('AgentClient.sync') def sync(self, node): """Flush file system buffers forcing changed blocks to disk.""" return self._command(node=node, @@ -230,6 +243,7 @@ class AgentClient(object): params={}, wait=True) + @METRICS.timer('AgentClient.collect_system_logs') def collect_system_logs(self, node): """Collect and package diagnostic and support data from the ramdisk.""" return self._command(node=node, From ce19e504ddc0c836fcb1d4431e8216eabf393c1f Mon Sep 17 00:00:00 2001 From: Shivanand Tendulker Date: Wed, 6 Jul 2016 21:26:33 -0700 Subject: [PATCH 150/166] Documentation fixes for iLO SSL Certificate feature This commit updates documentation related to validation of iLO SSL certificate by iLO drivers. Change-Id: Ia06aae7a288451577b14629a8ada9223d3f681a5 Closes-Bug: #1599710 --- doc/source/drivers/ilo.rst | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/doc/source/drivers/ilo.rst b/doc/source/drivers/ilo.rst index bf410642c..2d77a53a2 100644 --- a/doc/source/drivers/ilo.rst +++ b/doc/source/drivers/ilo.rst @@ -289,7 +289,7 @@ Nodes configured for iLO driver should have the ``driver`` property set to - ``ilo_username``: Username for the iLO with administrator privileges. - ``ilo_password``: Password for the above iLO user. 
- ``ilo_deploy_iso``: The glance UUID of the deploy ramdisk ISO image. -- ``ilo_ca_file``: (optional) CA certificate file to validate iLO. +- ``ca_file``: (optional) CA certificate file to validate iLO. - ``client_port``: (optional) Port to be used for iLO operations if you are using a custom port on the iLO. Default port used is 443. - ``client_timeout``: (optional) Timeout for iLO operations. Default timeout @@ -297,6 +297,14 @@ Nodes configured for iLO driver should have the ``driver`` property set to - ``console_port``: (optional) Node's UDP port for console access. Any unused port on the ironic conductor node may be used. +.. note:: + To update SSL certificates into iLO, you can refer to `HPE Integrated + Lights-Out Security Technology Brief `_. + You can use iLO hostname or IP address as a 'Common Name (CN)' while + generating Certificate Signing Request (CSR). Use the same value as + `ilo_address` while enrolling node to Bare Metal service to avoid SSL + certificate validation errors related to hostname mismatch. + For example, you could run a similar command like below to enroll the ProLiant node:: @@ -427,7 +435,7 @@ Nodes configured for iLO driver should have the ``driver`` property set to - ``ilo_username``: Username for the iLO with administrator privileges. - ``ilo_password``: Password for the above iLO user. - ``ilo_deploy_iso``: The glance UUID of the deploy ramdisk ISO image. -- ``ilo_ca_file``: (optional) CA certificate file to validate iLO. +- ``ca_file``: (optional) CA certificate file to validate iLO. - ``client_port``: (optional) Port to be used for iLO operations if you are using a custom port on the iLO. Default port used is 443. - ``client_timeout``: (optional) Timeout for iLO operations. Default timeout @@ -435,6 +443,14 @@ Nodes configured for iLO driver should have the ``driver`` property set to - ``console_port``: (optional) Node's UDP port for console access. Any unused port on the ironic conductor node may be used. +.. 
note:: + To update SSL certificates into iLO, you can refer to `HPE Integrated + Lights-Out Security Technology Brief `_. + You can use iLO hostname or IP address as a 'Common Name (CN)' while + generating Certificate Signing Request (CSR). Use the same value as + `ilo_address` while enrolling node to Bare Metal service to avoid SSL + certificate validation errors related to hostname mismatch. + For example, you could run a similar command like below to enroll the ProLiant node:: @@ -547,7 +563,7 @@ Nodes configured for iLO driver should have the ``driver`` property set to - ``ilo_password``: Password for the above iLO user. - ``deploy_kernel``: The glance UUID of the deployment kernel. - ``deploy_ramdisk``: The glance UUID of the deployment ramdisk. -- ``ilo_ca_file``: (optional) CA certificate file to validate iLO. +- ``ca_file``: (optional) CA certificate file to validate iLO. - ``client_port``: (optional) Port to be used for iLO operations if you are using a custom port on the iLO. Default port used is 443. - ``client_timeout``: (optional) Timeout for iLO operations. Default timeout @@ -555,6 +571,14 @@ Nodes configured for iLO driver should have the ``driver`` property set to - ``console_port``: (optional) Node's UDP port for console access. Any unused port on the ironic conductor node may be used. +.. note:: + To update SSL certificates into iLO, you can refer to `HPE Integrated + Lights-Out Security Technology Brief `_. + You can use iLO hostname or IP address as a 'Common Name (CN)' while + generating Certificate Signing Request (CSR). Use the same value as + `ilo_address` while enrolling node to Bare Metal service to avoid SSL + certificate validation errors related to hostname mismatch. 
+ For example, you could run a similar command like below to enroll the ProLiant node:: From 4f34242ca1fa32e461cff74ce9bfb6e8f8d2ff84 Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Mon, 8 Aug 2016 13:30:39 +0800 Subject: [PATCH 151/166] Rename BaseApiTest.config to app_config TestCase has a method named config(). Then DbTestCase inherits from TestCase, and BaseApiTest inherits from DbTestCase. BaseApiTest also have an attribute named "config" used to configure the pecan app for tests. So when a test class inherited from BaseApiTest calls config(), it gets a "Dict is not callable" exception. This patch rename BaseApiTest.config to BaseApiTest.app_config to avoid this problem. Change-Id: I1b020d0ac8d7068bd029fbb4b44b667ec7f01513 --- ironic/tests/unit/api/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ironic/tests/unit/api/base.py b/ironic/tests/unit/api/base.py index 4d7452486..cfd149865 100644 --- a/ironic/tests/unit/api/base.py +++ b/ironic/tests/unit/api/base.py @@ -65,7 +65,7 @@ class BaseApiTest(base.DbTestCase): # Determine where we are so we can set up paths in the config root_dir = self.path_get() - self.config = { + self.app_config = { 'app': { 'root': 'ironic.api.controllers.root.RootController', 'modules': ['ironic.api'], @@ -74,7 +74,7 @@ class BaseApiTest(base.DbTestCase): 'acl_public_routes': ['/', '/v1'], }, } - return pecan.testing.load_test_app(self.config) + return pecan.testing.load_test_app(self.app_config) def _request_json(self, path, params, expect_errors=False, headers=None, method="post", extra_environ=None, status=None, From dcd93a58e4302eec597e83ab983754024c04a5f3 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Fri, 5 Aug 2016 13:52:21 +0300 Subject: [PATCH 152/166] Use devstack test-config phase New phase 'test-config' has been introduced by [0]. 'test-config' is called at the end of devstack used to configure tempest or any other test environments. 
This patch uses new phase to configure tempest if it is enabled. Reference: [0] https://review.openstack.org/#/c/311797/ Change-Id: I8a6a3aeee89c1c08bfa5e630b1cdd8f8a5fa2007 --- devstack/plugin.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 958b65a6a..30b5d9daa 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -50,7 +50,12 @@ if is_service_enabled ir-api ir-cond; then echo_summary "Starting Ironic" start_ironic prepare_baremetal_basic_ops + + elif [[ "$2" == "test-config" ]]; then + # stack/test-config - Called at the end of devstack used to configure tempest + # or any other test environments if is_service_enabled tempest; then + echo_summary "Configuring Tempest for Ironic needs" ironic_configure_tempest fi fi From 5a3c91b558c5a40ab837dc7dd9f06b1321e2c5aa Mon Sep 17 00:00:00 2001 From: zhangjl Date: Wed, 3 Aug 2016 02:00:17 +0000 Subject: [PATCH 153/166] Fix the mistakes in Installation Guide doc Following the doc in install-guide, some code is out of date. First, according to http://docs.openstack.org/mitaka/install-guide-rdo/, maybe, it`s better to using v3 commands replace the olds in creating endpoints. Second, add xinetd package in install requirements. 
Closes-bug: #1608916 Signed-off-by: zhangjl Change-Id: I5bb448479156dbc805c7ecae00d2f694850b24c0 --- doc/source/deploy/install-guide.rst | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/doc/source/deploy/install-guide.rst b/doc/source/deploy/install-guide.rst index cf8804b23..b98560523 100644 --- a/doc/source/deploy/install-guide.rst +++ b/doc/source/deploy/install-guide.rst @@ -109,12 +109,22 @@ Configure the Identity service for the Bare Metal service registering the service (above), to create the endpoint, and replace IRONIC_NODE with your Bare Metal service's API node:: + openstack endpoint create --region RegionOne \ + baremetal admin http://IRONIC_NODE:6385 + openstack endpoint create --region RegionOne \ + baremetal public http://IRONIC_NODE:6385 + openstack endpoint create --region RegionOne \ + baremetal internal http://IRONIC_NODE:6385 + + If only keystone v2 API is available, use this command instead:: + openstack endpoint create --region RegionOne \ --publicurl http://IRONIC_NODE:6385 \ --internalurl http://IRONIC_NODE:6385 \ --adminurl http://IRONIC_NODE:6385 \ baremetal + Set up the database for Bare Metal ---------------------------------- @@ -823,10 +833,10 @@ node(s) where ``ironic-conductor`` is running. sudo apt-get install xinetd tftpd-hpa syslinux-common pxelinux Fedora 21/RHEL7/CentOS7: - sudo yum install tftp-server syslinux-tftpboot + sudo yum install tftp-server syslinux-tftpboot xinetd Fedora 22 or higher: - sudo dnf install tftp-server syslinux-tftpboot + sudo dnf install tftp-server syslinux-tftpboot xinetd #. Using xinetd to provide a tftp server setup to serve ``/tftpboot``. 
Create or edit ``/etc/xinetd.d/tftp`` as below:: From 8af7429d6eec94ed4fedad0185a35b68e0b7dc87 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Tue, 2 Aug 2016 16:57:05 -0700 Subject: [PATCH 154/166] Timing metrics: pxe boot and iscsi deploy driver Adds timing metrics for methods in the iscsi_deploy, pxe, and agent_base_vendor modules. Change-Id: Iae4a6652febcbf1ecbab948b2c4d9fa60af1e3a9 Related-bug: #1526219 --- ironic/drivers/modules/agent_base_vendor.py | 13 ++++++++++++ ironic/drivers/modules/iscsi_deploy.py | 20 +++++++++++++++++++ ironic/drivers/modules/pxe.py | 10 ++++++++++ .../drivers/modules/test_agent_base_vendor.py | 14 +++++++------ 4 files changed, 51 insertions(+), 6 deletions(-) diff --git a/ironic/drivers/modules/agent_base_vendor.py b/ironic/drivers/modules/agent_base_vendor.py index ec2092573..cc0a9419f 100644 --- a/ironic/drivers/modules/agent_base_vendor.py +++ b/ironic/drivers/modules/agent_base_vendor.py @@ -20,6 +20,7 @@ import ast import collections import time +from ironic_lib import metrics_utils from oslo_log import log from oslo_utils import excutils from oslo_utils import strutils @@ -45,6 +46,8 @@ from ironic import objects LOG = log.getLogger(__name__) +METRICS = metrics_utils.get_metrics_logger(__name__) + # This contains a nested dictionary containing the post clean step # hooks registered for each clean step of every interface. # Every key of POST_CLEAN_STEP_HOOKS is an interface and its value @@ -79,6 +82,7 @@ def _get_client(): return client +@METRICS.timer('post_clean_step_hook') def post_clean_step_hook(interface, step): """Decorator method for adding a post clean step hook. 
@@ -203,6 +207,7 @@ def _get_completed_cleaning_command(task, commands): return last_command +@METRICS.timer('log_and_raise_deployment_error') def log_and_raise_deployment_error(task, msg): """Helper method to log the error and raise exception.""" LOG.error(msg) @@ -244,6 +249,7 @@ class AgentDeployMixin(object): """ + @METRICS.timer('AgentDeployMixin._refresh_clean_steps') def _refresh_clean_steps(self, task): """Refresh the node's cached clean steps from the booted agent. @@ -300,6 +306,7 @@ class AgentDeployMixin(object): LOG.debug('Refreshed agent clean step cache for node %(node)s: ' '%(steps)s', {'node': node.uuid, 'steps': steps}) + @METRICS.timer('AgentDeployMixin.continue_cleaning') def continue_cleaning(self, task, **kwargs): """Start the next cleaning step if the previous one is complete. @@ -433,6 +440,7 @@ class AgentDeployMixin(object): LOG.error(msg) return manager_utils.cleaning_error_handler(task, msg) + @METRICS.timer('AgentDeployMixin.heartbeat') def heartbeat(self, task, callback_url): """Process a heartbeat. @@ -511,6 +519,7 @@ class AgentDeployMixin(object): elif node.provision_state in (states.DEPLOYING, states.DEPLOYWAIT): deploy_utils.set_failed_state(task, last_error) + @METRICS.timer('AgentDeployMixin.reboot_and_finish_deploy') def reboot_and_finish_deploy(self, task): """Helper method to trigger reboot on the node and finish deploy. @@ -581,6 +590,7 @@ class AgentDeployMixin(object): task.process_event('done') LOG.info(_LI('Deployment to node %s done'), task.node.uuid) + @METRICS.timer('AgentDeployMixin.prepare_instance_to_boot') def prepare_instance_to_boot(self, task, root_uuid, efi_sys_uuid): """Prepares instance to boot. 
@@ -605,6 +615,7 @@ class AgentDeployMixin(object): msg = _('Failed to continue agent deployment.') log_and_raise_deployment_error(task, msg) + @METRICS.timer('AgentDeployMixin.configure_local_boot') def configure_local_boot(self, task, root_uuid=None, efi_system_part_uuid=None): """Helper method to configure local boot on the node. @@ -692,6 +703,7 @@ class BaseAgentVendor(AgentDeployMixin, base.VendorInterface): 'payload version: %s') % version) + @METRICS.timer('BaseAgentVendor.heartbeat') @base.passthru(['POST']) @task_manager.require_exclusive_lock def heartbeat(self, task, **kwargs): @@ -715,6 +727,7 @@ class BaseAgentVendor(AgentDeployMixin, base.VendorInterface): super(BaseAgentVendor, self).heartbeat(task, callback_url) + @METRICS.timer('BaseAgentVendor.lookup') @base.driver_passthru(['POST'], async=False) def lookup(self, context, **kwargs): """Find a matching node for the agent. diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py index 3369b2586..cbaac23f6 100644 --- a/ironic/drivers/modules/iscsi_deploy.py +++ b/ironic/drivers/modules/iscsi_deploy.py @@ -16,6 +16,7 @@ import os from ironic_lib import disk_utils +from ironic_lib import metrics_utils from ironic_lib import utils as ironic_utils from oslo_config import cfg from oslo_log import log as logging @@ -37,6 +38,8 @@ from ironic.drivers.modules import image_cache LOG = logging.getLogger(__name__) +METRICS = metrics_utils.get_metrics_logger(__name__) + # NOTE(rameshg87): This file now registers some of opts in pxe group. # This is acceptable for now as a future refactoring into # separate boot and deploy interfaces is planned, and moving config @@ -129,6 +132,7 @@ def _save_disk_layout(node, i_info): node.save() +@METRICS.timer('check_image_size') def check_image_size(task): """Check if the requested image is larger than the root partition size. 
@@ -147,6 +151,7 @@ def check_image_size(task): raise exception.InstanceDeployFailure(msg) +@METRICS.timer('cache_instance_image') def cache_instance_image(ctx, node): """Fetch the instance's image from Glance @@ -172,6 +177,7 @@ def cache_instance_image(ctx, node): return (uuid, image_path) +@METRICS.timer('destroy_images') def destroy_images(node_uuid): """Delete instance's image file. @@ -182,6 +188,7 @@ def destroy_images(node_uuid): InstanceImageCache().clean_up() +@METRICS.timer('get_deploy_info') def get_deploy_info(node, address, iqn, port=None, lun='1'): """Returns the information required for doing iSCSI deploy in a dictionary. @@ -235,6 +242,7 @@ def get_deploy_info(node, address, iqn, port=None, lun='1'): return params +@METRICS.timer('continue_deploy') def continue_deploy(task, **kwargs): """Resume a deployment upon getting POST data from deploy ramdisk. @@ -306,6 +314,7 @@ def continue_deploy(task, **kwargs): return uuid_dict_returned +@METRICS.timer('do_agent_iscsi_deploy') def do_agent_iscsi_deploy(task, agent_client): """Method invoked when deployed with the agent ramdisk. @@ -375,6 +384,7 @@ def _get_boot_mode(node): return "bios" +@METRICS.timer('validate') def validate(task): """Validates the pre-requisites for iSCSI deploy. @@ -405,6 +415,7 @@ def validate(task): class AgentDeployMixin(agent_base_vendor.AgentDeployMixin): + @METRICS.timer('AgentDeployMixin.continue_deploy') @task_manager.require_exclusive_lock def continue_deploy(self, task): """Method invoked when deployed using iSCSI. @@ -436,6 +447,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): def get_properties(self): return {} + @METRICS.timer('ISCSIDeploy.validate') def validate(self, task): """Validate the deployment information for the task's node. @@ -454,6 +466,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): # broken down into separate boot and deploy implementations. 
validate(task) + @METRICS.timer('ISCSIDeploy.deploy') @task_manager.require_exclusive_lock def deploy(self, task): """Start deployment of the task's node. @@ -474,6 +487,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): return states.DEPLOYWAIT + @METRICS.timer('ISCSIDeploy.tear_down') @task_manager.require_exclusive_lock def tear_down(self, task): """Tear down a previous deployment on the task's node. @@ -493,6 +507,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): task.driver.network.unconfigure_tenant_networks(task) return states.DELETED + @METRICS.timer('ISCSIDeploy.prepare') @task_manager.require_exclusive_lock def prepare(self, task): """Prepare the deployment environment for this task's node. @@ -523,6 +538,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): deploy_opts = deploy_utils.build_agent_options(node) task.driver.boot.prepare_ramdisk(task, deploy_opts) + @METRICS.timer('ISCSIDeploy.clean_up') def clean_up(self, task): """Clean up the deployment environment for the task's node. @@ -540,6 +556,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): def take_over(self, task): pass + @METRICS.timer('ISCSIDeploy.get_clean_steps') def get_clean_steps(self, task): """Get the list of clean steps from the agent. @@ -555,6 +572,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): 'erase_devices': CONF.deploy.erase_devices_priority}) return steps + @METRICS.timer('ISCSIDeploy.execute_clean_step') def execute_clean_step(self, task, step): """Execute a clean step asynchronously on the agent. @@ -567,6 +585,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): """ return deploy_utils.agent_execute_clean_step(task, step) + @METRICS.timer('ISCSIDeploy.prepare_cleaning') def prepare_cleaning(self, task): """Boot into the agent to prepare for cleaning. 
@@ -578,6 +597,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): return deploy_utils.prepare_inband_cleaning( task, manage_boot=True) + @METRICS.timer('ISCSIDeploy.tear_down_cleaning') def tear_down_cleaning(self, task): """Clean up the PXE and DHCP files after cleaning. diff --git a/ironic/drivers/modules/pxe.py b/ironic/drivers/modules/pxe.py index e31db8c1c..45634a9f6 100644 --- a/ironic/drivers/modules/pxe.py +++ b/ironic/drivers/modules/pxe.py @@ -18,6 +18,7 @@ PXE Boot Interface import os import shutil +from ironic_lib import metrics_utils from ironic_lib import utils as ironic_utils from oslo_config import cfg from oslo_log import log as logging @@ -93,6 +94,8 @@ pxe_opts = [ LOG = logging.getLogger(__name__) +METRICS = metrics_utils.get_metrics_logger(__name__) + CONF = cfg.CONF CONF.register_opts(pxe_opts, group='pxe') CONF.import_opt('deploy_callback_timeout', 'ironic.conductor.manager', @@ -240,6 +243,7 @@ def _build_pxe_config_options(task, pxe_info): return pxe_options +@METRICS.timer('validate_boot_option_for_uefi') def validate_boot_option_for_uefi(node): """In uefi boot mode, validate if the boot option is compatible. @@ -263,6 +267,7 @@ def validate_boot_option_for_uefi(node): {'node_uuid': node.uuid}) +@METRICS.timer('validate_boot_option_for_trusted_boot') def validate_boot_parameters_for_trusted_boot(node): """Check if boot parameters are valid for trusted boot.""" boot_mode = deploy_utils.get_boot_mode_for_deploy(node) @@ -335,6 +340,7 @@ class PXEBoot(base.BootInterface): """ return COMMON_PROPERTIES + @METRICS.timer('PXEBoot.validate') def validate(self, task): """Validate the PXE-specific info for booting deploy/instance images. @@ -385,6 +391,7 @@ class PXEBoot(base.BootInterface): props = ['kernel', 'ramdisk'] deploy_utils.validate_image_properties(task.context, d_info, props) + @METRICS.timer('PXEBoot.prepare_ramdisk') def prepare_ramdisk(self, task, ramdisk_params): """Prepares the boot of Ironic ramdisk using PXE. 
@@ -441,6 +448,7 @@ class PXEBoot(base.BootInterface): # the image kernel and ramdisk (Or even require it). _cache_ramdisk_kernel(task.context, node, pxe_info) + @METRICS.timer('PXEBoot.clean_up_ramdisk') def clean_up_ramdisk(self, task): """Cleans up the boot of ironic ramdisk. @@ -461,6 +469,7 @@ class PXEBoot(base.BootInterface): else: _clean_up_pxe_env(task, images_info) + @METRICS.timer('PXEBoot.prepare_instance') def prepare_instance(self, task): """Prepares the boot of instance. @@ -525,6 +534,7 @@ class PXEBoot(base.BootInterface): pxe_utils.clean_up_pxe_config(task) deploy_utils.try_set_boot_device(task, boot_devices.DISK) + @METRICS.timer('PXEBoot.clean_up_instance') def clean_up_instance(self, task): """Cleans up the boot of instance. diff --git a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py index 389b9f0bc..e219c3106 100644 --- a/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py +++ b/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py @@ -911,9 +911,10 @@ class TestHeartbeat(AgentDeployMixinBaseTest): shared=False) as task: self.deploy.prepare_instance_to_boot(task, root_uuid, efi_system_part_uuid) - configure_mock.assert_called_once_with(self.deploy, task, - root_uuid, - efi_system_part_uuid) + configure_mock.assert_called_once_with( + self.deploy, task, + root_uuid=root_uuid, + efi_system_part_uuid=efi_system_part_uuid) boot_option_mock.assert_called_once_with(task.node) prepare_instance_mock.assert_called_once_with(task.driver.boot, task) @@ -945,9 +946,10 @@ class TestHeartbeat(AgentDeployMixinBaseTest): self.assertRaises(exception.InstanceDeployFailure, self.deploy.prepare_instance_to_boot, task, root_uuid, efi_system_part_uuid) - configure_mock.assert_called_once_with(self.deploy, task, - root_uuid, - efi_system_part_uuid) + configure_mock.assert_called_once_with( + self.deploy, task, + root_uuid=root_uuid, + efi_system_part_uuid=efi_system_part_uuid) 
boot_option_mock.assert_called_once_with(task.node) self.assertFalse(prepare_mock.called) self.assertFalse(failed_state_mock.called) From 2f84daaa6f34ba52e0b5660c2233e9c5ba04b98a Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Fri, 1 Jul 2016 14:59:59 +0200 Subject: [PATCH 155/166] Officially deprecate agent passthru classes and API We've introduced the new set of API endpoints for lookup and heartbeat which all drivers should implement now and all ramdisk should use. IPA switched to the new API in the Newton release. The deprecated passthru endpoints and classes will be removed in the Ocata release. Change-Id: If0d189ecdd4b84d1dbac274e6dec200e13dfe37c Depends-On: I7160c99ca63585fc333482fa578fdf5e0962b2b6 Closes-Bug: #1570841 --- ironic/drivers/modules/agent.py | 3 +++ ironic/drivers/modules/agent_base_vendor.py | 6 ++++++ ironic/drivers/modules/iscsi_deploy.py | 8 +++++++- .../notes/deprecate-agent-passthru-67d1e2cf25b30a30.yaml | 6 ++++++ 4 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/deprecate-agent-passthru-67d1e2cf25b30a30.yaml diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py index bf93c4902..16a7fdcfc 100644 --- a/ironic/drivers/modules/agent.py +++ b/ironic/drivers/modules/agent.py @@ -565,6 +565,9 @@ class AgentVendorInterface(agent_base_vendor.BaseAgentVendor, """Implementation of agent vendor interface. Contains old lookup and heartbeat endpoints currently pending deprecation. + + WARNING: This class is deprecated and will be removed in the Ocata release. + Drivers should stop relying on it. 
""" diff --git a/ironic/drivers/modules/agent_base_vendor.py b/ironic/drivers/modules/agent_base_vendor.py index d7929596c..20da38299 100644 --- a/ironic/drivers/modules/agent_base_vendor.py +++ b/ironic/drivers/modules/agent_base_vendor.py @@ -655,6 +655,8 @@ class AgentDeployMixin(object): node.uuid) +# TODO(dtantsur): deprecate and remove it as soon as we stop using it ourselves +# in the Ocata release. class BaseAgentVendor(AgentDeployMixin, base.VendorInterface): def __init__(self): @@ -766,6 +768,10 @@ class BaseAgentVendor(AgentDeployMixin, base.VendorInterface): :raises: NotFound if no matching node is found. :raises: InvalidParameterValue with unknown payload version """ + LOG.warning( + _LW('Agent lookup vendor passthru is deprecated and will be ' + 'removed in the Ocata release; please update your ' + 'ironic-python-agent image to the Newton version')) LOG.debug('Agent lookup using data %s', kwargs) uuid = kwargs.get('node_uuid') if uuid: diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py index e43afad81..5e35a24ab 100644 --- a/ironic/drivers/modules/iscsi_deploy.py +++ b/ironic/drivers/modules/iscsi_deploy.py @@ -581,4 +581,10 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface): class VendorPassthru(AgentDeployMixin, agent_base_vendor.BaseAgentVendor): - """Interface to mix IPMI and PXE vendor-specific interfaces.""" + """Interface to mix IPMI and PXE vendor-specific interfaces. + + Contains old lookup and heartbeat endpoints currently pending deprecation. + + WARNING: This class is deprecated and will be removed in the Ocata release. + Drivers should stop relying on it. 
+ """ diff --git a/releasenotes/notes/deprecate-agent-passthru-67d1e2cf25b30a30.yaml b/releasenotes/notes/deprecate-agent-passthru-67d1e2cf25b30a30.yaml new file mode 100644 index 000000000..757696408 --- /dev/null +++ b/releasenotes/notes/deprecate-agent-passthru-67d1e2cf25b30a30.yaml @@ -0,0 +1,6 @@ +--- +deprecations: + - Agent vendor passthru is deprecated and will be removed in Ocata release. + Operators should update their IPA image to the Newton version to use + the new replacement API. Driver developers should stop using the agent + vendor passthru. From 0a8d39a9e9d5b0582bfbbeda7ba0e1f0eaaef49d Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Mon, 8 Aug 2016 09:32:44 -0700 Subject: [PATCH 156/166] Simplify code by using mask_dict_password (again) A patch to oslo.utils was done to make mask_dict_password() work like mask_password(). Using mask_dict_password() makes the code simpler and easier to understand. There was a previous patch (6ad85298c4b33648340f0987256a07d8d650f4aa) similar to this but it missed changing this occurrence. Change-Id: I25233e49555cee9f856c755b63234cb7ab31d6aa --- ironic/drivers/modules/agent_base_vendor.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ironic/drivers/modules/agent_base_vendor.py b/ironic/drivers/modules/agent_base_vendor.py index f09228afa..1c4006524 100644 --- a/ironic/drivers/modules/agent_base_vendor.py +++ b/ironic/drivers/modules/agent_base_vendor.py @@ -16,7 +16,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import ast import collections import time @@ -787,8 +786,8 @@ class BaseAgentVendor(AgentDeployMixin, base.VendorInterface): ndict = node.as_dict() if not context.show_password: - ndict['driver_info'] = ast.literal_eval( - strutils.mask_password(ndict['driver_info'], "******")) + ndict['driver_info'] = strutils.mask_dict_password( + ndict['driver_info'], "******") return { # heartbeat_timeout is a config, so moving it into the From a5291b91002830c5738f9725353673ead2395e5c Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Mon, 8 Aug 2016 13:45:43 +0800 Subject: [PATCH 157/166] Trivial: Remove useless function call in glance service test Change-Id: I060ccd687c85ed4b44c0904b7343a5f05bc77679 --- ironic/tests/unit/common/test_glance_service.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ironic/tests/unit/common/test_glance_service.py b/ironic/tests/unit/common/test_glance_service.py index 5cdc5bb31..951bfde2f 100644 --- a/ironic/tests/unit/common/test_glance_service.py +++ b/ironic/tests/unit/common/test_glance_service.py @@ -705,7 +705,6 @@ class TestGlanceSwiftTempURL(base.TestCase): group='glance') self.config(swift_store_multiple_containers_seed=0, group='glance') - self.config() self.fake_image = { 'id': '757274c4-2856-4bd2-bb20-9a4a231e187b' } From 600e2e4057bd8ff807895f55d4b9435c9d7eedaa Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 9 Aug 2016 08:47:16 +0300 Subject: [PATCH 158/166] Update devstack configure_ironic_ssh_keypair When authorized_file doesn't contain new-line character at the end, configure_ironic_ssh_keypair() just append new key to the existed line, as result both keys becomes corrupted. The patch adds a check for last character in the authorized_key file, if new-line is missed add it. 
Change-Id: I64a7bba5f3c7dee76ae2faf659af1231280f2b8b Closes-Bug: #1611207 --- devstack/lib/ironic | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/devstack/lib/ironic b/devstack/lib/ironic index c6e4468c9..c49995443 100644 --- a/devstack/lib/ironic +++ b/devstack/lib/ironic @@ -1184,6 +1184,10 @@ function configure_ironic_ssh_keypair { fi echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f $IRONIC_KEY_FILE fi + # NOTE(vsaienko) check for new line character, add if doesn't exist. + if [[ "$(tail -c1 $IRONIC_AUTHORIZED_KEYS_FILE | wc -l)" == "0" ]]; then + echo "" >> $IRONIC_AUTHORIZED_KEYS_FILE + fi cat $IRONIC_KEY_FILE.pub | tee -a $IRONIC_AUTHORIZED_KEYS_FILE # remove duplicate keys. sort -u -o $IRONIC_AUTHORIZED_KEYS_FILE $IRONIC_AUTHORIZED_KEYS_FILE From 2f94f9eb85226c5750f35051e055de9379be4ffa Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 9 Aug 2016 10:25:15 +0000 Subject: [PATCH 159/166] Updated from global requirements Change-Id: Ibf40a95bb193c74acd1c2e320d7b27876b37b308 --- requirements.txt | 2 +- test-requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 7a253909d..fc3704119 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ pysendfile>=2.0.0 # MIT websockify>=0.8.0 # LGPLv3 oslo.concurrency>=3.8.0 # Apache-2.0 oslo.config>=3.14.0 # Apache-2.0 -oslo.context!=2.6.0,>=2.4.0 # Apache-2.0 +oslo.context>=2.4.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.rootwrap>=5.0.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 diff --git a/test-requirements.txt b/test-requirements.txt index 4eb4e4048..86db1716d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -26,4 +26,4 @@ sphinxcontrib-pecanwsme>=0.8 # Apache-2.0 sphinxcontrib-seqdiag # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=1.8.0 # Apache2 -os-api-ref>=0.1.0 # Apache-2.0 +os-api-ref>=0.4.0 # Apache-2.0 From 8c842ec50574cf72fe00cbfb7675cbdf826377f4 Mon Sep 17 00:00:00 2001 From: Dmitry 
Tantsur Date: Mon, 1 Aug 2016 15:40:08 +0200 Subject: [PATCH 160/166] Deprecate putting periodic tasks on a driver object This approach will no longer make sense when the driver composition is in effect. Anyway usually a better place to put a task is an interface. Change-Id: I7096e428ce9774d89ac624c2d38bb23984a4b842 Related-Bug: #1524745 --- doc/source/dev/architecture.rst | 17 +++++++---------- ironic/conductor/base_manager.py | 2 ++ ...periodic-tasks-drivers-ae9cddab88b546c6.yaml | 5 +++++ 3 files changed, 14 insertions(+), 10 deletions(-) create mode 100644 releasenotes/notes/periodic-tasks-drivers-ae9cddab88b546c6.yaml diff --git a/doc/source/dev/architecture.rst b/doc/source/dev/architecture.rst index f753588e6..c4bf63999 100644 --- a/doc/source/dev/architecture.rst +++ b/doc/source/dev/architecture.rst @@ -76,8 +76,8 @@ Driver-Specific Periodic Tasks ------------------------------ Drivers may run their own periodic tasks, i.e. actions run repeatedly after -a certain amount of time. Such task is created by decorating a method on the -driver itself or on any interface with periodic_ decorator, e.g. +a certain amount of time. Such task is created by decorating a method on +an interface with periodic_ decorator, e.g. :: @@ -88,18 +88,15 @@ driver itself or on any interface with periodic_ decorator, e.g. def task(self, manager, context): pass # do something - class FakeDriver(base.BaseDriver): - def __init__(self): - self.power = FakePower() - - @periodics.periodic(spacing=42) - def task2(self, manager, context): - pass # do something - Here the ``spacing`` argument is a period in seconds for a given periodic task. For example 'spacing=5' means every 5 seconds. +.. note:: + As of the Newton release, it's possible to bind periodic tasks to a driver + object instead of an interface. This is deprecated and support for it will + be removed in the Ocata release. 
+ Message Routing =============== diff --git a/ironic/conductor/base_manager.py b/ironic/conductor/base_manager.py index d1c2b1261..cf9176a6f 100644 --- a/ironic/conductor/base_manager.py +++ b/ironic/conductor/base_manager.py @@ -110,6 +110,8 @@ class BaseConductorManager(object): periodic_task_classes = set() self._collect_periodic_tasks(self, (admin_context,)) for driver_obj in drivers.values(): + # TODO(dtantsur): collecting tasks from driver objects is + # deprecated and should be removed in Ocata. self._collect_periodic_tasks(driver_obj, (self, admin_context)) for iface_name in driver_obj.all_interfaces: iface = getattr(driver_obj, iface_name, None) diff --git a/releasenotes/notes/periodic-tasks-drivers-ae9cddab88b546c6.yaml b/releasenotes/notes/periodic-tasks-drivers-ae9cddab88b546c6.yaml new file mode 100644 index 000000000..0c9f7bd7a --- /dev/null +++ b/releasenotes/notes/periodic-tasks-drivers-ae9cddab88b546c6.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - Putting periodic tasks on a driver object (rather than interface) is + deprecated. Driver developers should move periodic tasks from driver + objects to interface objects. From 054eeb65280749fd1a8be2c83e81a4a02cf7ad45 Mon Sep 17 00:00:00 2001 From: Gleb Stepanov Date: Fri, 5 Aug 2016 14:53:59 +0300 Subject: [PATCH 161/166] Check keyword arguments Check that all keyword arguments were properly used in ironic.tests.unit.db.utils. 
Change-Id: If04a162accedb13e63a9de4b3fa2f2961347353a --- ironic/tests/unit/conductor/test_manager.py | 3 +- ironic/tests/unit/dhcp/test_neutron.py | 18 +++----- .../unit/drivers/modules/test_deploy_utils.py | 8 ++-- ironic/tests/unit/objects/utils.py | 45 +++++++++++++++++-- 4 files changed, 51 insertions(+), 23 deletions(-) diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py index 75c194751..9284874ac 100644 --- a/ironic/tests/unit/conductor/test_manager.py +++ b/ironic/tests/unit/conductor/test_manager.py @@ -271,8 +271,7 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, node = obj_utils.create_test_node(self.context, driver=existing_driver, extra={'test': 'one'}, - instance_uuid=None, - task_state=states.POWER_ON) + instance_uuid=None) # check that it fails because driver not found node.driver = wrong_driver node.driver_info = {} diff --git a/ironic/tests/unit/dhcp/test_neutron.py b/ironic/tests/unit/dhcp/test_neutron.py index ca93feb66..a9329143d 100644 --- a/ironic/tests/unit/dhcp/test_neutron.py +++ b/ironic/tests/unit/dhcp/test_neutron.py @@ -363,8 +363,7 @@ class TestNeutron(db_base.DbTestCase): address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(), extra={'vif_port_id': - 'test-vif-A'}, - driver='fake') + 'test-vif-A'}) mock_gfia.return_value = expected with task_manager.acquire(self.context, self.node.uuid) as task: @@ -380,8 +379,7 @@ class TestNeutron(db_base.DbTestCase): port = object_utils.create_test_port(self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', - uuid=uuidutils.generate_uuid(), - driver='fake') + uuid=uuidutils.generate_uuid()) mock_gfia.return_value = expected with task_manager.acquire(self.context, self.node.uuid) as task: @@ -397,8 +395,7 @@ class TestNeutron(db_base.DbTestCase): pg = object_utils.create_test_portgroup(self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', - uuid=uuidutils.generate_uuid(), - driver='fake') + 
uuid=uuidutils.generate_uuid()) mock_gfia.return_value = expected with task_manager.acquire(self.context, self.node.uuid) as task: @@ -416,8 +413,7 @@ class TestNeutron(db_base.DbTestCase): address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(), extra={'vif_port_id': - 'test-vif-A'}, - driver='fake') + 'test-vif-A'}) mock_gfia.return_value = ip_address with task_manager.acquire(self.context, self.node.uuid) as task: api = dhcp_factory.DHCPFactory().provider @@ -434,8 +430,7 @@ class TestNeutron(db_base.DbTestCase): address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(), extra={'vif_port_id': - 'test-vif-A'}, - driver='fake') + 'test-vif-A'}) mock_gfia.return_value = ip_address with task_manager.acquire(self.context, self.node.uuid) as task: api = dhcp_factory.DHCPFactory().provider @@ -463,8 +458,7 @@ class TestNeutron(db_base.DbTestCase): address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(), extra={'vif_port_id': - 'test-vif-A'}, - driver='fake') + 'test-vif-A'}) with task_manager.acquire(self.context, self.node.uuid) as task: api = dhcp_factory.DHCPFactory().provider api.get_ip_addresses(task) diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index 9a26c3680..200961a27 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -1430,7 +1430,7 @@ class VirtualMediaDeployUtilsTestCase(db_base.DbTestCase): obj_utils.create_test_port( self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(), - extra={'vif_port_id': 'test-vif-A'}, driver='iscsi_ilo') + extra={'vif_port_id': 'test-vif-A'}) with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: address = utils.get_single_nic_with_vif_port_id(task) @@ -1440,8 +1440,7 @@ class VirtualMediaDeployUtilsTestCase(db_base.DbTestCase): obj_utils.create_test_port( self.context, node_id=self.node.id, 
address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(), - internal_info={'cleaning_vif_port_id': 'test-vif-A'}, - driver='iscsi_ilo') + internal_info={'cleaning_vif_port_id': 'test-vif-A'}) with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: address = utils.get_single_nic_with_vif_port_id(task) @@ -1451,8 +1450,7 @@ class VirtualMediaDeployUtilsTestCase(db_base.DbTestCase): obj_utils.create_test_port( self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(), - internal_info={'provisioning_vif_port_id': 'test-vif-A'}, - driver='iscsi_ilo') + internal_info={'provisioning_vif_port_id': 'test-vif-A'}) with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: address = utils.get_single_nic_with_vif_port_id(task) diff --git a/ironic/tests/unit/objects/utils.py b/ironic/tests/unit/objects/utils.py index f37181822..11bccbb97 100644 --- a/ironic/tests/unit/objects/utils.py +++ b/ironic/tests/unit/objects/utils.py @@ -13,18 +13,43 @@ # License for the specific language governing permissions and limitations # under the License. """Ironic object test utilities.""" +import six +from ironic.common import exception +from ironic.common.i18n import _ from ironic import objects from ironic.tests.unit.db import utils as db_utils +def check_keyword_arguments(func): + @six.wraps(func) + def wrapper(**kw): + obj_type = kw.pop('object_type') + result = func(**kw) + + extra_args = set(kw) - set(result) + if extra_args: + raise exception.InvalidParameterValue( + _("Unknown keyword arguments (%(extra)s) were passed " + "while creating a test %(object_type)s object.") % + {"extra": ", ".join(extra_args), + "object_type": obj_type}) + + return result + + return wrapper + + def get_test_node(ctxt, **kw): """Return a Node object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. 
""" - db_node = db_utils.get_test_node(**kw) + kw['object_type'] = 'node' + get_db_node_checked = check_keyword_arguments(db_utils.get_test_node) + db_node = get_db_node_checked(**kw) + # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_node['id'] @@ -51,7 +76,11 @@ def get_test_port(ctxt, **kw): NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ - db_port = db_utils.get_test_port(**kw) + kw['object_type'] = 'port' + get_db_port_checked = check_keyword_arguments( + db_utils.get_test_port) + db_port = get_db_port_checked(**kw) + # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_port['id'] @@ -78,7 +107,11 @@ def get_test_chassis(ctxt, **kw): NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ - db_chassis = db_utils.get_test_chassis(**kw) + kw['object_type'] = 'chassis' + get_db_chassis_checked = check_keyword_arguments( + db_utils.get_test_chassis) + db_chassis = get_db_chassis_checked(**kw) + # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_chassis['id'] @@ -105,7 +138,11 @@ def get_test_portgroup(ctxt, **kw): NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. 
""" - db_portgroup = db_utils.get_test_portgroup(**kw) + kw['object_type'] = 'portgroup' + get_db_port_group_checked = check_keyword_arguments( + db_utils.get_test_portgroup) + db_portgroup = get_db_port_group_checked(**kw) + # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_portgroup['id'] From eeb81ddf268712e19cdad418d79c140de65d8a2b Mon Sep 17 00:00:00 2001 From: Hironori Shiina Date: Thu, 4 Aug 2016 13:10:27 +0900 Subject: [PATCH 162/166] Remove duplicate parameters from local.conf example IRONIC_ENABLED_NETWORK_INTERFACES and IRONIC_NETWORK_INTERFACE are duplicate in local.conf example for multitenant networking. This patch removes these duplicate parameters. Closes-Bug: 1611216 Change-Id: I860acf62e4cc62f2bfba44ab908cd8f8edc91f4a --- doc/source/dev/ironic-multitenant-networking.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/source/dev/ironic-multitenant-networking.rst b/doc/source/dev/ironic-multitenant-networking.rst index 583619e13..beeb0a2ef 100644 --- a/doc/source/dev/ironic-multitenant-networking.rst +++ b/doc/source/dev/ironic-multitenant-networking.rst @@ -131,6 +131,3 @@ configured in Neutron. 
LOGFILE=$HOME/devstack.log LOGDIR=$HOME/logs IRONIC_VM_LOG_DIR=$HOME/ironic-bm-logs - - IRONIC_ENABLED_NETWORK_INTERFACES=neutron - IRONIC_NETWORK_INTERFACE=neutron From e3628363962297fbfd5da031542be12a7d0a0da3 Mon Sep 17 00:00:00 2001 From: Akilan Pughazhendi Date: Wed, 3 Aug 2016 17:32:22 +0000 Subject: [PATCH 163/166] Updated Dev quickstart for viewing doc changes The instructions for viewing doc changes were unclear so I updated them to be easier to follow Change-Id: I0cae488e4ee1d5690557cd5be608925467d370c0 --- doc/source/dev/dev-quickstart.rst | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/doc/source/dev/dev-quickstart.rst b/doc/source/dev/dev-quickstart.rst index 79fa7ece8..246bb11db 100644 --- a/doc/source/dev/dev-quickstart.rst +++ b/doc/source/dev/dev-quickstart.rst @@ -659,7 +659,9 @@ Building developer documentation If you would like to build the documentation locally, eg. to test your documentation changes before uploading them for review, run these -commands to build the documentation set:: +commands to build the documentation set: + +- On your local machine:: # activate your development virtualenv source .tox/venv/bin/activate @@ -667,7 +669,26 @@ commands to build the documentation set:: # build the docs tox -edocs -Now use your browser to open the top-level index.html located at:: + #Now use your browser to open the top-level index.html located at: ironic/doc/build/html/index.html + +- On a remote machine:: + + # Go to the directory that contains the docs + cd ~/ironic/doc/source/ + + # Build the docs + tox -edocs + + # Change directory to the newly built HTML files + cd ~/ironic/doc/build/html/ + + # Create a server using python on port 8000 + python -m SimpleHTTPServer 8000 + + #Now use your browser to open the top-level index.html located at: + + http://your_ip:8000 + From 0594ccec447c22b725f9194513a1366baa11c08b Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Mon, 8 Aug 2016 20:45:48 -0400 
Subject: [PATCH 164/166] Refactor multitenant networking release notes This puts everything into one release note, and rewrites it a bit to make it more cohesive and about the general "multitenant networking" feature, rather than bit-by-bit. Change-Id: I91b3c74a4f9edfd4e3454b7c80d3886b15494eb7 --- ...etwork-interface-api-a3a56b8d0c796d88.yaml | 9 --- ...d-network-interfaces-0a13c4aba252573e.yaml | 39 ---------- ...on-network-interface-aa9e7e65011ab8cd.yaml | 14 ---- ...ltitenant-networking-0a13c4aba252573e.yaml | 77 +++++++++++++++++++ 4 files changed, 77 insertions(+), 62 deletions(-) delete mode 100644 releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml delete mode 100644 releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml delete mode 100644 releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml create mode 100644 releasenotes/notes/multitenant-networking-0a13c4aba252573e.yaml diff --git a/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml b/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml deleted file mode 100644 index fb891a3f0..000000000 --- a/releasenotes/notes/add-network-interface-api-a3a56b8d0c796d88.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - Exposes the ``network_interface`` field of the Node resource to the REST - API, raising the API maximum version to 1.20. This field is the network - interface to use for a node. Its possible values are from the - configuration option ``[DEFAULT]enabled_network_interfaces``. Note that - the value of this option must be the same on all the ironic-conductor and - ironic-api service nodes. diff --git a/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml b/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml deleted file mode 100644 index 7522fa1c1..000000000 --- a/releasenotes/notes/add-network-interfaces-0a13c4aba252573e.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -features: - - | - Added network interface. 
Introduced two network interface implementations: - ``flat``, which replicates the flat network behavior present previously and - ``noop`` when neutron is not used, which is basically a noop interface. - The network interface is used to switch network for node during - provisioning/cleaning. Added ``enabled_network_interfaces`` option in - DEFAULT config section. This option defines a list of enabled network - interfaces on the conductor. -deprecations: - - | - ``create_cleaning_ports`` and ``delete_cleaning_ports`` methods in DHCP - providers are deprecated and will be removed completely in the Ocata - release. The logic they are implementing should be moved to a custom - network interface's ``add_cleaning_network`` and - ``remove_cleaning_network`` methods respectively. After that, the methods - themselves should be removed from DHCP provider so that network interface - is used instead. ``flat`` network interface does not require - ``[neutron]cleaning_network_uuid`` for now so as not to break standalone - deployments, but it will be required in the Ocata release. -upgrade: - - | - ``[DEFAULT]default_network_interface`` configuration option is introduced, - with empty default value. If set, the specified interface will be used as - the network interface for nodes that don't have ``network_interface`` field - set. If it is not set, the network interface is determined by looking at - the ``[dhcp]dhcp_provider`` value. If it is ``neutron`` - ``flat`` network - interface is the default, ``noop`` otherwise. - The network interface will be set for all nodes without network_interface - already set via a database migration. This will be set following the logic - above. When running database migrations for an existing deployment, it's - important to check the above configuration options to ensure the existing - nodes will have the expected network_interface. If - ``[DEFAULT]default_network_interface`` is not set, everything should go as - expected. 
If it is set, ensure that it is set to the value that you wish - existing nodes to use. - - Note that if ``[DEFAULT]default_network_interface`` is set, it must be set - in the configuration file for both the API and conductor hosts. diff --git a/releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml b/releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml deleted file mode 100644 index d8970fd73..000000000 --- a/releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -features: - - Added ``neutron`` network interface. This interface allows to provision - and/or clean node in separate networks. A new config option - ``[neutron]provisioning_network_uuid`` has been added. This option - specifies provision network UUID. -upgrade: - - | - If ``neutron`` network interface is specified in - ``[DEFAULT]enabled_network_interfaces``, - ``[neutron]provisioning_network_uuid`` and - ``[neutron]cleaning_network_uuid`` configuration options are required. If - any of them is not specified, the ironic-conductor service will fail to - start. diff --git a/releasenotes/notes/multitenant-networking-0a13c4aba252573e.yaml b/releasenotes/notes/multitenant-networking-0a13c4aba252573e.yaml new file mode 100644 index 000000000..e3bdaeca6 --- /dev/null +++ b/releasenotes/notes/multitenant-networking-0a13c4aba252573e.yaml @@ -0,0 +1,77 @@ +--- +features: + - | + Adds multitenant networking support. + + Ironic now has the concept of "network interfaces" for a node, which + represent a networking driver. + + There are three network interfaces available: + + * ``flat``: this replicates the old flat network behavior and is the default + when using neutron for DHCP. + * ``noop``: this replicates the old flat behavior when not using neutron for + DHCP, and is the default when the configuration option + ``[DHCP]/dhcp_provider`` is set to "none". 
+ * ``neutron``: this allows for separating the provisioning and cleaning + networks from the tenant networks, and provides isolation from tenant + network to tenant network, and tenant network to control plane. + The following configuration options must be set if the neutron interface + is enabled, or ironic-conductor will fail to start: + + * ``[neutron]/provisioning_network_uuid`` + * ``[neutron]/cleaning_network_uuid`` + + A ``[DEFAULT]/enabled_network_interfaces`` option (which must be set for + both ironic-api and ironic-conductor services) controls which network + interfaces are available for use. + + A network interface is set for a node by setting the ``network_interface`` + field for the node via the REST API. This field is available in API version + 1.20 and above. Changing the network interface may only be done in the + ``enroll``, ``inspecting``, and ``manageable`` states. + + The configuration option ``[DEFAULT]/default_network_interface`` may be + used to specify which network interface is defined when a node is created. + + **WARNING: don't set the option ``[DEFAULT]/default_network_interface`` + before upgrading to this release without reading the upgrade notes about + it, due to data migrations depending on the value.** +deprecations: + - | + ``create_cleaning_ports`` and ``delete_cleaning_ports`` methods in DHCP + providers are deprecated and will be removed completely in the Ocata + release. The logic they are implementing should be moved to a custom + network interface's ``add_cleaning_network`` and + ``remove_cleaning_network`` methods respectively. After that, the methods + themselves should be removed from DHCP provider so that the custom + network interface is used instead. ``flat`` network interface does not + require ``[neutron]/cleaning_network_uuid`` for now so as not to break + standalone deployments upon upgrade, but it will be required in the Ocata + release if the ``flat`` network interface is enabled. 
+upgrade: + - | + ``[DEFAULT]/default_network_interface`` configuration option is introduced, + with empty default value. If set, the specified interface will be used as + the network interface for nodes that don't have ``network_interface`` field + set. If it is not set, the network interface is determined by looking at + the ``[dhcp]/dhcp_provider`` value. If it is ``neutron`` - ``flat`` network + interface is the default, ``noop`` otherwise. + + The network interface will be set for all nodes without network_interface + already set via a database migration. This will be set following the logic + above. When running database migrations for an existing deployment, it's + important to check the above configuration options to ensure the existing + nodes will have the expected network_interface. If + ``[DEFAULT]/default_network_interface`` is not set, everything should go as + expected. If it is set, ensure that it is set to the value that you wish + existing nodes to use. + - Note that if the configuration option + ``[DEFAULT]/default_network_interface`` is set, it must be set + in the configuration file for both the API and conductor hosts. + - If ``neutron`` network interface is specified for the configuration option + ``[DEFAULT]/enabled_network_interfaces``, then + ``[neutron]/provisioning_network_uuid`` and + ``[neutron]/cleaning_network_uuid`` configuration options are required. If + either of them is not specified, the ironic-conductor service will fail to + start. From 0fe585ac7c1e8b8e068d6ae0da00edc3da9a4c6c Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Mon, 8 Aug 2016 21:09:13 -0400 Subject: [PATCH 165/166] Clean up release notes for 6.1.0 This corrects some typos, some grammar, and makes formatting of notes generally more consistent. 
Change-Id: Ic6b48a1877a9b142a4dd1df2ae22342eef99bc76 --- ...agent-erase-fallback-b07613a7042fe236.yaml | 2 +- ...c-allocation-feature-2fd6b4df7943f178.yaml | 2 +- ...-node-resource-class-c31e26df4196293e.yaml | 4 +-- ...cat-console-ipmitool-ab4402ec976c5c96.yaml | 4 +-- ...ing-audit-middleware-b95f2a00baed9750.yaml | 11 +++++--- .../notes/agent-api-bf9f18d8d38075e4.yaml | 6 ++--- .../notes/bug-1592335-7c5835868fe364ea.yaml | 6 ++--- ...lect-deployment-logs-2ec1634847c3f6a5.yaml | 20 +++++++++----- ...etrics-for-api-calls-69f18fd1b9d54b05.yaml | 6 ++--- ...vice-priority-config-509661955a11c28e.yaml | 6 ++--- ...ement-policy-in-code-cbb0216ef5f8224f.yaml | 8 +++--- .../notes/keystone-auth-3155762c524e44df.yaml | 26 ++++++++++++------- .../lookup-heartbeat-f9772521d12a0549.yaml | 6 ++--- ...ate-ilo-certificates-3ab98bb8cfad7d60.yaml | 7 +++-- 14 files changed, 65 insertions(+), 49 deletions(-) diff --git a/releasenotes/notes/add-agent-erase-fallback-b07613a7042fe236.yaml b/releasenotes/notes/add-agent-erase-fallback-b07613a7042fe236.yaml index 3471b16f9..20a07048c 100644 --- a/releasenotes/notes/add-agent-erase-fallback-b07613a7042fe236.yaml +++ b/releasenotes/notes/add-agent-erase-fallback-b07613a7042fe236.yaml @@ -8,7 +8,7 @@ features: normal circumstances, the failure of an ATA secure erase operation results in the node being put in ``clean failed`` state. -upgrades: +upgrade: - A new configuration option ``[deploy]continue_if_disk_secure_erase_fails``, which has a default value of False, has been added. The default diff --git a/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml b/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml index 119a15593..2f0aa9a6a 100644 --- a/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml +++ b/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml @@ -1,5 +1,5 @@ --- features: - - Add Dynamic Allocation feature for the OneView drivers. 
+ - Adds Dynamic Allocation feature for the OneView drivers. deprecations: - Deprecates pre-allocation feature for the OneView drivers. diff --git a/releasenotes/notes/add-node-resource-class-c31e26df4196293e.yaml b/releasenotes/notes/add-node-resource-class-c31e26df4196293e.yaml index d52eca3d2..32602ee83 100644 --- a/releasenotes/notes/add-node-resource-class-c31e26df4196293e.yaml +++ b/releasenotes/notes/add-node-resource-class-c31e26df4196293e.yaml @@ -1,12 +1,12 @@ --- features: - - Adds a `resource_class` field to the node resource, + - Adds a ``resource_class`` field to the node resource, which will be used by Nova to define which nodes may quantitatively match a Nova flavor. Operators should populate this accordingly before deploying the Ocata version of Nova. upgrade: - - Adds a `resource_class` field to the node resource, + - Adds a ``resource_class`` field to the node resource, which will be used by Nova to define which nodes may quantitatively match a Nova flavor. Operators should populate this accordingly before deploying the Ocata diff --git a/releasenotes/notes/add-socat-console-ipmitool-ab4402ec976c5c96.yaml b/releasenotes/notes/add-socat-console-ipmitool-ab4402ec976c5c96.yaml index 3fbc66ab9..9b6e92377 100644 --- a/releasenotes/notes/add-socat-console-ipmitool-ab4402ec976c5c96.yaml +++ b/releasenotes/notes/add-socat-console-ipmitool-ab4402ec976c5c96.yaml @@ -1,5 +1,5 @@ --- features: - Adds support for socat-based serial console to ipmitool-based drivers. - These are available by using the agent_ipmitool_socat and - pxe_ipmitool_socat drivers. + These are available by using the ``agent_ipmitool_socat`` and + ``pxe_ipmitool_socat`` drivers. 
diff --git a/releasenotes/notes/adding-audit-middleware-b95f2a00baed9750.yaml b/releasenotes/notes/adding-audit-middleware-b95f2a00baed9750.yaml index ef804c304..31b0833e1 100644 --- a/releasenotes/notes/adding-audit-middleware-b95f2a00baed9750.yaml +++ b/releasenotes/notes/adding-audit-middleware-b95f2a00baed9750.yaml @@ -2,9 +2,12 @@ features: - | The ironic-api service now supports logging audit messages of - api calls. The following configuration parameters have been added. + API calls. The following configuration parameters have been added. By default auditing of ironic-api service is turned off. - * [audit]/enabled - * [audit]/ignore_req_list - * [audit]/audit_map_file + * ``[audit]/enabled`` + * ``[audit]/ignore_req_list`` + * ``[audit]/audit_map_file`` + + Further documentation for this feature is available at + http://docs.openstack.org/developer/ironic/deploy/api-audit-support.html. diff --git a/releasenotes/notes/agent-api-bf9f18d8d38075e4.yaml b/releasenotes/notes/agent-api-bf9f18d8d38075e4.yaml index 6cc426f40..6d262316c 100644 --- a/releasenotes/notes/agent-api-bf9f18d8d38075e4.yaml +++ b/releasenotes/notes/agent-api-bf9f18d8d38075e4.yaml @@ -1,5 +1,5 @@ --- other: - - The "continue_deploy" and "reboot_to_instance" methods in the - "BaseAgentVendor" class stopped accepting ** arguments. They were never - used anyway; the drivers should stop passing anything there. + - The ``continue_deploy`` and ``reboot_to_instance`` methods in the + ``BaseAgentVendor`` class stopped accepting ** arguments. They were never + used anyway; drivers should stop passing anything there. 
diff --git a/releasenotes/notes/bug-1592335-7c5835868fe364ea.yaml b/releasenotes/notes/bug-1592335-7c5835868fe364ea.yaml index 371d9eb31..3eabc45b5 100644 --- a/releasenotes/notes/bug-1592335-7c5835868fe364ea.yaml +++ b/releasenotes/notes/bug-1592335-7c5835868fe364ea.yaml @@ -1,5 +1,5 @@ --- fixes: - - A node using 'agent_ilo' or 'iscsi_ilo' driver has - their 'driver_info/ilo_deploy_iso' field validated - during node validate. This closes bug #1592335. + - A node using the ``agent_ilo`` or ``iscsi_ilo`` driver now has + its ``driver_info/ilo_deploy_iso`` field validated + during node validation. diff --git a/releasenotes/notes/collect-deployment-logs-2ec1634847c3f6a5.yaml b/releasenotes/notes/collect-deployment-logs-2ec1634847c3f6a5.yaml index f2795b5d6..bec89d5b0 100644 --- a/releasenotes/notes/collect-deployment-logs-2ec1634847c3f6a5.yaml +++ b/releasenotes/notes/collect-deployment-logs-2ec1634847c3f6a5.yaml @@ -1,12 +1,18 @@ --- features: - - Adds support for collecting deployment logs from the IPA - ramdisk. Five new configuration options were added, [agent]/group - deploy_logs_collect, [agent]deploy_logs_storage_backend, - [agent]/deploy_logs_local_path, [agent]/deploy_logs_swift_container and - [agent]/deploy_logs_swift_days_to_expire. -upgrades: + - | + Adds support for collecting deployment logs from the IPA + ramdisk. Five new configuration options were added: + + * ``[agent]/deploy_logs_collect`` + * ``[agent]/deploy_logs_storage_backend`` + * ``[agent]/deploy_logs_local_path`` + * ``[agent]/deploy_logs_swift_container`` + * ``[agent]/deploy_logs_swift_days_to_expire``. +upgrade: - Collecting logs on deploy failure is enabled by default and the logs - will be saved to the local disk. Operators upgrading may want to disable + will be saved to the local disk at the location specified by the + configuration option ``[agent]/deploy_logs_local_path`` (by default, + ``/var/log/ironic/deploy``). 
Operators upgrading may want to disable this feature, enable some form of rotation for the logs or change the configuration to store the logs in Swift to avoid disk space problems. diff --git a/releasenotes/notes/emit-metrics-for-api-calls-69f18fd1b9d54b05.yaml b/releasenotes/notes/emit-metrics-for-api-calls-69f18fd1b9d54b05.yaml index 2397950fd..09edaa206 100644 --- a/releasenotes/notes/emit-metrics-for-api-calls-69f18fd1b9d54b05.yaml +++ b/releasenotes/notes/emit-metrics-for-api-calls-69f18fd1b9d54b05.yaml @@ -1,5 +1,5 @@ --- features: - - With this change, ironic now emits timing metrics - for all API methods to statsd, if enabled by config - in the [metrics] and [metrics_statsd] sections. + - Ironic now emits timing metrics for all API methods to statsd, + if enabled by the ``[metrics]`` and ``[metrics_statsd]`` configuration + sections. diff --git a/releasenotes/notes/ilo-erase-device-priority-config-509661955a11c28e.yaml b/releasenotes/notes/ilo-erase-device-priority-config-509661955a11c28e.yaml index 24f223140..690fe7670 100644 --- a/releasenotes/notes/ilo-erase-device-priority-config-509661955a11c28e.yaml +++ b/releasenotes/notes/ilo-erase-device-priority-config-509661955a11c28e.yaml @@ -1,5 +1,5 @@ --- deprecations: - - The [ilo]/clean_priority_erase_devices config is deprecated and will be - removed in the Ocata cycle. Please use the [deploy]/erase_devices_priority - config instead. + - The ``[ilo]/clean_priority_erase_devices`` configuration option is + deprecated and will be removed in the Ocata cycle. Please use the + ``[deploy]/erase_devices_priority`` option instead. 
diff --git a/releasenotes/notes/implement-policy-in-code-cbb0216ef5f8224f.yaml b/releasenotes/notes/implement-policy-in-code-cbb0216ef5f8224f.yaml index 6755307f9..579444bd1 100644 --- a/releasenotes/notes/implement-policy-in-code-cbb0216ef5f8224f.yaml +++ b/releasenotes/notes/implement-policy-in-code-cbb0216ef5f8224f.yaml @@ -15,8 +15,8 @@ security: upgrade: - | During an upgrade, it is recommended that all deployers re-evaluate the - settings in their /etc/ironic/policy.json file. This file should now be + settings in their ``/etc/ironic/policy.json`` file. This file should now be used only to override default configuration, such as by limiting access to - the Bare Metal service to specific tenants or restricting access to - specific API endpoints. A policy.json.sample file is provided that lists - all supported policies. + the ironic service to specific tenants or restricting access to + specific API endpoints. A ``policy.json.sample`` file is provided that + lists all supported policies. diff --git a/releasenotes/notes/keystone-auth-3155762c524e44df.yaml b/releasenotes/notes/keystone-auth-3155762c524e44df.yaml index 36e4ad614..61d7b593c 100644 --- a/releasenotes/notes/keystone-auth-3155762c524e44df.yaml +++ b/releasenotes/notes/keystone-auth-3155762c524e44df.yaml @@ -1,12 +1,12 @@ --- upgrade: - | - New way of configuring access credentials for OpenStack services clients. - For each service both Keystone session options + Changes the way to configure access credentials for OpenStack services + clients. For each service, both Keystone session options (timeout, SSL-related ones) and Keystone auth_plugin options (auth_url, auth_type and corresponding auth_plugin options) - should be specified in the config section for this service. - Config section affected are + should be specified in the configuration section for this service. 
+ Configuration sections affected are: * ``[neutron]`` for Neutron service user * ``[glance]`` for Glance service user @@ -19,23 +19,31 @@ upgrade: Backward-compatible options handling is provided using values from ``[keystone_authtoken]`` config section, - but operators are advised to switch to the new config options. + but operators are advised to switch to the new config options as the + old options are deprecated. The old options will be removed during the + Ocata cycle. For more information on sessions, auth plugins and their settings, - please refer to _http://docs.openstack.org/developer/keystoneauth/ + please refer to http://docs.openstack.org/developer/keystoneauth/. - | - Small change in semantics of default for ``[neutron]url`` option + Small change in semantics of default for ``[neutron]/url`` option * default is changed to None. - * In case when [neutron]auth_strategy is ``noauth``, + * For the case when ``[neutron]/auth_strategy`` is ``noauth``, default means use ``http://$my_ip:9696``. - * In case when [neutron]auth_strategy is ``keystone``, + * For the case when ``[neutron]/auth_strategy`` is ``keystone``, default means to resolve the endpoint from Keystone Catalog. - New config section ``[service_catalog]`` for access credentials used to discover Ironic API URL from Keystone Catalog. Previously credentials from ``[keystone_authtoken]`` section were used, which is now deprecated for such purpose. +deprecations: + - The ``[keystone_authtoken]`` configuration section is deprecated for + configuring clients for other services (but is still used for configuring + API token authentication), in favor of the ``[service_catalog]`` section. + The ability to configure clients for other services via the + ``[keystone_authtoken]`` section will be removed during the Ocata cycle. fixes: - Do not rely on keystonemiddleware config options for instantiating clients for other OpenStack services. 
diff --git a/releasenotes/notes/lookup-heartbeat-f9772521d12a0549.yaml b/releasenotes/notes/lookup-heartbeat-f9772521d12a0549.yaml index f4ec0cd6f..e854f7a8b 100644 --- a/releasenotes/notes/lookup-heartbeat-f9772521d12a0549.yaml +++ b/releasenotes/notes/lookup-heartbeat-f9772521d12a0549.yaml @@ -7,10 +7,10 @@ features: This endpoint is not authenticated to allow ramdisks to access it without passing the credentials to them. deprecations: - - The configuration option ``[agent]heartbeat_timeout`` was renamed to - ``[api]ramdisk_heartbeat_timeout``. The old variant is deprecated. + - The configuration option ``[agent]/heartbeat_timeout`` was renamed to + ``[api]/ramdisk_heartbeat_timeout``. The old variant is deprecated. upgrade: - - A new configuration option ``[api]restrict_lookup`` is added, which + - A new configuration option ``[api]/restrict_lookup`` is added, which restricts the lookup API (normally only used by ramdisks) to only work when the node is in specific states used by the ramdisk, and defaults to True. Operators that need this endpoint to work in any state may set this to diff --git a/releasenotes/notes/validate-ilo-certificates-3ab98bb8cfad7d60.yaml b/releasenotes/notes/validate-ilo-certificates-3ab98bb8cfad7d60.yaml index 9d9d7d1ab..7bfdd026f 100644 --- a/releasenotes/notes/validate-ilo-certificates-3ab98bb8cfad7d60.yaml +++ b/releasenotes/notes/validate-ilo-certificates-3ab98bb8cfad7d60.yaml @@ -1,8 +1,7 @@ --- features: - Added support to validate iLO SSL certificate in iLO - drivers. - New config parameter '[ilo]/ca_file' added to - specify iLO CA certificate file. - If 'ca_file' is specified, iLO drivers will validate + drivers. A new configuration option ``[ilo]/ca_file`` is added to + specify the iLO CA certificate file. + If ``[ilo]/ca_file`` is specified, the iLO drivers will validate iLO SSL certificates. 
From 9f2473153a0259d9347b849695148e206dd43ed3 Mon Sep 17 00:00:00 2001 From: Thiago Paiva Date: Tue, 9 Aug 2016 15:07:05 -0300 Subject: [PATCH 166/166] Refactor OneView dynamic allocation release notes Adding some context and an overall instruction/orientation about the new feature for ironic operators. Change-Id: Ie5ae2d8982f01972484a7f7bba7937e599254902 --- ...c-allocation-feature-2fd6b4df7943f178.yaml | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml b/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml index 2f0aa9a6a..ca5471a58 100644 --- a/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml +++ b/releasenotes/notes/add-dynamic-allocation-feature-2fd6b4df7943f178.yaml @@ -1,5 +1,20 @@ --- features: - - Adds Dynamic Allocation feature for the OneView drivers. + - OneView drivers now support dynamic allocation of nodes in OneView, + allowing for better resource sharing with non-OpenStack users since Server + Hardware will be allocated only when the node is scheduled to be used. To + enable the new allocation feature for a node, set the flag + ``dynamic_allocation=True`` on the node's ``driver_info``. + More information is available at + http://docs.openstack.org/developer/ironic/drivers/oneview.html. deprecations: - - Deprecates pre-allocation feature for the OneView drivers. + - Deprecates pre-allocation feature for the OneView drivers since it requires + resource allocation to Ironic prior to boot time, which makes Server + Hardware unavailable to non-OpenStack OneView users. Pre-allocation will + be removed in the OpenStack "P" release. All nodes with + ``dynamic_allocation=False`` set, or that don't have the ``dynamic_allocation`` + flag set, will be assumed to be in pre-allocation. Users may use the REST API + or the ``ironic-oneview-cli`` to migrate nodes from pre-allocation to + dynamic allocation. 
+ More information is available at + http://docs.openstack.org/developer/ironic/drivers/oneview.html.