Rename ironic-discoverd to daisy-discoverd

Historically, Daisy's discoverd is a fork of ironic-discoverd that
kept the original package name. This causes two problems:
1) When a user wants to install Daisy but the ironic-discoverd
package from OpenStack is already installed, Daisy's discoverd
will not be installed because the package already exists.
2) When a user has already installed Daisy's discoverd and later
runs yum update, Daisy's discoverd is replaced by the
ironic-discoverd package from OpenStack.

Both scenarios above leave Daisy broken, so this PS renames
ironic-discoverd to daisy-discoverd.

Change-Id: Ic505feb12271dd87e5781da28f79ca604d49374e
Signed-off-by: Zhijiang Hu <hu.zhijiang@zte.com.cn>
Zhijiang Hu 2017-08-03 19:04:40 +08:00
parent aa5e992cbc
commit 13c15ff73f
71 changed files with 281 additions and 1875 deletions


@@ -1,6 +1,6 @@
include example.conf
include LICENSE
include ironic-discoverd.8
include daisy-discoverd.8
include requirements.txt
include test-requirements.txt
include tox.ini


@@ -0,0 +1,19 @@
.\" Manpage for daisy-discoverd.
.TH man 8 "08 Oct 2014" "1.0" "daisy-discoverd man page"
.SH NAME
daisy-discoverd \- hardware discovery daemon for OpenStack Ironic.
.SH SYNOPSIS
daisy-discoverd CONFFILE
.SH DESCRIPTION
This command starts daisy-discoverd service, which starts and finishes
hardware discovery for nodes accessing PXE boot service (usually dnsmasq).
.SH OPTIONS
The daisy-discoverd does not take any options. However, you should supply
path to the configuration file.
.SH SEE ALSO
README page located at https://pypi.python.org/pypi/daisy-discoverd
provides some information about how to configure and use the service.
.SH BUGS
No known bugs.
.SH AUTHOR
Dmitry Tantsur (divius.inside@gmail.com)


@@ -35,7 +35,7 @@ def introspect(uuid, base_url=_DEFAULT_URL, auth_token=''):
"""Start introspection for a node.
:param uuid: node uuid
:param base_url: *ironic-discoverd* URL in form: http://host:port[/ver],
:param base_url: *daisy-discoverd* URL in form: http://host:port[/ver],
defaults to ``http://127.0.0.1:5050/v1``.
:param auth_token: Keystone authentication token.
"""
@@ -51,9 +51,9 @@ def introspect(uuid, base_url=_DEFAULT_URL, auth_token=''):
def get_status(uuid, base_url=_DEFAULT_URL, auth_token=''):
"""Get introspection status for a node.
New in ironic-discoverd version 1.0.0.
New in daisy-discoverd version 1.0.0.
:param uuid: node uuid.
:param base_url: *ironic-discoverd* URL in form: http://host:port[/ver],
:param base_url: *daisy-discoverd* URL in form: http://host:port[/ver],
defaults to ``http://127.0.0.1:5050/v1``.
:param auth_token: Keystone authentication token.
:raises: *requests* library HTTP errors.
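
For orientation, a minimal usage sketch of the renamed client module
(function names and defaults are as shown in the hunk above; the UUID
and token values are placeholders, not from this commit)::

    # Hypothetical usage of daisy_discoverd.client after the rename.
    from daisy_discoverd import client

    node_uuid = 'UUID'   # placeholder node UUID
    token = 'TOKEN'      # placeholder Keystone token

    # Start introspection; base_url defaults to http://127.0.0.1:5050/v1.
    client.introspect(node_uuid, auth_token=token)

    # Query introspection status; raises requests HTTP errors on failure.
    status = client.get_status(node_uuid, auth_token=token)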


@@ -21,13 +21,8 @@ DEFAULTS = {
# Keystone credentials
'os_auth_url': 'http://127.0.0.1:5000/v2.0',
'identity_uri': 'http://127.0.0.1:35357',
# Ironic and Keystone connection settings
'ironic_retry_attempts': '5',
'ironic_retry_period': '20',
# Firewall management settings
'manage_firewall': 'true',
'dnsmasq_interface': 'br-ctlplane',
'firewall_update_period': '15',
# Introspection process settings
'ports_for_inactive_interfaces': 'false',
'timeout': '3600',
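
A rough sketch of how the surviving defaults are consumed; the
conf.get/getint/getboolean accessors and conf.CONF.set appear in other
hunks of this commit, while the variable names here are illustrative::

    from daisy_discoverd import conf

    timeout = conf.getint('discoverd', 'timeout')        # default '3600'
    inactive = conf.getboolean('discoverd',
                               'ports_for_inactive_interfaces')
    auth_url = conf.get('discoverd', 'os_auth_url')

    # Tests override options in-process, as seen in the test hunks:
    conf.CONF.set('discoverd', 'timeout', '0')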


@@ -15,13 +15,12 @@
import logging
from ironic_discoverd import conf
from ironic_discoverd import firewall
from ironic_discoverd import node_cache
from ironic_discoverd import utils
from daisy_discoverd import conf
from daisy_discoverd import node_cache
from daisy_discoverd import utils
LOG = logging.getLogger("ironic_discoverd.introspect")
LOG = logging.getLogger("daisy_discoverd.introspect")
# See http://specs.openstack.org/openstack/ironic-specs/specs/kilo/new-ironic-state-machine.html # noqa
VALID_STATES = {'enroll', 'manageable', 'inspecting'}


@@ -23,18 +23,17 @@ import sys
import flask
from logging import handlers
from ironic_discoverd import conf
from ironic_discoverd import firewall
from ironic_discoverd import introspect
from ironic_discoverd import node_cache
from ironic_discoverd import process
from ironic_discoverd import utils
from daisy_discoverd import conf
from daisy_discoverd import introspect
from daisy_discoverd import node_cache
from daisy_discoverd import process
from daisy_discoverd import utils
app = flask.Flask(__name__)
LOG = logging.getLogger('ironic_discoverd.main')
LOG = logging.getLogger('daisy_discoverd.main')
fh = handlers.RotatingFileHandler(
'/var/log/ironic/discoverd.log',
'/var/log/daisy-discoverd/discoverd.log',
'a', maxBytes=2*1024*1024, backupCount=5)
formatter = logging.Formatter(
'%(asctime)-12s:%(name)s:%(levelname)s:%(message)s')
@@ -85,22 +84,11 @@ def api_continue():
return json.dumps(""), 200, {'Content-Type': 'applications/json'}
def periodic_update(period): # pragma: no cover
while True:
LOG.debug('Running periodic update of filters')
try:
firewall.update_filters()
except Exception:
LOG.exception('Periodic update failed')
eventlet.greenthread.sleep(period)
def periodic_clean_up(period): # pragma: no cover
while True:
LOG.debug('Running periodic clean up of node cache')
try:
if node_cache.clean_up():
firewall.update_filters()
node_cache.clean_up()
except Exception:
LOG.exception('Periodic clean up of node cache failed')
eventlet.greenthread.sleep(period)
@@ -120,11 +108,6 @@ def init():
node_cache.init()
if conf.getboolean('discoverd', 'manage_firewall'):
firewall.init()
period = conf.getint('discoverd', 'firewall_update_period')
eventlet.greenthread.spawn_n(periodic_update, period)
if conf.getint('discoverd', 'timeout') > 0:
period = conf.getint('discoverd', 'clean_up_period')
eventlet.greenthread.spawn_n(periodic_clean_up, period)
@@ -149,12 +132,10 @@ def main(): # pragma: no cover
'keystonemiddleware.auth_token',
'requests.packages.urllib3.connectionpool'):
logging.getLogger(third_party).setLevel(logging.WARNING)
logging.getLogger('ironicclient.common.http').setLevel(
logging.INFO if debug else logging.ERROR)
if old_args:
LOG.warning('"ironic-discoverd <config-file>" syntax is deprecated use'
' "ironic-discoverd --config-file <config-file>" instead')
LOG.warning('"daisy-discoverd <config-file>" syntax is deprecated use'
' "daisy-discoverd --config-file <config-file>" instead')
init()
app.run(debug=debug,


@@ -20,11 +20,11 @@ import sqlite3
import sys
import time
from ironic_discoverd import conf
from ironic_discoverd import utils
from daisy_discoverd import conf
from daisy_discoverd import utils
LOG = logging.getLogger("ironic_discoverd.node_cache")
LOG = logging.getLogger("daisy_discoverd.node_cache")
_DB_NAME = None
_SCHEMA = """
create table if not exists nodes


@@ -18,7 +18,7 @@ import abc
import six
from stevedore import named
from ironic_discoverd import conf
from daisy_discoverd import conf
@six.add_metaclass(abc.ABCMeta)
@@ -74,7 +74,7 @@ def processing_hooks_manager(*args):
names = [x.strip()
for x in conf.get('discoverd', 'processing_hooks').split(',')
if x.strip()]
_HOOKS_MGR = named.NamedExtensionManager('ironic_discoverd.hooks',
_HOOKS_MGR = named.NamedExtensionManager('daisy_discoverd.hooks',
names=names,
invoke_on_load=True,
invoke_args=args,
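
Since hooks are resolved through stevedore entry points, a third-party
hook only needs a ProcessingHook subclass plus a registration under the
renamed 'daisy_discoverd.hooks' group. A hypothetical sketch (the
package name my_plugin is a placeholder; the method follows the
ProcessingHook contract described in the removed CONTRIBUTING.rst
further below)::

    from daisy_discoverd.plugins import base

    class MyHook(base.ProcessingHook):
        def before_processing(self, node_info):
            # Inspect or adjust raw ramdisk data before node lookup.
            node_info.setdefault('tags', []).append('my-hook-ran')

    # In the plugin's own setup.py (hypothetical):
    # entry_points={'daisy_discoverd.hooks':
    #               ['my_hook = my_plugin:MyHook']}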


@@ -15,10 +15,10 @@
import logging
from ironic_discoverd.plugins import base
from daisy_discoverd.plugins import base
LOG = logging.getLogger('ironic_discoverd.plugins.example')
LOG = logging.getLogger('daisy_discoverd.plugins.example')
class ExampleProcessingHook(base.ProcessingHook): # pragma: no cover


@@ -16,12 +16,12 @@
import logging
from oslo_utils import netutils
from ironic_discoverd import conf
from ironic_discoverd.plugins import base
from ironic_discoverd import utils
from daisy_discoverd import conf
from daisy_discoverd.plugins import base
from daisy_discoverd import utils
LOG = logging.getLogger('ironic_discoverd.plugins.standard')
LOG = logging.getLogger('daisy_discoverd.plugins.standard')
class SchedulerHook(base.ProcessingHook):


@@ -21,15 +21,14 @@ import eventlet
from logging import handlers
from ironic_discoverd import conf
from ironic_discoverd import firewall
from ironic_discoverd.plugins import base as plugins_base
from ironic_discoverd import utils
from daisy_discoverd import conf
from daisy_discoverd.plugins import base as plugins_base
from daisy_discoverd import utils
LOG = logging.getLogger("ironic_discoverd.process")
LOG = logging.getLogger("daisy_discoverd.process")
fh = handlers.RotatingFileHandler(
'/var/log/ironic/parse.log',
'/var/log/daisy-discoverd/parse.log',
'a', maxBytes=2 * 1024 * 1024, backupCount=5)
formatter = logging.Formatter(
'%(asctime)-12s:%(name)s:%(levelname)s:%(message)s')
@@ -111,31 +110,6 @@ def find_min_mac_in_node_info(node_info):
return min_mac
def format_node_info_for_ironic(node_info):
patch = []
for property in node_info.keys():
property_dict = node_info[property]
for key, value in property_dict.items():
data_dict = {'op': 'add'}
key = key.replace(':', '-').replace('.', '-')
if property == 'disk':
data_dict['path'] = '/' + property + 's' + '/' + key
else:
data_dict['path'] = '/' + property + '/' + key
if property == 'interfaces' and 'vf' in value:
value_copy = copy.deepcopy(value)
value_copy.pop('vf')
data_dict['value'] = value_copy
else:
data_dict['value'] = value
patch.append(data_dict)
LOG.debug('patch:%s', patch)
return patch
def _run_post_hooks(node, ports, node_info):
hooks = plugins_base.processing_hooks_manager()
port_instances = list(ports.values())


@@ -16,9 +16,9 @@ import unittest
import mock
from ironic_discoverd import conf
from ironic_discoverd import node_cache
from ironic_discoverd.plugins import base as plugins_base
from daisy_discoverd import conf
from daisy_discoverd import node_cache
from daisy_discoverd.plugins import base as plugins_base
def init_test_conf():


@@ -15,7 +15,7 @@ import unittest
import mock
from ironic_discoverd import client
from daisy_discoverd import client
@mock.patch.object(client.requests, 'post', autospec=True)


@@ -17,15 +17,15 @@ import unittest
import eventlet
import mock
from ironic_discoverd import conf
from ironic_discoverd import introspect
from ironic_discoverd import main
from ironic_discoverd import node_cache
from ironic_discoverd.plugins import base as plugins_base
from ironic_discoverd.plugins import example as example_plugin
from ironic_discoverd import process
from ironic_discoverd.test import base as test_base
from ironic_discoverd import utils
from daisy_discoverd import conf
from daisy_discoverd import introspect
from daisy_discoverd import main
from daisy_discoverd import node_cache
from daisy_discoverd.plugins import base as plugins_base
from daisy_discoverd.plugins import example as example_plugin
from daisy_discoverd import process
from daisy_discoverd.test import base as test_base
from daisy_discoverd import utils
class TestApi(test_base.BaseTest):
@@ -117,29 +117,6 @@ class TestApi(test_base.BaseTest):
@mock.patch.object(eventlet.greenthread, 'sleep', autospec=True)
@mock.patch.object(utils, 'get_client')
class TestCheckIronicAvailable(test_base.BaseTest):
def test_ok(self, client_mock, sleep_mock):
main.check_ironic_available()
client_mock.return_value.driver.list.assert_called_once_with()
self.assertFalse(sleep_mock.called)
def test_2_attempts(self, client_mock, sleep_mock):
cli = mock.Mock()
client_mock.side_effect = [Exception(), cli]
main.check_ironic_available()
self.assertEqual(2, client_mock.call_count)
cli.driver.list.assert_called_once_with()
sleep_mock.assert_called_once_with(
conf.getint('discoverd', 'ironic_retry_period'))
def test_failed(self, client_mock, sleep_mock):
attempts = conf.getint('discoverd', 'ironic_retry_attempts')
client_mock.side_effect = RuntimeError()
self.assertRaises(RuntimeError, main.check_ironic_available)
self.assertEqual(1 + attempts, client_mock.call_count)
self.assertEqual(attempts, sleep_mock.call_count)
class TestPlugins(unittest.TestCase):
@mock.patch.object(example_plugin.ExampleProcessingHook,
'before_processing', autospec=True)
@@ -163,10 +140,10 @@ class TestPlugins(unittest.TestCase):
class TestConfigShim(unittest.TestCase):
def test_old_style_invocation(self):
self.assertEqual(main.config_shim(
['ironic-discoverd', '/etc/conf']),
['daisy-discoverd', '/etc/conf']),
['--config-file', '/etc/conf'])
def test_new_style_returns_None(self):
self.assertEqual(main.config_shim(
['ironic-discoverd', '--config-file', '/etc/conf']),
['daisy-discoverd', '--config-file', '/etc/conf']),
None)


@@ -18,10 +18,10 @@ import unittest
import mock
from ironic_discoverd import conf
from ironic_discoverd import node_cache
from ironic_discoverd.test import base as test_base
from ironic_discoverd import utils
from daisy_discoverd import conf
from daisy_discoverd import node_cache
from daisy_discoverd.test import base as test_base
from daisy_discoverd import utils
class TestNodeCache(test_base.NodeTest):


@@ -17,10 +17,10 @@ from daisyclient import client as daisy_client
from keystonemiddleware import auth_token
import six
from ironic_discoverd import conf
from daisy_discoverd import conf
LOG = logging.getLogger('ironic_discoverd.utils')
LOG = logging.getLogger('daisy_discoverd.utils')
OS_ARGS = ('os_password', 'os_username', 'os_auth_url', 'os_tenant_name')
MIDDLEWARE_ARGS = ('admin_password', 'admin_user', 'auth_uri',
'admin_tenant_name')


@@ -16,19 +16,6 @@
; Daisy endpoint
;daisy_url = http://127.0.0.1:19292
; Number of attempts to do when trying to connect to Ironic on start up.
;ironic_retry_attempts = 5
; Amount of time between attempts to connect to Ironic on start up.
;ironic_retry_period = 20
;; Firewall management settings
; Whether to manage firewall rules for PXE port.
;manage_firewall = true
; Interface on which dnsmasq listens, the default is for VM's.
;dnsmasq_interface = br-ctlplane
; Amount of time in seconds, after which repeat periodic update of firewall.
;firewall_update_period = 15
;; Introspection process settings
@@ -67,7 +54,7 @@
; SQLite3 database to store nodes under introspection, required.
; Do not use :memory: here, it won't work.
database =/var/lib/ironic-discoverd/discoverd.sqlite
database =/var/lib/daisy-discoverd/discoverd.sqlite
; Comma-separated list of enabled hooks for processing pipeline.
; Hook 'scheduler' updates the node with the minimum properties required by the
; Nova scheduler. Hook 'validate_interfaces' ensures that valid NIC data was


@@ -28,11 +28,11 @@ import unittest
import mock
import requests
from ironic_discoverd import client
from ironic_discoverd import conf
from ironic_discoverd import main
from ironic_discoverd.test import base
from ironic_discoverd import utils
from daisy_discoverd import client
from daisy_discoverd import conf
from daisy_discoverd import main
from daisy_discoverd.test import base
from daisy_discoverd import utils
CONF = """
@@ -41,14 +41,13 @@ os_auth_url = http://url
os_username = user
os_password = password
os_tenant_name = tenant
manage_firewall = false
"""
ROOT = './functest/env'
RAMDISK = ("https://raw.githubusercontent.com/openstack/diskimage-builder/"
"master/elements/ironic-discoverd-ramdisk/"
"init.d/80-ironic-discoverd-ramdisk")
"master/elements/daisy-discoverd-ramdisk/"
"init.d/80-daisy-discoverd-ramdisk")
JQ = "https://stedolan.github.io/jq/download/linux64/jq"
@@ -56,7 +55,6 @@ JQ = "https://stedolan.github.io/jq/download/linux64/jq"
class Test(base.NodeTest):
def setUp(self):
super(Test, self).setUp()
conf.CONF.set('discoverd', 'manage_firewall', 'false')
self.node.properties.clear()
self.cli = utils.get_client()


@@ -1,5 +1,5 @@
[metadata]
name = daisy-discoveryd
name = daisy-discoverd
summary = Daisy discovery agent
description-file =
README.rst


@@ -13,29 +13,29 @@ except EnvironmentError:
install_requires = []
with open('ironic_discoverd/__init__.py', 'rb') as fp:
with open('daisy_discoverd/__init__.py', 'rb') as fp:
exec(fp.read())
setup(
name = "ironic-discoverd",
name = "daisy-discoverd",
version = __version__,
description = open('README.rst', 'r').readline().strip(),
author = "Dmitry Tantsur",
author_email = "dtantsur@redhat.com",
url = "https://pypi.python.org/pypi/ironic-discoverd",
packages = ['ironic_discoverd', 'ironic_discoverd.plugins',
'ironic_discoverd.test'],
url = "https://pypi.python.org/pypi/daisy-discoverd",
packages = ['daisy_discoverd', 'daisy_discoverd.plugins',
'daisy_discoverd.test'],
install_requires = install_requires,
entry_points = {
'console_scripts': [
"ironic-discoverd = ironic_discoverd.main:main"
"daisy-discoverd = daisy_discoverd.main:main"
],
'ironic_discoverd.hooks': [
"scheduler = ironic_discoverd.plugins.standard:SchedulerHook",
"validate_interfaces = ironic_discoverd.plugins.standard:ValidateInterfacesHook",
"ramdisk_error = ironic_discoverd.plugins.standard:RamdiskErrorHook",
"example = ironic_discoverd.plugins.example:ExampleProcessingHook",
'daisy_discoverd.hooks': [
"scheduler = daisy_discoverd.plugins.standard:SchedulerHook",
"validate_interfaces = daisy_discoverd.plugins.standard:ValidateInterfacesHook",
"ramdisk_error = daisy_discoverd.plugins.standard:RamdiskErrorHook",
"example = daisy_discoverd.plugins.example:ExampleProcessingHook",
],
},
classifiers = [


@@ -7,7 +7,7 @@ deps =
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
coverage run --branch --include "ironic_discoverd*" -m unittest discover ironic_discoverd.test
coverage run --branch --include "daisy_discoverd*" -m unittest discover daisy_discoverd.test
coverage report -m --fail-under 90
setenv = PYTHONDONTWRITEBYTECODE=1
@@ -17,16 +17,13 @@ deps =
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
flake8 ironic_discoverd
flake8 daisy_discoverd
rst2html.py --strict README.rst /dev/null
rst2html.py --strict CONTRIBUTING.rst /dev/null
[flake8]
max-complexity=15
[hacking]
import_exceptions = ironicclient.exceptions
[testenv:func]
basepython = python2.7
deps =


@@ -73,11 +73,11 @@ def build_pxe_server(eth_name, ip_address, build_pxe, net_mask,
pxe_dict['client_ip_begin'] = client_ip_begin
pxe_dict['client_ip_end'] = client_ip_end
LOG.info('pxe_dict=%s' % pxe_dict)
with open('/var/log/ironic/pxe.json', 'w') as f:
with open('/var/lib/daisy/pxe.json', 'w') as f:
json.dump(pxe_dict, f, indent=2)
f.close()
_PIPE = subprocess.PIPE
cmd = "/usr/bin/pxe_server_install /var/log/ironic/pxe.json && \
cmd = "/usr/bin/pxe_server_install /var/lib/daisy/pxe.json && \
chmod 755 /tftpboot -R"
try:
obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE,
@@ -121,12 +121,12 @@ def set_boot_or_power_state(user, passwd, addr, action):
def install_os(**kwargs):
json_file = "/var/log/ironic/%s.json" % kwargs['dhcp_mac']
json_file = "/var/lib/daisy/%s.json" % kwargs['dhcp_mac']
with open(json_file, 'w') as f:
json.dump(kwargs, f, indent=2)
f.close()
_PIPE = subprocess.PIPE
cmd = "/usr/bin/pxe_os_install /var/log/ironic/%s.json && \
cmd = "/usr/bin/pxe_os_install /var/lib/daisy/%s.json && \
chmod 755 /tftpboot -R && \
chmod 755 /home/install_share -R" % kwargs['dhcp_mac']
try:
@@ -515,10 +515,10 @@ class OSInstall():
rc = set_boot_or_power_state(user, passwd, addr, action)
if rc == 0:
LOG.info(
_("Set %s to '%s' successfully for %s times by ironic" % (
_("Set %s to '%s' successfully for %s times by discov" % (
addr, action, count + 1)))
host_status = {'messages': "Set %s to '%s' successfully for "
"%s times by ironic" % (
"%s times by discov" % (
addr, action, count + 1)}
daisy_cmn.update_db_host_status(self.req, host_detail['id'],
host_status)
@@ -540,10 +540,10 @@
else:
count += 1
LOG.info(
_("Try setting %s to '%s' failed for %s times by ironic"
_("Try setting %s to '%s' failed for %s times by discov"
% (addr, action, count)))
host_status = {'messages': "Set %s to '%s' failed for "
"%s times by ironic" % (
"%s times by discov" % (
addr, action, count + 1)}
daisy_cmn.update_db_host_status(self.req, host_detail['id'],
host_status)
@@ -734,7 +734,7 @@
'messages': error}
daisy_cmn.update_db_host_status(self.req, host_detail['id'],
host_status)
msg = "ironic install os return failed for host %s" % \
msg = "discov install os return failed for host %s" % \
host_detail['id']
raise exception.OSInstallFailed(message=msg)


@@ -55,7 +55,6 @@ ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
DISCOVER_DEFAULTS = {
'listen_port': '5050',
'ironic_url': 'http://127.0.0.1:6385/v1',
}
ML2_TYPE = [
@@ -378,7 +377,7 @@ class Controller(controller.BaseController):
if os_status == "active":
msg = _(
'The host %s os_status is active,'
'forbidden ironic to add host.') % exist_id
'forbidden daisy-discoverd to add host.') % exist_id
LOG.error(msg)
raise HTTPBadRequest(explanation=msg)
host_meta['id'] = exist_id
@@ -640,10 +639,10 @@
request=req,
content_type="text/plain")
for ironic_keyword in ['cpu', 'system', 'memory',
for discov_keyword in ['cpu', 'system', 'memory',
'pci', 'disks', 'devices']:
if host_meta.get(ironic_keyword):
host_meta[ironic_keyword] = eval(host_meta.get(ironic_keyword))
if host_meta.get(discov_keyword):
host_meta[discov_keyword] = eval(host_meta.get(discov_keyword))
host_meta = registry.add_host_metadata(req.context, host_meta)
@@ -1898,11 +1897,11 @@
daisy_cmn.add_ssh_host_to_cluster_and_assigned_network(
req, host_meta['cluster'], id)
for ironic_keyword in ['cpu', 'system', 'memory',
for discov_keyword in ['cpu', 'system', 'memory',
'pci', 'disks', 'devices']:
if host_meta.get(ironic_keyword):
host_meta[ironic_keyword] = eval(
host_meta.get(ironic_keyword))
if host_meta.get(discov_keyword):
host_meta[discov_keyword] = eval(
host_meta.get(discov_keyword))
host_meta = registry.update_host_metadata(req.context, id,
host_meta)
@@ -1986,7 +1985,7 @@
% (discover_host_meta['ip'],),
shell=True, stderr=subprocess.STDOUT)
if 'Failed connect to' in exc_result:
# when openstack-ironic-discoverd.service has problem
# when daisy-discoverd.service has problem
update_info = {}
update_info['status'] = 'DISCOVERY_FAILED'
update_info['message'] = "Do getnodeinfo.sh %s failed!" \
@@ -2130,7 +2129,7 @@
backend_driver.getnodeinfo_ip(daisy_management_ip)
config_discoverd = ConfigParser.ConfigParser(
defaults=DISCOVER_DEFAULTS)
config_discoverd.read("/etc/ironic-discoverd/discoverd.conf")
config_discoverd.read("/etc/daisy-discoverd/discoverd.conf")
listen_port = config_discoverd.get("discoverd", "listen_port")
if listen_port:
backends = get_backend()
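
The configuration lookup above reduces to stock Python 2 ConfigParser;
a standalone sketch with only the default that survives this commit::

    # ConfigParser as imported in this module (Python 2); after this
    # commit DISCOVER_DEFAULTS carries only listen_port.
    import ConfigParser

    DISCOVER_DEFAULTS = {'listen_port': '5050'}

    config = ConfigParser.ConfigParser(defaults=DISCOVER_DEFAULTS)
    config.read("/etc/daisy-discoverd/discoverd.conf")
    listen_port = config.get("discoverd", "listen_port")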


@@ -80,7 +80,6 @@ DAISY_TEST_SOCKET_FD_STR = 'DAISY_TEST_SOCKET_FD'
DISCOVER_DEFAULTS = {
'listen_port': '5050',
'ironic_url': 'http://127.0.0.1:6385/v1',
}
SUPPORT_BACKENDS = ['proton', 'zenic', 'tecs', 'kolla']


@@ -365,7 +365,7 @@ class Controller(object):
LOG.error(msg)
return exc.HTTPBadRequest(msg)
# TODO delete ironic host by mac
# TODO delete discovered host by mac
return dict(host=deleted_host)
except exception.ForbiddenPublicImage:
msg = _LI("Delete denied for public host %(id)s") % {'id': id}


@@ -222,7 +222,7 @@ class TestVcpuPin(test.TestCase):
dvs_high = list(set(numa_node1) - set([18, 19, 20, 21, 22, 23]))
dvs_cpusets = {'high': numa_node1, 'low': numa_node0,
'dvs': {'dvsc': [20, 21], 'dvsp': [22, 23],
'dvsc': [18, 19]}}
'dvsv': [18, 19]}}
roles_name = ['COMPUTER']
os_cpus = vcpu_pin.allocate_os_cpus(roles_name,


@@ -42,5 +42,5 @@ commands = python setup.py build_sphinx
# H404 multi line docstring should start with a summary
# H405 multi line docstring summary not separated with an empty line
# H904 Wrap long lines in parentheses instead of a backslash
ignore = E711,E712,H302,H402,H404,H405,H904,F841,F821,E265,F812,F402,E226,E731,H101,H201,H231,H233,H237,H238,H301,H306,H401,H403,H701,H702,H703
ignore = E711,E712,H302,H402,H404,H405,H904,F841,F821,E265,F812,F402,E226,E731,H101,H201,H231,H233,H237,H238,H301,H306,H401,H403,H701,H702,H703,F999
exclude = .venv,.git,.tox,dist,doc,etc,*daisy/locale*,*openstack/common*,*lib/python*,*egg,build,daisy/db/sqlalchemy/api.py,daisy/i18n.py


@@ -36,6 +36,6 @@ downloadcache = ~/cache/pip
# H302 import only modules
# H303 no wildcard import
# H404 multi line docstring should start with a summary
ignore = F403,F812,F821,H233,H302,H303,H404,F841,F401,E731,H101,H201,H231,H233,H237,H238,H301,H306,H401,H403,H701,H702,H703
ignore = F403,F812,F821,H233,H302,H303,H404,F841,F401,E731,H101,H201,H231,H233,H237,H238,H301,H306,H401,H403,H701,H702,H703,F999
show-source = True
exclude = .venv,.tox,dist,doc,*egg,build


@@ -146,14 +146,14 @@ def get_format_memory_size(str_memory):
def get_suggest_os_cpus():
# TO DO
# get suggest os cpu core number of host from ironic
# get suggest os cpu core number of host from discov
# the default "1" is minimum mumber,so we choose it
return "1"
def get_suggest_dvs_cpus():
# TO DO
# get suggest dvs cpu core number of host from ironic
# get suggest dvs cpu core number of host from discov
# the default "1" is minimum mumber,so we choose it
return "1"


@@ -59,7 +59,7 @@ downloadcache = ~/cache/pip
[flake8]
exclude = .venv,.git,.tox,dist,*openstack/common*,*lib/python*,*egg,build,panel_template,dash_template,local_settings.py,*/local/*,*/test/test_plugins/*,.ropeproject
# H405 multi line docstring summary not separated with an empty line
ignore = H405,F821,F841,C901,E731,F405,H101,H201,H231,H233,H237,H238,H301,H306,H401,H403,H701,H702,H703
ignore = H405,F821,F841,C901,E731,F405,H101,H201,H231,H233,H237,H238,H301,H306,H401,H403,H701,H702,H703,F999
max-complexity = 20
[hacking]


@@ -1,83 +0,0 @@
=================
How To Contribute
=================
Basics
~~~~~~
* Our source code is hosted on StackForge_ GitHub, but please do not send pull
requests there.
* Please follow usual OpenStack `Gerrit Workflow`_ to submit a patch.
* Update change log in README.rst on any significant change.
* It goes without saying that any code change should by accompanied by unit
tests.
* Note the branch you're proposing changes to. ``master`` is the current focus
of development, use ``stable/VERSION`` for proposing an urgent fix, where
``VERSION`` is the current stable series. E.g. at the moment of writing the
stable branch is ``stable/0.2``.
* Please file a launchpad_ blueprint for any significant code change and a bug
for any significant bug fix.
.. _StackForge: https://github.com/stackforge/ironic-discoverd
.. _Gerrit Workflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow
.. _launchpad: https://bugs.launchpad.net/ironic-discoverd
Development Environment
~~~~~~~~~~~~~~~~~~~~~~~
First of all, install *tox* utility. It's likely to be in your distribution
repositories under name of ``python-tox``. Alternatively, you can install it
from PyPI.
Next checkout and create environments::
git clone https://github.com/stackforge/ironic-discoverd.git
cd ironic-discoverd
tox
Repeat *tox* command each time you need to run tests. If you don't have Python
interpreter of one of supported versions (currently 2.7 and 3.3), use
``-e`` flag to select only some environments, e.g.
::
tox -e py27
.. note::
Support for Python 3 is highly experimental, stay with Python 2 for the
production environment for now.
There is a simple functional test that involves fetching the ramdisk from
Github::
tox -e func
Run the service with::
.tox/py27/bin/ironic-discoverd --config-file example.conf
Of course you may have to modify ``example.conf`` to match your OpenStack
environment.
Writing a Plugin
~~~~~~~~~~~~~~~~
**ironic-discoverd** allows to hook your code into data processing chain after
introspection. Inherit ``ProcessingHook`` class defined in
`ironic_discoverd.plugins.base
<https://github.com/stackforge/ironic-discoverd/blob/master/ironic_discoverd/plugins/base.py>`_
module and overwrite any or both of the following methods:
``before_processing(node_info)``
called before any data processing, providing the raw data. Each plugin in
the chain can modify the data, so order in which plugins are loaded
matters here. Returns nothing.
``before_update(node,ports,node_info)``
called after node is found and ports are created, but before data is
updated on a node. Returns JSON patches for node and ports to apply.
Please refer to the docstring for details and examples.


@@ -1,448 +0,0 @@
Hardware introspection for OpenStack Ironic
===========================================
This is an auxiliary service for discovering hardware properties for a
node managed by `OpenStack Ironic`_. Hardware introspection or hardware
properties discovery is a process of getting hardware parameters required for
scheduling from a bare metal node, given it's power management credentials
(e.g. IPMI address, user name and password).
A special *discovery ramdisk* is required to collect the information on a
node. The default one can be built using diskimage-builder_ and
`ironic-discoverd-ramdisk element`_ (see Configuration_ below).
Support for **ironic-discoverd** is present in `Tuskar UI`_ --
OpenStack Horizon plugin for TripleO_.
**ironic-discoverd** requires OpenStack Juno (2014.2) release or newer.
Please use launchpad_ to report bugs and ask questions. Use PyPI_ for
downloads and accessing the released version of this README. Refer to
CONTRIBUTING.rst_ for instructions on how to contribute.
.. _OpenStack Ironic: https://wiki.openstack.org/wiki/Ironic
.. _Tuskar UI: https://pypi.python.org/pypi/tuskar-ui
.. _TripleO: https://wiki.openstack.org/wiki/TripleO
.. _launchpad: https://bugs.launchpad.net/ironic-discoverd
.. _PyPI: https://pypi.python.org/pypi/ironic-discoverd
.. _CONTRIBUTING.rst: https://github.com/stackforge/ironic-discoverd/blob/master/CONTRIBUTING.rst
Workflow
--------
Usual hardware introspection flow is as follows:
* Operator installs undercloud with **ironic-discoverd**
(e.g. using instack-undercloud_).
* Operator enrolls nodes into Ironic either manually or by uploading CSV file
to `Tuskar UI`_. Power management credentials should be provided to Ironic
at this step.
* Operator sends nodes on introspection either manually using
**ironic-discoverd** `HTTP API`_ or again via `Tuskar UI`_.
* On receiving node UUID **ironic-discoverd**:
* validates node power credentials, current power and provisioning states,
* allows firewall access to PXE boot service for the nodes,
* issues reboot command for the nodes, so that they boot the
discovery ramdisk.
* The discovery ramdisk collects the required information and posts it back
to **ironic-discoverd**.
* On receiving data from the discovery ramdisk, **ironic-discoverd**:
* validates received data,
* finds the node in Ironic database using it's BMC address (MAC address in
case of SSH driver),
* fills missing node properties with received data and creates missing ports.
* Separate `HTTP API`_ can be used to query introspection results for a given
node.
Starting DHCP server and configuring PXE boot environment is not part of this
package and should be done separately.
.. _instack-undercloud: https://openstack.redhat.com/Deploying_an_RDO_Undercloud_with_Instack
Installation
------------
**ironic-discoverd** is available as an RPM from Fedora 22 repositories or from
Juno RDO_ for Fedora 20, 21 and EPEL 7. It will be installed and preconfigured
if you used instack-undercloud_ to build your undercloud.
Otherwise after enabling required repositories install it using::
yum install openstack-ironic-discoverd
Alternatively (e.g. if you need the latest version), you can install package
from PyPI_ (you may want to use virtualenv to isolate your environment)::
pip install ironic-discoverd
The third way for RPM-based distros is to use `ironic-discoverd copr`_ which
contains **unstable** git snapshots of **ironic-discoverd**.
.. _RDO: https://openstack.redhat.com/
.. _ironic-discoverd copr: https://copr.fedoraproject.org/coprs/divius/ironic-discoverd/
Configuration
~~~~~~~~~~~~~
Copy ``example.conf`` to some permanent place
(``/etc/ironic-discoverd/discoverd.conf`` is what is used in the RPM).
Fill in at least configuration values with names starting with ``os_`` and
``identity_uri``. They configure how **ironic-discoverd** authenticates
with Keystone and checks authentication of clients.
Also set *database* option to where you want **ironic-discoverd** SQLite
database to be placed.
See comments inside `example.conf
<https://github.com/stackforge/ironic-discoverd/blob/master/example.conf>`_
for the other possible configuration options.
.. note::
Configuration file contains a password and thus should be owned by ``root``
and should have access rights like ``0600``.
As for PXE boot environment, you'll need:
* TFTP server running and accessible (see below for using *dnsmasq*).
Ensure ``pxelinux.0`` is present in the TFTP root.
* Build and put into your TFTP directory kernel and ramdisk from the
diskimage-builder_ `ironic-discoverd-ramdisk element`_::
ramdisk-image-create -o discovery fedora ironic-discoverd-ramdisk
You need diskimage-builder_ 0.1.38 or newer to do it.
* You need PXE boot server (e.g. *dnsmasq*) running on **the same** machine as
**ironic-discoverd**. Don't do any firewall configuration:
**ironic-discoverd** will handle it for you. In **ironic-discoverd**
configuration file set ``dnsmasq_interface`` to the interface your
PXE boot server listens on. Here is an example *dnsmasq.conf*::
port=0
interface={INTERFACE}
bind-interfaces
dhcp-range={DHCP IP RANGE, e.g. 192.168.0.50,192.168.0.150}
enable-tftp
tftp-root={TFTP ROOT, e.g. /tftpboot}
dhcp-boot=pxelinux.0
* Configure your ``$TFTPROOT/pxelinux.cfg/default`` with something like::
default discover
label discover
kernel discovery.kernel
append initrd=discovery.initramfs discoverd_callback_url=http://{IP}:5050/v1/continue
ipappend 3
Replace ``{IP}`` with IP of the machine (do not use loopback interface, it
will be accessed by ramdisk on a booting machine).
.. note::
There are some prebuilt images which use obsolete ``ironic_callback_url``
instead of ``discoverd_callback_url``. Modify ``pxelinux.cfg/default``
accordingly if you have one of these.
Here is *discoverd.conf* you may end up with::
[discoverd]
debug = false
identity_uri = http://127.0.0.1:35357
os_auth_url = http://127.0.0.1:5000/v2.0
os_username = admin
os_password = password
os_tenant_name = admin
dnsmasq_interface = br-ctlplane
.. note::
Set ``debug = true`` if you want to see complete logs.
.. _diskimage-builder: https://github.com/openstack/diskimage-builder
.. _ironic-discoverd-ramdisk element: https://github.com/openstack/diskimage-builder/tree/master/elements/ironic-discoverd-ramdisk
Running
~~~~~~~
If you installed **ironic-discoverd** from the RPM, you already have
a *systemd* unit, so you can::
systemctl enable openstack-ironic-discoverd
systemctl start openstack-ironic-discoverd
Otherwise run as ``root``::
ironic-discoverd --config-file /etc/ironic-discoverd/discoverd.conf
.. note::
Running as ``root`` is not required if **ironic-discoverd** does not
manage the firewall (i.e. ``manage_firewall`` is set to ``false`` in the
configuration file).
A good starting point for writing your own *systemd* unit should be `one used
in Fedora <http://pkgs.fedoraproject.org/cgit/openstack-ironic-discoverd.git/plain/openstack-ironic-discoverd.service>`_.
Usage
-----
**ironic-discoverd** has a simple client library bundled within it.
It provides functions:
* ``ironic_discoverd.client.introspect`` for starting introspection
* ``ironic_discoverd.client.get_status`` for querying introspection status
both accepting:
``uuid``
node UUID
``base_url``
optional **ironic-discoverd** service URL (defaults to ``127.0.0.1:5050``)
``auth_token``
optional Keystone token.
For testing purposes you can also use it from CLI::
python -m ironic_discoverd.client --auth-token TOKEN introspect UUID
python -m ironic_discoverd.client --auth-token TOKEN get_status UUID
.. note::
This CLI interface is not stable and may be changed without prior notice.
Proper supported CLI is `expected later
<https://bugs.launchpad.net/ironic-discoverd/+bug/1410180>`_.
HTTP API
~~~~~~~~
By default **ironic-discoverd** listens on ``0.0.0.0:5050``, port
can be changed in configuration. Protocol is JSON over HTTP.
The HTTP API consist of these endpoints:
* ``POST /v1/introspection/<UUID>`` initiate hardware discovery for node
``<UUID>``. All power management configuration for this node needs to be done
prior to calling the endpoint.
Requires X-Auth-Token header with Keystone token for authentication.
Response:
* 202 - accepted discovery request
* 400 - bad request
* 401, 403 - missing or invalid authentication
* 404 - node cannot be found
Client library function: ``ironic_discoverd.client.introspect``.
* ``GET /v1/introspection/<UUID>`` get hardware discovery status.
Requires X-Auth-Token header with Keystone token for authentication.
Response:
* 200 - OK
* 400 - bad request
* 401, 403 - missing or invalid authentication
* 404 - node cannot be found
Response body: JSON dictionary with keys:
* ``finished`` (boolean) whether discovery is finished
* ``error`` error string or ``null``
Client library function: ``ironic_discoverd.client.get_status``.
* ``POST /v1/continue`` internal endpoint for the discovery ramdisk to post
back discovered data. Should not be used for anything other than implementing
the ramdisk. Request body: JSON dictionary with at least these keys:
* ``cpus`` number of CPU
* ``cpu_arch`` architecture of the CPU
* ``memory_mb`` RAM in MiB
* ``local_gb`` hard drive size in GiB
* ``interfaces`` dictionary filled with data from all NIC's, keys being
interface names, values being dictionaries with keys:
* ``mac`` MAC address
* ``ip`` IP address
.. note::
This list highly depends on enabled plugins, provided above are
expected keys for the default set of plugins. See Plugins_ for details.
Response:
* 200 - OK
* 400 - bad request
* 403 - node is not on introspection
* 404 - node cannot be found or multiple nodes found
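
For illustration, the two public endpoints above map onto plain
*requests* calls like the following (a sketch only; the base URL, UUID
and token are placeholders, and the service name pre-dates the rename)::

    import requests

    base_url = 'http://127.0.0.1:5050/v1'
    headers = {'X-Auth-Token': 'TOKEN'}   # placeholder Keystone token

    # POST /v1/introspection/<UUID> -- expect 202 on success.
    requests.post('%s/introspection/%s' % (base_url, 'UUID'),
                  headers=headers)

    # GET /v1/introspection/<UUID> -- JSON body with 'finished'/'error'.
    status = requests.get('%s/introspection/%s' % (base_url, 'UUID'),
                          headers=headers).json()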
Plugins
~~~~~~~
**ironic-discoverd** heavily relies on plugins for data processing. Even the
standard functionality is largely based on plugins. Set ``processing_hooks``
option in the configuration file to change the set of plugins to be run on
introspection data. Note that order does matter in this option.
These are plugins that are enabled by default and should not be disabled,
unless you understand what you're doing:
``scheduler``
validates and updates basic hardware scheduling properties: CPU number and
architecture, memory and disk size.
``validate_interfaces``
validates network interfaces information.
Here are some plugins that can be additionally enabled:
``ramdisk_error``
reports error, if ``error`` field is set by the ramdisk.
``example``
example plugin logging it's input and output.
Refer to CONTRIBUTING.rst_ for information on how to write your own plugin.
Release Notes
-------------
1.0 Series
~~~~~~~~~~
1.0 is the first feature-complete release series. It's also the first series
to follow standard OpenStack processes from the beginning. All 0.2 series
users are advised to upgrade.
See `1.0.0 release tracking page`_ for details.
**1.0.1 release**
This maintenance fixed serious problem with authentication and unfortunately
brought new upgrade requirements:
* Dependency on *keystonemiddleware*;
* New configuration option ``identity_uri``, defaulting to localhost.
**Upgrade notes**
Action required:
* Fill in ``database`` option in the configuration file before upgrading.
* Stop relying on **ironic-discoverd** setting maintenance mode itself.
* Stop relying on ``discovery_timestamp`` node extra field.
* Fill in ``identity_uri`` field in the configuration.
Action recommended:
* Switch your init scripts to use ``ironic-discoverd --config-file <path>``
instead of just ``ironic-discoverd <path>``.
* Stop relying on ``on_discovery`` and ``newly_discovered`` being set in node
``extra`` field during and after introspection. Use new get status HTTP
endpoint and client API instead.
* Switch from ``discover`` to ``introspect`` HTTP endpoint and client API.
**Major features**
* Introspection now times out by default, set ``timeout`` option to alter.
* New API ``GET /v1/introspection/<uuid>`` and ``client.get_status`` for
getting discovery status.
See `get-status-api blueprint`_ for details.
* New API ``POST /v1/introspection/<uuid>`` and ``client.introspect``
is now used to initiate discovery, ``/v1/discover`` is deprecated.
See `v1 API reform blueprint`_ for details.
* ``/v1/continue`` is now sync:
* Errors are properly returned to the caller
* This call now returns value as a JSON dict (currently empty)
* Add support for plugins that hook into data processing pipeline. Refer to
Plugins_ for information on bundled plugins and to CONTRIBUTING.rst_ for
information on how to write your own.
See `plugin-architecture blueprint`_ for details.
* Support for OpenStack Kilo release and new Ironic state machine -
see `Kilo state machine blueprint`_.
As a side effect, no longer depend on maintenance mode for introspection.
Stop putting node in maintenance mode before introspection.
* Cache nodes under introspection in a local SQLite database.
``database`` configuration option sets where to place this database.
Improves performance by making less calls to Ironic API and makes possible
to get results of introspection.
**Other Changes**
* Firewall management can be disabled completely via ``manage_firewall``
option.
* Experimental support for updating IPMI credentials from within ramdisk.
Enable via configuration option ``enable_setting_ipmi_credentials``.
Beware that this feature lacks proper testing, is not supported
officially yet and is subject to changes without keeping backward
compatibility.
See `setup-ipmi-credentials blueprint`_ for details.
**Known Issues**
* `bug 1415040 <https://bugs.launchpad.net/ironic-discoverd/+bug/1415040>`_
it is required to set IP addresses instead of host names in
``ipmi_address``/``ilo_address``/``drac_host`` node ``driver_info`` field
for **ironic-discoverd** to work properly.
.. _1.0.0 release tracking page: https://bugs.launchpad.net/ironic-discoverd/+milestone/1.0.0
.. _setup-ipmi-credentials blueprint: https://blueprints.launchpad.net/ironic-discoverd/+spec/setup-ipmi-credentials
.. _plugin-architecture blueprint: https://blueprints.launchpad.net/ironic-discoverd/+spec/plugin-architecture
.. _get-status-api blueprint: https://blueprints.launchpad.net/ironic-discoverd/+spec/get-status-api
.. _Kilo state machine blueprint: https://blueprints.launchpad.net/ironic-discoverd/+spec/kilo-state-machine
.. _v1 API reform blueprint: https://blueprints.launchpad.net/ironic-discoverd/+spec/v1-api-reform
0.2 Series
~~~~~~~~~~
0.2 series is designed to work with OpenStack Juno release.
The major changes are:
**API**
* Authentication via Keystone for ``/v1/discover``.
* Expect ``interfaces`` instead of ``macs`` in post-back from the ramdisk
**[version 0.2.1]**.
* If ``interfaces`` is present, only add ports for NIC's with IP address set
**[version 0.2.1]**.
* ``/v1/discover`` now does some sync sanity checks **[version 0.2.2]**.
* Nodes will be always put into maintenance mode before discovery
**[version 0.2.1]**.
**Configuration**
* Periodic firewall update is now configurable.
* On each start-up make several attempts to check that Ironic is available
**[version 0.2.2]**.
**Misc**
* Simple client in ``ironic_discoverd.client``.
* Preliminary supported for Python 3.3 (real support depends on Eventlet).
0.1 Series
~~~~~~~~~~
First stable release series. Not supported any more.


@@ -1,20 +0,0 @@
.\" Manpage for ironic-discoverd.
.TH man 8 "08 Oct 2014" "1.0" "ironic-discoverd man page"
.SH NAME
ironic-discoverd \- hardware discovery daemon for OpenStack Ironic.
.SH SYNOPSIS
ironic-discoverd CONFFILE
.SH DESCRIPTION
This command starts ironic-discoverd service, which starts and finishes
hardware discovery and maintains firewall rules for nodes accessing PXE
boot service (usually dnsmasq).
.SH OPTIONS
The ironic-discoverd does not take any options. However, you should supply
path to the configuration file.
.SH SEE ALSO
README page located at https://pypi.python.org/pypi/ironic-discoverd
provides some information about how to configure and use the service.
.SH BUGS
No known bugs.
.SH AUTHOR
Dmitry Tantsur (divius.inside@gmail.com)


@@ -1,117 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
from eventlet import semaphore
from ironic_discoverd import conf
from ironic_discoverd import node_cache
from ironic_discoverd import utils
LOG = logging.getLogger("ironic_discoverd.firewall")
NEW_CHAIN = 'discovery_temp'
CHAIN = 'discovery'
INTERFACE = None
LOCK = semaphore.BoundedSemaphore()
def _iptables(*args, **kwargs):
cmd = ('iptables',) + args
ignore = kwargs.pop('ignore', False)
LOG.debug('Running iptables %s', args)
kwargs['stderr'] = subprocess.STDOUT
try:
subprocess.check_output(cmd, **kwargs)
except subprocess.CalledProcessError as exc:
if ignore:
LOG.debug('ignoring failed iptables %s:\n%s', args, exc.output)
else:
LOG.error('iptables %s failed:\n%s', args, exc.output)
raise
def init():
"""Initialize firewall management.
Must be called one on start-up.
"""
if not conf.getboolean('discoverd', 'manage_firewall'):
return
global INTERFACE
INTERFACE = conf.get('discoverd', 'dnsmasq_interface')
_clean_up(CHAIN)
# Not really needed, but helps to validate that we have access to iptables
_iptables('-N', CHAIN)
def _clean_up(chain):
_iptables('-D', 'INPUT', '-i', INTERFACE, '-p', 'udp',
'--dport', '67', '-j', chain,
ignore=True)
_iptables('-F', chain, ignore=True)
_iptables('-X', chain, ignore=True)
def update_filters(ironic=None):
"""Update firewall filter rules for introspection.
Gives access to PXE boot port for any machine, except for those,
whose MAC is registered in Ironic and is not on introspection right now.
This function is called from both introspection initialization code and
from periodic task. This function is supposed to be resistant to unexpected
iptables state.
``init()`` function must be called once before any call to this function.
This function is using ``eventlet`` semaphore to serialize access from
different green threads.
Does nothing, if firewall management is disabled in configuration.
:param ironic: Ironic client instance, optional.
"""
if not conf.getboolean('discoverd', 'manage_firewall'):
return
assert INTERFACE is not None
ironic = utils.get_client() if ironic is None else ironic
with LOCK:
macs_active = set(p.address for p in ironic.port.list(limit=0))
to_blacklist = macs_active - node_cache.active_macs()
LOG.debug('Blacklisting active MAC\'s %s', to_blacklist)
# Clean up a bit to account for possible troubles on previous run
_clean_up(NEW_CHAIN)
# Operate on temporary chain
_iptables('-N', NEW_CHAIN)
# - Blacklist active macs, so that nova can boot them
for mac in to_blacklist:
_iptables('-A', NEW_CHAIN, '-m', 'mac',
'--mac-source', mac, '-j', 'DROP')
# - Whitelist everything else
_iptables('-A', NEW_CHAIN, '-j', 'ACCEPT')
# Swap chains
_iptables('-I', 'INPUT', '-i', INTERFACE, '-p', 'udp',
'--dport', '67', '-j', NEW_CHAIN)
_iptables('-D', 'INPUT', '-i', INTERFACE, '-p', 'udp',
'--dport', '67', '-j', CHAIN,
ignore=True)
_iptables('-F', CHAIN, ignore=True)
_iptables('-X', CHAIN, ignore=True)
_iptables('-E', NEW_CHAIN, CHAIN)


@@ -1,294 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
from ironicclient import exceptions
import mock
from ironic_discoverd import conf
from ironic_discoverd import firewall
from ironic_discoverd import introspect
from ironic_discoverd import node_cache
from ironic_discoverd.test import base as test_base
from ironic_discoverd import utils
@mock.patch.object(eventlet.greenthread, 'sleep', lambda _: None)
@mock.patch.object(eventlet.greenthread, 'spawn_n',
lambda f, *a, **kw: f(*a, **kw) and None)
@mock.patch.object(firewall, 'update_filters', autospec=True)
@mock.patch.object(node_cache, 'add_node', autospec=True)
@mock.patch.object(utils, 'get_client', autospec=True)
class TestIntrospect(test_base.NodeTest):
def setUp(self):
super(TestIntrospect, self).setUp()
self.node.power_state = 'power off'
self.node_compat = mock.Mock(driver='pxe_ssh',
uuid='uuid_compat',
driver_info={},
maintenance=True,
# allowed with maintenance=True
power_state='power on',
provision_state='foobar',
extra={'on_discovery': True})
self.ports = [mock.Mock(address=m) for m in self.macs]
self.patch = [{'op': 'add', 'path': '/extra/on_discovery',
'value': 'true'}]
self.cached_node = mock.Mock(uuid=self.uuid)
def test_ok(self, client_mock, add_mock, filters_mock):
cli = client_mock.return_value
cli.node.get.return_value = self.node
cli.node.validate.return_value = mock.Mock(power={'result': True})
cli.node.list_ports.return_value = self.ports
add_mock.return_value = self.cached_node
introspect.introspect(self.node.uuid)
cli.node.get.assert_called_once_with(self.uuid)
cli.node.validate.assert_called_once_with(self.uuid)
cli.node.list_ports.assert_called_once_with(self.uuid, limit=0)
cli.node.update.assert_called_once_with(self.uuid, self.patch)
add_mock.assert_called_once_with(self.uuid,
bmc_address=self.bmc_address,
mac=self.macs)
filters_mock.assert_called_with(cli)
cli.node.set_boot_device.assert_called_once_with(self.uuid,
'pxe',
persistent=False)
cli.node.set_power_state.assert_called_once_with(self.uuid,
'reboot')
add_mock.return_value.set_option.assert_called_once_with(
'setup_ipmi_credentials', False)
def test_ok_ilo_and_drac(self, client_mock, add_mock, filters_mock):
cli = client_mock.return_value
cli.node.get.return_value = self.node
cli.node.validate.return_value = mock.Mock(power={'result': True})
cli.node.list_ports.return_value = self.ports
add_mock.return_value = self.cached_node
for name in ('ilo_address', 'drac_host'):
self.node.driver_info = {name: self.bmc_address}
introspect.introspect(self.node.uuid)
add_mock.assert_called_with(self.uuid,
bmc_address=self.bmc_address,
mac=self.macs)
def test_retries(self, client_mock, add_mock, filters_mock):
cli = client_mock.return_value
cli.node.get.return_value = self.node
cli.node.validate.side_effect = [exceptions.Conflict,
mock.Mock(power={'result': True})]
cli.node.list_ports.return_value = self.ports
cli.node.update.side_effect = [exceptions.Conflict,
exceptions.Conflict,
None]
cli.node.set_boot_device.side_effect = [exceptions.Conflict,
None]
cli.node.set_power_state.side_effect = [exceptions.Conflict,
None]
add_mock.return_value = self.cached_node
introspect.introspect(self.node.uuid)
cli.node.get.assert_called_once_with(self.uuid)
cli.node.validate.assert_called_with(self.uuid)
cli.node.list_ports.assert_called_once_with(self.uuid, limit=0)
cli.node.update.assert_called_with(self.uuid, self.patch)
add_mock.assert_called_once_with(self.uuid,
bmc_address=self.bmc_address,
mac=self.macs)
filters_mock.assert_called_with(cli)
cli.node.set_boot_device.assert_called_with(self.uuid,
'pxe',
persistent=False)
cli.node.set_power_state.assert_called_with(self.uuid,
'reboot')
def test_power_failure(self, client_mock, add_mock, filters_mock):
cli = client_mock.return_value
cli.node.get.return_value = self.node
cli.node.validate.return_value = mock.Mock(power={'result': True})
cli.node.list_ports.return_value = self.ports
cli.node.set_boot_device.side_effect = exceptions.BadRequest()
cli.node.set_power_state.side_effect = exceptions.BadRequest()
add_mock.return_value = self.cached_node
introspect.introspect(self.node.uuid)
cli.node.get.assert_called_once_with(self.uuid)
cli.node.update.assert_called_once_with(self.uuid, self.patch)
add_mock.assert_called_once_with(self.uuid,
bmc_address=self.bmc_address,
mac=self.macs)
cli.node.set_boot_device.assert_called_once_with(self.uuid,
'pxe',
persistent=False)
cli.node.set_power_state.assert_called_once_with(self.uuid,
'reboot')
add_mock.return_value.finished.assert_called_once_with(
error=mock.ANY)
def test_unexpected_error(self, client_mock, add_mock, filters_mock):
cli = client_mock.return_value
cli.node.get.return_value = self.node
cli.node.validate.return_value = mock.Mock(power={'result': True})
cli.node.list_ports.return_value = self.ports
add_mock.return_value = self.cached_node
filters_mock.side_effect = RuntimeError()
introspect.introspect(self.node.uuid)
cli.node.get.assert_called_once_with(self.uuid)
cli.node.update.assert_called_once_with(self.uuid, self.patch)
add_mock.assert_called_once_with(self.uuid,
bmc_address=self.bmc_address,
mac=self.macs)
self.assertFalse(cli.node.set_boot_device.called)
add_mock.return_value.finished.assert_called_once_with(
error=mock.ANY)
def test_juno_compat(self, client_mock, add_mock, filters_mock):
cli = client_mock.return_value
cli.node.get.return_value = self.node_compat
cli.node.validate.return_value = mock.Mock(power={'result': True})
cli.node.list_ports.return_value = self.ports
add_mock.return_value = mock.Mock(uuid=self.node_compat.uuid)
introspect.introspect(self.node_compat.uuid)
cli.node.get.assert_called_once_with(self.node_compat.uuid)
cli.node.validate.assert_called_once_with(self.node_compat.uuid)
cli.node.list_ports.assert_called_once_with(self.node_compat.uuid,
limit=0)
cli.node.update.assert_called_once_with(self.node_compat.uuid,
self.patch)
add_mock.assert_called_once_with(self.node_compat.uuid,
bmc_address=None,
mac=self.macs)
filters_mock.assert_called_with(cli)
cli.node.set_boot_device.assert_called_once_with(self.node_compat.uuid,
'pxe',
persistent=False)
cli.node.set_power_state.assert_called_once_with(self.node_compat.uuid,
'reboot')
def test_no_macs(self, client_mock, add_mock, filters_mock):
cli = client_mock.return_value
cli.node.get.return_value = self.node
cli.node.list_ports.return_value = []
add_mock.return_value = self.cached_node
introspect.introspect(self.node.uuid)
cli.node.list_ports.assert_called_once_with(self.uuid, limit=0)
cli.node.update.assert_called_once_with(self.uuid, self.patch)
add_mock.assert_called_once_with(self.uuid,
bmc_address=self.bmc_address,
mac=[])
self.assertFalse(filters_mock.called)
cli.node.set_boot_device.assert_called_once_with(self.uuid,
'pxe',
persistent=False)
cli.node.set_power_state.assert_called_once_with(self.uuid,
'reboot')
def test_setup_ipmi_credentials(self, client_mock, add_mock, filters_mock):
conf.CONF.set('discoverd', 'enable_setting_ipmi_credentials', 'true')
cli = client_mock.return_value
cli.node.get.return_value = self.node
cli.node.list_ports.return_value = self.ports
cli.node.validate.side_effect = Exception()
introspect.introspect(self.uuid, setup_ipmi_credentials=True)
cli.node.update.assert_called_once_with(self.uuid, self.patch)
add_mock.assert_called_once_with(self.uuid,
bmc_address=self.bmc_address,
mac=self.macs)
filters_mock.assert_called_with(cli)
self.assertFalse(cli.node.set_boot_device.called)
self.assertFalse(cli.node.set_power_state.called)
def test_setup_ipmi_credentials_disabled(self, client_mock, add_mock,
filters_mock):
cli = client_mock.return_value
cli.node.get.return_value = self.node
cli.node.list_ports.return_value = []
cli.node.validate.side_effect = Exception()
self.assertRaisesRegexp(utils.Error, 'disabled',
introspect.introspect, self.uuid,
setup_ipmi_credentials=True)
def test_failed_to_get_node(self, client_mock, add_mock, filters_mock):
cli = client_mock.return_value
cli.node.get.side_effect = exceptions.NotFound()
self.assertRaisesRegexp(utils.Error,
'Cannot find node',
introspect.introspect, self.uuid)
cli.node.get.side_effect = exceptions.BadRequest()
self.assertRaisesRegexp(utils.Error,
'Cannot get node',
introspect.introspect, self.uuid)
self.assertEqual(0, cli.node.list_ports.call_count)
self.assertEqual(0, filters_mock.call_count)
self.assertEqual(0, cli.node.set_power_state.call_count)
self.assertEqual(0, cli.node.update.call_count)
self.assertFalse(add_mock.called)
def test_failed_to_validate_node(self, client_mock, add_mock,
filters_mock):
cli = client_mock.return_value
cli.node.get.return_value = self.node
cli.node.validate.return_value = mock.Mock(power={'result': False,
'reason': 'oops'})
self.assertRaisesRegexp(
utils.Error,
'Failed validation of power interface for node',
introspect.introspect, self.uuid)
cli.node.validate.assert_called_once_with(self.uuid)
self.assertEqual(0, cli.node.list_ports.call_count)
self.assertEqual(0, filters_mock.call_count)
self.assertEqual(0, cli.node.set_power_state.call_count)
self.assertEqual(0, cli.node.update.call_count)
self.assertFalse(add_mock.called)
def test_wrong_provision_state(self, client_mock, add_mock, filters_mock):
self.node.provision_state = 'active'
cli = client_mock.return_value
cli.node.get.return_value = self.node
self.assertRaisesRegexp(
utils.Error,
'node uuid with provision state "active"',
introspect.introspect, self.uuid)
self.assertEqual(0, cli.node.list_ports.call_count)
self.assertEqual(0, filters_mock.call_count)
self.assertEqual(0, cli.node.set_power_state.call_count)
self.assertEqual(0, cli.node.update.call_count)
self.assertFalse(add_mock.called)
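Taken together, the assertions above pin down the whole introspection entry point. The following condensed reconstruction is inferred from these tests, not copied from the shipped module; it keeps the pre-rename ironic_discoverd imports to match the deleted file, and PATCH, the conf accessors, and the provision-state check are assumptions/simplifications:
from ironicclient import exceptions
from ironic_discoverd import conf
from ironic_discoverd import firewall
from ironic_discoverd import node_cache
from ironic_discoverd import utils
# Assumed patch marking the node as being discovered.
PATCH = [{'op': 'add', 'path': '/extra/on_discovery', 'value': 'true'}]
def introspect(uuid, setup_ipmi_credentials=False):
    if setup_ipmi_credentials and not conf.getboolean(
            'discoverd', 'enable_setting_ipmi_credentials'):
        raise utils.Error('IPMI credentials setup is disabled')
    ironic = utils.get_client()
    try:
        node = ironic.node.get(uuid)
    except exceptions.NotFound:
        raise utils.Error('Cannot find node %s' % uuid)
    except exceptions.BadRequest as exc:  # the real catch is likely broader
        raise utils.Error('Cannot get node %s: %s' % (uuid, exc))
    # Simplified: the tests only show that an "active" node is refused.
    if getattr(node, 'provision_state', None) == 'active':
        raise utils.Error('Refusing to introspect node %s with provision '
                          'state "active"' % uuid)
    if not setup_ipmi_credentials:
        validation = ironic.node.validate(uuid)
        if not validation.power['result']:
            raise utils.Error('Failed validation of power interface for '
                              'node %s' % uuid)
    ports = ironic.node.list_ports(uuid, limit=0)
    macs = [port.address for port in ports]
    ironic.node.update(uuid, PATCH)
    node_cache.add_node(uuid,
                        bmc_address=node.driver_info.get('ipmi_address'),
                        mac=macs)
    if macs:
        firewall.update_filters(ironic)
    if setup_ipmi_credentials:
        return  # the ramdisk updates credentials; no PXE reboot from here
    ironic.node.set_boot_device(uuid, 'pxe', persistent=False)
    ironic.node.set_power_state(uuid, 'reboot')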

View File

@ -1,417 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import time
import eventlet
from ironicclient import exceptions
import mock
from ironic_discoverd import conf
from ironic_discoverd import firewall
from ironic_discoverd import node_cache
from ironic_discoverd.plugins import example as example_plugin
from ironic_discoverd import process
from ironic_discoverd.test import base as test_base
from ironic_discoverd import utils
class BaseTest(test_base.NodeTest):
def setUp(self):
super(BaseTest, self).setUp()
conf.CONF.set('discoverd', 'processing_hooks',
'ramdisk_error,scheduler,validate_interfaces')
self.started_at = time.time()
self.all_macs = self.macs + ['DE:AD:BE:EF:DE:AD']
self.data = {
'ipmi_address': self.bmc_address,
'cpus': 2,
'cpu_arch': 'x86_64',
'memory_mb': 1024,
'local_gb': 20,
'interfaces': {
'em1': {'mac': self.macs[0], 'ip': '1.2.0.1'},
'em2': {'mac': self.macs[1], 'ip': '1.2.0.2'},
'em3': {'mac': self.all_macs[2]},
}
}
self.ports = [
mock.Mock(uuid='port_uuid%d' % i, address=mac)
for i, mac in enumerate(self.macs)
]
@mock.patch.object(process, '_process_node', autospec=True)
@mock.patch.object(node_cache, 'find_node', autospec=True)
@mock.patch.object(utils, 'get_client', autospec=True)
class TestProcess(BaseTest):
def setUp(self):
super(TestProcess, self).setUp()
self.fake_result_json = 'node json'
def prepare_mocks(func):
@functools.wraps(func)
def wrapper(self, client_mock, pop_mock, process_mock, *args, **kw):
cli = client_mock.return_value
pop_mock.return_value = node_cache.NodeInfo(
uuid=self.node.uuid,
started_at=self.started_at)
cli.port.create.side_effect = self.ports
cli.node.get.return_value = self.node
process_mock.return_value = self.fake_result_json
return func(self, cli, pop_mock, process_mock, *args, **kw)
return wrapper
@prepare_mocks
def test_ok(self, cli, pop_mock, process_mock):
res = process.process(self.data)
self.assertEqual(self.fake_result_json, res)
# By default interfaces w/o IP are dropped
self.assertEqual(['em1', 'em2'], sorted(self.data['interfaces']))
self.assertEqual(self.macs, sorted(self.data['macs']))
pop_mock.assert_called_once_with(bmc_address=self.bmc_address,
mac=self.data['macs'])
cli.node.get.assert_called_once_with(self.uuid)
process_mock.assert_called_once_with(cli, cli.node.get.return_value,
self.data, pop_mock.return_value)
@prepare_mocks
def test_no_ipmi(self, cli, pop_mock, process_mock):
del self.data['ipmi_address']
process.process(self.data)
pop_mock.assert_called_once_with(bmc_address=None,
mac=self.data['macs'])
cli.node.get.assert_called_once_with(self.uuid)
process_mock.assert_called_once_with(cli, cli.node.get.return_value,
self.data, pop_mock.return_value)
@prepare_mocks
def test_deprecated_macs(self, cli, pop_mock, process_mock):
del self.data['interfaces']
self.data['macs'] = self.macs
process.process(self.data)
self.assertEqual(self.macs, sorted(i['mac'] for i in
self.data['interfaces'].values()))
self.assertEqual(self.macs, sorted(self.data['macs']))
pop_mock.assert_called_once_with(bmc_address=self.bmc_address,
mac=self.data['macs'])
cli.node.get.assert_called_once_with(self.uuid)
process_mock.assert_called_once_with(cli, cli.node.get.return_value,
self.data, pop_mock.return_value)
@prepare_mocks
def test_ports_for_inactive(self, cli, pop_mock, process_mock):
conf.CONF.set('discoverd', 'ports_for_inactive_interfaces', 'true')
process.process(self.data)
self.assertEqual(['em1', 'em2', 'em3'],
sorted(self.data['interfaces']))
self.assertEqual(self.all_macs, sorted(self.data['macs']))
pop_mock.assert_called_once_with(bmc_address=self.bmc_address,
mac=self.data['macs'])
cli.node.get.assert_called_once_with(self.uuid)
process_mock.assert_called_once_with(cli, cli.node.get.return_value,
self.data, pop_mock.return_value)
@prepare_mocks
def test_invalid_interfaces(self, cli, pop_mock, process_mock):
self.data['interfaces'] = {
'br1': {'mac': 'broken', 'ip': '1.2.0.1'},
'br2': {'mac': '', 'ip': '1.2.0.2'},
'br3': {},
}
process.process(self.data)
self.assertEqual({}, self.data['interfaces'])
self.assertEqual([], self.data['macs'])
pop_mock.assert_called_once_with(bmc_address=self.bmc_address,
mac=[])
cli.node.get.assert_called_once_with(self.uuid)
process_mock.assert_called_once_with(cli, cli.node.get.return_value,
self.data, pop_mock.return_value)
@prepare_mocks
def test_error(self, cli, pop_mock, process_mock):
self.data['error'] = 'BOOM'
self.assertRaisesRegexp(utils.Error,
'BOOM',
process.process, self.data)
self.assertFalse(process_mock.called)
@prepare_mocks
def test_missing_required(self, cli, pop_mock, process_mock):
del self.data['cpus']
self.assertRaisesRegexp(utils.Error,
'missing',
process.process, self.data)
self.assertFalse(process_mock.called)
@prepare_mocks
def test_not_found_in_cache(self, cli, pop_mock, process_mock):
pop_mock.side_effect = utils.Error('not found')
self.assertRaisesRegexp(utils.Error,
'not found',
process.process, self.data)
self.assertFalse(cli.node.get.called)
self.assertFalse(process_mock.called)
@prepare_mocks
def test_not_found_in_ironic(self, cli, pop_mock, process_mock):
cli.node.get.side_effect = exceptions.NotFound()
self.assertRaisesRegexp(utils.Error,
'not found',
process.process, self.data)
cli.node.get.assert_called_once_with(self.uuid)
self.assertFalse(process_mock.called)
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
def test_expected_exception(self, finished_mock, client_mock, pop_mock,
process_mock):
pop_mock.return_value = node_cache.NodeInfo(
uuid=self.node.uuid,
started_at=self.started_at)
process_mock.side_effect = utils.Error('boom')
self.assertRaisesRegexp(utils.Error, 'boom',
process.process, self.data)
finished_mock.assert_called_once_with(mock.ANY, error='boom')
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
def test_unexpected_exception(self, finished_mock, client_mock, pop_mock,
process_mock):
pop_mock.return_value = node_cache.NodeInfo(
uuid=self.node.uuid,
started_at=self.started_at)
process_mock.side_effect = RuntimeError('boom')
self.assertRaisesRegexp(utils.Error, 'Unexpected exception',
process.process, self.data)
finished_mock.assert_called_once_with(
mock.ANY,
error='Unexpected exception during processing')
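Before the _process_node tests below, it helps to see the overall shape of process() that the assertions above constrain. This is a sketch inferred from the tests, not the shipped module; run_pre_hooks is a hypothetical stand-in for the hook dispatch and the error texts are abbreviated:
from ironicclient import exceptions
from ironic_discoverd import node_cache
from ironic_discoverd import utils
def run_pre_hooks(node_info):
    # Hypothetical stand-in for the configured processing hooks
    # (ramdisk_error, scheduler, validate_interfaces, ...), which
    # normalise node_info['interfaces'] and fill node_info['macs'].
    pass
def process(node_info):
    if node_info.get('error'):
        raise utils.Error('Ramdisk reported error: %s' % node_info['error'])
    missing = [key for key in ('cpus', 'cpu_arch', 'memory_mb', 'local_gb')
               if key not in node_info]
    if missing:
        raise utils.Error('The following keys are missing: %s' % missing)
    run_pre_hooks(node_info)
    cached_node = node_cache.find_node(
        bmc_address=node_info.get('ipmi_address'),
        mac=node_info.get('macs'))
    ironic = utils.get_client()
    try:
        node = ironic.node.get(cached_node.uuid)
    except exceptions.NotFound:
        raise utils.Error('Node %s was not found in Ironic'
                          % cached_node.uuid)
    try:
        # _process_node is the function exercised by TestProcessNode below.
        return _process_node(ironic, node, node_info, cached_node)
    except utils.Error as exc:
        cached_node.finished(error=str(exc))
        raise
    except Exception:
        msg = 'Unexpected exception during processing'
        cached_node.finished(error=msg)
        raise utils.Error(msg)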
@mock.patch.object(eventlet.greenthread, 'spawn_n',
lambda f, *a: f(*a) and None)
@mock.patch.object(eventlet.greenthread, 'sleep', lambda _: None)
@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update')
@mock.patch.object(firewall, 'update_filters', autospec=True)
class TestProcessNode(BaseTest):
def setUp(self):
super(TestProcessNode, self).setUp()
conf.CONF.set('discoverd', 'processing_hooks',
'ramdisk_error,scheduler,validate_interfaces,example')
self.validate_attempts = 5
self.power_off_attempts = 2
self.data['macs'] = self.macs # validate_interfaces hook
self.cached_node = node_cache.NodeInfo(uuid=self.uuid,
started_at=self.started_at)
self.patch_before = [
{'op': 'add', 'path': '/properties/cpus', 'value': '2'},
{'op': 'add', 'path': '/properties/memory_mb', 'value': '1024'},
] # scheduler hook
self.patch_after = [
{'op': 'add', 'path': '/extra/newly_discovered', 'value': 'true'},
{'op': 'remove', 'path': '/extra/on_discovery'},
]
self.cli = mock.Mock()
self.cli.node.validate.side_effect = self.fake_validate()
self.cli.port.create.side_effect = self.ports
self.cli.node.update.return_value = self.node
# Simulate longer power off
self.cli.node.get.side_effect = (
[self.node] * self.power_off_attempts
+ [mock.Mock(power_state='power off')])
def fake_validate(self):
# Simulate long ramdisk task
for _ in range(self.validate_attempts):
yield mock.Mock(power={'result': False, 'reason': 'boom!'})
yield mock.Mock(power={'result': True})
def call(self):
return process._process_node(self.cli, self.node, self.data,
self.cached_node)
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
def test_ok(self, finished_mock, filters_mock, post_hook_mock):
self.call()
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[0])
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[1])
self.cli.node.update.assert_any_call(self.uuid, self.patch_before)
self.cli.node.update.assert_any_call(self.uuid, self.patch_after)
self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
self.assertFalse(self.cli.node.validate.called)
self.assertEqual(self.power_off_attempts + 1,
self.cli.node.get.call_count)
post_hook_mock.assert_called_once_with(self.node, mock.ANY,
self.data)
# List is built from a dict - order is undefined
self.assertEqual(self.ports, sorted(post_hook_mock.call_args[0][1],
key=lambda p: p.address))
finished_mock.assert_called_once_with(mock.ANY)
def test_overwrite(self, filters_mock, post_hook_mock):
conf.CONF.set('discoverd', 'overwrite_existing', 'true')
patch = [
{'path': '/properties/cpus', 'value': '2', 'op': 'add'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '20', 'op': 'add'}]
self.call()
self.cli.node.update.assert_any_call(self.uuid, patch)
self.cli.node.update.assert_any_call(self.uuid, self.patch_after)
def test_update_retry_on_conflict(self, filters_mock, post_hook_mock):
self.cli.node.update.side_effect = [exceptions.Conflict, self.node,
exceptions.Conflict, self.node]
self.call()
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[0])
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[1])
self.cli.node.update.assert_any_call(self.uuid, self.patch_before)
self.cli.node.update.assert_any_call(self.uuid, self.patch_after)
self.assertEqual(4, self.cli.node.update.call_count)
self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
def test_power_off_retry_on_conflict(self, filters_mock, post_hook_mock):
self.cli.node.set_power_state.side_effect = [exceptions.Conflict, None]
self.call()
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[0])
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[1])
self.cli.node.update.assert_any_call(self.uuid, self.patch_before)
self.cli.node.update.assert_any_call(self.uuid, self.patch_after)
self.cli.node.set_power_state.assert_called_with(self.uuid, 'off')
self.assertEqual(2, self.cli.node.set_power_state.call_count)
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
@mock.patch.object(time, 'time')
def test_power_off_timeout(self, time_mock, finished_mock, filters_mock,
post_hook_mock):
conf.CONF.set('discoverd', 'timeout', '100')
time_mock.return_value = self.started_at + 1000
self.cli.node.get.return_value = self.node
self.assertRaisesRegexp(utils.Error, 'power off', self.call)
self.cli.node.update.assert_called_once_with(self.uuid,
self.patch_before)
finished_mock.assert_called_once_with(
mock.ANY,
error='Timeout waiting for node uuid to power off '
'after introspection')
def test_port_failed(self, filters_mock, post_hook_mock):
self.ports[0] = exceptions.Conflict()
self.call()
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[0])
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[1])
self.cli.node.update.assert_any_call(self.uuid, self.patch_before)
self.cli.node.update.assert_any_call(self.uuid, self.patch_after)
post_hook_mock.assert_called_once_with(self.node, self.ports[1:],
self.data)
def test_hook_patches(self, filters_mock, post_hook_mock):
node_patches = ['node patch1', 'node patch2']
port_patch = ['port patch']
post_hook_mock.return_value = (node_patches,
{self.macs[1]: port_patch})
self.call()
self.cli.node.update.assert_any_call(self.uuid,
self.patch_before + node_patches)
self.cli.node.update.assert_any_call(self.uuid, self.patch_after)
self.cli.port.update.assert_called_once_with(self.ports[1].uuid,
port_patch)
def test_ipmi_setup_credentials(self, filters_mock, post_hook_mock):
self.cached_node.set_option('setup_ipmi_credentials', True)
self.call()
self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
self.cli.node.validate.assert_called_with(self.uuid)
self.assertEqual(self.validate_attempts + 1,
self.cli.node.validate.call_count)
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
@mock.patch.object(time, 'time')
def test_ipmi_setup_credentials_timeout(self, time_mock, finished_mock,
filters_mock, post_hook_mock):
conf.CONF.set('discoverd', 'timeout', '100')
self.cached_node.set_option('setup_ipmi_credentials', True)
time_mock.return_value = self.started_at + 1000
self.call()
self.cli.node.update.assert_called_once_with(self.uuid,
self.patch_before)
self.assertFalse(self.cli.node.set_power_state.called)
finished_mock.assert_called_once_with(
mock.ANY,
error='Timeout waiting for power credentials update of node uuid '
'after introspection')
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
def test_power_off_failed(self, finished_mock, filters_mock,
post_hook_mock):
self.cli.node.set_power_state.side_effect = RuntimeError('boom')
self.assertRaisesRegexp(utils.Error, 'Failed to power off',
self.call)
self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
self.cli.node.update.assert_called_once_with(self.uuid,
self.patch_before)
finished_mock.assert_called_once_with(
mock.ANY,
error='Failed to power off node uuid, check it\'s power management'
' configuration: boom')
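The power-off handling exercised by these tests reduces to a timed poll loop, roughly as follows; this is a reconstruction from the assertions, with the helper name, poll period, and conf accessor assumed:
import time
import eventlet
from ironic_discoverd import conf
from ironic_discoverd import utils
_POWER_CHECK_PERIOD = 5  # seconds between polls (assumed)
def _wait_for_power_off(ironic, node, started_at):
    timeout = conf.getint('discoverd', 'timeout')
    while time.time() - started_at < timeout:
        if ironic.node.get(node.uuid).power_state == 'power off':
            return
        eventlet.greenthread.sleep(_POWER_CHECK_PERIOD)
    raise utils.Error('Timeout waiting for node %s to power off after '
                      'introspection' % node.uuid)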

View File

@ -1,86 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import eventlet
from ironicclient import exceptions
from keystonemiddleware import auth_token
import mock
from ironic_discoverd import conf
from ironic_discoverd.test import base
from ironic_discoverd import utils
class TestCheckAuth(base.BaseTest):
def setUp(self):
super(TestCheckAuth, self).setUp()
conf.CONF.set('discoverd', 'authenticate', 'true')
@mock.patch.object(auth_token, 'AuthProtocol')
def test_middleware(self, mock_auth):
conf.CONF.set('discoverd', 'os_username', 'admin')
conf.CONF.set('discoverd', 'os_tenant_name', 'admin')
conf.CONF.set('discoverd', 'os_password', 'password')
app = mock.Mock(wsgi_app=mock.sentinel.app)
utils.add_auth_middleware(app)
mock_auth.assert_called_once_with(
mock.sentinel.app,
{'admin_user': 'admin', 'admin_tenant_name': 'admin',
'admin_password': 'password', 'delay_auth_decision': True,
'auth_uri': 'http://127.0.0.1:5000/v2.0',
'identity_uri': 'http://127.0.0.1:35357'}
)
def test_ok(self):
request = mock.Mock(headers={'X-Identity-Status': 'Confirmed',
'X-Roles': 'admin,member'})
utils.check_auth(request)
def test_invalid(self):
request = mock.Mock(headers={'X-Identity-Status': 'Invalid'})
self.assertRaises(utils.Error, utils.check_auth, request)
def test_not_admin(self):
request = mock.Mock(headers={'X-Identity-Status': 'Confirmed',
'X-Roles': 'member'})
self.assertRaises(utils.Error, utils.check_auth, request)
def test_disabled(self):
conf.CONF.set('discoverd', 'authenticate', 'false')
request = mock.Mock(headers={'X-Identity-Status': 'Invalid'})
utils.check_auth(request)
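For orientation, the helper these four tests constrain can be sketched as below. This is inferred from the assertions, not the shipped utils module; Error stands in for the module's own exception class:
from ironic_discoverd import conf
class Error(Exception):
    # Stand-in for utils.Error, which is HTTP-aware in the real module.
    pass
def check_auth(request):
    # No-op unless authentication is enabled in the configuration.
    if not conf.getboolean('discoverd', 'authenticate'):
        return
    if request.headers.get('X-Identity-Status') != 'Confirmed':
        raise Error('Authentication required')
    roles = (request.headers.get('X-Roles') or '').split(',')
    if 'admin' not in roles:
        raise Error('Access denied: admin role required')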
@mock.patch.object(eventlet.greenthread, 'sleep', lambda _: None)
class TestRetryOnConflict(unittest.TestCase):
def test_retry_on_conflict(self):
call = mock.Mock()
call.side_effect = ([exceptions.Conflict()] * (utils.RETRY_COUNT - 1)
+ [mock.sentinel.result])
res = utils.retry_on_conflict(call, 1, 2, x=3)
self.assertEqual(mock.sentinel.result, res)
call.assert_called_with(1, 2, x=3)
self.assertEqual(utils.RETRY_COUNT, call.call_count)
def test_retry_on_conflict_fail(self):
call = mock.Mock()
call.side_effect = ([exceptions.Conflict()] * (utils.RETRY_COUNT + 1)
+ [mock.sentinel.result])
self.assertRaises(exceptions.Conflict, utils.retry_on_conflict,
call, 1, 2, x=3)
call.assert_called_with(1, 2, x=3)
self.assertEqual(utils.RETRY_COUNT, call.call_count)
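These two tests fully determine the retry contract: at most RETRY_COUNT calls, a sleep between attempts, and the final Conflict re-raised. A minimal sketch honouring that contract (the real RETRY_COUNT value and delay are assumptions):
import eventlet
from ironicclient import exceptions
RETRY_COUNT = 12  # illustrative; the tests rely only on the name
RETRY_DELAY = 2   # seconds between attempts (assumed)
def retry_on_conflict(call, *args, **kwargs):
    # Retry `call` on HTTP 409 Conflict, making at most RETRY_COUNT calls.
    for attempt in range(RETRY_COUNT):
        try:
            return call(*args, **kwargs)
        except exceptions.Conflict:
            if attempt == RETRY_COUNT - 1:
                raise
        eventlet.greenthread.sleep(RETRY_DELAY)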

View File

@ -60,7 +60,7 @@ help:
$(ECHO) ===============================================================================
$(ECHO) make daisyclientrpm ... generate daisyclient rpms
$(ECHO) ===============================================================================
$(ECHO) make ironicdiscoverdrpm ... generate ironicdiscoverd rpms
$(ECHO) make daisydiscoverdrpm ... generate daisydiscoverd rpms
$(ECHO) ===============================================================================
$(ECHO) make pxe_server_install ... generate pxe_server_install rpms
$(ECHO) ===============================================================================
@ -93,8 +93,8 @@ daisyrpm:
daisyclientrpm:
$(MAKE) -C $(_TECS_RPM_PATH)/ daisyclient
ironicdiscoverdrpm:
$(MAKE) -C $(_TECS_RPM_PATH)/ ironic-discoverd
daisydiscoverdrpm:
$(MAKE) -C $(_TECS_RPM_PATH)/ daisy-discoverd
horizonrpm:
$(MAKE) -C $(_TECS_RPM_PATH)/ horizon

rpm/SPECS/daisy-discoverd.spec Executable file
View File

@ -0,0 +1,114 @@
%{?!_licensedir:%global license %%doc}
Name: daisy-discoverd
Summary: Hardware introspection service for Daisy
Version: 1.0.0
Release: %{_release}%{?dist}
License: ASL 2.0
Group: System Environment/Base
URL: http://www.daisycloud.org
Source0: https://pypi.python.org/packages/source/d/daisy-discoverd/daisy-discoverd-%{version}.tar.gz
Source1: daisy-discoverd.service
Source2: daisy-discoverd-dnsmasq.service
Source3: dnsmasq.conf
BuildArch: noarch
BuildRequires: python-setuptools
BuildRequires: python2-devel
BuildRequires: systemd
Requires: python-daisy-discoverd = %{version}-%{release}
Requires: dnsmasq
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
%prep
%autosetup -v -p 1 -n daisy-discoverd-%{version}
rm -rf *.egg-info
# Remove the requirements file so that pbr hooks don't add it
# to distutils requires_dist config
rm -rf {test-,}requirements.txt tools/{pip,test}-requires
%build
%{__python2} setup.py build
%install
%{__python2} setup.py install -O1 --skip-build --root=%{buildroot}
mkdir -p %{buildroot}%{_mandir}/man8
install -p -D -m 644 daisy-discoverd.8 %{buildroot}%{_mandir}/man8/
# install systemd scripts
mkdir -p %{buildroot}%{_unitdir}
install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir}
install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}
# configuration contains passwords, thus 640
install -p -D -m 640 example.conf %{buildroot}/%{_sysconfdir}/daisy-discoverd/discoverd.conf
install -p -D -m 644 %{SOURCE3} %{buildroot}/%{_sysconfdir}/daisy-discoverd/dnsmasq.conf
install -d -m 755 %{buildroot}%{_localstatedir}/log/daisy-discoverd
install -d -m 755 %{buildroot}%{_localstatedir}/lib/daisy-discoverd
install -d -m 755 %{buildroot}%{_localstatedir}/run/daisy-discoverd
%package -n python-daisy-discoverd
Summary: Hardware introspection service for Daisy - Python modules
Requires: python-eventlet
Requires: python-flask
Requires: python-keystoneclient
Requires: python-keystonemiddleware
Requires: python-requests
Requires: python-setuptools
Requires: python-six
%description -n python-daisy-discoverd
daisy-discoverd is a service for discovering hardware properties for a node
managed by the Daisy installer. Hardware introspection, or hardware properties
discovery, is the process of getting the hardware parameters required for
scheduling from a bare metal node, given its power management credentials
(e.g. IPMI address, user name and password).
This package contains Python modules and documentation.
%files -n python-daisy-discoverd
%doc README.rst CONTRIBUTING.rst
%license LICENSE
%{python2_sitelib}/daisy_discoverd*
%description
daisy-discoverd is a service for discovering hardware properties for a node
managed by the Daisy installer. Hardware introspection, or hardware properties
discovery, is the process of getting the hardware parameters required for
scheduling from a bare metal node, given its power management credentials
(e.g. IPMI address, user name and password).
This package contains main executable and service files.
%files
%license LICENSE
%config(noreplace) %attr(-,root,root) %{_sysconfdir}/daisy-discoverd
%{_bindir}/daisy-discoverd
%{_unitdir}/daisy-discoverd.service
%{_unitdir}/daisy-discoverd-dnsmasq.service
%doc %{_mandir}/man8/daisy-discoverd.8.gz
%dir %attr(0755, daisy, daisy) %{_localstatedir}/log/daisy-discoverd
%dir %attr(0755, daisy, daisy) %{_localstatedir}/lib/daisy-discoverd
%dir %attr(0755, daisy, daisy) %{_localstatedir}/run/daisy-discoverd
%post
%systemd_post daisy-discoverd.service
%systemd_post daisy-discoverd-dnsmasq.service
%preun
%systemd_preun daisy-discoverd.service
%systemd_preun daisy-discoverd-dnsmasq.service
%postun
%systemd_postun_with_restart daisy-discoverd.service
%systemd_postun_with_restart daisy-discoverd-dnsmasq.service

View File

@ -1,144 +0,0 @@
%{?!_licensedir:%global license %%doc}
Name: openstack-ironic-discoverd
Summary: Hardware introspection service for OpenStack Ironic
Version: 1.0.0
Release: %{_release}%{?dist}
License: ASL 2.0
Group: System Environment/Base
URL: https://pypi.python.org/pypi/ironic-discoverd
Source0: https://pypi.python.org/packages/source/i/ironic-discoverd/ironic-discoverd-%{version}.tar.gz
Source1: openstack-ironic-discoverd.service
Source2: openstack-ironic-discoverd-dnsmasq.service
Source3: dnsmasq.conf
BuildArch: noarch
BuildRequires: python-setuptools
BuildRequires: python2-devel
BuildRequires: systemd
Requires: python-ironic-discoverd = %{version}-%{release}
Requires: dnsmasq
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
%prep
%autosetup -v -p 1 -n ironic-discoverd-%{version}
rm -rf *.egg-info
# Remove the requirements file so that pbr hooks don't add it
# to distutils requires_dist config
rm -rf {test-,}requirements.txt tools/{pip,test}-requires
%build
%{__python2} setup.py build
%install
%{__python2} setup.py install -O1 --skip-build --root=%{buildroot}
mkdir -p %{buildroot}%{_mandir}/man8
install -p -D -m 644 ironic-discoverd.8 %{buildroot}%{_mandir}/man8/
# install systemd scripts
mkdir -p %{buildroot}%{_unitdir}
install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir}
install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}
# configuration contains passwords, thus 640
install -p -D -m 640 example.conf %{buildroot}/%{_sysconfdir}/ironic-discoverd/discoverd.conf
install -p -D -m 644 %{SOURCE3} %{buildroot}/%{_sysconfdir}/ironic-discoverd/dnsmasq.conf
%package -n python-ironic-discoverd
Summary: Hardware introspection service for OpenStack Ironic - Python modules
Requires: python-eventlet
Requires: python-flask
Requires: python-keystoneclient
Requires: python-keystonemiddleware
Requires: python-requests
Requires: python-setuptools
Requires: python-six
Conflicts: openstack-ironic-discoverd < 1.0.0-1
%description -n python-ironic-discoverd
ironic-discoverd is a service for discovering hardware properties for a node
managed by OpenStack Ironic. Hardware introspection or hardware properties
discovery is a process of getting hardware parameters required for scheduling
from a bare metal node, given it's power management credentials (e.g. IPMI
address, user name and password).
This package contains Python modules and documentation.
%files -n python-ironic-discoverd
%doc README.rst CONTRIBUTING.rst
%license LICENSE
%{python2_sitelib}/ironic_discoverd*
%description
ironic-discoverd is a service for discovering hardware properties for a node
managed by OpenStack Ironic. Hardware introspection or hardware properties
discovery is a process of getting hardware parameters required for scheduling
from a bare metal node, given it's power management credentials (e.g. IPMI
address, user name and password).
This package contains main executable and service files.
%files
%license LICENSE
%config(noreplace) %attr(-,root,root) %{_sysconfdir}/ironic-discoverd
%{_bindir}/ironic-discoverd
%{_unitdir}/openstack-ironic-discoverd.service
%{_unitdir}/openstack-ironic-discoverd-dnsmasq.service
%doc %{_mandir}/man8/ironic-discoverd.8.gz
%post
%systemd_post openstack-ironic-discoverd.service
%systemd_post openstack-ironic-discoverd-dnsmasq.service
%preun
%systemd_preun openstack-ironic-discoverd.service
%systemd_preun openstack-ironic-discoverd-dnsmasq.service
%postun
%systemd_postun_with_restart openstack-ironic-discoverd.service
%systemd_postun_with_restart openstack-ironic-discoverd-dnsmasq.service
%changelog
* Tue Mar 3 2015 Dmitry Tantsur <dtantsur@redhat.com> - 1.0.2-1
- New upstream bugfix release: 1.0.2
- Remove requirements.txt before building
- Dependency on python-keystonemiddleware
* Tue Feb 3 2015 Dmitry Tantsur <dtantsur@redhat.com> - 1.0.0-1
- New upstream release: 1.0.0
- Set default database location to simplify upgrades
- Split into two packages: the service and Python modules
* Thu Dec 4 2014 Dmitry Tantsur <dtantsur@redhat.com> - 0.2.5-1
- Upstream bugfix release 0.2.5
- Install CONTRIBUTING.rst
* Fri Nov 14 2014 Dmitry Tantsur <dtantsur@redhat.com> - 0.2.4-1
- Upstream bugfix release 0.2.4
Only cosmetic code update, reflects move to StackForge and Launchpad.
- Take description from upstream README.
* Mon Oct 27 2014 Dmitry Tantsur <dtantsur@redhat.com> - 0.2.2-1
- Upstream bugfix release 0.2.2
- Sync all descriptions with upstream variant
* Thu Oct 23 2014 Dmitry Tantsur <dtantsur@redhat.com> - 0.2.1-2
- Require dnsmasq
- Add openstack-ironic-discoverd-dnsmasq.service - sample service for dnsmasq
- Updated description to upstream version
* Thu Oct 16 2014 Dmitry Tantsur <dtantsur@redhat.com> - 0.2.1-1
- Upstream bugfix release
* Wed Oct 8 2014 Dmitry Tantsur <dtantsur@redhat.com> - 0.2.0-1
- Initial package build

View File

@ -17,7 +17,7 @@ rpmforce:
all:clean rpms
rpms:daisy daisyclient ironic-discoverd horizon pxe_server_install
rpms:daisy daisyclient daisy-discoverd horizon pxe_server_install
clean:rpmforce
$(RM) $(_TECS_RPM_PATH)/SOURCES/*
@ -46,12 +46,12 @@ daisyclient:rpmforce
$(RM) $(_TECS_RPM_PATH)/SOURCES/python-$@-$(_VER_DAISYCLIENT_REL)
$(RM) $(_TECS_RPM_PATH)/BUILD/python-$@-$(_VER_DAISYCLIENT_REL)
ironic-discoverd:rpmforce
daisy-discoverd:rpmforce
$(CP) $(_TECS_TOOLS_PATH)/daisy-utils/* $(_TECS_RPM_PATH)/SOURCES
$(RM) $(_TECS_RPM_PATH)/SOURCES/$@-$(_VER_IRONICDISCOVERD_REL)
$(LN) $(_TECS_CONTRIB_PATH)/ironic/ $(_TECS_RPM_PATH)/SOURCES/$@-$(_VER_IRONICDISCOVERD_REL)
$(LN) $(_TECS_CODE_PATH)/daisy-discoverd/ $(_TECS_RPM_PATH)/SOURCES/$@-$(_VER_IRONICDISCOVERD_REL)
@cd $(_TECS_RPM_PATH)/SOURCES; $(TARC) $(_TECS_RPM_PATH)/SOURCES/$@-$(_VER_IRONICDISCOVERD_REL).tar.gz --exclude=*.svn $@-$(_VER_IRONICDISCOVERD_REL)/*; cd -
$(RPMBUILD) --rmsource $(_TECS_RPM_PATH)/SPECS/openstack-$@.spec
$(RPMBUILD) --rmsource $(_TECS_RPM_PATH)/SPECS/$@.spec
$(RM) $(_TECS_RPM_PATH)/SOURCES/python-$@-$(_VER_IRONICDISCOVERD_REL)
$(RM) $(_TECS_RPM_PATH)/BUILD/python-$@-$(_VER_IRONICDISCOVERD_REL)

View File

@ -128,6 +128,6 @@ import_exceptions = tempest.services
# E123 skipped because it is ignored by default in the default pep8
# E129 skipped because it is too limiting when combined with other rules
# Skipped because of new hacking 0.9: H405
ignore = E125,E123,E129,H404,H405
ignore = E125,E123,E129,H404,H405,F999
show-source = True
exclude = .git,.venv,.tox,dist,doc,openstack,*egg

View File

@ -0,0 +1,11 @@
[Unit]
Description=PXE boot dnsmasq service for daisy-discoverd
After=openvswitch.service
[Service]
Type=forking
ExecStart=/sbin/dnsmasq --conf-file=/etc/daisy-discoverd/dnsmasq.conf
[Install]
WantedBy=multi-user.target
Alias=daisy-discoverd-dnsmasq.service

View File

@ -2,9 +2,9 @@
Description=Hardware introspection service for OpenStack Ironic
[Service]
ExecStart=/usr/bin/ironic-discoverd --config-file /etc/ironic-discoverd/discoverd.conf
ExecStart=/usr/bin/daisy-discoverd --config-file /etc/daisy-discoverd/discoverd.conf
User=root
[Install]
WantedBy=multi-user.target
Alias=openstack-ironic-discoverd.service
Alias=daisy-discoverd.service

View File

@ -1,11 +0,0 @@
[Unit]
Description=PXE boot dnsmasq service for ironic-discoverd
After=openvswitch.service
[Service]
Type=forking
ExecStart=/sbin/dnsmasq --conf-file=/etc/ironic-discoverd/dnsmasq.conf
[Install]
WantedBy=multi-user.target
Alias=openstack-ironic-discoverd-dnsmasq.service

View File

@ -427,9 +427,7 @@ function stop_service_all
{
service_stop "daisy-api"
service_stop "daisy-registry"
service_stop "openstack-ironic-api"
service_stop "openstack-ironic-conductor"
service_stop "openstack-ironic-discoverd"
service_stop "daisy-discoverd"
service_stop "openstack-keystone"
service_stop "daisy-orchestration"
service_stop "daisy-auto-backup"
@ -442,9 +440,7 @@ function start_service_all
service_start "openstack-keystone"
service_start "daisy-api"
service_start "daisy-registry"
service_start "openstack-ironic-api"
service_start "openstack-ironic-conductor"
service_start "openstack-ironic-discoverd"
service_start "daisy-discoverd"
service_start "daisy-orchestration"
service_start "daisy-auto-backup"
}

View File

@ -703,7 +703,7 @@ EOF
fi
}
function config_ironic_discoverd
function config_daisy_discoverd
{
local file=$1
local ip=$2
@ -718,7 +718,6 @@ function config_ironic_discoverd
[ ! -e $file ] && { write_install_log "Error:$file does not exist"; exit 1;}
openstack-config --set "$file" discoverd "os_auth_token" "admin"
openstack-config --set "$file" discoverd "ironic_url" "http://$ip:6385/v1"
openstack-config --set "$file" discoverd "manage_firewall " "false"
openstack-config --set "$file" discoverd "daisy_url" "http://$ip:$bind_port"
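The three openstack-config calls above amount to writing a [discoverd] section into /etc/daisy-discoverd/discoverd.conf. A rough Python equivalent, for reference only (the ip and bind_port values are placeholders, not this script's real inputs):
try:
    import configparser                  # Python 3
except ImportError:
    import ConfigParser as configparser  # Python 2, which this era targets
CONF_FILE = '/etc/daisy-discoverd/discoverd.conf'
ip, bind_port = '10.20.0.2', '19292'     # placeholders for illustration
cfg = configparser.RawConfigParser()
cfg.read(CONF_FILE)
if not cfg.has_section('discoverd'):
    cfg.add_section('discoverd')
cfg.set('discoverd', 'os_auth_token', 'admin')
cfg.set('discoverd', 'ironic_url', 'http://%s:6385/v1' % ip)
cfg.set('discoverd', 'daisy_url', 'http://%s:%s' % (ip, bind_port))
with open(CONF_FILE, 'w') as fp:
    cfg.write(fp)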
}
@ -739,7 +738,7 @@ function daisyrc_admin
function config_pxe
{
local config_file="/var/log/ironic/pxe.json"
local config_file="/var/lib/daisy/pxe.json"
if [ ! -e $config_file ];then
touch $config_file
fi
@ -774,7 +773,7 @@ function build_pxe_server
get_config "$config_file" client_ip_end
client_ip_end_params=$config_answer
config_pxe $pxe_bond_name yes $ip_address_params $net_mask_params $client_ip_begin_params $client_ip_end_params
/usr/bin/pxe_server_install /var/log/ironic/pxe.json >> $install_logfile 2>&1
/usr/bin/pxe_server_install /var/lib/daisy/pxe.json >> $install_logfile 2>&1
# write the dhcp cidr into the system's DEPLOYMENT network so that daisy
# can decide which mac is the pxe mac
if [ "$ip_address_params" -a "$net_mask_params" ];then

View File

@ -11,13 +11,12 @@ cd $_INSTALL_INTERFACE_DIR
daisy_file="/etc/daisy/daisy-registry.conf"
db_name="daisy"
ironic_name="ironic"
keystone_db_name="keystone"
keystone_admin_token="e93e9abf42f84be48e0996e5bd44f096"
daisy_install="/var/log/daisy/daisy_install"
installdatefile=`date -d "today" +"%Y%m%d-%H%M%S"`
install_logfile=$daisy_install/daisyinstall_$installdatefile.log
discover_logfile="/var/log/ironic"
discover_logfile="/var/log/daisy-discoverd"
# the output is both displayed on the screen and written to the specified file
function write_install_log
{
@ -64,11 +63,11 @@ function all_install
write_install_log "install python-openstackclient rpm"
install_rpm_by_yum "python-openstackclient"
write_install_log "install ironic-discoverd depend rpm"
write_install_log "install daisy-discoverd depend rpm"
install_rpm_by_yum "python-flask"
write_install_log "install ironic-discoverd rpm"
install_rpm_by_daisy_yum "openstack-ironic-discoverd python-ironic-discoverd"
write_install_log "install daisy-discoverd rpm"
install_rpm_by_daisy_yum "daisy-discoverd python-daisy-discoverd"
write_install_log "install daisy rpm"
install_rpm_by_yum "daisy"
@ -228,8 +227,8 @@ function all_install
config_rabbitmq_env
config_rabbitmq_config
#Configure ironic related configuration items
config_ironic_discoverd "/etc/ironic-discoverd/discoverd.conf" "$public_ip"
#Configure daisy-discoverd related configuration items
config_daisy_discoverd "/etc/daisy-discoverd/discoverd.conf" "$public_ip"
#modify clustershell configuration
clustershell_conf="/etc/clustershell/clush.conf"
@ -245,8 +244,8 @@ function all_install
systemctl start daisy-registry.service
[ "$?" -ne 0 ] && { write_install_log "Error:systemctl start daisy-registry.service failed"; exit 1; }
systemctl start openstack-ironic-discoverd.service
[ "$?" -ne 0 ] && { write_install_log "Error:systemctl restart openstack-ironic-discoverd.service failed"; exit 1; }
systemctl start daisy-discoverd.service
[ "$?" -ne 0 ] && { write_install_log "Error:systemctl restart daisy-discoverd.service failed"; exit 1; }
systemctl start daisy-orchestration.service
[ "$?" -ne 0 ] && { write_install_log "Error:systemctl start daisy-orchestration.service failed"; exit 1; }
@ -259,7 +258,7 @@ function all_install
systemctl enable daisy-registry.service >> $install_logfile 2>&1
systemctl enable daisy-orchestration.service >> $install_logfile 2>&1
systemctl enable daisy-auto-backup.service >> $install_logfile 2>&1
systemctl enable openstack-ironic-discoverd.service >> $install_logfile 2>&1
systemctl enable daisy-discoverd.service >> $install_logfile 2>&1
#init daisy
daisy_init_func

View File

@ -19,13 +19,13 @@ function uninstall_daisy
stop_service_all
remove_rpms_by_yum "python-django-horizon daisy-dashboard"
remove_rpms_by_yum "daisy python-daisyclient python-daisy"
remove_rpms_by_yum "openstack-ironic-discoverd python-ironic-discoverd"
remove_rpms_by_yum "daisy-discoverd python-daisy-discoverd"
remove_rpms_by_yum "jasmine"
rpm -e pxe_server_install
for i in `ps -elf | grep daisy-api |grep -v grep | awk -F ' ' '{print $4}'`;do kill -9 $i;done
for j in `ps -elf | grep daisy-registry |grep -v grep | awk -F ' ' '{print $4}'`;do kill -9 $j;done
for j in `ps -elf | grep daisy-orchestration |grep -v grep | awk -F ' ' '{print $4}'`;do kill -9 $j;done
for j in `ps -elf | grep ironic-discoverd |grep -v grep | awk -F ' ' '{print $4}'`;do kill -9 $j;done
for j in `ps -elf | grep daisy-discoverd |grep -v grep | awk -F ' ' '{print $4}'`;do kill -9 $j;done
# delete keystone database
delete_keystone_sql="drop database IF EXISTS keystone"
write_install_log "delect keystone database in mariadb"
@ -51,11 +51,12 @@ function uninstall_daisy
docker rmi $image_id
fi
rm -rf /etc/daisy
rm -rf /etc/ironic-discoverd
rm -rf /etc/daisy-discoverd
rm -rf /etc/sudoers.d/daisy
rm -rf /var/lib/daisy
rm -rf /var/log/daisy
rm -rf /var/log/ironic/*
rm -rf /var/lib/daisy-discoverd
rm -rf /var/log/daisy-discoverd
rm -rf /root/daisyrc_admin
echo "Finish clean daisy!"
}

View File

@ -1,21 +1,20 @@
#!/bin/bash
#
if [ ! "$_UPGRADE_FUNC_FILE" ];then
# include common header files
_UPGRADE_FUNC_DIR=`pwd`
cd $_UPGRADE_FUNC_DIR/../common/
. daisy_common_func.sh
cd $_UPGRADE_FUNC_DIR
# print the message to the screen and also append it to the specified log file
function write_upgrade_log
{
local promt="$1"
echo -e "$promt"
echo -e "`date -d today +"%Y-%m-%d %H:%M:%S"` $promt" >> $logfile
}
# get the list of all currently installed daisy-related services
function get_daisy_services
{
all_daisy_services="
@ -24,14 +23,9 @@ function get_daisy_services
daisy
python-daisy
python-daisyclient
openstack-ironic-api
openstack-ironic-common
openstack-ironic-conductor
python-ironicclient
openstack-ironic-discoverd
python-ironic-discoverd
daisy-discoverd
python-daisy-discoverd
pxe_server_install
pxe_docker_install
python-django-horizon
daisy-dashboard
"

View File

@ -1,5 +1,5 @@
#!/bin/bash
# common functions and variables related to yum installation
if [ ! "$_UPGRADE_INTERFACE_FILE" ];then
_UPGRADE_INTERFACE_DIR=`pwd`
cd $_UPGRADE_INTERFACE_DIR/../common/
@ -29,14 +29,10 @@ function upgrade_daisy
write_upgrade_log "wait to stop daisy services..."
stop_service_all
# get all currently installed daisy service packages
get_daisy_services
# upgrade the daisy service packages
upgrade_rpms_by_yum "$all_daisy_services"
# sync the daisy database
which daisy-manage >> $logfile 2>&1
if [ "$?" == 0 ];then
write_upgrade_log "start daisy-manage db_sync..."
@ -44,15 +40,6 @@ function upgrade_daisy
[ "$?" -ne 0 ] && { write_upgrade_log "Error:daisy-manage db_sync command faild"; exit 1; }
fi
# sync the ironic database
which ironic-dbsync >> $logfile 2>&1
if [ "$?" == 0 ];then
write_upgrade_log "start ironic-dbsync ..."
ironic-dbsync --config-file /etc/ironic/ironic.conf
[ "$?" -ne 0 ] && { write_upgrade_log "Error:ironic-dbsync --config-file /etc/ironic/ironic.conf faild"; exit 1; }
fi
# sync the keystone database
which keystone-manage >> $logfile 2>&1
if [ "$?" == 0 ];then
write_upgrade_log "start keystone-manage db_sync..."