Merge "Migrate to oslo_db"

Jenkins 2015-07-08 12:58:44 +00:00 committed by Gerrit Code Review
commit 5410e163c2
10 changed files with 429 additions and 245 deletions

View File

@@ -135,7 +135,7 @@ function configure_inspector {
inspector_iniset firewall manage_firewall $IRONIC_INSPECTOR_MANAGE_FIREWALL
inspector_iniset firewall dnsmasq_interface $IRONIC_INSPECTOR_INTERFACE
inspector_iniset DEFAULT database $IRONIC_INSPECTOR_DATA_DIR/inspector.sqlite
inspector_iniset database connection sqlite:///$IRONIC_INSPECTOR_DATA_DIR/inspector.sqlite
iniset "$IRONIC_CONF_FILE" inspector enabled True
iniset "$IRONIC_CONF_FILE" inspector service_url $IRONIC_INSPECTOR_URI

View File

@@ -24,11 +24,6 @@
# Its value may be silently ignored in the future.
#authenticate = <None>
# SQLite3 database to store nodes under introspection, required. Do
# not use :memory: here, it won't work. (string value)
# Deprecated group/name - [discoverd]/database
#database =
# Debug mode enabled/disabled. (boolean value)
# Deprecated group/name - [discoverd]/debug
#debug = false
@@ -62,6 +57,126 @@
#max_concurrency = 1000
[database]
#
# From oslo.db
#
# The file name to use with SQLite. (string value)
# Deprecated group/name - [DEFAULT]/sqlite_db
#sqlite_db = oslo.sqlite
# If True, SQLite uses synchronous mode. (boolean value)
# Deprecated group/name - [DEFAULT]/sqlite_synchronous
#sqlite_synchronous = true
# The back end to use for the database. (string value)
# Deprecated group/name - [DEFAULT]/db_backend
#backend = sqlalchemy
# The SQLAlchemy connection string to use to connect to the database.
# (string value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection = <None>
# The SQLAlchemy connection string to use to connect to the slave
# database. (string value)
#slave_connection = <None>
# The SQL mode to be used for MySQL sessions. This option, including
# the default, overrides any server-set SQL mode. To use whatever SQL
# mode is set by the server configuration, set this to no value.
# Example: mysql_sql_mode= (string value)
#mysql_sql_mode = TRADITIONAL
# Timeout before idle SQL connections are reaped. (integer value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#idle_timeout = 3600
# Minimum number of SQL connections to keep open in a pool. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size = <None>
# Maximum number of database connection retries during startup. Set to
# -1 to specify an infinite retry count. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries = 10
# Interval between retries of opening a SQL connection. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval = 10
# If set, use this value for max_overflow with SQLAlchemy. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow = <None>
# Verbosity of SQL debugging information: 0=None, 100=Everything.
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug = 0
# Add Python stack traces to SQL as comment strings. (boolean value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace = false
# If set, use this value for pool_timeout with SQLAlchemy. (integer
# value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout = <None>
# Enable the experimental use of database reconnect on connection
# lost. (boolean value)
#use_db_reconnect = false
# Seconds between retries of a database transaction. (integer value)
#db_retry_interval = 1
# If True, increases the interval between retries of a database
# operation up to db_max_retry_interval. (boolean value)
#db_inc_retry_interval = true
# If db_inc_retry_interval is set, the maximum seconds between retries
# of a database operation. (integer value)
#db_max_retry_interval = 10
# Maximum retries in case of connection error or deadlock error before
# error is raised. Set to -1 to specify an infinite retry count.
# (integer value)
#db_max_retries = 20
[discoverd]
#
# From ironic_inspector
#
# SQLite3 database to store nodes under introspection, required. Do
# not use :memory: here, it won't work. DEPRECATED: use
# [database]/connection. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#database =
[firewall]
#
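These [database] options are not read by hand; they are consumed through oslo.db's EngineFacade. A minimal sketch of the consumption path, using only calls that appear later in this commit:

    from oslo_config import cfg
    from oslo_db.sqlalchemy import session as db_session

    # EngineFacade reads [database]/connection plus the pool options above.
    facade = db_session.EngineFacade.from_config(cfg.CONF)
    engine = facade.get_engine()
    session = facade.get_session()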

View File

@@ -150,6 +150,15 @@ PROCESSING_OPTS = [
]
DISCOVERD_OPTS = [
cfg.StrOpt('database',
default='',
help='SQLite3 database to store nodes under introspection, '
'required. Do not use :memory: here, it won\'t work. '
'DEPRECATED: use [database]/connection.',
deprecated_for_removal=True),
]
SERVICE_OPTS = [
cfg.StrOpt('listen_address',
default='0.0.0.0',
@@ -170,11 +179,6 @@ SERVICE_OPTS = [
help='DEPRECATED: use auth_strategy.',
deprecated_group='discoverd',
deprecated_for_removal=True),
cfg.StrOpt('database',
default='',
help='SQLite3 database to store nodes under introspection, '
'required. Do not use :memory: here, it won\'t work.',
deprecated_group='discoverd'),
cfg.BoolOpt('debug',
default=False,
help='Debug mode enabled/disabled.',
@@ -214,6 +218,7 @@ cfg.CONF.register_opts(SERVICE_OPTS)
cfg.CONF.register_opts(FIREWALL_OPTS, group='firewall')
cfg.CONF.register_opts(PROCESSING_OPTS, group='processing')
cfg.CONF.register_opts(IRONIC_OPTS, group='ironic')
cfg.CONF.register_opts(DISCOVERD_OPTS, group='discoverd')
def list_opts():
@@ -222,4 +227,5 @@ def list_opts():
('firewall', FIREWALL_OPTS),
('ironic', IRONIC_OPTS),
('processing', PROCESSING_OPTS),
('discoverd', DISCOVERD_OPTS),
]
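The deprecation dance above keeps the old [discoverd]/database setting readable while warning operators. A sketch of the same pattern in isolation (group and option names taken from this diff, the fallback mirrors init() below):

    from oslo_config import cfg

    conf = cfg.ConfigOpts()
    conf.register_opts(
        [cfg.StrOpt('database', default='',
                    help='DEPRECATED: use [database]/connection.',
                    deprecated_for_removal=True)],
        group='discoverd')
    # Reading the option still works; oslo.config emits a deprecation warning.
    if conf.discoverd.database:
        connection = 'sqlite:///%s' % conf.discoverd.database.strip()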

View File

@@ -0,0 +1,47 @@
# Copyright 2015 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for inspection data.
"""
from oslo_db.sqlalchemy import models
from sqlalchemy import Column, Float, ForeignKey, String, Text
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base(cls=models.ModelBase)
class Node(Base):
__tablename__ = 'nodes'
uuid = Column(String(36), primary_key=True)
started_at = Column(Float, nullable=True)
finished_at = Column(Float, nullable=True)
error = Column(Text, nullable=True)
class Attribute(Base):
__tablename__ = 'attributes'
name = Column(Text, primary_key=True)
value = Column(Text, primary_key=True)
uuid = Column(String(36), ForeignKey('nodes.uuid'))
class Option(Base):
__tablename__ = 'options'
uuid = Column(String(36), ForeignKey('nodes.uuid'), primary_key=True)
name = Column(Text, primary_key=True)
value = Column(Text)
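A minimal sketch exercising these models against a throwaway in-memory engine (plain SQLAlchemy; the UUID value is illustrative):

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite://')   # in-memory database
    Base.metadata.create_all(engine)      # same call init() makes in node_cache
    session = sessionmaker(bind=engine)()
    session.add(Node(uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e', started_at=42.0))
    session.add(Option(uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e',
                       name='foo', value='"bar"'))
    session.commit()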

View File

@@ -16,36 +16,24 @@
import contextlib
import json
import logging
import os
import sqlite3
import sys
import time
from ironicclient import exceptions
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db import options as db_opts
from oslo_db.sqlalchemy import session as db_session
from sqlalchemy import text
from ironic_inspector.common.i18n import _, _LC, _LE, _LW
from ironic_inspector.common.i18n import _, _LE, _LW
from ironic_inspector import models
from ironic_inspector import utils
CONF = cfg.CONF
LOG = logging.getLogger("ironic_inspector.node_cache")
_DB_NAME = None
_SCHEMA = """
create table if not exists nodes
(uuid text primary key, started_at real, finished_at real, error text);
create table if not exists attributes
(name text, value text, uuid text,
primary key (name, value),
foreign key (uuid) references nodes);
create table if not exists options
(uuid text, name text, value text,
primary key (uuid, name),
foreign key (uuid) references nodes);
"""
_FACADE = None
MACS_ATTRIBUTE = 'mac'
@@ -70,9 +58,9 @@ class NodeInfo(object):
def options(self):
"""Node introspection options as a dict."""
if self._options is None:
rows = _db().execute('select name, value from options '
'where uuid=?', (self.uuid,))
self._options = {row['name']: json.loads(row['value'])
rows = model_query(models.Option).filter_by(
uuid=self.uuid)
self._options = {row.name: json.loads(row.value)
for row in rows}
return self._options
@@ -80,11 +68,11 @@
"""Set an option for a node."""
encoded = json.dumps(value)
self.options[name] = value
with _db() as db:
db.execute('delete from options where uuid=? and name=?',
(self.uuid, name))
db.execute('insert into options(uuid, name, value) values(?,?,?)',
(self.uuid, name, encoded))
with _ensure_transaction() as session:
model_query(models.Option, session=session).filter_by(
uuid=self.uuid, name=name).delete()
models.Option(uuid=self.uuid, name=name, value=encoded).save(
session)
def finished(self, error=None):
"""Record status for this node.
@@ -96,29 +84,32 @@ class NodeInfo(object):
self.finished_at = time.time()
self.error = error
with _db() as db:
db.execute('update nodes set finished_at=?, error=? where uuid=?',
(self.finished_at, error, self.uuid))
db.execute("delete from attributes where uuid=?", (self.uuid,))
db.execute("delete from options where uuid=?", (self.uuid,))
with _ensure_transaction() as session:
model_query(models.Node, session=session).filter_by(
uuid=self.uuid).update(
{'finished_at': self.finished_at, 'error': error})
model_query(models.Attribute, session=session).filter_by(
uuid=self.uuid).delete()
model_query(models.Option, session=session).filter_by(
uuid=self.uuid).delete()
def add_attribute(self, name, value, database=None):
def add_attribute(self, name, value, session=None):
"""Store look up attribute for a node in the database.
:param name: attribute name
:param value: attribute value or list of possible values
:param database: optional existing database connection
:param session: optional existing database session
:raises: Error if attributes values are already in database
"""
if not isinstance(value, list):
value = [value]
with _maybe_db(database) as db:
with _ensure_transaction(session) as session:
try:
db.executemany("insert into attributes(name, value, uuid) "
"values(?, ?, ?)",
[(name, v, self.uuid) for v in value])
except sqlite3.IntegrityError as exc:
for v in value:
models.Attribute(name=name, value=v, uuid=self.uuid).save(
session)
except db_exc.DBDuplicateEntry as exc:
LOG.error(_LE('Database integrity error %s during '
'adding attributes'), exc)
raise utils.Error(_(
@@ -186,35 +177,49 @@ class NodeInfo(object):
def init():
"""Initialize the database."""
global _DB_NAME
_DB_NAME = CONF.database.strip()
if not _DB_NAME:
LOG.critical(_LC('Configuration option inspector.database'
' should be set'))
sys.exit(1)
db_dir = os.path.dirname(_DB_NAME)
if db_dir and not os.path.exists(db_dir):
os.makedirs(db_dir)
sqlite3.connect(_DB_NAME).executescript(_SCHEMA)
if CONF.discoverd.database:
db_opts.set_defaults(CONF,
connection='sqlite:///%s' %
str(CONF.discoverd.database).strip())
# TODO(yuikotakada) alembic migration
engine = get_engine()
models.Base.metadata.create_all(engine)
return get_session()
def _db():
if _DB_NAME is None:
init()
conn = sqlite3.connect(_DB_NAME)
conn.row_factory = sqlite3.Row
return conn
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade.from_config(cfg.CONF)
return _FACADE
@contextlib.contextmanager
def _maybe_db(db=None):
if db is None:
with _db() as db:
yield db
else:
yield db
def _ensure_transaction(session=None):
session = session or get_session()
with session.begin(subtransactions=True):
yield session
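Taken together, the helpers above give one composable pattern: every public call either opens its own transaction or joins the caller's via the optional session argument. A module-level restatement of set_option() above, showing the intended usage:

    def set_option(uuid, name, value):
        encoded = json.dumps(value)
        with _ensure_transaction() as session:
            # delete-then-insert inside one transaction; nested helpers that
            # receive this session join it (subtransactions=True) instead of
            # committing early.
            model_query(models.Option, session=session).filter_by(
                uuid=uuid, name=name).delete()
            models.Option(uuid=uuid, name=name, value=encoded).save(session)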
def add_node(uuid, **attributes):
@@ -228,27 +233,29 @@ def add_node(uuid, **attributes):
:returns: NodeInfo
"""
started_at = time.time()
with _db() as db:
db.execute("delete from nodes where uuid=?", (uuid,))
db.execute("delete from attributes where uuid=?", (uuid,))
db.execute("delete from options where uuid=?", (uuid,))
with _ensure_transaction() as session:
(model_query(models.Node, session=session).filter_by(uuid=uuid).
delete())
(model_query(models.Attribute, session=session).filter_by(uuid=uuid).
delete(synchronize_session=False))
(model_query(models.Option, session=session).filter_by(uuid=uuid).
delete())
db.execute("insert into nodes(uuid, started_at) "
"values(?, ?)", (uuid, started_at))
models.Node(uuid=uuid, started_at=started_at).save(session)
node_info = NodeInfo(uuid=uuid, started_at=started_at)
for (name, value) in attributes.items():
if not value:
continue
node_info.add_attribute(name, value, database=db)
node_info.add_attribute(name, value, session=session)
return node_info
def active_macs():
"""List all MAC's that are on introspection right now."""
return {x[0] for x in _db().execute("select value from attributes "
"where name=?", (MACS_ATTRIBUTE,))}
return ({x.value for x in model_query(models.Attribute.value).
filter_by(name=MACS_ATTRIBUTE)})
def get_node(uuid):
@@ -257,7 +264,7 @@ def get_node(uuid):
:param uuid: node UUID.
:returns: structure NodeInfo.
"""
row = _db().execute('select * from nodes where uuid=?', (uuid,)).fetchone()
row = model_query(models.Node).filter_by(uuid=uuid).first()
if row is None:
raise utils.Error(_('Could not find node %s in cache') % uuid,
code=404)
@@ -273,7 +280,7 @@ def find_node(**attributes):
"""
# NOTE(dtantsur): sorting is not required, but gives us predictability
found = set()
db = _db()
for (name, value) in sorted(attributes.items()):
if not value:
LOG.debug('Empty value for attribute %s', name)
@@ -283,34 +290,40 @@ def find_node(**attributes):
LOG.debug('Trying to use %s of value %s for node look up'
% (name, value))
rows = db.execute('select distinct uuid from attributes where ' +
' OR '.join('name=? AND value=?' for _ in value),
sum(([name, v] for v in value), [])).fetchall()
value_list = []
for v in value:
value_list.append('name="%s" AND value="%s"' % (name, v))
stmt = ('select distinct uuid from attributes where ' +
' OR '.join(value_list))
rows = (model_query(models.Attribute.uuid).from_statement(
text(stmt)).all())
if rows:
found.update(item[0] for item in rows)
found.update(item.uuid for item in rows)
if not found:
raise utils.NotFoundInCacheError(_(
'Could not find a node for attributes %s') % attributes)
elif len(found) > 1:
raise utils.Error(_(
'Multiple matching nodes found for attributes %(attr)s: %(found)s')
'Multiple matching nodes found for attributes '
'%(attr)s: %(found)s')
% {'attr': attributes, 'found': list(found)}, code=404)
uuid = found.pop()
row = db.execute('select started_at, finished_at from nodes where uuid=?',
(uuid,)).fetchone()
row = (model_query(models.Node.started_at, models.Node.finished_at).
filter_by(uuid=uuid).first())
if not row:
raise utils.Error(_(
'Could not find node %s in introspection cache, '
'probably it\'s not on introspection now') % uuid, code=404)
if row['finished_at']:
if row.finished_at:
raise utils.Error(_(
'Introspection for node %(node)s already finished on %(finish)s') %
{'node': uuid, 'finish': row['finished_at']})
'Introspection for node %(node)s already finished on '
'%(finish)s') % {'node': uuid, 'finish': row.finished_at})
return NodeInfo(uuid=uuid, started_at=row['started_at'])
return NodeInfo(uuid=uuid, started_at=row.started_at)
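One editorial note on the look-up rewrite: the statement built in find_node() interpolates attribute values straight into the SQL text. A bound-parameter equivalent (my sketch, not part of this commit) would keep the same semantics without string-building:

    from sqlalchemy import and_, or_

    rows = model_query(models.Attribute.uuid).filter(or_(*(
        and_(models.Attribute.name == name, models.Attribute.value == v)
        for v in value))).all()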
def clean_up():
@@ -324,30 +337,32 @@ def clean_up():
status_keep_threshold = (time.time() -
CONF.node_status_keep_time)
with _db() as db:
db.execute('delete from nodes where finished_at < ?',
(status_keep_threshold,))
with _ensure_transaction() as session:
model_query(models.Node, session=session).filter(
models.Node.finished_at.isnot(None),
models.Node.finished_at < status_keep_threshold).delete()
timeout = CONF.timeout
if timeout <= 0:
return []
threshold = time.time() - timeout
with _db() as db:
uuids = [row[0] for row in
db.execute('select uuid from nodes where '
'started_at < ? and finished_at is null',
(threshold,))]
timeout = CONF.timeout
if timeout <= 0:
return []
threshold = time.time() - timeout
uuids = [row.uuid for row in
model_query(models.Node.uuid, session=session).filter(
models.Node.started_at < threshold,
models.Node.finished_at.is_(None)).all()]
if not uuids:
return []
LOG.error(_LE('Introspection for nodes %s has timed out'), uuids)
db.execute('update nodes set finished_at=?, error=? '
'where started_at < ? and finished_at is null',
(time.time(), 'Introspection timeout', threshold))
db.executemany('delete from attributes where uuid=?',
[(u,) for u in uuids])
db.executemany('delete from options where uuid=?',
[(u,) for u in uuids])
query = model_query(models.Node, session=session).filter(
models.Node.started_at < threshold,
models.Node.finished_at.is_(None))
query.update({'finished_at': time.time(),
'error': 'Introspection timeout'})
for u in uuids:
model_query(models.Attribute, session=session).filter_by(
uuid=u).delete()
model_query(models.Option, session=session).filter_by(
uuid=u).delete()
return uuids
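The sweep's observable contract, as the tests further down exercise it (option names from this diff):

    CONF.set_override('timeout', 99)
    timed_out = node_cache.clean_up()   # [] when nothing exceeded the timeout
    # Timed-out nodes get finished_at plus an 'Introspection timeout' error,
    # and their attributes/options rows are removed.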

View File

@@ -11,15 +11,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import mock
from oslo_config import cfg
from oslo_db import options as db_opts
from ironic_inspector.common import i18n
# Import configuration options
from ironic_inspector import conf # noqa
from ironic_inspector import models
from ironic_inspector import node_cache
from ironic_inspector.plugins import base as plugins_base
@@ -35,23 +36,24 @@ def init_test_conf():
CONF.reset()
for group in ('firewall', 'processing', 'ironic'):
CONF.register_group(cfg.OptGroup(group))
if not CONF.database:
db_opts.set_defaults(CONF)
CONF.set_default('slave_connection', False, group='database')
CONF.set_default('max_retries', 10, group='database')
if not CONF.database.connection:
# Might be set in functional tests
db_file = tempfile.NamedTemporaryFile()
CONF.set_override('database', db_file.name)
else:
db_file = None
node_cache._DB_NAME = None
return db_file
db_opts.set_defaults(CONF,
connection='sqlite:///')
class BaseTest(unittest.TestCase):
def setUp(self):
super(BaseTest, self).setUp()
self.db_file = init_test_conf()
self.db = node_cache._db()
if self.db_file:
self.addCleanup(lambda: self.db_file.close())
init_test_conf()
self.session = node_cache.get_session()
engine = node_cache.get_engine()
models.Base.metadata.create_all(engine)
engine.connect()
self.addCleanup(node_cache.get_engine().dispose)
plugins_base._HOOKS_MGR = None
for name in ('_', '_LI', '_LW', '_LE', '_LC'):
patch = mock.patch.object(i18n, name, lambda s: s)
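With this base class every test gets a fresh in-memory schema. A hypothetical test built on it (class and method names are mine, not from the diff):

    class TestEmptyCache(BaseTest):
        def test_no_nodes_yet(self):
            self.assertEqual([], node_cache.model_query(models.Node).all())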

View File

@@ -41,8 +41,9 @@ manage_firewall = False
[processing]
enable_setting_ipmi_credentials = True
[DEFAULT]
database = %(db_file)s
debug = True
[database]
connection = sqlite:///%(db_file)s
"""

View File

@@ -11,14 +11,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import time
import unittest
import mock
from oslo_config import cfg
from ironic_inspector import models
from ironic_inspector import node_cache
from ironic_inspector.test import base as test_base
from ironic_inspector import utils
@@ -29,63 +28,68 @@ CONF = cfg.CONF
class TestNodeCache(test_base.NodeTest):
def test_add_node(self):
# Ensure previous node information is cleared
with self.db:
self.db.execute("insert into nodes(uuid) values(?)",
(self.node.uuid,))
self.db.execute("insert into nodes(uuid) values('uuid2')")
self.db.execute("insert into attributes(name, value, uuid) "
"values(?, ?, ?)",
('mac', '11:22:11:22:11:22', self.uuid))
session = node_cache.get_session()
with session.begin():
models.Node(uuid=self.node.uuid).save(session)
models.Node(uuid='uuid2').save(session)
models.Attribute(name='mac',
value='11:22:11:22:11:22',
uuid=self.uuid).save(session)
res = node_cache.add_node(self.node.uuid, mac=self.macs,
bmc_address='1.2.3.4', foo=None)
self.assertEqual(self.uuid, res.uuid)
self.assertTrue(time.time() - 60 < res.started_at < time.time() + 60)
res = self.db.execute("select uuid, started_at "
"from nodes order by uuid").fetchall()
res = (node_cache.model_query(models.Node.uuid,
models.Node.started_at).order_by(models.Node.uuid).all())
self.assertEqual(['1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e',
'uuid2'], [t[0] for t in res])
self.assertTrue(time.time() - 60 < res[0][1] < time.time() + 60)
'uuid2'], [t.uuid for t in res])
self.assertTrue(time.time() - 60 < res[0].started_at <
time.time() + 60)
res = self.db.execute("select name, value, uuid from attributes "
"order by name, value").fetchall()
res = (node_cache.model_query(models.Attribute.name,
models.Attribute.value, models.Attribute.uuid).
order_by(models.Attribute.name, models.Attribute.value).all())
self.assertEqual([('bmc_address', '1.2.3.4', self.uuid),
('mac', self.macs[0], self.uuid),
('mac', self.macs[1], self.uuid)],
[tuple(row) for row in res])
[(row.name, row.value, row.uuid) for row in res])
def test_add_node_duplicate_mac(self):
with self.db:
self.db.execute("insert into nodes(uuid) values(?)",
('another-uuid',))
self.db.execute("insert into attributes(name, value, uuid) "
"values(?, ?, ?)",
('mac', '11:22:11:22:11:22', 'another-uuid'))
session = node_cache.get_session()
with session.begin():
models.Node(uuid='another-uuid').save(session)
models.Attribute(name='mac', value='11:22:11:22:11:22',
uuid='another-uuid').save(session)
self.assertRaises(utils.Error,
node_cache.add_node,
self.node.uuid, mac=['11:22:11:22:11:22'])
def test_active_macs(self):
with self.db:
self.db.execute("insert into nodes(uuid) values(?)",
(self.node.uuid,))
self.db.executemany("insert into attributes(name, value, uuid) "
"values(?, ?, ?)",
[('mac', '11:22:11:22:11:22', self.uuid),
('mac', '22:11:22:11:22:11', self.uuid)])
session = node_cache.get_session()
with session.begin():
models.Node(uuid=self.node.uuid).save(session)
values = [('mac', '11:22:11:22:11:22', self.uuid),
('mac', '22:11:22:11:22:11', self.uuid)]
for value in values:
models.Attribute(name=value[0], value=value[1],
uuid=value[2]).save(session)
self.assertEqual({'11:22:11:22:11:22', '22:11:22:11:22:11'},
node_cache.active_macs())
def test_add_attribute(self):
with self.db:
self.db.execute("insert into nodes(uuid) values(?)",
(self.node.uuid,))
session = node_cache.get_session()
with session.begin():
models.Node(uuid=self.node.uuid).save(session)
node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42)
node_info.add_attribute('key', 'value')
res = self.db.execute("select name, value, uuid from attributes "
"order by name, value").fetchall()
res = node_cache.model_query(models.Attribute.name,
models.Attribute.value,
models.Attribute.uuid,
session=session).order_by(
models.Attribute.name,
models.Attribute.value).all()
self.assertEqual([('key', 'value', self.uuid)],
[tuple(row) for row in res])
self.assertRaises(utils.Error, node_info.add_attribute,
@@ -131,15 +135,18 @@ class TestNodeCacheFind(test_base.NodeTest):
self.assertTrue(time.time() - 60 < res.started_at < time.time() + 1)
def test_inconsistency(self):
with self.db:
self.db.execute('delete from nodes where uuid=?', (self.uuid,))
session = node_cache.get_session()
with session.begin():
(node_cache.model_query(models.Node).filter_by(uuid=self.uuid).
delete())
self.assertRaises(utils.Error, node_cache.find_node,
bmc_address='1.2.3.4')
def test_already_finished(self):
with self.db:
self.db.execute('update nodes set finished_at=42.0 where uuid=?',
(self.uuid,))
session = node_cache.get_session()
with session.begin():
(node_cache.model_query(models.Node).filter_by(uuid=self.uuid).
update({'finished_at': 42.0}))
self.assertRaises(utils.Error, node_cache.find_node,
bmc_address='1.2.3.4')
@@ -148,27 +155,28 @@ class TestNodeCacheCleanUp(test_base.NodeTest):
def setUp(self):
super(TestNodeCacheCleanUp, self).setUp()
self.started_at = 100.0
with self.db:
self.db.execute('insert into nodes(uuid, started_at) '
'values(?, ?)', (self.uuid, self.started_at))
self.db.executemany('insert into attributes(name, value, uuid) '
'values(?, ?, ?)',
[('mac', v, self.uuid) for v in self.macs])
self.db.execute('insert into options(uuid, name, value) '
'values(?, ?, ?)', (self.uuid, 'foo', 'bar'))
session = node_cache.get_session()
with session.begin():
models.Node(uuid=self.uuid, started_at=self.started_at).save(
session)
for v in self.macs:
models.Attribute(name='mac', value=v, uuid=self.uuid).save(
session)
models.Option(uuid=self.uuid, name='foo', value='bar').save(
session)
def test_no_timeout(self):
CONF.set_override('timeout', 0)
self.assertFalse(node_cache.clean_up())
res = [tuple(row) for row in self.db.execute(
'select finished_at, error from nodes').fetchall()]
res = [tuple(row) for row in
node_cache.model_query(models.Node.finished_at,
models.Node.error).all()]
self.assertEqual([(None, None)], res)
self.assertEqual(len(self.macs), len(self.db.execute(
'select * from attributes').fetchall()))
self.assertEqual(1, len(self.db.execute(
'select * from options').fetchall()))
self.assertEqual(len(self.macs),
node_cache.model_query(models.Attribute).count())
self.assertEqual(1, node_cache.model_query(models.Option).count())
@mock.patch.object(time, 'time')
def test_ok(self, time_mock):
@@ -176,55 +184,52 @@ class TestNodeCacheCleanUp(test_base.NodeTest):
self.assertFalse(node_cache.clean_up())
res = [tuple(row) for row in self.db.execute(
'select finished_at, error from nodes').fetchall()]
res = [tuple(row) for row in node_cache.model_query(
models.Node.finished_at, models.Node.error).all()]
self.assertEqual([(None, None)], res)
self.assertEqual(len(self.macs), len(self.db.execute(
'select * from attributes').fetchall()))
self.assertEqual(1, len(self.db.execute(
'select * from options').fetchall()))
self.assertEqual(len(self.macs),
node_cache.model_query(models.Attribute).count())
self.assertEqual(1, node_cache.model_query(models.Option).count())
@mock.patch.object(time, 'time')
def test_timeout(self, time_mock):
# Add a finished node to confirm we don't try to timeout it
with self.db:
self.db.execute('insert into nodes(uuid, started_at, finished_at) '
'values(?, ?, ?)', (self.uuid + '1',
self.started_at,
self.started_at + 60))
time_mock.return_value = self.started_at
session = node_cache.get_session()
with session.begin():
models.Node(uuid=self.uuid + '1', started_at=self.started_at,
finished_at=self.started_at + 60).save(session)
CONF.set_override('timeout', 99)
time_mock.return_value = self.started_at + 100
time_mock.return_value = (self.started_at + 100)
self.assertEqual([self.uuid], node_cache.clean_up())
res = [tuple(row) for row in self.db.execute(
'select finished_at, error from nodes order by uuid').fetchall()]
res = [(row.finished_at, row.error) for row in
node_cache.model_query(models.Node).all()]
self.assertEqual([(self.started_at + 100, 'Introspection timeout'),
(self.started_at + 60, None)],
res)
self.assertEqual([], self.db.execute(
'select * from attributes').fetchall())
self.assertEqual([], self.db.execute(
'select * from options').fetchall())
self.assertEqual([], node_cache.model_query(models.Attribute).all())
self.assertEqual([], node_cache.model_query(models.Option).all())
def test_old_status(self):
CONF.set_override('node_status_keep_time', 42)
with self.db:
self.db.execute('update nodes set finished_at=?',
(time.time() - 100,))
session = node_cache.get_session()
with session.begin():
node_cache.model_query(models.Node).update(
{'finished_at': time.time() - 100})
self.assertEqual([], node_cache.clean_up())
self.assertEqual([], self.db.execute(
'select * from nodes').fetchall())
self.assertEqual([], node_cache.model_query(models.Node).all())
class TestNodeCacheGetNode(test_base.NodeTest):
def test_ok(self):
started_at = time.time() - 42
with self.db:
self.db.execute('insert into nodes(uuid, started_at) '
'values(?, ?)', (self.uuid, started_at))
session = node_cache.get_session()
with session.begin():
models.Node(uuid=self.uuid, started_at=started_at).save(session)
info = node_cache.get_node(self.uuid)
self.assertEqual(self.uuid, info.uuid)
@@ -244,53 +249,43 @@ class TestNodeInfoFinished(test_base.NodeTest):
bmc_address='1.2.3.4',
mac=self.macs)
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14)
with self.db:
self.db.execute('insert into options(uuid, name, value) '
'values(?, ?, ?)', (self.uuid, 'foo', 'bar'))
session = node_cache.get_session()
with session.begin():
models.Option(uuid=self.uuid, name='foo', value='bar').save(
session)
def test_success(self):
self.node_info.finished()
self.assertEqual((42.0, None), tuple(self.db.execute(
'select finished_at, error from nodes').fetchone()))
self.assertEqual([], self.db.execute(
"select * from attributes").fetchall())
self.assertEqual([], self.db.execute(
"select * from options").fetchall())
session = node_cache.get_session()
with session.begin():
self.assertEqual((42.0, None),
tuple(node_cache.model_query(
models.Node.finished_at,
models.Node.error).first()))
self.assertEqual([], node_cache.model_query(models.Attribute,
session=session).all())
self.assertEqual([], node_cache.model_query(models.Option,
session=session).all())
def test_error(self):
self.node_info.finished(error='boom')
self.assertEqual((42.0, 'boom'), tuple(self.db.execute(
'select finished_at, error from nodes').fetchone()))
self.assertEqual([], self.db.execute(
"select * from attributes").fetchall())
self.assertEqual([], self.db.execute(
"select * from options").fetchall())
self.assertEqual((42.0, 'boom'),
tuple(node_cache.model_query(models.Node.finished_at,
models.Node.error).first()))
self.assertEqual([], node_cache.model_query(models.Attribute).all())
self.assertEqual([], node_cache.model_query(models.Option).all())
class TestInit(unittest.TestCase):
def setUp(self):
super(TestInit, self).setUp()
node_cache._DB_NAME = None
def test_ok(self):
with tempfile.NamedTemporaryFile() as db_file:
CONF.set_override('database', db_file.name)
node_cache.init()
self.assertIsNotNone(node_cache._DB_NAME)
# Verify that table exists
node_cache._db().execute("select * from nodes")
def test_create_dir(self):
temp = tempfile.mkdtemp()
CONF.set_override('database', os.path.join(temp, 'dir', 'file'))
node_cache.init()
def test_no_database(self):
CONF.set_override('database', '')
self.assertRaises(SystemExit, node_cache.init)
session = node_cache.get_session()
node_cache.model_query(models.Node, session=session)
class TestNodeInfoOptions(test_base.NodeTest):
@@ -300,14 +295,15 @@ class TestNodeInfoOptions(test_base.NodeTest):
bmc_address='1.2.3.4',
mac=self.macs)
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14)
with self.db:
self.db.execute('insert into options(uuid, name, value) '
'values(?, ?, ?)', (self.uuid, 'foo', '"bar"'))
session = node_cache.get_session()
with session.begin():
models.Option(uuid=self.uuid, name='foo', value='"bar"').save(
session)
def test_get(self):
self.assertEqual({'foo': 'bar'}, self.node_info.options)
# should be cached
self.assertIs(self.node_info.options, self.node_info.options)
self.assertEqual(self.node_info.options, self.node_info.options)
# invalidate cache
old_options = self.node_info.options
self.node_info.invalidate_cache()

View File

@@ -10,6 +10,7 @@ python-ironicclient>=0.6.0
python-keystoneclient>=1.6.0
python-openstackclient>=1.0.3
oslo.config>=1.11.0 # Apache-2.0
oslo.db>=1.12.0 # Apache-2.0
oslo.i18n>=1.5.0 # Apache-2.0
oslo.utils>=1.6.0 # Apache-2.0
six>=1.9.0

View File

@@ -47,4 +47,5 @@ commands =
--output-file example.conf \
--namespace ironic_inspector \
--namespace keystonemiddleware.auth_token \
--namespace ironic_inspector.common.swift
--namespace ironic_inspector.common.swift \
--namespace oslo.db