Find a node by multiple attributes

This patch changes attribute matching during node lookup from requiring
a single exact name-value->node hit to selecting the node with the best
match score.

It also uses a UUID as the attributes table primary key to allow
exposing attributes in the API later (bug 1525231).

Change-Id: I205e31652b21b9e030b9530149e533b29c52a387
Closes-Bug: 1651719
Partial-Bug: 1525231
Author: dparalen
Date: 2017-01-16 18:55:08 +01:00
Parent: f9915931b2
Commit: 0ce5cdb7c8
6 changed files with 247 additions and 57 deletions
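
A minimal sketch of the best-match lookup described above, assuming a plain
in-memory index from (name, value) pairs to node UUIDs; the names
attribute_index and best_match are illustrative and not part of this patch:

import collections


def best_match(attribute_index, **attributes):
    """Return the node UUID matching the most of the given attributes.

    attribute_index maps (name, value) tuples to sets of node UUIDs.
    Raises LookupError when nothing matches or when more than one node
    shares the best score, mirroring the behaviour introduced here.
    """
    found = collections.Counter()
    for name, values in attributes.items():
        if not isinstance(values, list):
            values = [values]
        for value in values:
            found.update(attribute_index.get((name, value), set()))
    if not found:
        raise LookupError('no node matches %s' % attributes)
    most_common = found.most_common()        # sorted, higher scores first
    highest_score = most_common[0][1]
    best = [uuid for uuid, score in most_common if score == highest_score]
    if len(best) > 1:
        raise LookupError('multiple nodes tie on the best score: %s' % best)
    return best[0]


# Two nodes share a BMC address but differ in MAC addresses:
index = {
    ('bmc_address', '1.2.3.4'): {'node-1', 'node-2'},
    ('mac', '11:22:33:44:55:66'): {'node-1'},
    ('mac', '66:55:44:33:22:11'): {'node-2'},
}
assert best_match(index, bmc_address='1.2.3.4',
                  mac=['11:22:33:44:55:66']) == 'node-1'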


@@ -67,9 +67,11 @@ class Node(Base):
class Attribute(Base):
__tablename__ = 'attributes'
name = Column(String(255), primary_key=True)
value = Column(String(255), primary_key=True)
uuid = Column(String(36), ForeignKey('nodes.uuid'))
uuid = Column(String(36), primary_key=True)
node_uuid = Column(String(36), ForeignKey('nodes.uuid',
name='fk_node_attribute'))
name = Column(String(255), nullable=False)
value = Column(String(255), nullable=True)
class Option(Base):
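
Because the hunk above interleaves the dropped and the added column
definitions, here is how the resulting Attribute model reads after the
change; the declarative_base() boilerplate is an assumption made to keep the
snippet self-contained (the real module builds its Base elsewhere):

from sqlalchemy import Column, ForeignKey, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()  # stand-in for the module's actual Base


class Attribute(Base):
    __tablename__ = 'attributes'
    # surrogate primary key, so duplicate name/value pairs are allowed
    uuid = Column(String(36), primary_key=True)
    # the node reference moved out of the old 'uuid' column
    node_uuid = Column(String(36), ForeignKey('nodes.uuid',
                                              name='fk_node_attribute'))
    name = Column(String(255), nullable=False)
    value = Column(String(255), nullable=True)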


@@ -0,0 +1,90 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""attribute_constraints_relaxing
Revision ID: 882b2d84cb1b
Revises: d00d6e3f38c4
Create Date: 2017-01-13 11:27:00.053286
"""
# revision identifiers, used by Alembic.
revision = '882b2d84cb1b'
down_revision = 'd00d6e3f38c4'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector as insp
from oslo_utils import uuidutils
ATTRIBUTES = 'attributes'
NODES = 'nodes'
NAME = 'name'
VALUE = 'value'
UUID = 'uuid'
NODE_UUID = 'node_uuid'
naming_convention = {
    "pk": 'pk_%(table_name)s',
    "fk": 'fk_%(table_name)s'
}


def upgrade():
    connection = op.get_bind()
    inspector = insp.from_engine(connection)
    pk_constraint = (inspector.get_pk_constraint(ATTRIBUTES).get('name')
                     or naming_convention['pk'] % {'table_name': ATTRIBUTES})
    fk_constraint = (inspector.get_foreign_keys(ATTRIBUTES)[0].get('name')
                     or naming_convention['fk'] % {'table_name': ATTRIBUTES})
    columns_meta = inspector.get_columns(ATTRIBUTES)
    name_type = {meta.get('type') for meta in columns_meta
                 if meta['name'] == NAME}.pop()
    value_type = {meta.get('type') for meta in columns_meta
                  if meta['name'] == VALUE}.pop()
    node_uuid_column = sa.Column(NODE_UUID, sa.String(36))
    op.add_column(ATTRIBUTES, node_uuid_column)
    attributes = sa.table(ATTRIBUTES, node_uuid_column,
                          sa.Column(UUID, sa.String(36)))
    with op.batch_alter_table(ATTRIBUTES,
                              naming_convention=naming_convention) as batch_op:
        batch_op.drop_constraint(fk_constraint, type_='foreignkey')
    rows = connection.execute(sa.select([attributes.c.uuid,
                                         attributes.c.node_uuid]))
    for row in rows:
        # move uuid to node_uuid, reuse uuid as a new primary key
        connection.execute(
            attributes.update().where(attributes.c.uuid == row.uuid).
            values(node_uuid=row.uuid, uuid=uuidutils.generate_uuid())
        )
    with op.batch_alter_table(ATTRIBUTES,
                              naming_convention=naming_convention) as batch_op:
        batch_op.drop_constraint(pk_constraint, type_='primary')
        batch_op.create_primary_key(pk_constraint, [UUID])
        batch_op.create_foreign_key('fk_node_attribute', NODES,
                                    [NODE_UUID], [UUID])
        batch_op.alter_column('name', nullable=False, type_=name_type)
        batch_op.alter_column('value', nullable=True, type_=value_type)
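
To make the data move in the loop above concrete: before the migration an
attribute row's uuid column holds the owning node's UUID; afterwards uuid is
a fresh surrogate primary key and node_uuid carries the node reference. A
rough, hypothetical illustration of what the UPDATE does to a single row:

from oslo_utils import uuidutils

# An attribute row as it might look before the migration runs.
old_row = {'name': 'mac', 'value': '11:22:11:22:11:22',
           'uuid': 'aaaa-node-uuid', 'node_uuid': None}

# The equivalent of the UPDATE issued for that row.
new_row = dict(old_row,
               node_uuid=old_row['uuid'],       # old uuid was the node's UUID
               uuid=uuidutils.generate_uuid())  # new surrogate primary key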


@@ -13,6 +13,7 @@
"""Cache for nodes currently under introspection."""
import collections
import contextlib
import copy
import datetime
@@ -23,7 +24,6 @@ from automaton import exceptions as automaton_errors
from ironicclient import exceptions
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import excutils
from oslo_utils import reflection
@@ -251,7 +251,7 @@ class NodeInfo(object):
if self._attributes is None:
self._attributes = {}
rows = db.model_query(db.Attribute).filter_by(
uuid=self.uuid)
node_uuid=self.uuid)
for row in rows:
self._attributes.setdefault(row.name, []).append(row.value)
return self._attributes
@@ -288,7 +288,7 @@ class NodeInfo(object):
with db.ensure_transaction() as session:
self._commit(finished_at=self.finished_at, error=self.error)
db.model_query(db.Attribute, session=session).filter_by(
uuid=self.uuid).delete()
node_uuid=self.uuid).delete()
db.model_query(db.Option, session=session).filter_by(
uuid=self.uuid).delete()
@@ -298,23 +298,14 @@ class NodeInfo(object):
:param name: attribute name
:param value: attribute value or list of possible values
:param session: optional existing database session
:raises: Error if attributes values are already in database
"""
if not isinstance(value, list):
value = [value]
with db.ensure_transaction(session) as session:
try:
for v in value:
db.Attribute(name=name, value=v, uuid=self.uuid).save(
session)
except db_exc.DBDuplicateEntry as exc:
LOG.error(_LE('Database integrity error %s during '
'adding attributes'), exc, node_info=self)
raise utils.Error(_(
'Some or all of %(name)s\'s %(value)s are already '
'on introspection') % {'name': name, 'value': value},
node_info=self)
for v in value:
db.Attribute(uuid=uuidutils.generate_uuid(), name=name,
value=v, node_uuid=self.uuid).save(session)
# Invalidate attributes so they're loaded on next usage
self._attributes = None
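
With the DBDuplicateEntry handling removed, add_attribute now simply inserts
one row per value, each keyed by a fresh attribute UUID, so repeated names
and values are accepted (the new tests further down exercise this). A hedged
usage sketch, assuming a configured database and an existing node row whose
UUID is held in node_uuid:

from ironic_inspector import node_cache

node_info = node_cache.NodeInfo(uuid=node_uuid, started_at=42)
node_info.add_attribute('mac', ['11:22:11:22:11:22'])
# Before this patch the repeated value raised utils.Error because
# (name, value) was the table's primary key; now it just adds a row.
node_info.add_attribute('mac', '11:22:11:22:11:22')
# Cached attributes were invalidated and get reloaded on next access.
assert node_info.attributes == {'mac': ['11:22:11:22:11:22',
                                        '11:22:11:22:11:22']}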
@@ -719,7 +710,9 @@ def _delete_node(uuid, session=None):
:param session: optional existing database session
"""
with db.ensure_transaction(session) as session:
for model in (db.Attribute, db.Option, db.Node):
db.model_query(db.Attribute, session=session).filter_by(
node_uuid=uuid).delete()
for model in (db.Option, db.Node):
db.model_query(model,
session=session).filter_by(uuid=uuid).delete()
@@ -781,16 +774,17 @@ def get_node(node_id, ironic=None, locked=False):
def find_node(**attributes):
"""Find node in cache.
Looks up a node based on attributes in a best-match fashion.
This function acquires a lock on a node.
:param attributes: attributes known about this node (like macs, BMC etc)
also ironic client instance may be passed under 'ironic'
:returns: structure NodeInfo with attributes ``uuid`` and ``created_at``
:raises: Error if node is not found
:raises: Error if node is not found or multiple nodes match the attributes
"""
ironic = attributes.pop('ironic', None)
# NOTE(dtantsur): sorting is not required, but gives us predictability
found = set()
found = collections.Counter()
for (name, value) in sorted(attributes.items()):
if not value:
@@ -804,21 +798,31 @@ def find_node(**attributes):
value_list = []
for v in value:
value_list.append("name='%s' AND value='%s'" % (name, v))
stmt = ('select distinct uuid from attributes where ' +
stmt = ('select distinct node_uuid from attributes where ' +
' OR '.join(value_list))
rows = (db.model_query(db.Attribute.uuid).from_statement(
rows = (db.model_query(db.Attribute.node_uuid).from_statement(
text(stmt)).all())
if rows:
found.update(item.uuid for item in rows)
found.update(row.node_uuid for row in rows)
if not found:
raise utils.NotFoundInCacheError(_(
'Could not find a node for attributes %s') % attributes)
elif len(found) > 1:
most_common = found.most_common()
LOG.debug('The following nodes match the attributes: %(attributes)s, '
'scoring: %(most_common)s',
{'most_common': ', '.join('%s: %d' % tpl for tpl in most_common),
'attributes': ', '.join('%s=%s' % tpl for tpl in
attributes.items())})
# NOTE(milan) most_common is sorted, higher scores first
highest_score = most_common[0][1]
found = [item[0] for item in most_common if highest_score == item[1]]
if len(found) > 1:
raise utils.Error(_(
'Multiple matching nodes found for attributes '
'Multiple nodes match the same number of attributes '
'%(attr)s: %(found)s')
% {'attr': attributes, 'found': list(found)}, code=404)
% {'attr': attributes, 'found': found}, code=404)
uuid = found.pop()
node_info = NodeInfo(uuid=uuid, ironic=ironic)
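
For reference, a tiny standalone illustration of the scoring structure used
above, showing the tie case that now triggers the 'Multiple nodes match...'
error (the node names are made up):

import collections

# Two nodes that only share the BMC address and match nothing else:
found = collections.Counter({'node-1': 1, 'node-2': 1})
most_common = found.most_common()   # e.g. [('node-1', 1), ('node-2', 1)]
highest_score = most_common[0][1]
tied = [uuid for uuid, score in most_common if score == highest_score]
assert len(tied) > 1                # find_node() raises utils.Error here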


@@ -399,6 +399,46 @@ class MigrationCheckersMixin(object):
finished_at,
row['finished_at'])
def _pre_upgrade_882b2d84cb1b(self, engine):
attributes = db_utils.get_table(engine, 'attributes')
nodes = db_utils.get_table(engine, 'nodes')
self.node_uuid = uuidutils.generate_uuid()
node = {
'uuid': self.node_uuid,
'started_at': datetime.datetime.utcnow(),
'finished_at': None,
'error': None,
'state': istate.States.starting
}
nodes.insert().values(node).execute()
data = {
'uuid': self.node_uuid,
'name': 'foo',
'value': 'bar'
}
attributes.insert().values(data).execute()
def _check_882b2d84cb1b(self, engine, data):
attributes = db_utils.get_table(engine, 'attributes')
col_names = [column.name for column in attributes.c]
self.assertIn('uuid', col_names)
self.assertIsInstance(attributes.c.uuid.type, sqlalchemy.types.String)
self.assertIn('node_uuid', col_names)
self.assertIsInstance(attributes.c.node_uuid.type,
sqlalchemy.types.String)
self.assertIn('name', col_names)
self.assertIsInstance(attributes.c.name.type, sqlalchemy.types.String)
self.assertIn('value', col_names)
self.assertIsInstance(attributes.c.value.type, sqlalchemy.types.String)
row = attributes.select(attributes.c.node_uuid ==
self.node_uuid).execute().first()
self.assertEqual(self.node_uuid, row.node_uuid)
self.assertNotEqual(self.node_uuid, row.uuid)
self.assertIsNotNone(row.uuid)
self.assertEqual('foo', row.name)
self.assertEqual('bar', row.value)
def test_upgrade_and_version(self):
with patch_with_engine(self.engine):
self.migration_ext.upgrade('head')


@@ -44,9 +44,9 @@ class TestNodeCache(test_base.NodeTest):
state=istate.States.starting).save(session)
db.Node(uuid=uuid2,
state=istate.States.starting).save(session)
db.Attribute(name='mac',
db.Attribute(uuid=uuidutils.generate_uuid(), name='mac',
value='11:22:11:22:11:22',
uuid=self.uuid).save(session)
node_uuid=self.uuid).save(session)
node = node_cache.add_node(self.node.uuid,
istate.States.starting,
@@ -66,21 +66,22 @@ class TestNodeCache(test_base.NodeTest):
self.assertEqual(expected, res)
res = (db.model_query(db.Attribute.name,
db.Attribute.value, db.Attribute.uuid).
db.Attribute.value, db.Attribute.node_uuid).
order_by(db.Attribute.name, db.Attribute.value).all())
self.assertEqual([('bmc_address', '1.2.3.4', self.uuid),
('mac', self.macs[0], self.uuid),
('mac', self.macs[1], self.uuid),
('mac', self.macs[2], self.uuid)],
[(row.name, row.value, row.uuid) for row in res])
[(row.name, row.value, row.node_uuid) for row in res])
def test__delete_node(self):
session = db.get_session()
with session.begin():
db.Node(uuid=self.node.uuid,
state=istate.States.finished).save(session)
db.Attribute(name='mac', value='11:22:11:22:11:22',
uuid=self.uuid).save(session)
db.Attribute(uuid=uuidutils.generate_uuid(), name='mac',
value='11:22:11:22:11:22', node_uuid=self.uuid).save(
session)
data = {'s': 'value', 'b': True, 'i': 42}
encoded = json.dumps(data)
db.Option(uuid=self.uuid, name='name', value=encoded).save(
@@ -92,7 +93,7 @@ class TestNodeCache(test_base.NodeTest):
uuid=self.uuid).first()
self.assertIsNone(row_node)
row_attribute = db.model_query(db.Attribute).filter_by(
uuid=self.uuid).first()
node_uuid=self.uuid).first()
self.assertIsNone(row_attribute)
row_option = db.model_query(db.Option).filter_by(
uuid=self.uuid).first()
@@ -114,20 +115,6 @@ class TestNodeCache(test_base.NodeTest):
mock__get_lock_ctx.assert_called_once_with(uuid2)
mock__get_lock_ctx.return_value.__enter__.assert_called_once_with()
def test_add_node_duplicate_mac(self):
session = db.get_session()
uuid = uuidutils.generate_uuid()
with session.begin():
db.Node(uuid=uuid,
state=istate.States.starting).save(session)
db.Attribute(name='mac', value='11:22:11:22:11:22',
uuid=uuid).save(session)
self.assertRaises(utils.Error,
node_cache.add_node,
self.node.uuid,
istate.States.starting,
mac=['11:22:11:22:11:22'])
def test_active_macs(self):
session = db.get_session()
with session.begin():
@@ -136,8 +123,8 @@ class TestNodeCache(test_base.NodeTest):
values = [('mac', '11:22:11:22:11:22', self.uuid),
('mac', '22:11:22:11:22:11', self.uuid)]
for value in values:
db.Attribute(name=value[0], value=value[1],
uuid=value[2]).save(session)
db.Attribute(uuid=uuidutils.generate_uuid(), name=value[0],
value=value[1], node_uuid=value[2]).save(session)
self.assertEqual({'11:22:11:22:11:22', '22:11:22:11:22:11'},
node_cache.active_macs())
@@ -162,16 +149,46 @@ class TestNodeCache(test_base.NodeTest):
node_info.add_attribute('key', 'value')
res = db.model_query(db.Attribute.name,
db.Attribute.value,
db.Attribute.uuid,
db.Attribute.node_uuid,
session=session)
res = res.order_by(db.Attribute.name, db.Attribute.value).all()
self.assertEqual([('key', 'value', self.uuid)],
[tuple(row) for row in res])
self.assertRaises(utils.Error, node_info.add_attribute,
'key', 'value')
# check that .attributes got invalidated and reloaded
self.assertEqual({'key': ['value']}, node_info.attributes)
def test_add_attribute_same_name(self):
session = db.get_session()
with session.begin():
db.Node(uuid=self.node.uuid,
state=istate.States.starting).save(session)
node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42)
node_info.add_attribute('key', ['foo', 'bar'])
node_info.add_attribute('key', 'baz')
res = db.model_query(db.Attribute.name, db.Attribute.value,
db.Attribute.node_uuid, session=session)
res = res.order_by(db.Attribute.name, db.Attribute.value).all()
self.assertEqual([('key', 'bar', self.uuid),
('key', 'baz', self.uuid),
('key', 'foo', self.uuid)],
[tuple(row) for row in res])
def test_add_attribute_same_value(self):
session = db.get_session()
with session.begin():
db.Node(uuid=self.node.uuid,
state=istate.States.starting).save(session)
node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42)
node_info.add_attribute('key', 'value')
node_info.add_attribute('key', 'value')
res = db.model_query(db.Attribute.name, db.Attribute.value,
db.Attribute.node_uuid, session=session)
self.assertEqual([('key', 'value', self.uuid),
('key', 'value', self.uuid)],
[tuple(row) for row in res])
def test_attributes(self):
node_info = node_cache.add_node(self.uuid,
istate.States.starting,
@@ -183,7 +200,8 @@ class TestNodeCache(test_base.NodeTest):
# check invalidation
session = db.get_session()
with session.begin():
db.Attribute(name='foo', value='bar', uuid=self.uuid).save(session)
db.Attribute(uuid=uuidutils.generate_uuid(), name='foo',
value='bar', node_uuid=self.uuid).save(session)
# still cached
self.assertEqual({'bmc_address': ['1.2.3.4'],
'mac': self.macs},
@@ -216,6 +234,25 @@ class TestNodeCacheFind(test_base.NodeTest):
datetime.datetime.utcnow() + datetime.timedelta(seconds=1))
self.assertTrue(res._locked)
def test_same_bmc_different_macs(self):
uuid2 = uuidutils.generate_uuid()
node_cache.add_node(uuid2,
istate.States.starting,
bmc_address='1.2.3.4',
mac=self.macs2)
res = node_cache.find_node(bmc_address='1.2.3.4', mac=self.macs)
self.assertEqual(self.uuid, res.uuid)
res = node_cache.find_node(bmc_address='1.2.3.4', mac=self.macs2)
self.assertEqual(uuid2, res.uuid)
def test_same_bmc_raises(self):
uuid2 = uuidutils.generate_uuid()
node_cache.add_node(uuid2,
istate.States.starting,
bmc_address='1.2.3.4')
six.assertRaisesRegex(self, utils.Error, 'Multiple nodes',
node_cache.find_node, bmc_address='1.2.3.4')
def test_macs(self):
res = node_cache.find_node(mac=['11:22:33:33:33:33', self.macs[1]])
self.assertEqual(self.uuid, res.uuid)
@@ -275,8 +312,8 @@ class TestNodeCacheCleanUp(test_base.NodeTest):
started_at=self.started_at).save(
session)
for v in self.macs:
db.Attribute(name='mac', value=v, uuid=self.uuid).save(
session)
db.Attribute(uuid=uuidutils.generate_uuid(), name='mac',
value=v, node_uuid=self.uuid).save(session)
db.Option(uuid=self.uuid, name='foo', value='bar').save(
session)


@@ -0,0 +1,17 @@
---
features:
  - |
    Looking up nodes during introspection or discovery now supports matching
    on multiple attributes. For example, two nodes can use the same
    ``bmc_address`` and still be distinguished by their MAC addresses.
upgrade:
  - |
    Uniqueness of a node's ``bmc_address`` is no longer enforced.
  - |
    The primary key of the ``attributes`` table is relaxed from the
    ``attributes.name, attributes.value`` column pair to a new
    ``attributes.uuid`` column.
fixes:
  - |
    Introspection no longer fails on nodes with the same IPMI address but
    different IPMI ports.