Merge "Make Neutron resources reference standard attr table"

This commit is contained in:
Jenkins 2015-11-24 20:03:23 +00:00 committed by Gerrit Code Review
commit 02db0cd458
10 changed files with 371 additions and 14 deletions

View File

@ -60,3 +60,34 @@ Tests to verify that database migrations and models are in sync
.. autoclass:: _TestModelsMigrations
:members:
The Standard Attribute Table
----------------------------
There are many attributes that we would like to store in the database which
are common across many Neutron objects (e.g. tags, timestamps, rbac entries).
We have previously been handling this by duplicating the schema to every table
via model mixins. This means that a DB migration is required for each object
that wants to adopt one of these common attributes. This becomes even more
cumbersome when the relationship between the attribute and the object is
many-to-one because each object then needs its own table for the attributes
(assuming referential integrity is a concern).

To address this issue, the 'standardattributes' table is available. Any model
can add support for this table by inheriting the 'HasStandardAttributes' mixin
in neutron.db.model_base. This mixin will add a standard_attr_id BigInteger
column to the model with a foreign key relationship to the
'standardattributes' table. The model will then be able to access any columns
of the 'standardattributes' table and any tables related to it.
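
As a rough illustration only (the 'Widget' model below is invented for this
example and is not part of Neutron), adopting the mixin looks like any other
model definition::

    import sqlalchemy as sa

    from neutron.db import model_base
    from neutron.db import models_v2


    class Widget(model_base.HasStandardAttributes, model_base.BASEV2,
                 models_v2.HasId, models_v2.HasTenant):
        """Hypothetical resource used only to illustrate the mixin.

        Inheriting HasStandardAttributes gives the model a standard_attr_id
        foreign key into the 'standardattributes' table and a joined
        'standard_attr' relationship.
        """
        name = sa.Column(sa.String(255))
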
The introduction of a new standard attribute only requires one column addition
to the 'standardattributes' table for one-to-one relationships or a new table
for one-to-many or one-to-zero relationships. All of the models using the
'HasStandardAttributes' mixin will then automatically gain access to the new
attribute.

Any attributes that will apply to every neutron resource (e.g. timestamps)
can be added directly to the 'standardattributes' table. For things that will
frequently be NULL for most entries (e.g. a column to store an error reason),
a new table should be added and joined to in a query to prevent a large
number of NULL entries in the database.
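
As a sketch of that pattern (the 'ErrorReason' table below is hypothetical
and only illustrates the join approach), a sparsely populated attribute can
live in its own table keyed on standard_attr_id::

    import sqlalchemy as sa

    from neutron.db import model_base


    class ErrorReason(model_base.BASEV2):
        """Hypothetical side table joined through standard_attr_id.

        Rows exist only for resources that actually carry an error reason,
        so all other resources avoid accumulating NULL columns.
        """
        standard_attr_id = sa.Column(
            sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
            sa.ForeignKey('standardattributes.id', ondelete='CASCADE'),
            primary_key=True)
        reason = sa.Column(sa.String(255), nullable=False)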

View File

@ -78,7 +78,8 @@ class RouterPort(model_base.BASEV2):
lazy='joined')
class Router(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
class Router(model_base.HasStandardAttributes, model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron router."""
name = sa.Column(sa.String(255))
@ -92,7 +93,8 @@ class Router(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
lazy='dynamic')
class FloatingIP(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
class FloatingIP(model_base.HasStandardAttributes, model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a floating IP address.
This IP address may or may not be allocated to a tenant, and may or

View File

@ -1 +1 @@
1b294093239c
8a6d8bdae39

View File

@ -1 +1 @@
13cfb89f881a
32e5974ada25

View File

@ -0,0 +1,83 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""standardattributes migration
Revision ID: 8a6d8bdae39
Revises: 1b294093239c
Create Date: 2015-09-10 03:12:04.012457
"""
# revision identifiers, used by Alembic.
revision = '8a6d8bdae39'
down_revision = '1b294093239c'
depends_on = ('32e5974ada25',)
from alembic import op
import sqlalchemy as sa
# basic model of the tables with required field for migration
TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
'floatingips', 'routers', 'securitygrouprules')
TABLE_MODELS = [
(table, sa.Table(table, sa.MetaData(),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('standard_attr_id', sa.BigInteger(),
nullable=True)))
for table in TABLES
]
standardattrs = sa.Table(
'standardattributes', sa.MetaData(),
sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True),
sa.Column('resource_type', sa.String(length=255), nullable=False))
def upgrade():
generate_records_for_existing()
for table, model in TABLE_MODELS:
# add the constraint now that everything is populated on that table
op.create_foreign_key(
constraint_name=None, source_table=table,
referent_table='standardattributes',
local_cols=['standard_attr_id'], remote_cols=['id'],
ondelete='CASCADE')
op.alter_column(table, 'standard_attr_id', nullable=False,
existing_type=sa.BigInteger(), existing_nullable=True,
existing_server_default=False)
op.create_unique_constraint(
constraint_name='uniq_%s0standard_attr_id' % table,
table_name=table, columns=['standard_attr_id'])
def generate_records_for_existing():
session = sa.orm.Session(bind=op.get_bind())
values = []
with session.begin(subtransactions=True):
for table, model in TABLE_MODELS:
for row in session.query(model):
# NOTE(kevinbenton): without this disabled, pylint complains
# about a missing 'dml' argument.
#pylint: disable=no-value-for-parameter
res = session.execute(
standardattrs.insert().values(resource_type=table))
session.execute(
model.update().values(
standard_attr_id=res.inserted_primary_key).where(
model.c.id == row[0]))
# this commit is necessary to allow further operations
session.commit()
return values

View File

@ -0,0 +1,44 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add standard attribute table
Revision ID: 32e5974ada25
Revises: 13cfb89f881a
Create Date: 2015-09-10 00:22:47.618593
"""
# revision identifiers, used by Alembic.
revision = '32e5974ada25'
down_revision = '13cfb89f881a'
from alembic import op
import sqlalchemy as sa
TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
'floatingips', 'routers', 'securitygrouprules')
def upgrade():
op.create_table(
'standardattributes',
sa.Column('id', sa.BigInteger(), autoincrement=True),
sa.Column('resource_type', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id')
)
for table in TABLES:
op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
nullable=True))

View File

@ -77,3 +77,61 @@ class NeutronBaseV2(NeutronBase):
BASEV2 = declarative.declarative_base(cls=NeutronBaseV2)
class StandardAttribute(BASEV2):
"""Common table to associate all Neutron API resources.
By having Neutron objects related to this table, we can associate new
tables that apply to many Neutron objects (e.g. timestamps, rbac entries)
to this table to avoid schema duplication while maintaining referential
integrity.
NOTE(kevinbenton): This table should not have more columns added to it
unless we are absolutely certain the new column will have a value for
every single type of Neutron resource. Otherwise this table will be filled
with NULL entries for combinations that don't make sense. Additionally,
by keeping this table small we can ensure that performance isn't adversely
impacted for queries on objects.
"""
# sqlite doesn't support auto increment on big integers so we use big int
# for everything but sqlite
id = sa.Column(sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
primary_key=True, autoincrement=True)
# NOTE(kevinbenton): this column is redundant information, but it allows
# operators/devs to look at the contents of this table and know which table
# the corresponding object is in.
# 255 was selected as a max just because it's the varchar ceiling in mysql
# before a 2-byte prefix is required. We shouldn't get anywhere near this
# limit with our table names...
resource_type = sa.Column(sa.String(255), nullable=False)
class HasStandardAttributes(object):
@declarative.declared_attr
def standard_attr_id(cls):
return sa.Column(
sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
sa.ForeignKey(StandardAttribute.id, ondelete="CASCADE"),
unique=True,
nullable=False
)
# NOTE(kevinbenton): we have to disable the following pylint check because
# it thinks we are overriding this method in the __init__ method.
#pylint: disable=method-hidden
@declarative.declared_attr
def standard_attr(cls):
return orm.relationship(StandardAttribute,
lazy='joined',
cascade='all, delete-orphan',
single_parent=True,
uselist=False)
def __init__(self, *args, **kwargs):
super(HasStandardAttributes, self).__init__(*args, **kwargs)
# here we automatically create the related standard attribute object
self.standard_attr = StandardAttribute(
resource_type=self.__tablename__)
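
A minimal usage sketch (editorial note, not part of this change; it assumes a
working database session such as the one set up by the unit tests added later
in this commit):

    from neutron import context
    from neutron.db import models_v2

    ctx = context.get_admin_context()
    with ctx.session.begin():
        # HasStandardAttributes.__init__ attaches a StandardAttribute row
        # automatically, with resource_type set to the model's table name.
        net = models_v2.Network(name='demo', status='ACTIVE',
                                admin_state_up=True, tenant_id='demo')
        ctx.session.add(net)
    assert net.standard_attr.resource_type == 'networks'
    with ctx.session.begin():
        # The delete-orphan cascade removes the standardattributes row too.
        ctx.session.delete(net)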

View File

@ -111,7 +111,8 @@ class SubnetRoute(model_base.BASEV2, Route):
primary_key=True)
class Port(model_base.BASEV2, HasId, HasTenant):
class Port(model_base.HasStandardAttributes, model_base.BASEV2,
HasId, HasTenant):
"""Represents a port on a Neutron v2 network."""
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
@ -141,6 +142,7 @@ class Port(model_base.BASEV2, HasId, HasTenant):
mac_address=None, admin_state_up=None, status=None,
device_id=None, device_owner=None, fixed_ips=None,
dns_name=None):
super(Port, self).__init__()
self.id = id
self.tenant_id = tenant_id
self.name = name
@ -169,7 +171,8 @@ class DNSNameServer(model_base.BASEV2):
order = sa.Column(sa.Integer, nullable=False, server_default='0')
class Subnet(model_base.BASEV2, HasId, HasTenant):
class Subnet(model_base.HasStandardAttributes, model_base.BASEV2,
HasId, HasTenant):
"""Represents a neutron subnet.
When a subnet is created the first and last entries will be created. These
@ -226,7 +229,8 @@ class SubnetPoolPrefix(model_base.BASEV2):
primary_key=True)
class SubnetPool(model_base.BASEV2, HasId, HasTenant):
class SubnetPool(model_base.HasStandardAttributes, model_base.BASEV2,
HasId, HasTenant):
"""Represents a neutron subnet pool.
"""
@ -246,7 +250,8 @@ class SubnetPool(model_base.BASEV2, HasId, HasTenant):
lazy='joined')
class Network(model_base.BASEV2, HasId, HasTenant):
class Network(model_base.HasStandardAttributes, model_base.BASEV2,
HasId, HasTenant):
"""Represents a v2 neutron network."""
name = sa.Column(sa.String(attr.NAME_MAX_LEN))

View File

@ -43,7 +43,8 @@ IP_PROTOCOL_MAP = {constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP,
constants.PROTO_NAME_ICMP_V6: constants.PROTO_NUM_ICMP_V6}
class SecurityGroup(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
class SecurityGroup(model_base.HasStandardAttributes, model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron security group."""
name = sa.Column(sa.String(255))
@ -84,8 +85,8 @@ class SecurityGroupPortBinding(model_base.BASEV2):
lazy='joined', cascade='delete'))
class SecurityGroupRule(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
class SecurityGroupRule(model_base.HasStandardAttributes, model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron security group rule."""
security_group_id = sa.Column(sa.String(36),

View File

@ -42,7 +42,9 @@ from neutron.common import utils
from neutron import context
from neutron.db import db_base_plugin_common
from neutron.db import ipam_non_pluggable_backend as non_ipam
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sgdb
from neutron import manager
from neutron.tests import base
from neutron.tests import tools
@ -5596,7 +5598,7 @@ class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
self.assertEqual(res.status_int, 400)
class DbModelTestCase(base.BaseTestCase):
class DbModelTestCase(testlib_api.SqlTestCase):
"""DB model tests."""
def test_repr(self):
"""testing the string representation of 'model' classes."""
@ -5608,9 +5610,140 @@ class DbModelTestCase(base.BaseTestCase):
exp_end_with = (" {tenant_id=None, id=None, "
"name='net_net', status='OK', "
"admin_state_up=True, mtu=None, "
"vlan_transparent=None}>")
"vlan_transparent=None, standard_attr_id=None}>")
final_exp = exp_start_with + exp_middle + exp_end_with
self.assertEqual(actual_repr_output, final_exp)
self.assertEqual(final_exp, actual_repr_output)
def _make_network(self, ctx):
with ctx.session.begin():
network = models_v2.Network(name="net_net", status="OK",
tenant_id='dbcheck',
admin_state_up=True)
ctx.session.add(network)
return network
def _make_subnet(self, ctx, network_id):
with ctx.session.begin():
subnet = models_v2.Subnet(name="subsub", ip_version=4,
tenant_id='dbcheck',
cidr='turn_down_for_what',
network_id=network_id)
ctx.session.add(subnet)
return subnet
def _make_port(self, ctx, network_id):
with ctx.session.begin():
port = models_v2.Port(network_id=network_id, mac_address='1',
tenant_id='dbcheck',
admin_state_up=True, status="COOL",
device_id="devid", device_owner="me")
ctx.session.add(port)
return port
def _make_subnetpool(self, ctx):
with ctx.session.begin():
subnetpool = models_v2.SubnetPool(
ip_version=4, default_prefixlen=4, min_prefixlen=4,
max_prefixlen=4, shared=False, default_quota=4,
address_scope_id='f', tenant_id='dbcheck',
is_default=False
)
ctx.session.add(subnetpool)
return subnetpool
def _make_security_group_and_rule(self, ctx):
with ctx.session.begin():
sg = sgdb.SecurityGroup(name='sg', description='sg')
rule = sgdb.SecurityGroupRule(security_group=sg, port_range_min=1,
port_range_max=2, protocol='TCP',
ethertype='v4', direction='ingress',
remote_ip_prefix='0.0.0.0/0')
ctx.session.add(sg)
ctx.session.add(rule)
return sg, rule
def _make_floating_ip(self, ctx, port_id):
with ctx.session.begin():
flip = l3_db.FloatingIP(floating_ip_address='1.2.3.4',
floating_network_id='somenet',
floating_port_id=port_id)
ctx.session.add(flip)
return flip
def _make_router(self, ctx):
with ctx.session.begin():
router = l3_db.Router()
ctx.session.add(router)
return router
def _get_neutron_attr(self, ctx, attr_id):
return ctx.session.query(
models_v2.model_base.StandardAttribute).filter(
models_v2.model_base.StandardAttribute.id == attr_id).one()
def _test_standardattr_removed_on_obj_delete(self, ctx, obj):
attr_id = obj.standard_attr_id
self.assertEqual(
obj.__table__.name,
self._get_neutron_attr(ctx, attr_id).resource_type)
with ctx.session.begin():
ctx.session.delete(obj)
with testtools.ExpectedException(orm.exc.NoResultFound):
# we want to make sure that the attr resource was removed
self._get_neutron_attr(ctx, attr_id)
def test_standardattr_removed_on_subnet_delete(self):
ctx = context.get_admin_context()
network = self._make_network(ctx)
subnet = self._make_subnet(ctx, network.id)
self._test_standardattr_removed_on_obj_delete(ctx, subnet)
def test_standardattr_removed_on_network_delete(self):
ctx = context.get_admin_context()
network = self._make_network(ctx)
self._test_standardattr_removed_on_obj_delete(ctx, network)
def test_standardattr_removed_on_subnetpool_delete(self):
ctx = context.get_admin_context()
spool = self._make_subnetpool(ctx)
self._test_standardattr_removed_on_obj_delete(ctx, spool)
def test_standardattr_removed_on_port_delete(self):
ctx = context.get_admin_context()
network = self._make_network(ctx)
port = self._make_port(ctx, network.id)
self._test_standardattr_removed_on_obj_delete(ctx, port)
def test_standardattr_removed_on_sg_delete(self):
ctx = context.get_admin_context()
sg, rule = self._make_security_group_and_rule(ctx)
self._test_standardattr_removed_on_obj_delete(ctx, sg)
# make sure the attr entry was wiped out for the rule as well
with testtools.ExpectedException(orm.exc.NoResultFound):
self._get_neutron_attr(ctx, rule.standard_attr_id)
def test_standardattr_removed_on_floating_ip_delete(self):
ctx = context.get_admin_context()
network = self._make_network(ctx)
port = self._make_port(ctx, network.id)
flip = self._make_floating_ip(ctx, port.id)
self._test_standardattr_removed_on_obj_delete(ctx, flip)
def test_standardattr_removed_on_router_delete(self):
ctx = context.get_admin_context()
router = self._make_router(ctx)
self._test_standardattr_removed_on_obj_delete(ctx, router)
def test_resource_type_fields(self):
ctx = context.get_admin_context()
network = self._make_network(ctx)
port = self._make_port(ctx, network.id)
subnet = self._make_subnet(ctx, network.id)
spool = self._make_subnetpool(ctx)
for disc, obj in (('ports', port), ('networks', network),
('subnets', subnet), ('subnetpools', spool)):
self.assertEqual(
disc, obj.standard_attr.resource_type)
class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase,