Merge "Remove deprecated aggregate DB compatibility"

This commit is contained in:
Zuul 2018-02-19 22:43:12 +00:00 committed by Gerrit Code Review
commit 0b53cd8c30
12 changed files with 81 additions and 1342 deletions

View File

@ -54,7 +54,6 @@ from nova.db.sqlalchemy import api as sa_db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import aggregate as aggregate_obj
from nova.objects import block_device as block_device_obj
from nova.objects import build_request as build_request_obj
from nova.objects import host_mapping as host_mapping_obj
@ -388,10 +387,6 @@ class DbCommands(object):
# Added in Newton
keypair_obj.migrate_keypairs_to_api_db,
# Added in Newton
aggregate_obj.migrate_aggregates,
# Added in Newton
aggregate_obj.migrate_aggregate_reset_autoincrement,
# Added in Newton
instance_group_obj.migrate_instance_groups_to_api_db,
# Added in Ocata
# NOTE(mriedem): This online migration is going to be backported to

View File

@ -1790,91 +1790,6 @@ def s3_image_create(context, image_uuid):
####################
def aggregate_create(context, values, metadata=None):
    """Create a new aggregate with metadata."""
    return IMPL.aggregate_create(context, values, metadata)


def aggregate_get(context, aggregate_id):
    """Get a specific aggregate by id."""
    return IMPL.aggregate_get(context, aggregate_id)


def aggregate_get_by_host(context, host, key=None):
    """Get a list of aggregates that host belongs to."""
    return IMPL.aggregate_get_by_host(context, host, key)


def aggregate_get_by_uuid(context, uuid):
    """Get a specific aggregate by uuid."""
    return IMPL.aggregate_get_by_uuid(context, uuid)


def aggregate_metadata_get_by_host(context, host, key=None):
    """Get metadata for all aggregates that host belongs to.

    Returns a dictionary where each value is a set; this is to cover the
    case where two aggregates have different values for the same key.

    :param key: optional filter restricting the result to this single key.
    """
    return IMPL.aggregate_metadata_get_by_host(context, host, key)


def aggregate_get_by_metadata_key(context, key):
    """Get all aggregates that have the given metadata key set."""
    return IMPL.aggregate_get_by_metadata_key(context, key)


def aggregate_update(context, aggregate_id, values):
    """Update the attributes of an aggregates.

    If values contains a metadata key, it updates the aggregate metadata too.
    """
    return IMPL.aggregate_update(context, aggregate_id, values)


def aggregate_delete(context, aggregate_id):
    """Delete an aggregate."""
    return IMPL.aggregate_delete(context, aggregate_id)


def aggregate_get_all(context):
    """Get all aggregates."""
    return IMPL.aggregate_get_all(context)


def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
    """Add/update metadata.

    If set_delete=True, existing keys absent from ``metadata`` are removed.
    """
    IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)


def aggregate_metadata_get(context, aggregate_id):
    """Get metadata for the specified aggregate."""
    return IMPL.aggregate_metadata_get(context, aggregate_id)


def aggregate_metadata_delete(context, aggregate_id, key):
    """Delete the given metadata key."""
    IMPL.aggregate_metadata_delete(context, aggregate_id, key)


def aggregate_host_add(context, aggregate_id, host):
    """Add host to the aggregate."""
    IMPL.aggregate_host_add(context, aggregate_id, host)


def aggregate_host_get_all(context, aggregate_id):
    """Get hosts for the specified aggregate."""
    return IMPL.aggregate_host_get_all(context, aggregate_id)


def aggregate_host_delete(context, aggregate_id, host):
    """Delete the given host from the aggregate."""
    IMPL.aggregate_host_delete(context, aggregate_id, host)
####################
def instance_fault_create(context, values):
"""Create a new Instance Fault."""
return IMPL.instance_fault_create(context, values)

View File

@ -30,7 +30,6 @@ from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
@ -184,20 +183,6 @@ def require_instance_exists_using_uuid(f):
return wrapper
def require_aggregate_exists(f):
    """Decorator to require the specified aggregate to exist.

    Requires the wrapped function to use context and aggregate_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, aggregate_id, *args, **kwargs):
        # aggregate_get raises AggregateNotFound for an unknown id, so
        # reaching the wrapped function implies the aggregate exists.
        aggregate_get(context, aggregate_id)
        return f(context, aggregate_id, *args, **kwargs)
    return wrapper
def select_db_reader_mode(f):
"""Decorator to select synchronous or asynchronous reader mode.
@ -5353,309 +5338,6 @@ def s3_image_create(context, image_uuid):
####################
def _aggregate_get_query(context, model_class, id_field=None, id=None,
                         read_deleted=None):
    """Build a query for ``model_class``, optionally filtered on ``id``.

    For Aggregate queries the ``_hosts`` and ``_metadata`` relationships
    are eagerly joined so they are not lazy-loaded later.
    """
    columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
    query = model_query(context, model_class, read_deleted=read_deleted)
    for c in columns_to_join.get(model_class, []):
        query = query.options(joinedload(c))
    # Only filter when both a column and a (truthy) id were supplied.
    if id and id_field:
        query = query.filter(id_field == id)
    return query
@pick_context_manager_writer
def aggregate_create(context, values, metadata=None):
    """Create an aggregate (and optional metadata) in the database.

    :param values: attribute dict; must contain a unique 'name'.
    :param metadata: optional dict of metadata key/value pairs.
    :raises: AggregateNameExists if a non-deleted aggregate already uses
             the requested name.
    """
    query = _aggregate_get_query(context,
                                 models.Aggregate,
                                 models.Aggregate.name,
                                 values['name'],
                                 read_deleted='no')
    aggregate = query.first()
    if not aggregate:
        aggregate = models.Aggregate()
        aggregate.update(values)
        aggregate.save(context.session)
        # We don't want these to be lazy loaded later. We know there is
        # nothing here since we just created this aggregate.
        aggregate._hosts = []
        aggregate._metadata = []
    else:
        raise exception.AggregateNameExists(aggregate_name=values['name'])
    if metadata:
        aggregate_metadata_add(context, aggregate.id, metadata)
        # NOTE(pkholkin): '_metadata' attribute was updated during
        # 'aggregate_metadata_add' method, so it should be expired and
        # read from db
        context.session.expire(aggregate, ['_metadata'])
        aggregate._metadata
    return aggregate
@pick_context_manager_reader
def aggregate_get(context, aggregate_id):
    """Return the aggregate with the given id.

    :raises: AggregateNotFound if no matching aggregate exists.
    """
    result = _aggregate_get_query(context,
                                  models.Aggregate,
                                  models.Aggregate.id,
                                  aggregate_id).first()
    if not result:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
    return result
@pick_context_manager_reader
def aggregate_get_by_uuid(context, uuid):
    """Return the aggregate identified by ``uuid``.

    :raises: AggregateNotFound when no aggregate has that uuid.
    """
    result = _aggregate_get_query(context,
                                  models.Aggregate,
                                  models.Aggregate.uuid,
                                  uuid).first()
    if not result:
        raise exception.AggregateNotFound(aggregate_id=uuid)
    return result
@pick_context_manager_reader
def aggregate_get_by_host(context, host, key=None):
    """Return rows that match host (mandatory) and metadata key (optional).

    :param host: matches host, and is required.
    :param key: matches metadata key, if not None.
    """
    query = model_query(context, models.Aggregate)
    query = query.options(joinedload('_hosts'))
    query = query.options(joinedload('_metadata'))
    query = query.join('_hosts')
    query = query.filter(models.AggregateHost.host == host)

    if key:
        query = query.join("_metadata").filter(
            models.AggregateMetadata.key == key)
    return query.all()
@pick_context_manager_reader
def aggregate_metadata_get_by_host(context, host, key=None):
    """Return metadata for all aggregates the host belongs to.

    The result maps each metadata key to a *set* of values, since two
    aggregates may define different values for the same key.

    :param key: optional filter restricting the result to one metadata key.
    """
    query = model_query(context, models.Aggregate)
    query = query.join("_hosts")
    query = query.join("_metadata")
    query = query.filter(models.AggregateHost.host == host)
    query = query.options(contains_eager("_metadata"))

    if key:
        query = query.filter(models.AggregateMetadata.key == key)
    rows = query.all()

    metadata = collections.defaultdict(set)
    for agg in rows:
        for kv in agg._metadata:
            metadata[kv['key']].add(kv['value'])
    return dict(metadata)
@pick_context_manager_reader
def aggregate_get_by_metadata_key(context, key):
    """Return rows that match metadata key.

    :param key: matches metadata key.
    """
    query = model_query(context, models.Aggregate)
    query = query.join("_metadata")
    query = query.filter(models.AggregateMetadata.key == key)
    query = query.options(contains_eager("_metadata"))
    query = query.options(joinedload("_hosts"))
    return query.all()
@pick_context_manager_writer
def aggregate_update(context, aggregate_id, values):
    """Update an aggregate's attributes (and metadata, if supplied).

    A 'metadata' key in ``values`` is applied via aggregate_metadata_add;
    an 'availability_zone' key is folded into the metadata dict.

    :raises: AggregateNameExists when renaming to a name another aggregate
             already uses.
    :raises: AggregateNotFound if aggregate_id does not exist.
    """
    if "name" in values:
        aggregate_by_name = (_aggregate_get_query(context,
                                                  models.Aggregate,
                                                  models.Aggregate.name,
                                                  values['name'],
                                                  read_deleted='no').first())
        if aggregate_by_name and aggregate_by_name.id != aggregate_id:
            # there is another aggregate with the new name
            raise exception.AggregateNameExists(aggregate_name=values['name'])

    aggregate = (_aggregate_get_query(context,
                                      models.Aggregate,
                                      models.Aggregate.id,
                                      aggregate_id).first())

    set_delete = True
    if aggregate:
        if "availability_zone" in values:
            az = values.pop('availability_zone')
            if 'metadata' not in values:
                values['metadata'] = {'availability_zone': az}
                # Only the AZ key is being set; keep other existing metadata.
                set_delete = False
            else:
                values['metadata']['availability_zone'] = az
        metadata = values.get('metadata')
        if metadata is not None:
            aggregate_metadata_add(context,
                                   aggregate_id,
                                   values.pop('metadata'),
                                   set_delete=set_delete)

        aggregate.update(values)
        aggregate.save(context.session)
        # Re-read so the returned object reflects the metadata changes.
        return aggregate_get(context, aggregate.id)
    else:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@pick_context_manager_writer
def aggregate_delete(context, aggregate_id):
    """Soft-delete an aggregate and all of its metadata rows.

    :raises: AggregateNotFound if the id matched no live aggregate.
    """
    count = _aggregate_get_query(context,
                                 models.Aggregate,
                                 models.Aggregate.id,
                                 aggregate_id).\
        soft_delete()
    if count == 0:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)

    # Delete Metadata
    model_query(context, models.AggregateMetadata).\
        filter_by(aggregate_id=aggregate_id).\
        soft_delete()
@pick_context_manager_reader
def aggregate_get_all(context):
    """Return every aggregate visible in this context."""
    query = _aggregate_get_query(context, models.Aggregate)
    return query.all()
def _aggregate_metadata_get_query(context, aggregate_id, read_deleted="yes"):
    """Build a query over the metadata rows of a single aggregate."""
    return model_query(context,
                       models.AggregateMetadata,
                       read_deleted=read_deleted).\
        filter_by(aggregate_id=aggregate_id)
@require_aggregate_exists
@pick_context_manager_reader
def aggregate_metadata_get(context, aggregate_id):
    """Return the aggregate's metadata as a plain {key: value} dict."""
    rows = model_query(context,
                       models.AggregateMetadata).\
        filter_by(aggregate_id=aggregate_id).all()
    metadata = {}
    for row in rows:
        metadata[row['key']] = row['value']
    return metadata
@require_aggregate_exists
@pick_context_manager_writer
def aggregate_metadata_delete(context, aggregate_id, key):
    """Soft-delete one metadata key from an aggregate.

    :raises: AggregateMetadataNotFound if the key is not set on the
             aggregate.
    """
    count = _aggregate_get_query(context,
                                 models.AggregateMetadata,
                                 models.AggregateMetadata.aggregate_id,
                                 aggregate_id).\
        filter_by(key=key).\
        soft_delete()
    if count == 0:
        raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
                                                  metadata_key=key)
@require_aggregate_exists
@pick_context_manager_writer
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
                           max_retries=10):
    """Add or update metadata on an aggregate, retrying on insert races.

    :param metadata: dict of key/value pairs to set.
    :param set_delete: if True, soft-delete existing keys that are not in
                       ``metadata`` (replace semantics).
    :param max_retries: attempts before giving up on DBDuplicateEntry
                        raised by a concurrent writer.
    """
    all_keys = metadata.keys()
    for attempt in range(max_retries):
        try:
            query = _aggregate_metadata_get_query(context, aggregate_id,
                                                  read_deleted='no')
            if set_delete:
                query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
                    soft_delete(synchronize_session=False)

            already_existing_keys = set()
            if all_keys:
                query = query.filter(
                    models.AggregateMetadata.key.in_(all_keys))
                # Update rows for keys that already exist in place.
                for meta_ref in query.all():
                    key = meta_ref.key
                    meta_ref.update({"value": metadata[key]})
                    already_existing_keys.add(key)

            new_entries = []
            for key, value in metadata.items():
                if key in already_existing_keys:
                    continue
                new_entries.append({"key": key,
                                    "value": value,
                                    "aggregate_id": aggregate_id})
            if new_entries:
                # Bulk-insert all brand-new keys in one statement.
                context.session.execute(
                    models.AggregateMetadata.__table__.insert(),
                    new_entries)

            return metadata
        except db_exc.DBDuplicateEntry:
            # a concurrent transaction has been committed,
            # try again unless this was the last attempt
            with excutils.save_and_reraise_exception() as ctxt:
                if attempt < max_retries - 1:
                    ctxt.reraise = False
                else:
                    LOG.warning("Add metadata failed for aggregate %(id)s "
                                "after %(retries)s retries",
                                {"id": aggregate_id, "retries": max_retries})
@require_aggregate_exists
@pick_context_manager_reader
def aggregate_host_get_all(context, aggregate_id):
    """Return the list of host names belonging to the aggregate."""
    host_rows = model_query(context,
                            models.AggregateHost).\
        filter_by(aggregate_id=aggregate_id).all()
    hosts = []
    for row in host_rows:
        hosts.append(row.host)
    return hosts
@require_aggregate_exists
@pick_context_manager_writer
def aggregate_host_delete(context, aggregate_id, host):
    """Soft-delete a host from an aggregate.

    :raises: AggregateHostNotFound if the host is not a member of the
             aggregate.
    """
    count = _aggregate_get_query(context,
                                 models.AggregateHost,
                                 models.AggregateHost.aggregate_id,
                                 aggregate_id).\
        filter_by(host=host).\
        soft_delete()
    if count == 0:
        raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
                                              host=host)
@require_aggregate_exists
@pick_context_manager_writer
def aggregate_host_add(context, aggregate_id, host):
    """Add a host to an aggregate.

    :raises: AggregateHostExists if the host is already a member; relies on
             the DB unique constraint surfacing as DBDuplicateEntry.
    """
    host_ref = models.AggregateHost()
    host_ref.update({"host": host, "aggregate_id": aggregate_id})
    try:
        host_ref.save(context.session)
    except db_exc.DBDuplicateEntry:
        raise exception.AggregateHostExists(host=host,
                                            aggregate_id=aggregate_id)
    return host_ref
################
@pick_context_manager_writer
def instance_fault_create(context, values):
"""Create a new InstanceFault."""

View File

@ -16,17 +16,12 @@ from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
import six
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import func
from sqlalchemy.sql import text
from nova.compute import utils as compute_utils
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova.db.sqlalchemy import models as main_models
from nova import exception
from nova.i18n import _
from nova import objects
@ -249,10 +244,6 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
obj_extra_fields = ['availability_zone']
def __init__(self, *args, **kwargs):
super(Aggregate, self).__init__(*args, **kwargs)
self._in_api = False
@staticmethod
def _from_db_object(context, aggregate, db_aggregate):
for key in aggregate.fields:
@ -264,11 +255,9 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
db_key = key
setattr(aggregate, key, db_aggregate[db_key])
# NOTE: This can be removed when we remove compatibility with
# the old aggregate model.
if any(f not in db_aggregate for f in DEPRECATED_FIELDS):
aggregate.deleted_at = None
aggregate.deleted = False
# NOTE: This can be removed when we bump Aggregate to v2.0
aggregate.deleted_at = None
aggregate.deleted = False
aggregate._context = context
aggregate.obj_reset_changes()
@ -281,60 +270,23 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
action=action,
reason='hosts updated inline')
@property
def in_api(self):
if self._in_api:
return True
else:
try:
_aggregate_get_from_db(self._context, self.id)
self._in_api = True
except exception.AggregateNotFound:
pass
return self._in_api
@base.remotable_classmethod
def get_by_id(cls, context, aggregate_id):
try:
db_aggregate = _aggregate_get_from_db(context, aggregate_id)
except exception.AggregateNotFound:
db_aggregate = db.aggregate_get(context, aggregate_id)
db_aggregate = _aggregate_get_from_db(context, aggregate_id)
return cls._from_db_object(context, cls(), db_aggregate)
@base.remotable_classmethod
def get_by_uuid(cls, context, aggregate_uuid):
try:
db_aggregate = _aggregate_get_from_db_by_uuid(context,
aggregate_uuid)
except exception.AggregateNotFound:
db_aggregate = db.aggregate_get_by_uuid(context, aggregate_uuid)
db_aggregate = _aggregate_get_from_db_by_uuid(context,
aggregate_uuid)
return cls._from_db_object(context, cls(), db_aggregate)
@staticmethod
@db_api.pick_context_manager_reader
def _ensure_migrated(context):
result = context.session.query(main_models.Aggregate).\
filter_by(deleted=0).count()
if result:
LOG.warning(
'Main database contains %(count)i unmigrated aggregates',
{'count': result})
return result == 0
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
# NOTE(mdoff): Once we have made it past a point where we know
# all aggregates have been migrated, we can remove this. Ideally
# in Ocata with a blocker migration to be sure.
if not self._ensure_migrated(self._context):
raise exception.ObjectActionError(
action='create',
reason='main database still contains aggregates')
self._assert_no_hosts('create')
updates = self.obj_get_changes()
payload = dict(updates)
@ -381,12 +333,8 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
"updateprop.start",
payload)
updates.pop('id', None)
try:
db_aggregate = _aggregate_update_to_db(self._context,
self.id, updates)
except exception.AggregateNotFound:
db_aggregate = db.aggregate_update(self._context, self.id, updates)
db_aggregate = _aggregate_update_to_db(self._context,
self.id, updates)
compute_utils.notify_about_aggregate_update(self._context,
"updateprop.end",
payload)
@ -394,13 +342,6 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
@base.remotable
def update_metadata(self, updates):
if self.in_api:
metadata_delete = _metadata_delete_from_db
metadata_add = _metadata_add_to_db
else:
metadata_delete = db.aggregate_metadata_delete
metadata_add = db.aggregate_metadata_add
payload = {'aggregate_id': self.id,
'meta_data': updates}
compute_utils.notify_about_aggregate_update(self._context,
@ -410,7 +351,7 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
for key, value in updates.items():
if value is None:
try:
metadata_delete(self._context, self.id, key)
_metadata_delete_from_db(self._context, self.id, key)
except exception.AggregateMetadataNotFound:
pass
try:
@ -420,7 +361,7 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
else:
to_add[key] = value
self.metadata[key] = value
metadata_add(self._context, self.id, to_add)
_metadata_add_to_db(self._context, self.id, to_add)
compute_utils.notify_about_aggregate_update(self._context,
"updatemetadata.end",
payload)
@ -428,17 +369,11 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
@base.remotable
def destroy(self):
try:
_aggregate_delete_from_db(self._context, self.id)
except exception.AggregateNotFound:
db.aggregate_delete(self._context, self.id)
_aggregate_delete_from_db(self._context, self.id)
@base.remotable
def add_host(self, host):
if self.in_api:
_host_add_to_db(self._context, self.id, host)
else:
db.aggregate_host_add(self._context, self.id, host)
_host_add_to_db(self._context, self.id, host)
if self.hosts is None:
self.hosts = []
@ -447,10 +382,7 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
@base.remotable
def delete_host(self, host):
if self.in_api:
_host_delete_from_db(self._context, self.id, host)
else:
db.aggregate_host_delete(self._context, self.id, host)
_host_delete_from_db(self._context, self.id, host)
self.hosts.remove(host)
self.obj_reset_changes(fields=['hosts'])
@ -507,14 +439,6 @@ class AggregateList(base.ObjectListBase, base.NovaObject):
'objects': fields.ListOfObjectsField('Aggregate'),
}
# NOTE(mdoff): Calls to this can be removed when we remove
# compatibility with the old aggregate model.
@staticmethod
def _fill_deprecated(db_aggregate):
db_aggregate['deleted_at'] = None
db_aggregate['deleted'] = False
return db_aggregate
@classmethod
def _filter_db_aggregates(cls, db_aggregates, hosts):
if not isinstance(hosts, set):
@ -529,89 +453,20 @@ class AggregateList(base.ObjectListBase, base.NovaObject):
@base.remotable_classmethod
def get_all(cls, context):
api_db_aggregates = [cls._fill_deprecated(agg) for agg in
_get_all_from_db(context)]
db_aggregates = db.aggregate_get_all(context)
db_aggregates = _get_all_from_db(context)
return base.obj_make_list(context, cls(context), objects.Aggregate,
db_aggregates + api_db_aggregates)
db_aggregates)
@base.remotable_classmethod
def get_by_host(cls, context, host, key=None):
api_db_aggregates = [cls._fill_deprecated(agg) for agg in
_get_by_host_from_db(context, host, key=key)]
db_aggregates = db.aggregate_get_by_host(context, host, key=key)
db_aggregates = _get_by_host_from_db(context, host, key=key)
return base.obj_make_list(context, cls(context), objects.Aggregate,
db_aggregates + api_db_aggregates)
db_aggregates)
@base.remotable_classmethod
def get_by_metadata_key(cls, context, key, hosts=None):
api_db_aggregates = [cls._fill_deprecated(agg) for agg in
_get_by_metadata_key_from_db(context, key=key)]
db_aggregates = db.aggregate_get_by_metadata_key(context, key=key)
all_aggregates = db_aggregates + api_db_aggregates
db_aggregates = _get_by_metadata_key_from_db(context, key=key)
if hosts is not None:
all_aggregates = cls._filter_db_aggregates(all_aggregates, hosts)
db_aggregates = cls._filter_db_aggregates(db_aggregates, hosts)
return base.obj_make_list(context, cls(context), objects.Aggregate,
all_aggregates)
@db_api.pick_context_manager_reader
def _get_main_db_aggregate_ids(context, limit):
    """Return up to ``limit`` ids of aggregates (deleted==0) still living
    in the main database.
    """
    from nova.db.sqlalchemy import models
    return [x[0] for x in context.session.query(models.Aggregate.id).
            filter_by(deleted=0).
            limit(limit)]


def migrate_aggregates(ctxt, count):
    """Online data migration: copy aggregates from the main DB to the
    API DB.

    :param count: maximum number of aggregates to process in this batch
    :returns: a (found, migrated) tuple of counts
    """
    main_db_ids = _get_main_db_aggregate_ids(ctxt, count)
    if not main_db_ids:
        return 0, 0

    count_all = len(main_db_ids)
    count_hit = 0

    for aggregate_id in main_db_ids:
        try:
            aggregate = Aggregate.get_by_id(ctxt, aggregate_id)
            remove = ['metadata', 'hosts']
            # Copy every field except the relationships, which are
            # re-created explicitly below.
            values = {field: getattr(aggregate, field)
                      for field in aggregate.fields if field not in remove}
            _aggregate_create_in_db(ctxt, values, metadata=aggregate.metadata)
            for host in aggregate.hosts:
                _host_add_to_db(ctxt, aggregate_id, host)
            count_hit += 1
            # Only remove the main-DB row once the API-DB copy succeeded.
            db.aggregate_delete(ctxt, aggregate.id)
        except exception.AggregateNotFound:
            LOG.warning(
                'Aggregate id %(id)i disappeared during migration',
                {'id': aggregate_id})
        except (exception.AggregateNameExists) as e:
            # six.text_type keeps unicode exception messages intact on py2.
            LOG.error(six.text_type(e))

    return count_all, count_hit
def _adjust_autoincrement(context, value):
    """Restart the API DB aggregates id sequence at ``value``
    (PostgreSQL only; a no-op on other engines).
    """
    engine = db_api.get_api_engine()
    if engine.name == 'postgresql':
        # NOTE(danms): If we migrated some aggregates in the above function,
        # then we will have confused postgres' sequence for the autoincrement
        # primary key. MySQL does not care about this, but since postgres does,
        # we need to reset this to avoid a failure on the next aggregate
        # creation.
        engine.execute(
            text('ALTER SEQUENCE aggregates_id_seq RESTART WITH %i;' % (
                value)))


@db_api.api_context_manager.reader
def _get_max_aggregate_id(context):
    """Return the largest aggregate id in the API DB, or None if empty."""
    return context.session.query(func.max(api_models.Aggregate.id)).one()[0]


def migrate_aggregate_reset_autoincrement(ctxt, count):
    """Online migration hook: bump the autoincrement past the current
    maximum aggregate id.

    Always reports (0, 0) since no rows are actually migrated.
    """
    max_id = _get_max_aggregate_id(ctxt) or 0
    _adjust_autoincrement(ctxt, max_id + 1)
    return 0, 0
db_aggregates)

View File

@ -17,7 +17,6 @@ from oslo_db import exception as db_exc
from oslo_utils import timeutils
from nova import context
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
@ -115,19 +114,6 @@ class AggregateObjectDbTestCase(test.TestCase):
super(AggregateObjectDbTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
def test_in_api(self):
ca1 = _create_aggregate(self.context, values={'name': 'fake_agg_1',
'id': 1, 'uuid': uuidsentinel.agg})
ca2 = db.aggregate_create(self.context, {'name': 'fake_agg_2', 'id': 2,
'uuid': uuidsentinel.agg})
api_db_agg = aggregate_obj.Aggregate.get_by_id(self.context, ca1['id'])
cell_db_agg = aggregate_obj.Aggregate.get_by_id(
self.context, ca2['id'])
self.assertTrue(api_db_agg.in_api)
self.assertFalse(cell_db_agg.in_api)
def test_aggregate_get_from_db(self):
result = _create_aggregate_with_hosts(self.context)
expected = aggregate_obj._aggregate_get_from_db(self.context,
@ -463,19 +449,12 @@ class AggregateObjectDbTestCase(test.TestCase):
self.context, result['id'], 'foo_key')
def create_aggregate(context, db_id, in_api=True):
if in_api:
fake_aggregate = _get_fake_aggregate(db_id, in_api=False, result=False)
aggregate_obj._aggregate_create_in_db(context, fake_aggregate,
metadata=_get_fake_metadata(db_id))
for host in _get_fake_hosts(db_id):
aggregate_obj._host_add_to_db(context, fake_aggregate['id'], host)
else:
fake_aggregate = _get_fake_aggregate(db_id, in_api=False, result=False)
db.aggregate_create(context, fake_aggregate,
metadata=_get_fake_metadata(db_id))
for host in _get_fake_hosts(db_id):
db.aggregate_host_add(context, fake_aggregate['id'], host)
def create_aggregate(context, db_id):
fake_aggregate = _get_fake_aggregate(db_id, in_api=False, result=False)
aggregate_obj._aggregate_create_in_db(context, fake_aggregate,
metadata=_get_fake_metadata(db_id))
for host in _get_fake_hosts(db_id):
aggregate_obj._host_add_to_db(context, fake_aggregate['id'], host)
def compare_obj(test, result, source):
@ -488,17 +467,22 @@ def compare_obj(test, result, source):
comparators={'updated_at': updated_at_comparator})
class AggregateObjectCellTestCase(test.TestCase):
"""Tests for the case where all aggregate data is in Cell DB"""
class AggregateObjectTestCase(test.TestCase):
def setUp(self):
super(AggregateObjectCellTestCase, self).setUp()
super(AggregateObjectTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self._seed_data()
def _seed_data(self):
for i in range(1, 10):
create_aggregate(self.context, i, in_api=False)
create_aggregate(self.context, i)
def test_create(self):
new_agg = aggregate_obj.Aggregate(self.context)
new_agg.name = 'new-aggregate'
new_agg.create()
result = aggregate_obj.Aggregate.get_by_id(self.context, new_agg.id)
self.assertEqual(new_agg.name, result.name)
def test_get_by_id(self):
for i in range(1, 10):
@ -554,106 +538,3 @@ class AggregateObjectCellTestCase(test.TestCase):
result = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, fake_agg)
compare_obj(self, result, fake_agg)
class AggregateObjectApiTestCase(AggregateObjectCellTestCase):
"""Tests the aggregate in the case where all data is in the API DB"""
def _seed_data(self):
for i in range(1, 10):
create_aggregate(self.context, i)
def test_create(self):
new_agg = aggregate_obj.Aggregate(self.context)
new_agg.name = 'new-aggregate'
new_agg.create()
result = aggregate_obj.Aggregate.get_by_id(self.context, new_agg.id)
self.assertEqual(new_agg.name, result.name)
class AggregateObjectMixedTestCase(AggregateObjectCellTestCase):
"""Tests the aggregate in the case where data is in both databases"""
def _seed_data(self):
for i in range(1, 6):
create_aggregate(self.context, i)
for i in range(6, 10):
create_aggregate(self.context, i, in_api=False)
def test_create(self):
new_agg = aggregate_obj.Aggregate(self.context)
new_agg.name = 'new-aggregate'
self.assertRaises(exception.ObjectActionError,
new_agg.create)
class AggregateObjectMigrationTestCase(AggregateObjectCellTestCase):
"""Tests the aggregate in the case where data is migrated to the API db"""
def _seed_data(self):
for i in range(1, 10):
create_aggregate(self.context, i, in_api=False)
aggregate_obj.migrate_aggregates(self.context, 50)
def test_create(self):
new_agg = aggregate_obj.Aggregate(self.context)
new_agg.name = 'new-aggregate'
new_agg.create()
result = aggregate_obj.Aggregate.get_by_id(self.context, new_agg.id)
self.assertEqual(new_agg.name, result.name)
class AggregateMigrationTestCase(test.TestCase):
def setUp(self):
super(AggregateMigrationTestCase, self).setUp()
self.context = context.get_admin_context()
def test_migration(self):
db.aggregate_create(self.context, {'name': 'foo',
'uuid': uuidsentinel.agg_uuid})
main_aggregates_len = len(db.aggregate_get_all(self.context))
match, done = aggregate_obj.migrate_aggregates(self.context, 50)
self.assertEqual(1, main_aggregates_len)
self.assertEqual(main_aggregates_len, match)
self.assertEqual(main_aggregates_len, done)
self.assertEqual(0, len(db.aggregate_get_all(self.context)))
self.assertEqual(main_aggregates_len,
len(aggregate_obj.AggregateList.get_all(
self.context)))
def test_migrate_aggregate_reset_autoincrement(self):
agg = aggregate_obj.Aggregate(self.context, name='foo')
agg.create()
match, done = aggregate_obj.migrate_aggregate_reset_autoincrement(
self.context, 0)
self.assertEqual(0, match)
self.assertEqual(0, done)
def test_migrate_aggregate_reset_autoincrement_no_aggregates(self):
# NOTE(danms): This validates the "or 0" default if there are no
# aggregates (and thus no max id).
match, done = aggregate_obj.migrate_aggregate_reset_autoincrement(
self.context, 0)
self.assertEqual(0, match)
self.assertEqual(0, done)
@mock.patch('nova.objects.aggregate.LOG.error')
def test_migrate_aggregates_duplicate_unicode(self, mock_log_error):
"""Tests that we handle a duplicate aggregate when migrating and that
we handle when the exception message is in unicode.
"""
# First create an aggregate that will be migrated from main to API DB.
create_aggregate(self.context, 1, in_api=False)
# Now create that same aggregate in the API DB.
create_aggregate(self.context, 1, in_api=True)
# Now let's run the online data migration which will fail to create
# a duplicate aggregate in the API database and will raise
# AggregateNameExists which we want to modify to have a unicode
# message.
with mock.patch.object(exception.AggregateNameExists, 'msg_fmt',
u'\xF0\x9F\x92\xA9'):
match, done = aggregate_obj.migrate_aggregates(self.context, 50)
# we found one
self.assertEqual(1, match)
# but we didn't migrate it
self.assertEqual(0, done)
# and we logged an error for the duplicate aggregate
mock_log_error.assert_called()

View File

@ -25,6 +25,7 @@ from nova.compute import api as compute_api
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
@ -213,10 +214,12 @@ class ServersControllerCreateTestV21(test.TestCase):
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
agg = db.aggregate_create(admin_context,
{'name': 'agg1', 'uuid': uuidsentinel.agg_uuid},
{'availability_zone': 'nova'})
db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
agg = objects.Aggregate(admin_context,
name='agg1',
uuid=uuidsentinel.agg_uuid,
metadata={'availability_zone': 'nova'})
agg.create()
agg.add_host('host1_zones')
return self.req, body
def test_create_instance_with_availability_zone(self):

View File

@ -12161,9 +12161,10 @@ class ComputeAggrTestCase(BaseTestCase):
def setUp(self):
super(ComputeAggrTestCase, self).setUp()
self.context = context.get_admin_context()
values = {'name': 'test_aggr'}
az = {'availability_zone': 'test_zone'}
self.aggr = db.aggregate_create(self.context, values, metadata=az)
self.aggr = objects.Aggregate(self.context, name='test_aggr',
metadata=az)
self.aggr.create()
def test_add_aggregate_host(self):
def fake_driver_add_to_aggregate(self, context, aggregate, host,
@ -12194,7 +12195,7 @@ class ComputeAggrTestCase(BaseTestCase):
def test_add_aggregate_host_passes_slave_info_to_driver(self):
def driver_add_to_aggregate(cls, context, aggregate, host, **kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(aggregate.id, self.aggr.id)
self.assertEqual(host, "the_host")
self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
@ -12209,7 +12210,7 @@ class ComputeAggrTestCase(BaseTestCase):
def driver_remove_from_aggregate(cls, context, aggregate, host,
**kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(aggregate.id, self.aggr.id)
self.assertEqual(host, "the_host")
self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))

View File

@ -64,7 +64,6 @@ from nova.objects import fields
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_console_auth_token
from nova.tests.unit import matchers
from nova.tests import uuidsentinel
from nova import utils
@ -595,420 +594,6 @@ class EngineFacadeTestCase(DbTestCase):
self.assertEqual(parent_session, child_session)
class AggregateDBApiTestCase(test.TestCase):
    """Exercise the aggregate CRUD surface of the (cell) DB API.

    Covers create/get/update/delete of aggregates plus their metadata and
    host associations, including the not-found / already-exists error
    paths.  Relies on module-level helpers defined elsewhere in this file:
    _create_aggregate, _create_aggregate_with_hosts, _get_fake_aggr_values,
    _get_fake_aggr_metadata and _get_fake_aggr_hosts.
    """
    def setUp(self):
        super(AggregateDBApiTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        # Non-admin context; individual tests elevate via
        # context.get_admin_context() where admin access is required.
        self.context = context.RequestContext(self.user_id, self.project_id)
    def test_aggregate_create_no_metadata(self):
        # metadata=None must be accepted and simply create a bare aggregate.
        result = _create_aggregate(metadata=None)
        self.assertEqual(result['name'], 'fake_aggregate')
    def test_aggregate_create_avoid_name_conflict(self):
        # A soft-deleted aggregate's name must be reusable by a new one.
        r1 = _create_aggregate(metadata=None)
        db.aggregate_delete(context.get_admin_context(), r1['id'])
        values = {'name': r1['name']}
        metadata = {'availability_zone': 'new_zone'}
        r2 = _create_aggregate(values=values, metadata=metadata)
        self.assertEqual(r2['name'], values['name'])
        self.assertEqual(r2['availability_zone'],
                         metadata['availability_zone'])
    def test_aggregate_create_raise_exist_exc(self):
        # Creating two live aggregates with the same name must fail.
        _create_aggregate(metadata=None)
        self.assertRaises(exception.AggregateNameExists,
                          _create_aggregate, metadata=None)
    def test_aggregate_get_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_get,
                          ctxt, aggregate_id)
    def test_aggregate_get_by_uuid_raise_not_found(self):
        ctxt = context.get_admin_context()
        aggregate_uuid = uuidsentinel.missing_aggregate_uuid
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_get_by_uuid,
                          ctxt, aggregate_uuid)
    def test_aggregate_metadata_get_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_metadata_get,
                          ctxt, aggregate_id)
    def test_aggregate_create_with_metadata(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(expected_metadata,
                        matchers.DictMatches(_get_fake_aggr_metadata()))
    def test_aggregate_create_delete_create_with_metadata(self):
        # test for bug 1052479
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(expected_metadata,
                        matchers.DictMatches(_get_fake_aggr_metadata()))
        db.aggregate_delete(ctxt, result['id'])
        # Recreating after delete must not resurrect the old metadata.
        result = _create_aggregate(metadata={'availability_zone':
            'fake_avail_zone'})
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertEqual(expected_metadata, {'availability_zone':
            'fake_avail_zone'})
    def test_aggregate_get(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt)
        expected = db.aggregate_get(ctxt, result['id'])
        # aggregate_get returns hosts and metadata (as 'metadetails') inline.
        self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
        self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
    def test_aggregate_get_by_uuid(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt)
        expected = db.aggregate_get_by_uuid(ctxt, result['uuid'])
        self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
        self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
    def test_aggregate_get_by_host(self):
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate2'}
        values3 = {'name': 'fake_aggregate3'}
        values4 = {'name': 'fake_aggregate4'}
        values5 = {'name': 'fake_aggregate5'}
        a1 = _create_aggregate_with_hosts(context=ctxt)
        a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
        # a3 has no hosts and should not be in the results.
        _create_aggregate(context=ctxt, values=values3)
        # a4 has no matching hosts.
        _create_aggregate_with_hosts(context=ctxt, values=values4,
                hosts=['foo4.openstack.org'])
        # a5 has no matching hosts after deleting the only matching host.
        a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
                hosts=['foo5.openstack.org', 'foo.openstack.org'])
        db.aggregate_host_delete(ctxt, a5['id'],
                                 'foo.openstack.org')
        r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
        # Only a1 and a2 still contain 'foo.openstack.org'.
        self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
    def test_aggregate_get_by_host_with_key(self):
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate2'}
        values3 = {'name': 'fake_aggregate3'}
        values4 = {'name': 'fake_aggregate4'}
        a1 = _create_aggregate_with_hosts(context=ctxt,
                                          metadata={'goodkey': 'good'})
        _create_aggregate_with_hosts(context=ctxt, values=values2)
        _create_aggregate(context=ctxt, values=values3)
        _create_aggregate_with_hosts(context=ctxt, values=values4,
                hosts=['foo4.openstack.org'], metadata={'goodkey': 'bad'})
        # filter result by key
        r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
        self.assertEqual([a1['id']], [x['id'] for x in r1])
    def test_aggregate_metadata_get_by_host(self):
        ctxt = context.get_admin_context()
        values = {'name': 'fake_aggregate2'}
        values2 = {'name': 'fake_aggregate3'}
        _create_aggregate_with_hosts(context=ctxt)
        _create_aggregate_with_hosts(context=ctxt, values=values)
        _create_aggregate_with_hosts(context=ctxt, values=values2,
                hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
        r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
        # Values are sets, since different aggregates may define the same key.
        self.assertEqual(r1['fake_key1'], set(['fake_value1']))
        # Metadata from the aggregate the host is not in must be excluded.
        self.assertNotIn('badkey', r1)
    def test_aggregate_metadata_get_by_host_with_key(self):
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate12'}
        values3 = {'name': 'fake_aggregate23'}
        a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
        a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
        a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
        a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
        _create_aggregate_with_hosts(context=ctxt)
        _create_aggregate_with_hosts(context=ctxt, values=values2,
                hosts=a2_hosts, metadata=a2_metadata)
        a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
                hosts=a3_hosts, metadata=a3_metadata)
        r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
                                               key='good')
        # foo2 is in both a2 and a3, so both values for 'good' are merged.
        self.assertEqual(r1['good'], set(['value12', 'value23']))
        self.assertNotIn('fake_key1', r1)
        self.assertNotIn('bad', r1)
        # Delete metadata
        db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
        r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo3.openstack.org',
                                               key='good')
        self.assertNotIn('good', r2)
    def test_aggregate_get_by_host_not_found(self):
        # An unknown host yields an empty list, not an exception.
        ctxt = context.get_admin_context()
        _create_aggregate_with_hosts(context=ctxt)
        self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
    def test_aggregate_delete_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_delete,
                          ctxt, aggregate_id)
    def test_aggregate_delete(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        db.aggregate_delete(ctxt, result['id'])
        expected = db.aggregate_get_all(ctxt)
        self.assertEqual(0, len(expected))
        # Soft delete: the row remains readable with read_deleted='yes' and
        # its 'deleted' column is set to the row id (SoftDeleteMixin style).
        aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
                                     result['id'])
        self.assertEqual(aggregate['deleted'], result['id'])
    def test_aggregate_update(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata={'availability_zone':
            'fake_avail_zone'})
        self.assertEqual(result['availability_zone'], 'fake_avail_zone')
        new_values = _get_fake_aggr_values()
        new_values['availability_zone'] = 'different_avail_zone'
        updated = db.aggregate_update(ctxt, result['id'], new_values)
        self.assertNotEqual(result['availability_zone'],
                            updated['availability_zone'])
    def test_aggregate_update_with_metadata(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        values['availability_zone'] = 'different_avail_zone'
        # availability_zone passed in values is stored as a metadata key.
        expected_metadata = copy.deepcopy(values['metadata'])
        expected_metadata['availability_zone'] = values['availability_zone']
        db.aggregate_update(ctxt, result['id'], values)
        metadata = db.aggregate_metadata_get(ctxt, result['id'])
        updated = db.aggregate_get(ctxt, result['id'])
        self.assertThat(metadata,
                        matchers.DictMatches(expected_metadata))
        self.assertNotEqual(result['availability_zone'],
                            updated['availability_zone'])
    def test_aggregate_update_with_existing_metadata(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        # Overwrite an existing key; update must replace, not duplicate.
        values['metadata']['fake_key1'] = 'foo'
        expected_metadata = copy.deepcopy(values['metadata'])
        db.aggregate_update(ctxt, result['id'], values)
        metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected_metadata))
    def test_aggregate_update_zone_with_existing_metadata(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        new_zone = {'availability_zone': 'fake_avail_zone_2'}
        metadata = _get_fake_aggr_metadata()
        metadata.update(new_zone)
        # Updating only the AZ must leave the other metadata keys intact.
        db.aggregate_update(ctxt, result['id'], new_zone)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))
    def test_aggregate_update_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        new_values = _get_fake_aggr_values()
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_update, ctxt, aggregate_id, new_values)
    def test_aggregate_update_raise_name_exist(self):
        # Renaming onto another live aggregate's name must be rejected.
        ctxt = context.get_admin_context()
        _create_aggregate(context=ctxt, values={'name': 'test1'},
                          metadata={'availability_zone': 'fake_avail_zone'})
        _create_aggregate(context=ctxt, values={'name': 'test2'},
                          metadata={'availability_zone': 'fake_avail_zone'})
        aggregate_id = 1
        new_values = {'name': 'test2'}
        self.assertRaises(exception.AggregateNameExists,
                          db.aggregate_update, ctxt, aggregate_id, new_values)
    def test_aggregate_get_all(self):
        ctxt = context.get_admin_context()
        counter = 3
        for c in range(counter):
            _create_aggregate(context=ctxt,
                              values={'name': 'fake_aggregate_%d' % c},
                              metadata=None)
        results = db.aggregate_get_all(ctxt)
        self.assertEqual(len(results), counter)
    def test_aggregate_get_all_non_deleted(self):
        # get_all must exclude soft-deleted aggregates.
        ctxt = context.get_admin_context()
        add_counter = 5
        remove_counter = 2
        aggregates = []
        for c in range(1, add_counter):
            values = {'name': 'fake_aggregate_%d' % c}
            aggregates.append(_create_aggregate(context=ctxt,
                                                values=values, metadata=None))
        for c in range(1, remove_counter):
            db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
        results = db.aggregate_get_all(ctxt)
        self.assertEqual(len(results), add_counter - remove_counter)
    def test_aggregate_metadata_add(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        metadata = _get_fake_aggr_metadata()
        db.aggregate_metadata_add(ctxt, result['id'], metadata)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))
    def test_aggregate_metadata_add_empty_metadata(self):
        # Adding an empty dict is a harmless no-op.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        metadata = {}
        db.aggregate_metadata_add(ctxt, result['id'], metadata)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))
    def test_aggregate_metadata_add_and_update(self):
        # metadata_add upserts: existing keys are updated, new keys added.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        metadata = _get_fake_aggr_metadata()
        key = list(metadata.keys())[0]
        new_metadata = {key: 'foo',
                        'fake_new_key': 'fake_new_value'}
        metadata.update(new_metadata)
        db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))
    def test_aggregate_metadata_add_retry(self):
        # On persistent DBDuplicateEntry the add is retried max_retries
        # times before the exception is finally propagated.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        def counted():
            # Stub query factory that counts calls and always raises a
            # duplicate-entry error to force the retry loop.
            def get_query(context, id, read_deleted):
                get_query.counter += 1
                raise db_exc.DBDuplicateEntry
            get_query.counter = 0
            return get_query
        get_query = counted()
        self.stub_out('nova.db.sqlalchemy.api._aggregate_metadata_get_query',
                      get_query)
        self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
                          aggregate_metadata_add, ctxt, result['id'], {},
                          max_retries=5)
        self.assertEqual(get_query.counter, 5)
    def test_aggregate_metadata_update(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        metadata = _get_fake_aggr_metadata()
        key = list(metadata.keys())[0]
        # Delete then re-add the key with a new value.
        db.aggregate_metadata_delete(ctxt, result['id'], key)
        new_metadata = {key: 'foo'}
        db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        metadata[key] = 'foo'
        self.assertThat(metadata, matchers.DictMatches(expected))
    def test_aggregate_metadata_delete(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        metadata = _get_fake_aggr_metadata()
        db.aggregate_metadata_add(ctxt, result['id'], metadata)
        db.aggregate_metadata_delete(ctxt, result['id'],
                                     list(metadata.keys())[0])
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        del metadata[list(metadata.keys())[0]]
        self.assertThat(metadata, matchers.DictMatches(expected))
    def test_aggregate_remove_availability_zone(self):
        # Deleting the availability_zone metadata key clears the
        # aggregate's availability_zone attribute as well.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata={'availability_zone':
            'fake_avail_zone'})
        db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        aggregate = db.aggregate_get(ctxt, result['id'])
        self.assertIsNone(aggregate['availability_zone'])
        self.assertThat({}, matchers.DictMatches(expected))
    def test_aggregate_metadata_delete_raise_not_found(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        self.assertRaises(exception.AggregateMetadataNotFound,
                          db.aggregate_metadata_delete,
                          ctxt, result['id'], 'foo_key')
    def test_aggregate_host_add(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        self.assertEqual(_get_fake_aggr_hosts(), expected)
    def test_aggregate_host_re_add(self):
        # A host removed from an aggregate can be added back again.
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        host = _get_fake_aggr_hosts()[0]
        db.aggregate_host_delete(ctxt, result['id'], host)
        db.aggregate_host_add(ctxt, result['id'], host)
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        self.assertEqual(len(expected), 1)
    def test_aggregate_host_add_duplicate_works(self):
        # The same host may belong to two different aggregates.
        ctxt = context.get_admin_context()
        r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        r2 = _create_aggregate_with_hosts(ctxt,
                          values={'name': 'fake_aggregate2'},
                          metadata={'availability_zone': 'fake_avail_zone2'})
        h1 = db.aggregate_host_get_all(ctxt, r1['id'])
        h2 = db.aggregate_host_get_all(ctxt, r2['id'])
        self.assertEqual(h1, h2)
    def test_aggregate_host_add_duplicate_raise_exist_exc(self):
        # ...but adding it twice to the same aggregate must fail.
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        self.assertRaises(exception.AggregateHostExists,
                          db.aggregate_host_add,
                          ctxt, result['id'], _get_fake_aggr_hosts()[0])
    def test_aggregate_host_add_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        host = _get_fake_aggr_hosts()[0]
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_host_add,
                          ctxt, aggregate_id, host)
    def test_aggregate_host_delete(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        db.aggregate_host_delete(ctxt, result['id'],
                                 _get_fake_aggr_hosts()[0])
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        self.assertEqual(0, len(expected))
    def test_aggregate_host_delete_raise_not_found(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        self.assertRaises(exception.AggregateHostNotFound,
                          db.aggregate_host_delete,
                          ctxt, result['id'], _get_fake_aggr_hosts()[0])
class SqlAlchemyDbApiNoDbTestCase(test.NoDBTestCase):
"""No-DB test class for simple test cases that do not require a backend."""

View File

@ -240,12 +240,6 @@ class TestNewtonCheck(test.TestCase):
'330_enforce_mitaka_online_migrations')
self.engine = db_api.get_engine()
def test_aggregate_not_migrated(self):
agg = db_api.aggregate_create(self.context, {"name": "foobar"})
db_api.aggregate_update(self.context, agg.id, {'uuid': None})
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def setup_pci_device(self, dev_type):
# NOTE(jaypipes): We cannot use db_api.pci_device_update() here because
# newer models of PciDevice contain fields (uuid) that are not present
@ -348,16 +342,6 @@ class TestOcataCheck(test.TestCase):
keypair['user_id'], keypair['name'])
self.migration.upgrade(self.engine)
def test_upgrade_dirty_aggregates(self):
db_api.aggregate_create(self.context, self.aggregate_values)
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_upgrade_with_deleted_aggregates(self):
agg = db_api.aggregate_create(self.context, self.aggregate_values)
db_api.aggregate_delete(self.context, agg['id'])
self.migration.upgrade(self.engine)
def test_upgrade_dirty_instance_groups(self):
db_api.instance_group_create(self.context, self.ig_values)
self.assertRaises(exception.ValidationError,

View File

@ -15,7 +15,6 @@
import mock
from oslo_utils import timeutils
from nova import db
from nova import exception
from nova.objects import aggregate
from nova.tests.unit import fake_notifier
@ -25,18 +24,8 @@ from nova.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
fake_aggregate = {
'created_at': NOW,
'updated_at': None,
'deleted': 0,
'deleted_at': None,
'deleted': False,
'id': 123,
'uuid': uuidsentinel.fake_aggregate,
'name': 'fake-aggregate',
'hosts': ['foo', 'bar'],
'metadetails': {'this': 'that'},
}
fake_api_aggregate = {
'created_at': NOW,
'updated_at': None,
'id': 123,
@ -52,53 +41,24 @@ SUBS = {'metadata': 'metadetails'}
class _TestAggregateObject(object):
@mock.patch('nova.objects.aggregate._aggregate_get_from_db')
@mock.patch('nova.db.aggregate_get')
def test_get_by_id_from_api(self, mock_get, mock_get_api):
mock_get_api.return_value = fake_api_aggregate
def test_get_by_id_from_api(self, mock_get_api):
mock_get_api.return_value = fake_aggregate
agg = aggregate.Aggregate.get_by_id(self.context, 123)
self.compare_obj(agg, fake_aggregate, subs=SUBS)
mock_get_api.assert_called_once_with(self.context, 123)
self.assertFalse(mock_get.called)
@mock.patch('nova.objects.aggregate._aggregate_get_from_db')
@mock.patch('nova.db.aggregate_get')
def test_get_by_id(self, mock_get, mock_get_api):
mock_get_api.side_effect = exception.AggregateNotFound(
aggregate_id=123)
mock_get.return_value = fake_aggregate
agg = aggregate.Aggregate.get_by_id(self.context, 123)
self.compare_obj(agg, fake_aggregate, subs=SUBS)
mock_get_api.assert_called_once_with(self.context, 123)
mock_get.assert_called_once_with(self.context, 123)
@mock.patch('nova.objects.aggregate._aggregate_get_from_db_by_uuid')
@mock.patch('nova.db.aggregate_get_by_uuid')
def test_get_by_uuid(self, get_by_uuid, get_by_uuid_api):
get_by_uuid_api.side_effect = exception.AggregateNotFound(
aggregate_id=123)
get_by_uuid.return_value = fake_aggregate
agg = aggregate.Aggregate.get_by_uuid(self.context,
uuidsentinel.fake_aggregate)
self.assertEqual(uuidsentinel.fake_aggregate, agg.uuid)
self.assertEqual(fake_aggregate['id'], agg.id)
@mock.patch('nova.objects.aggregate._aggregate_get_from_db_by_uuid')
@mock.patch('nova.db.aggregate_get_by_uuid')
def test_get_by_uuid_from_api(self, get_by_uuid, get_by_uuid_api):
def test_get_by_uuid_from_api(self, get_by_uuid_api):
get_by_uuid_api.return_value = fake_aggregate
agg = aggregate.Aggregate.get_by_uuid(self.context,
uuidsentinel.fake_aggregate)
self.assertEqual(uuidsentinel.fake_aggregate, agg.uuid)
self.assertEqual(fake_aggregate['id'], agg.id)
self.assertFalse(get_by_uuid.called)
@mock.patch('nova.objects.aggregate._aggregate_create_in_db')
@mock.patch('nova.db.aggregate_create')
def test_create(self, create_mock, api_create_mock):
def test_create(self, api_create_mock):
api_create_mock.return_value = fake_aggregate
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
@ -109,7 +69,6 @@ class _TestAggregateObject(object):
self.context,
{'name': 'foo', 'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'})
self.assertFalse(create_mock.called)
self.compare_obj(agg, fake_aggregate, subs=SUBS)
api_create_mock.assert_called_once_with(self.context,
@ -117,8 +76,7 @@ class _TestAggregateObject(object):
metadata={'one': 'two'})
@mock.patch('nova.objects.aggregate._aggregate_create_in_db')
@mock.patch.object(db, 'aggregate_create')
def test_recreate_fails(self, create_mock, api_create_mock):
def test_recreate_fails(self, api_create_mock):
api_create_mock.return_value = fake_aggregate
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
@ -132,44 +90,14 @@ class _TestAggregateObject(object):
metadata={'one': 'two'})
@mock.patch('nova.objects.aggregate._aggregate_delete_from_db')
@mock.patch('nova.db.aggregate_delete')
def test_destroy(self, delete_mock, api_delete_mock):
def test_destroy(self, api_delete_mock):
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.destroy()
self.assertFalse(delete_mock.called)
api_delete_mock.assert_called_with(self.context, 123)
@mock.patch('nova.objects.aggregate._aggregate_delete_from_db')
@mock.patch('nova.db.aggregate_delete')
def test_destroy_cell(self, delete_mock, api_delete_mock):
api_delete_mock.side_effect = exception.AggregateNotFound(
aggregate_id=123)
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.destroy()
delete_mock.assert_called_with(self.context, 123)
api_delete_mock.assert_called_with(self.context, 123)
@mock.patch('nova.objects.aggregate._aggregate_update_to_db')
@mock.patch('nova.db.aggregate_update')
def test_save_to_cell(self, update_mock, api_update_mock):
api_update_mock.side_effect = exception.AggregateNotFound(
aggregate_id='foo')
update_mock.return_value = fake_aggregate
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.name = 'fake-aggregate'
agg.save()
self.compare_obj(agg, fake_aggregate, subs=SUBS)
update_mock.assert_called_once_with(self.context,
123,
{'name': 'fake-aggregate'})
self.assertTrue(api_update_mock.called)
@mock.patch('nova.objects.aggregate._aggregate_update_to_db')
@mock.patch('nova.db.aggregate_update')
def test_save_to_api(self, update_mock, api_update_mock):
def test_save_to_api(self, api_update_mock):
api_update_mock.return_value = fake_aggregate
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
@ -179,7 +107,6 @@ class _TestAggregateObject(object):
api_update_mock.assert_called_once_with(self.context,
123,
{'name': 'fake-api-aggregate'})
self.assertFalse(update_mock.called)
api_update_mock.assert_called_once_with(self.context,
123, {'name': 'fake-api-aggregate'})
@ -195,49 +122,8 @@ class _TestAggregateObject(object):
@mock.patch('nova.objects.aggregate._metadata_delete_from_db')
@mock.patch('nova.objects.aggregate._metadata_add_to_db')
@mock.patch('nova.db.aggregate_metadata_delete')
@mock.patch('nova.db.aggregate_metadata_add')
def test_update_metadata(self,
mock_metadata_add,
mock_metadata_delete,
mock_api_metadata_add,
mock_api_metadata_delete):
fake_notifier.NOTIFICATIONS = []
agg = aggregate.Aggregate()
agg._context = self.context
agg.id = 123
agg.metadata = {'foo': 'bar'}
agg.obj_reset_changes()
agg.update_metadata({'todelete': None, 'toadd': 'myval'})
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
self.assertEqual({'todelete': None, 'toadd': 'myval'},
msg.payload['meta_data'])
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
self.assertEqual({'todelete': None, 'toadd': 'myval'},
msg.payload['meta_data'])
self.assertEqual({'foo': 'bar', 'toadd': 'myval'}, agg.metadata)
mock_metadata_add.assert_called_once_with(self.context, 123,
{'toadd': 'myval'})
mock_metadata_delete.assert_called_once_with(self.context, 123,
'todelete')
self.assertFalse(mock_api_metadata_add.called)
self.assertFalse(mock_api_metadata_delete.called)
@mock.patch('nova.objects.Aggregate.in_api')
@mock.patch('nova.objects.aggregate._metadata_delete_from_db')
@mock.patch('nova.objects.aggregate._metadata_add_to_db')
@mock.patch('nova.db.aggregate_metadata_delete')
@mock.patch('nova.db.aggregate_metadata_add')
def test_update_metadata_api(self,
mock_metadata_add,
mock_metadata_delete,
mock_api_metadata_add,
mock_api_metadata_delete,
mock_in_api):
mock_in_api.return_value = True
def test_update_metadata_api(self, mock_api_metadata_add,
mock_api_metadata_delete):
fake_notifier.NOTIFICATIONS = []
agg = aggregate.Aggregate()
agg._context = self.context
@ -259,9 +145,6 @@ class _TestAggregateObject(object):
'todelete')
mock_api_metadata_add.assert_called_once_with(self.context, 123,
{'toadd': 'myval'})
self.assertFalse(mock_metadata_add.called)
self.assertFalse(mock_metadata_delete.called)
mock_api_metadata_delete.assert_called_once_with(self.context,
123,
'todelete')
@ -269,23 +152,9 @@ class _TestAggregateObject(object):
123,
{'toadd': 'myval'})
@mock.patch.object(db, 'aggregate_host_add')
def test_add_host(self, mock_host_add):
mock_host_add.return_value = {'host': 'bar'}
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo']
agg._context = self.context
agg.add_host('bar')
self.assertEqual(agg.hosts, ['foo', 'bar'])
mock_host_add.assert_called_once_with(self.context, 123, 'bar')
@mock.patch('nova.db.aggregate_host_add')
@mock.patch('nova.objects.aggregate._host_add_to_db')
@mock.patch('nova.objects.Aggregate.in_api')
def test_add_host_api(self, mock_in_api, mock_host_add_api, mock_host_add):
def test_add_host_api(self, mock_host_add_api):
mock_host_add_api.return_value = {'host': 'bar'}
mock_in_api.return_value = True
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo']
@ -293,25 +162,9 @@ class _TestAggregateObject(object):
agg.add_host('bar')
self.assertEqual(agg.hosts, ['foo', 'bar'])
mock_host_add_api.assert_called_once_with(self.context, 123, 'bar')
self.assertFalse(mock_host_add.called)
@mock.patch.object(db, 'aggregate_host_delete')
def test_delete_host(self, mock_host_delete):
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo', 'bar']
agg._context = self.context
agg.delete_host('foo')
self.assertEqual(agg.hosts, ['bar'])
mock_host_delete.assert_called_once_with(self.context, 123, 'foo')
@mock.patch('nova.db.aggregate_host_delete')
@mock.patch('nova.objects.aggregate._host_delete_from_db')
@mock.patch('nova.objects.Aggregate.in_api')
def test_delete_host_api(self, mock_in_api,
mock_host_delete_api,
mock_host_delete):
mock_in_api.return_value = True
def test_delete_host_api(self, mock_host_delete_api):
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo', 'bar']
@ -319,7 +172,6 @@ class _TestAggregateObject(object):
agg.delete_host('foo')
self.assertEqual(agg.hosts, ['bar'])
mock_host_delete_api.assert_called_once_with(self.context, 123, 'foo')
self.assertFalse(mock_host_delete.called)
def test_availability_zone(self):
agg = aggregate.Aggregate()
@ -327,45 +179,35 @@ class _TestAggregateObject(object):
self.assertEqual('foo', agg.availability_zone)
@mock.patch('nova.objects.aggregate._get_all_from_db')
@mock.patch('nova.db.aggregate_get_all')
def test_get_all(self, mock_get_all, mock_api_get_all):
mock_get_all.return_value = [fake_aggregate]
mock_api_get_all.return_value = [fake_api_aggregate]
def test_get_all(self, mock_api_get_all):
mock_api_get_all.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_all(self.context)
self.assertEqual(2, len(aggs))
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
self.compare_obj(aggs[1], fake_api_aggregate, subs=SUBS)
@mock.patch('nova.objects.aggregate._get_by_host_from_db')
@mock.patch('nova.db.aggregate_get_by_host')
def test_by_host(self, mock_get_by_host, mock_api_get_by_host):
mock_get_by_host.return_value = [fake_aggregate]
mock_api_get_by_host.return_value = [fake_api_aggregate]
def test_by_host(self, mock_api_get_by_host):
mock_api_get_by_host.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_host(self.context, 'fake-host')
self.assertEqual(2, len(aggs))
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
self.compare_obj(aggs[1], fake_api_aggregate, subs=SUBS)
@mock.patch('nova.objects.aggregate._get_by_metadata_key_from_db')
@mock.patch('nova.db.aggregate_get_by_metadata_key')
def test_get_by_metadata_key(self,
mock_get_by_metadata_key,
mock_api_get_by_metadata_key):
mock_get_by_metadata_key.return_value = [fake_aggregate]
mock_api_get_by_metadata_key.return_value = [fake_api_aggregate]
def test_get_by_metadata_key(self, mock_api_get_by_metadata_key):
mock_api_get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this')
self.assertEqual(2, len(aggs))
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
@mock.patch('nova.db.aggregate_get_by_metadata_key')
@mock.patch('nova.objects.aggregate._get_by_metadata_key_from_db')
def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this', hosts=['baz'])
self.assertEqual(0, len(aggs))
@mock.patch('nova.db.aggregate_get_by_metadata_key')
@mock.patch('nova.objects.aggregate._get_by_metadata_key_from_db')
def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(

View File

@ -44,21 +44,19 @@ class AvailabilityZoneTestCases(test.TestCase):
self.agg = self._create_az('az_agg', self.availability_zone)
def tearDown(self):
db.aggregate_delete(self.context, self.agg['id'])
self.agg.destroy()
super(AvailabilityZoneTestCases, self).tearDown()
def _create_az(self, agg_name, az_name):
agg_meta = {'name': agg_name, 'uuid': uuidsentinel.agg_uuid}
agg = db.aggregate_create(self.context, agg_meta)
metadata = {'availability_zone': az_name}
db.aggregate_metadata_add(self.context, agg['id'], metadata)
agg_meta = {'name': agg_name, 'uuid': uuidsentinel.agg_uuid,
'metadata': {'availability_zone': az_name}}
agg = objects.Aggregate(self.context, **agg_meta)
agg.create()
agg = objects.Aggregate.get_by_id(self.context, agg.id)
return agg
def _update_az(self, aggregate, az_name):
metadata = {'availability_zone': az_name}
db.aggregate_update(self.context, aggregate['id'], metadata)
aggregate.update_metadata({'availability_zone': az_name})
def _create_service_with_topic(self, topic, host, disabled=False):
values = {
@ -73,12 +71,10 @@ class AvailabilityZoneTestCases(test.TestCase):
return db.service_destroy(self.context, service['id'])
def _add_to_aggregate(self, service, aggregate):
return db.aggregate_host_add(self.context,
aggregate['id'], service['host'])
aggregate.add_host(service['host'])
def _delete_from_aggregate(self, service, aggregate):
return db.aggregate_host_delete(self.context,
aggregate['id'], service['host'])
aggregate.delete_host(service['host'])
def test_rest_availability_zone_reset_cache(self):
az._get_cache().add('cache', 'fake_value')

View File

@ -1427,7 +1427,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase,
return [test_aggregate.fake_aggregate]
else:
return []
self.stub_out('nova.db.aggregate_get_by_host',
self.stub_out('nova.objects.aggregate._get_by_host_from_db',
fake_aggregate_get)
def fake_host_find(context, session, src, dst):