From 9ad61b180162cc99302f30981dabb31104a9ddd9 Mon Sep 17 00:00:00 2001
From: Julien Danjou
Date: Wed, 6 Apr 2016 15:48:06 +0200
Subject: [PATCH] Remove the deprecated DB2 driver

Change-Id: I0718da0d8cd3a63708117d770b1bf21f9ce3bfe1
---
 ceilometer/event/storage/impl_db2.py           |  69 ---
 ceilometer/event/storage/pymongo_base.py       |   4 +-
 ceilometer/storage/__init__.py                 |  11 -
 ceilometer/storage/impl_db2.py                 | 419 ------------------
 ceilometer/storage/mongo/utils.py              |   2 +-
 ceilometer/storage/pymongo_base.py             |   5 +-
 ceilometer/tests/db.py                         |   1 -
 .../functional/api/v2/test_event_scenarios.py  |   8 +-
 .../api/v2/test_statistics_scenarios.py        |   8 +-
 .../tests/functional/storage/test_impl_db2.py  | 145 ------
 .../functional/storage/test_pymongo_base.py    |   6 +-
 .../storage/test_storage_scenarios.py          |   6 +-
 setup.cfg                                      |   2 -
 13 files changed, 19 insertions(+), 667 deletions(-)
 delete mode 100644 ceilometer/event/storage/impl_db2.py
 delete mode 100644 ceilometer/storage/impl_db2.py
 delete mode 100644 ceilometer/tests/functional/storage/test_impl_db2.py

diff --git a/ceilometer/event/storage/impl_db2.py b/ceilometer/event/storage/impl_db2.py
deleted file mode 100644
index 2d88678b..00000000
--- a/ceilometer/event/storage/impl_db2.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""DB2 storage backend
-"""
-import pymongo
-
-from ceilometer.event.storage import pymongo_base
-from ceilometer import storage
-from ceilometer.storage.mongo import utils as pymongo_utils
-
-
-class Connection(pymongo_base.Connection):
-    """The db2 event storage for Ceilometer."""
-
-    CONNECTION_POOL = pymongo_utils.ConnectionPool()
-
-    def __init__(self, url):
-
-        # Since we are using pymongo, even though we are connecting to DB2
-        # we still have to make sure that the scheme which used to distinguish
-        # db2 driver from mongodb driver be replaced so that pymongo will not
-        # produce an exception on the scheme.
-        url = url.replace('db2:', 'mongodb:', 1)
-        self.conn = self.CONNECTION_POOL.connect(url)
-
-        # Require MongoDB 2.2 to use aggregate(), since we are using mongodb
-        # as backend for test, the following code is necessary to make sure
-        # that the test wont try aggregate on older mongodb during the test.
-        # For db2, the versionArray won't be part of the server_info, so there
-        # will not be exception when real db2 gets used as backend.
-        server_info = self.conn.server_info()
-        if server_info.get('sysInfo'):
-            self._using_mongodb = True
-        else:
-            self._using_mongodb = False
-
-        if self._using_mongodb and server_info.get('versionArray') < [2, 2]:
-            raise storage.StorageBadVersion("Need at least MongoDB 2.2")
-
-        connection_options = pymongo.uri_parser.parse_uri(url)
-        self.db = getattr(self.conn, connection_options['database'])
-        if connection_options.get('username'):
-            self.db.authenticate(connection_options['username'],
-                                 connection_options['password'])
-
-        self.upgrade()
-
-    def upgrade(self):
-        # create collection if not present
-        if 'event' not in self.db.conn.collection_names():
-            self.db.conn.create_collection('event')
-
-    def clear(self):
-        # drop_database command does nothing on db2 database since this has
-        # not been implemented. However calling this method is important for
-        # removal of all the empty dbs created during the test runs since
-        # test run is against mongodb on Jenkins
-        self.conn.drop_database(self.db.name)
-        self.conn.close()
diff --git a/ceilometer/event/storage/pymongo_base.py b/ceilometer/event/storage/pymongo_base.py
index 594b931c..be53316d 100644
--- a/ceilometer/event/storage/pymongo_base.py
+++ b/ceilometer/event/storage/pymongo_base.py
@@ -10,7 +10,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-"""Common functions for MongoDB and DB2 backends
+"""Common functions for MongoDB backend
 """
 from oslo_log import log
 import pymongo
@@ -35,7 +35,7 @@ AVAILABLE_STORAGE_CAPABILITIES = {
 
 
 class Connection(base.Connection):
-    """Base event Connection class for MongoDB and DB2 drivers."""
+    """Base event Connection class for MongoDB driver."""
 
     CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                        COMMON_AVAILABLE_CAPABILITIES)
diff --git a/ceilometer/storage/__init__.py b/ceilometer/storage/__init__.py
index 8ff68da4..fd9e1a4f 100644
--- a/ceilometer/storage/__init__.py
+++ b/ceilometer/storage/__init__.py
@@ -57,11 +57,6 @@ OPTS = [
                secret=True,
                help='The connection string used to connect to the event '
                     'database. (if unset, connection is used)'),
-    cfg.IntOpt('db2nosql_resource_id_maxlen',
-               default=512,
-               help="The max length of resources id in DB2 nosql, "
-                    "the value should be larger than len(hostname) * 2 "
-                    "as compute node's resource id is _."),
 ]
 
 cfg.CONF.register_opts(OPTS, group='database')
@@ -117,12 +112,6 @@ def get_connection(url, namespace):
     # SqlAlchemy connections specify may specify a 'dialect' or
     # 'dialect+driver'. Handle the case where driver is specified.
     engine_name = connection_scheme.split('+')[0]
-    if engine_name == 'db2':
-        import warnings
-        warnings.simplefilter("always")
-        import debtcollector
-        debtcollector.deprecate("The DB2nosql driver is no longer supported",
-                                version="Liberty", removal_version="N*-cycle")
     # NOTE: translation not applied bug #1446983
     LOG.debug('looking for %(name)r driver in %(namespace)r',
               {'name': engine_name, 'namespace': namespace})
diff --git a/ceilometer/storage/impl_db2.py b/ceilometer/storage/impl_db2.py
deleted file mode 100644
index 0792ed0a..00000000
--- a/ceilometer/storage/impl_db2.py
+++ /dev/null
@@ -1,419 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 eNovance
-# Copyright 2013 IBM Corp
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""DB2 storage backend -""" - -from __future__ import division -import copy -import datetime -import itertools -import sys - -import bson.code -import bson.objectid -from oslo_config import cfg -from oslo_utils import timeutils -import pymongo -import six - -import ceilometer -from ceilometer import storage -from ceilometer.storage import base -from ceilometer.storage import models -from ceilometer.storage.mongo import utils as pymongo_utils -from ceilometer.storage import pymongo_base -from ceilometer import utils - - -AVAILABLE_CAPABILITIES = { - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True}} -} - - -class Connection(pymongo_base.Connection): - """The db2 storage for Ceilometer - - Collections:: - - - meter - - the raw incoming data - - resource - - the metadata for resources - - { _id: uuid of resource, - metadata: metadata dictionaries - user_id: uuid - project_id: uuid - meter: [ array of {counter_name: string, counter_type: string, - counter_unit: string} ] - } - """ - - CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES, - AVAILABLE_CAPABILITIES) - CONNECTION_POOL = pymongo_utils.ConnectionPool() - - GROUP = {'_id': '$counter_name', - 'unit': {'$min': '$counter_unit'}, - 'min': {'$min': '$counter_volume'}, - 'max': {'$max': '$counter_volume'}, - 'sum': {'$sum': '$counter_volume'}, - 'count': {'$sum': 1}, - 'duration_start': {'$min': '$timestamp'}, - 'duration_end': {'$max': '$timestamp'}, - } - - PROJECT = {'_id': 0, 'unit': 1, - 'min': 1, 'max': 1, 'sum': 1, 'count': 1, - 'avg': {'$divide': ['$sum', '$count']}, - 'duration_start': 1, - 'duration_end': 1, - } - - SORT_OPERATION_MAP = {'desc': pymongo.DESCENDING, 'asc': pymongo.ASCENDING} - - SECONDS_IN_A_DAY = 86400 - - def __init__(self, url): - - # Since we are using pymongo, even though we are connecting to DB2 - # we still have to make sure that the scheme which used to distinguish - # db2 driver from mongodb driver be replaced so that pymongo will not - # produce an exception on the scheme. - url = url.replace('db2:', 'mongodb:', 1) - self.conn = self.CONNECTION_POOL.connect(url) - - # Require MongoDB 2.2 to use aggregate(), since we are using mongodb - # as backend for test, the following code is necessary to make sure - # that the test wont try aggregate on older mongodb during the test. - # For db2, the versionArray won't be part of the server_info, so there - # will not be exception when real db2 gets used as backend. 
- server_info = self.conn.server_info() - if server_info.get('sysInfo'): - self._using_mongodb = True - else: - self._using_mongodb = False - - if self._using_mongodb and server_info.get('versionArray') < [2, 2]: - raise storage.StorageBadVersion("Need at least MongoDB 2.2") - - connection_options = pymongo.uri_parser.parse_uri(url) - self.db = getattr(self.conn, connection_options['database']) - if connection_options.get('username'): - self.db.authenticate(connection_options['username'], - connection_options['password']) - - self.upgrade() - - @classmethod - def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'): - """Returns a sort_instruction. - - Sort instructions are used in the query to determine what attributes - to sort on and what direction to use. - :param q: The query dict passed in. - :param sort_keys: array of attributes by which results be sorted. - :param sort_dir: direction in which results be sorted (asc, desc). - :return: sort parameters - """ - sort_keys = sort_keys or [] - sort_instructions = [] - _sort_dir = cls.SORT_OPERATION_MAP.get( - sort_dir, cls.SORT_OPERATION_MAP['desc']) - - for _sort_key in sort_keys: - _instruction = (_sort_key, _sort_dir) - sort_instructions.append(_instruction) - - return sort_instructions - - def _generate_random_str(self, str_len): - init_str = str(bson.objectid.ObjectId()) - objectid_len = len(init_str) - if str_len >= objectid_len: - init_str = (init_str * int(str_len/objectid_len) + - 'x' * int(str_len % objectid_len)) - return init_str - - def upgrade(self, version=None): - # create collection if not present - if 'resource' not in self.db.conn.collection_names(): - self.db.conn.create_collection('resource') - if 'meter' not in self.db.conn.collection_names(): - self.db.conn.create_collection('meter') - - # Establish indexes - # - # We need variations for user_id vs. project_id because of the - # way the indexes are stored in b-trees. The user_id and - # project_id values are usually mutually exclusive in the - # queries, so the database won't take advantage of an index - # including both. - if self.db.resource.index_information() == {}: - # Initializing a longer resource id to workaround DB2 nosql issue. - # Longer resource id is required by compute node's resource as - # their id is '_'. DB2 creates a VARCHAR(70) - # for resource id when its length < 70. But DB2 can create a - # VARCHAR(n) for the resource id which has n(n>70) characters. - # Users can adjust 'db2nosql_resource_id_maxlen'(default is 512) - # for their ENV. 
- resource_id = self._generate_random_str( - cfg.CONF.database.db2nosql_resource_id_maxlen) - self.db.resource.insert_one({'_id': resource_id, - 'no_key': resource_id}) - meter_id = str(bson.objectid.ObjectId()) - timestamp = timeutils.utcnow() - self.db.meter.insert_one({'_id': meter_id, - 'no_key': meter_id, - 'timestamp': timestamp}) - - self.db.resource.create_index([ - ('user_id', pymongo.ASCENDING), - ('project_id', pymongo.ASCENDING), - ('source', pymongo.ASCENDING)], name='resource_idx') - - self.db.meter.create_index([ - ('resource_id', pymongo.ASCENDING), - ('user_id', pymongo.ASCENDING), - ('project_id', pymongo.ASCENDING), - ('counter_name', pymongo.ASCENDING), - ('timestamp', pymongo.ASCENDING), - ('source', pymongo.ASCENDING)], name='meter_idx') - - self.db.meter.create_index([('timestamp', - pymongo.DESCENDING)], - name='timestamp_idx') - - self.db.resource.remove({'_id': resource_id}) - self.db.meter.remove({'_id': meter_id}) - - def clear(self): - # db2 does not support drop_database, remove all collections - for col in ['resource', 'meter']: - self.db[col].drop() - # drop_database command does nothing on db2 database since this has - # not been implemented. However calling this method is important for - # removal of all the empty dbs created during the test runs since - # test run is against mongodb on Jenkins - self.conn.drop_database(self.db.name) - self.conn.close() - - def record_metering_data(self, data): - """Write the data to the backend storage system. - - :param data: a dictionary such as returned by - ceilometer.publisher.utils.meter_message_from_counter - """ - # Record the updated resource metadata - data = copy.deepcopy(data) - data['resource_metadata'] = pymongo_utils.improve_keys( - data.pop('resource_metadata')) - self.db.resource.update_one( - {'_id': data['resource_id']}, - {'$set': {'project_id': data['project_id'], - 'user_id': data['user_id'] or 'null', - 'metadata': data['resource_metadata'], - 'source': data['source'], - }, - '$addToSet': {'meter': {'counter_name': data['counter_name'], - 'counter_type': data['counter_type'], - 'counter_unit': data['counter_unit'], - }, - }, - }, - upsert=True, - ) - - # Record the raw data for the meter. Use a copy so we do not - # modify a data structure owned by our caller (the driver adds - # a new key '_id'). - record = copy.copy(data) - record['recorded_at'] = timeutils.utcnow() - # Make sure that the data does have field _id which db2 wont add - # automatically. - if record.get('_id') is None: - record['_id'] = str(bson.objectid.ObjectId()) - self.db.meter.insert_one(record) - - def get_resources(self, user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of models.Resource instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional start time operator, like gt, ge. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional end time operator, like lt, le. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. 
- """ - if limit == 0: - return - metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} - - q = {} - if user is not None: - q['user_id'] = user - if project is not None: - q['project_id'] = project - if source is not None: - q['source'] = source - if resource is not None: - q['resource_id'] = resource - # Add resource_ prefix so it matches the field in the db - q.update(dict(('resource_' + k, v) - for (k, v) in six.iteritems(metaquery))) - - if start_timestamp or end_timestamp: - # Look for resources matching the above criteria and with - # samples in the time range we care about, then change the - # resource query to return just those resources by id. - ts_range = pymongo_utils.make_timestamp_range(start_timestamp, - end_timestamp, - start_timestamp_op, - end_timestamp_op) - if ts_range: - q['timestamp'] = ts_range - - sort_keys = base._handle_sort_key('resource', 'timestamp') - sort_keys.insert(0, 'resource_id') - sort_instructions = self._build_sort_instructions(sort_keys=sort_keys, - sort_dir='desc') - resource = lambda x: x['resource_id'] - if limit is not None: - meters = self.db.meter.find(q, sort=sort_instructions, - limit=limit) - else: - meters = self.db.meter.find(q, sort=sort_instructions) - for resource_id, r_meters in itertools.groupby(meters, key=resource): - # Because we have to know first/last timestamp, and we need a full - # list of references to the resource's meters, we need a tuple - # here. - r_meters = tuple(r_meters) - latest_meter = r_meters[0] - last_ts = latest_meter['timestamp'] - first_ts = r_meters[-1]['timestamp'] - - yield models.Resource(resource_id=latest_meter['resource_id'], - project_id=latest_meter['project_id'], - first_sample_timestamp=first_ts, - last_sample_timestamp=last_ts, - source=latest_meter['source'], - user_id=latest_meter['user_id'], - metadata=pymongo_utils.unquote_keys( - latest_meter['resource_metadata'])) - - def get_meter_statistics(self, sample_filter, period=None, groupby=None, - aggregate=None): - """Return an iterable of models.Statistics instance. - - Items are containing meter statistics described by the query - parameters. The filter must have a meter value set. 
- """ - if (groupby and - set(groupby) - set(['user_id', 'project_id', - 'resource_id', 'source'])): - raise ceilometer.NotImplementedError( - "Unable to group by these fields") - - if aggregate: - raise ceilometer.NotImplementedError( - 'Selectable aggregates not implemented') - - q = pymongo_utils.make_query_from_filter(sample_filter) - - if period: - if sample_filter.start_timestamp: - period_start = sample_filter.start_timestamp - else: - period_start = self.db.meter.find( - limit=1, sort=[('timestamp', - pymongo.ASCENDING)])[0]['timestamp'] - - if groupby: - sort_keys = ['counter_name'] + groupby + ['timestamp'] - else: - sort_keys = ['counter_name', 'timestamp'] - - sort_instructions = self._build_sort_instructions(sort_keys=sort_keys, - sort_dir='asc') - meters = self.db.meter.find(q, sort=sort_instructions) - - def _group_key(meter): - # the method to define a key for groupby call - key = {} - for y in sort_keys: - if y == 'timestamp' and period: - key[y] = (timeutils.delta_seconds(period_start, - meter[y]) // period) - elif y != 'timestamp': - key[y] = meter[y] - return key - - def _to_offset(periods): - return {'days': (periods * period) // self.SECONDS_IN_A_DAY, - 'seconds': (periods * period) % self.SECONDS_IN_A_DAY} - - for key, grouped_meters in itertools.groupby(meters, key=_group_key): - stat = models.Statistics(unit=None, - min=sys.maxsize, max=-sys.maxsize, - avg=0, sum=0, count=0, - period=0, period_start=0, period_end=0, - duration=0, duration_start=0, - duration_end=0, groupby=None) - - for meter in grouped_meters: - stat.unit = meter.get('counter_unit', '') - m_volume = meter.get('counter_volume') - if stat.min > m_volume: - stat.min = m_volume - if stat.max < m_volume: - stat.max = m_volume - stat.sum += m_volume - stat.count += 1 - if stat.duration_start == 0: - stat.duration_start = meter['timestamp'] - stat.duration_end = meter['timestamp'] - if groupby and not stat.groupby: - stat.groupby = {} - for group_key in groupby: - stat.groupby[group_key] = meter[group_key] - - stat.duration = timeutils.delta_seconds(stat.duration_start, - stat.duration_end) - stat.avg = stat.sum / stat.count - if period: - stat.period = period - periods = key.get('timestamp') - stat.period_start = (period_start + - datetime. - timedelta(**(_to_offset(periods)))) - stat.period_end = (period_start + - datetime. - timedelta(**(_to_offset(periods + 1)))) - else: - stat.period_start = stat.duration_start - stat.period_end = stat.duration_end - yield stat diff --git a/ceilometer/storage/mongo/utils.py b/ceilometer/storage/mongo/utils.py index 75d880da..3c1195a2 100644 --- a/ceilometer/storage/mongo/utils.py +++ b/ceilometer/storage/mongo/utils.py @@ -15,7 +15,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -"""Common functions for MongoDB and DB2 backends +"""Common functions for MongoDB backend """ import datetime diff --git a/ceilometer/storage/pymongo_base.py b/ceilometer/storage/pymongo_base.py index c2a18eb1..bbb49ac5 100644 --- a/ceilometer/storage/pymongo_base.py +++ b/ceilometer/storage/pymongo_base.py @@ -15,8 +15,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-"""Common functions for MongoDB and DB2 backends
-"""
+"""Common functions for MongoDB backend."""
 
 import pymongo
 
 from ceilometer.storage import base
@@ -40,7 +39,7 @@ AVAILABLE_STORAGE_CAPABILITIES = {
 
 
 class Connection(base.Connection):
-    """Base Connection class for MongoDB and DB2 drivers."""
+    """Base Connection class for MongoDB driver."""
 
     CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                        COMMON_AVAILABLE_CAPABILITIES)
diff --git a/ceilometer/tests/db.py b/ceilometer/tests/db.py
index ae67df1f..a633dabf 100644
--- a/ceilometer/tests/db.py
+++ b/ceilometer/tests/db.py
@@ -172,7 +172,6 @@ class TestBase(test_base.BaseTestCase):
         'mongodb': MongoDbManager,
         'mysql': MySQLManager,
         'postgresql': PgSQLManager,
-        'db2': MongoDbManager,
         'sqlite': SQLiteManager,
         'es': ElasticSearchManager,
     }
diff --git a/ceilometer/tests/functional/api/v2/test_event_scenarios.py b/ceilometer/tests/functional/api/v2/test_event_scenarios.py
index e4d450a2..dede8f53 100644
--- a/ceilometer/tests/functional/api/v2/test_event_scenarios.py
+++ b/ceilometer/tests/functional/api/v2/test_event_scenarios.py
@@ -604,7 +604,7 @@ class AclRestrictedEventTestBase(v2.FunctionalTest):
                              expect_errors=True)
         self.assertEqual(404, data.status_int)
 
-    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2')
+    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es')
     def test_admin_access(self):
         a_headers = {"X-Roles": "admin",
                      "X-User-Id": self.admin_user_id,
@@ -614,7 +614,7 @@ class AclRestrictedEventTestBase(v2.FunctionalTest):
         self.assertEqual(set(['empty_ev', 'admin_ev']),
                          set(ev['event_type'] for ev in data))
 
-    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2')
+    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es')
     def test_admin_access_trait_filter(self):
         a_headers = {"X-Roles": "admin",
                      "X-User-Id": self.admin_user_id,
@@ -627,7 +627,7 @@ class AclRestrictedEventTestBase(v2.FunctionalTest):
         self.assertEqual(1, len(data))
         self.assertEqual('empty_ev', data[0]['event_type'])
 
-    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2')
+    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es')
    def test_admin_access_single(self):
         a_headers = {"X-Roles": "admin",
                      "X-User-Id": self.admin_user_id,
@@ -637,7 +637,7 @@ class AclRestrictedEventTestBase(v2.FunctionalTest):
         data = self.get_json('/events/2', headers=a_headers)
         self.assertEqual('admin_ev', data['event_type'])
 
-    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2')
+    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es')
     def test_admin_access_trait_filter_no_access(self):
         a_headers = {"X-Roles": "admin",
                      "X-User-Id": self.admin_user_id,
diff --git a/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py b/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py
index b9e41d32..cafa1c80 100644
--- a/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py
+++ b/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py
@@ -172,7 +172,7 @@ class TestMaxResourceVolume(v2.FunctionalTest):
                              period=-1)
         self.assertEqual(400, resp.status_code)
 
-    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2')
+    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase')
     def test_period_with_large_value(self):
         resp = self.get_json(self.PATH, expect_errors=True,
                              q=[{'field': 'user_id',
@@ -1208,7 +1208,7 @@ class TestGroupByInstance(v2.FunctionalTest):
                                            u'2013-08-01T14:00:00'])
 
 
-@tests_db.run_with('mongodb', 'hbase', 'db2')
+@tests_db.run_with('mongodb', 'hbase')
 class TestGroupBySource(v2.FunctionalTest):
 
     # FIXME(terriyu): We have to put test_group_by_source in its own class
@@ -1551,7 +1551,7 @@ class TestSelectableAggregates(v2.FunctionalTest):
                            'Bad aggregate: cardinality.injection_attack')
 
 
-@tests_db.run_with('mongodb', 'hbase', 'db2')
+@tests_db.run_with('mongodb', 'hbase')
 class TestUnparameterizedAggregates(v2.FunctionalTest):
 
     # We put the stddev test case in a separate class so that we
@@ -1559,7 +1559,7 @@
     # support the stddev_pop function and fails ungracefully with
     # OperationalError when it is used. However we still want to
     # test the corresponding functionality in the mongo driver.
-    # For hbase & db2, the skip on NotImplementedError logic works
+    # For hbase, the skip on NotImplementedError logic works
     # in the usual way.
 
     PATH = '/meters/instance/statistics'
diff --git a/ceilometer/tests/functional/storage/test_impl_db2.py b/ceilometer/tests/functional/storage/test_impl_db2.py
deleted file mode 100644
index 3dcc5056..00000000
--- a/ceilometer/tests/functional/storage/test_impl_db2.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#
-# Copyright Ericsson AB 2014. All rights reserved
-#
-# Authors: Ildiko Vancsa
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer/storage/impl_db2.py
-
-.. note::
-  In order to run the tests against another MongoDB server set the
-  environment variable CEILOMETER_TEST_DB2_URL to point to a DB2
-  server before running the tests.
- -""" - -import bson -import mock -from oslo_config import cfg -from oslo_utils import timeutils - -from ceilometer.event.storage import impl_db2 as impl_db2_event -from ceilometer.storage import impl_db2 -from ceilometer.storage.mongo import utils as pymongo_utils -from ceilometer.tests import base as test_base - - -class CapabilitiesTest(test_base.BaseTestCase): - # Check the returned capabilities list, which is specific to each DB - # driver - - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'resources': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True, - 'complex': False}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': False, - 'min': False, - 'sum': False, - 'avg': False, - 'count': False, - 'stddev': False, - 'cardinality': False}} - }, - } - - actual_capabilities = impl_db2.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_event_capabilities(self): - expected_capabilities = { - 'events': {'query': {'simple': True}}, - } - actual_capabilities = impl_db2_event.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = impl_db2.Connection.get_storage_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - -class ConnectionTest(test_base.BaseTestCase): - @mock.patch.object(impl_db2.Connection, '_generate_random_str') - @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') - @mock.patch.object(timeutils, 'utcnow') - @mock.patch.object(bson.objectid, 'ObjectId') - def test_upgrade(self, meter_id, timestamp, mongo_connect, - _generate_random_str): - conn_mock = mock.MagicMock() - conn_mock.server_info.return_value = {} - _generate_random_str.return_value = 'wew' * 247 + 'x' * 3 - conn_mock.ceilodb2.resource.index_information.return_value = {} - mongo_connect.return_value = conn_mock - meter_id.return_value = '54b8860d75bfe43b54e84ce7' - timestamp.return_value = 'timestamp' - cfg.CONF.set_override('db2nosql_resource_id_maxlen', - 256, - group='database') - impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') - resource_id = 'wew' * 247 + 'x' * 3 - conn_mock.ceilodb2.resource.insert_one.assert_called_with( - {'_id': resource_id, - 'no_key': resource_id}) - conn_mock.ceilodb2.meter.insert_one.assert_called_with( - {'_id': '54b8860d75bfe43b54e84ce7', - 'no_key': '54b8860d75bfe43b54e84ce7', - 'timestamp': 'timestamp'}) - - @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') - @mock.patch.object(bson.objectid, 'ObjectId') - def test_generate_random_str_with_less_config_len(self, objectid, - mongo_connect): - fake_str = '54b8860d75bfe43b54e84ce7' - conn_mock = mock.MagicMock() - conn_mock.server_info.return_value = {} - mongo_connect.return_value = conn_mock - objectid.return_value = fake_str - cfg.CONF.set_override('db2nosql_resource_id_maxlen', - 20, - group='database') - conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') - rand_str = conn._generate_random_str(20) - self.assertEqual(fake_str, rand_str) - - @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') - @mock.patch.object(bson.objectid, 'ObjectId') - 
def test_generate_random_str_with_default_config_len(self, objectid, - mongo_connect): - fake_str = '54b8860d75bfe43b54e84ce7' - conn_mock = mock.MagicMock() - conn_mock.server_info.return_value = {} - mongo_connect.return_value = conn_mock - objectid.return_value = fake_str - cfg.CONF.set_override('db2nosql_resource_id_maxlen', - 512, - group='database') - conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') - rand_str = conn._generate_random_str(512) - str_len = len(str(fake_str)) - expect_str = fake_str * int(512 / str_len) + 'x' * (512 % str_len) - self.assertEqual(expect_str, rand_str) diff --git a/ceilometer/tests/functional/storage/test_pymongo_base.py b/ceilometer/tests/functional/storage/test_pymongo_base.py index 86c1ed71..b22824ff 100644 --- a/ceilometer/tests/functional/storage/test_pymongo_base.py +++ b/ceilometer/tests/functional/storage/test_pymongo_base.py @@ -9,7 +9,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -"""Tests the mongodb and db2 common functionality +"""Tests the mongodb functionality """ import copy @@ -23,7 +23,7 @@ from ceilometer.tests import db as tests_db from ceilometer.tests.functional.storage import test_storage_scenarios -@tests_db.run_with('mongodb', 'db2') +@tests_db.run_with('mongodb') class CompatibilityTest(test_storage_scenarios.DBTestBase): def prepare_data(self): @@ -81,7 +81,7 @@ class CompatibilityTest(test_storage_scenarios.DBTestBase): # TODO(ananya) same test should be done for other databse -@tests_db.run_with('mongodb', 'db2') +@tests_db.run_with('mongodb') class FilterQueryTestForMeters(test_storage_scenarios.DBTestBase): def prepare_data(self): def old_record_metering_data(self, data): diff --git a/ceilometer/tests/functional/storage/test_storage_scenarios.py b/ceilometer/tests/functional/storage/test_storage_scenarios.py index f0c61415..751ab739 100644 --- a/ceilometer/tests/functional/storage/test_storage_scenarios.py +++ b/ceilometer/tests/functional/storage/test_storage_scenarios.py @@ -575,7 +575,7 @@ class RawSampleTest(DBTestBase): results = list(self.conn.get_samples(f)) self.assertEqual(2, len(results)) - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') + @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase') def test_clear_metering_data(self): # NOTE(jd) Override this test in MongoDB because our code doesn't clear # the collections, this is handled by MongoDB TTL feature. @@ -588,7 +588,7 @@ class RawSampleTest(DBTestBase): results = list(self.conn.get_resources()) self.assertEqual(5, len(results)) - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') + @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase') def test_clear_metering_data_no_data_to_remove(self): # NOTE(jd) Override this test in MongoDB because our code doesn't clear # the collections, this is handled by MongoDB TTL feature. 
@@ -2690,7 +2690,7 @@ class EventTTLTest(EventTestBase):
         self.assertEqual(4, len(traits))
 
 
-@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'db2')
+@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb')
 class EventTest(EventTestBase):
     def test_duplicate_message_id(self):
         now = datetime.datetime.utcnow()
diff --git a/setup.cfg b/setup.cfg
index dcaf47b9..53d10cf9 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -190,7 +190,6 @@ ceilometer.event.storage =
     postgresql = ceilometer.event.storage.impl_sqlalchemy:Connection
     sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection
     hbase = ceilometer.event.storage.impl_hbase:Connection
-    db2 = ceilometer.event.storage.impl_db2:Connection
 
 ceilometer.metering.storage =
     log = ceilometer.storage.impl_log:Connection
@@ -199,7 +198,6 @@ ceilometer.metering.storage =
     postgresql = ceilometer.storage.impl_sqlalchemy:Connection
     sqlite = ceilometer.storage.impl_sqlalchemy:Connection
     hbase = ceilometer.storage.impl_hbase:Connection
-    db2 = ceilometer.storage.impl_db2:Connection
 
 ceilometer.compute.virt =
     libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
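
For context on what the deleted drivers actually did at connection time: both removed Connection classes rewrote the db2: scheme to mongodb: and then validated the server through pymongo, as seen in the deleted __init__ methods above. The sketch below is illustrative only, not part of the patch or of ceilometer; the function name connect_like_removed_db2_driver is hypothetical, and it assumes a MongoDB-compatible endpoint is reachable.

import pymongo


def connect_like_removed_db2_driver(url):
    # The DB2 driver spoke the MongoDB wire protocol, so it only had to
    # swap the URL scheme before handing the connection to pymongo.
    url = url.replace('db2:', 'mongodb:', 1)
    client = pymongo.MongoClient(url)

    # Against real MongoDB the removed code also required version >= 2.2
    # so that aggregate() was available; DB2 did not report 'versionArray'.
    info = client.server_info()
    if info.get('versionArray') and info['versionArray'] < [2, 2]:
        raise RuntimeError('Need at least MongoDB 2.2')

    # Return the database named in the connection URL, as the driver did.
    database = pymongo.uri_parser.parse_uri(url)['database']
    return client[database]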