Refactor notification

A middleware is introduced to send all API request-level notifications using
a refactored notifier. For asynchronous requests such as table create/delete,
explicit notifications will be used to send events.

Change-Id: I7c55e4385fd8337256ee6a7b6b884dfbbebb0a2c
Implements: blueprint refactor-notification
This commit is contained in:
Charles Wang 2014-12-19 10:35:25 -05:00 committed by Dmitriy Ukhlov
parent 627d486cdf
commit 70d4aeee64
26 changed files with 359 additions and 455 deletions

View File

@ -1,5 +1,5 @@
#!/usr/bin/env python
# Copyright 2015 Symantec Corporation
# Copyright 2013 Mirantis Inc.
# All Rights Reserved.
#
@ -17,6 +17,7 @@
import os
import sys
import time
from oslo.config import cfg
from oslo import messaging
@ -61,6 +62,7 @@ class SchemaEndpoint(object):
LOG.debug("Start creating table '%s'", table_name)
context = ctxt.RequestContext.from_dict(ctx)
start_time = time.time()
try:
table_info = self._table_info_repo.get(context, table_name)
@ -71,8 +73,10 @@ class SchemaEndpoint(object):
dict(
table_name=table_name,
table_uuid="",
message=e.message
))
message=e.message,
value=start_time
)
)
LOG.error("Create table failed."
" Table info for table '%s' does not exist in repo",
table_name)
@ -86,8 +90,10 @@ class SchemaEndpoint(object):
dict(
table_name=table_name,
table_uuid=str(table_info.id),
message=e.message
))
message=e.message,
value=start_time
)
)
LOG.error("Create table failed. Table '%s' with uuid '%s' is in "
"%s state but %s is expected",
@ -106,14 +112,16 @@ class SchemaEndpoint(object):
table_info.internal_name = internal_name
self._table_info_repo.update(
context, table_info, ["status", "internal_name"])
self._notifier.info(
self._notifier.audit(
context,
notifier.EVENT_TYPE_TABLE_CREATE_END,
notifier.EVENT_TYPE_TABLE_CREATE,
dict(
table_name=table_name,
table_uuid=str(table_info.id),
index_count=len(table_info.schema['index_def_map'].keys())
))
index_count=len(table_info.schema['index_def_map'].keys()),
value=start_time
)
)
LOG.debug("Table '%s' with uuid %s created", table_name,
str(table_info.id))
@ -124,8 +132,10 @@ class SchemaEndpoint(object):
dict(
table_name=table_name,
table_uuid=str(table_info.id),
message=e.message
))
message=e.message,
value=start_time
)
)
table_info.status = models.TableMeta.TABLE_STATUS_CREATE_FAILED
self._table_info_repo.update(context, table_info, ["status"])
@ -134,6 +144,7 @@ class SchemaEndpoint(object):
def delete(self, ctx, table_name):
LOG.debug("Start deleting table '%s'", table_name)
start_time = time.time()
context = ctxt.RequestContext.from_dict(ctx)
@ -145,8 +156,10 @@ class SchemaEndpoint(object):
notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
dict(
table_name=table_name,
message=e.message
))
message=e.message,
value=start_time
)
)
LOG.error("Delete table failed."
" Table info for table '%s' does not exist in repo",
@ -162,8 +175,10 @@ class SchemaEndpoint(object):
dict(
table_name=table_name,
table_uuid=str(table_info.id),
message=e.message
))
message=e.message,
value=start_time
)
)
LOG.error("Delete table failed. Table '%s' with uuid %s is in %s "
"state but %s is expected",
@ -177,13 +192,15 @@ class SchemaEndpoint(object):
try:
self._storage_driver.delete_table(context, table_info)
self._table_info_repo.delete(context, table_name)
self._notifier.info(
self._notifier.audit(
context,
notifier.EVENT_TYPE_TABLE_DELETE_END,
notifier.EVENT_TYPE_TABLE_DELETE,
dict(
table_name=table_name,
table_uuid=str(table_info.id)
))
table_uuid=str(table_info.id),
value=start_time
)
)
LOG.debug("Table '%s' with uuid %s deleted", table_name,
str(table_info.id))
except Exception as e:
@ -193,8 +210,10 @@ class SchemaEndpoint(object):
dict(
table_name=table_name,
table_uuid=str(table_info.id),
message=e.message
))
message=e.message,
value=start_time
)
)
table_info.status = models.TableMeta.TABLE_STATUS_DELETE_FAILED
self._table_info_repo.update(context, table_info, ["status"])
@ -220,7 +239,7 @@ if __name__ == '__main__':
endpoints = [
SchemaEndpoint(),
]
LOG.debug('Creating PRC server..')
LOG.debug('Creating RPC server..')
server = messaging.get_rpc_server(transport, target, endpoints,
executor='blocking')
LOG.debug('Starting...')

View File

@ -11,16 +11,16 @@ use = egg:Paste#urlmap
/healthcheck = health_check
[pipeline:rest_api]
pipeline = connection_handler probe_filter faultwrapper rate_limit tokenauth contextwrapper magnetodb_app
#pipeline = connection_handler url_rewriter probe_filter faultwrapper rate_limit tokenauth contextwrapper magnetodb_app
pipeline = connection_handler probe_filter mdb_request_notifications faultwrapper rate_limit tokenauth contextwrapper magnetodb_app
#pipeline = connection_handler url_rewriter probe_filter mdb_request_notifications faultwrapper rate_limit tokenauth contextwrapper magnetodb_app
[pipeline:management_api]
pipeline = connection_handler probe_filter faultwrapper rate_limit tokenauth contextwrapper management_app
#pipeline = connection_handler url_rewriter probe_filter faultwrapper rate_limit tokenauth contextwrapper management_app
[pipeline:dynamodb_api]
pipeline = connection_handler probe_filter rate_limit ec2authtoken contextwrapper dynamodb_app
#pipeline = connection_handler url_rewriter probe_filter rate_limit ec2authtoken contextwrapper dynamodb_app
pipeline = connection_handler probe_filter ddb_request_notifications rate_limit ec2authtoken contextwrapper dynamodb_app
#pipeline = connection_handler url_rewriter probe_filter ddb_request_notifications rate_limit ec2authtoken contextwrapper dynamodb_app
[pipeline:monitoring_api]
pipeline = connection_handler probe_filter faultwrapper rate_limit contextwrapper monitoring_app
@ -66,6 +66,14 @@ paste.filter_factory = magnetodb.common.middleware.probe_filter:ProbeFilter.fact
#[filter:url_rewriter]
#paste.filter_factory = magnetodb.common.middleware.url_rewriter:UrlRewriterMiddleware.factory_method
[filter:mdb_request_notifications]
paste.filter_factory = magnetodb.common.middleware.request_notifications:RequestNotificationsMiddleware.factory_method
api_type="mdb"
[filter:ddb_request_notifications]
paste.filter_factory = magnetodb.common.middleware.request_notifications:RequestNotificationsMiddleware.factory_method
api_type="ddb"
[app:magnetodb_app]
paste.app_factory = magnetodb.api.openstack.v1.app_data:app_factory

View File

@ -11,16 +11,16 @@ use = egg:Paste#urlmap
/healthcheck = health_check
[pipeline:rest_api]
pipeline = connection_handler probe_filter faultwrapper rate_limit tokenauth contextwrapper magnetodb_app
#pipeline = connection_handler url_rewriter probe_filter faultwrapper rate_limit tokenauth contextwrapper magnetodb_app
pipeline = connection_handler probe_filter mdb_request_notifications faultwrapper rate_limit tokenauth contextwrapper magnetodb_app
#pipeline = connection_handler url_rewriter probe_filter mdb_request_notifications faultwrapper rate_limit tokenauth contextwrapper magnetodb_app
[pipeline:management_api]
pipeline = connection_handler probe_filter faultwrapper rate_limit tokenauth contextwrapper management_app
#pipeline = connection_handler url_rewriter probe_filter faultwrapper rate_limit tokenauth contextwrapper management_app
[pipeline:dynamodb_api]
pipeline = connection_handler probe_filter rate_limit ec2authtoken contextwrapper dynamodb_app
#pipeline = connection_handler url_rewriter probe_filter rate_limit ec2authtoken contextwrapper dynamodb_app
pipeline = connection_handler probe_filter ddb_request_notifications rate_limit ec2authtoken contextwrapper dynamodb_app
#pipeline = connection_handler url_rewriter probe_filter ddb_request_notifications rate_limit ec2authtoken contextwrapper dynamodb_app
[pipeline:monitoring_api]
pipeline = connection_handler probe_filter faultwrapper rate_limit contextwrapper monitoring_app
@ -68,6 +68,14 @@ paste.filter_factory = magnetodb.common.middleware.probe_filter:ProbeFilter.fact
#[filter:url_rewriter]
#paste.filter_factory = magnetodb.common.middleware.url_rewriter:UrlRewriterMiddleware.factory_method
[filter:mdb_request_notifications]
paste.filter_factory = magnetodb.common.middleware.request_notifications:RequestNotificationsMiddleware.factory_method
api_type="mdb"
[filter:ddb_request_notifications]
paste.filter_factory = magnetodb.common.middleware.request_notifications:RequestNotificationsMiddleware.factory_method
api_type="ddb"
[app:magnetodb_app]
paste.app_factory = magnetodb.api.openstack.v1.app_data:app_factory

View File

@ -89,6 +89,8 @@ class AmzDynamoDBApiController():
(service_name, api_version, action_name))
)
context.request_type = action_name
return action.perform(context, action_params)
def process_request(self, req, body):

View File

@ -13,12 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
from magnetodb import storage
from magnetodb.api.amz.dynamodb import action
from magnetodb.api.amz.dynamodb import exception as ddb_exception
from magnetodb.api.amz.dynamodb import parser
from magnetodb.common import exception
from magnetodb import storage
class DescribeTableDynamoDBAction(action.DynamoDBAction):

View File

@ -14,9 +14,10 @@
# under the License.
from magnetodb import api
from magnetodb.api import validation
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb import storage
@ -27,6 +28,7 @@ class BatchGetItemController(object):
@api.enforce_policy("mdb:batch_get_item")
@probe.Probe(__name__)
@request_context_decorator.request_type("batch_read")
def process_request(self, req, body, project_id):
with probe.Probe(__name__ + '.validation'):
validation.validate_object(body, "body")

View File

@ -18,6 +18,7 @@ from magnetodb import api
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb import storage
@ -28,6 +29,7 @@ class BatchWriteItemController(object):
@api.enforce_policy("mdb:batch_write_item")
@probe.Probe(__name__)
@request_context_decorator.request_type("batch_write")
def process_request(self, req, body, project_id):
with probe.Probe(__name__ + '.validation'):
validation.validate_object(body, "body")

View File

@ -19,6 +19,7 @@ from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import exception
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb.i18n import _
from magnetodb.openstack.common import log as logging
from magnetodb import storage
@ -28,13 +29,13 @@ LOG = logging.getLogger(__name__)
class CreateTableController():
"""
The CreateTable operation adds a new table.
"""The CreateTable operation adds a new table.
Table names must be unique within each tenant.
"""
@api.enforce_policy("mdb:create_table")
@probe.Probe(__name__)
@request_context_decorator.request_type("create_table")
def create_table(self, req, body, project_id):
with probe.Probe(__name__ + '.validate'):
validation.validate_object(body, "body")

View File

@ -17,6 +17,7 @@ from magnetodb import api
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb import storage
from magnetodb.storage import models
@ -26,6 +27,7 @@ class DeleteItemController(object):
@api.enforce_policy("mdb:delete_item")
@probe.Probe(__name__)
@request_context_decorator.request_type("delete_item")
def process_request(self, req, body, project_id, table_name):
with probe.Probe(__name__ + '.jsonschema.validate'):
validation.validate_object(body, "body")

View File

@ -17,6 +17,7 @@ from magnetodb import api
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb.openstack.common import log as logging
from magnetodb import storage
@ -28,6 +29,7 @@ class DeleteTableController(object):
@api.enforce_policy("mdb:delete_table")
@probe.Probe(__name__)
@request_context_decorator.request_type("delete_table")
def delete_table(self, req, project_id, table_name):
validation.validate_table_name(table_name)

View File

@ -15,9 +15,10 @@
# under the License.
from magnetodb import api
from magnetodb.api import validation
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb.openstack.common import log as logging
from magnetodb import storage
@ -29,6 +30,7 @@ class DescribeTableController(object):
@api.enforce_policy("mdb:describe_table")
@probe.Probe(__name__)
@request_context_decorator.request_type("describe_table")
def describe_table(self, req, project_id, table_name):
validation.validate_table_name(table_name)
@ -79,4 +81,5 @@ class DescribeTableController(object):
table_meta.schema.index_def_map
)
)
return result

View File

@ -18,6 +18,7 @@ from magnetodb import api
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb import storage
from magnetodb.storage import models
@ -27,6 +28,7 @@ class GetItemController(object):
@api.enforce_policy("mdb:get_item")
@probe.Probe(__name__)
@request_context_decorator.request_type("get_item")
def process_request(self, req, body, project_id, table_name):
with probe.Probe(__name__ + '.validate'):
validation.validate_object(body, "body")

View File

@ -18,6 +18,7 @@ from magnetodb import api
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb.openstack.common import log as logging
from magnetodb import storage
@ -30,6 +31,7 @@ class ListTablesController():
"""
@api.enforce_policy("mdb:list_tables")
@probe.Probe(__name__)
@request_context_decorator.request_type("list_tables")
def list_tables(self, req, project_id):
params = req.params.copy()

View File

@ -18,6 +18,7 @@ from magnetodb import api
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb import storage
from magnetodb.storage import models
@ -27,6 +28,7 @@ class PutItemController(object):
@api.enforce_policy("mdb:put_item")
@probe.Probe(__name__)
@request_context_decorator.request_type("put_item")
def process_request(self, req, body, project_id, table_name):
with probe.Probe(__name__ + '.validation'):
validation.validate_object(body, "body")

View File

@ -18,11 +18,11 @@ from magnetodb import api
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb.openstack.common import log as logging
from magnetodb import storage
from magnetodb.storage import models
LOG = logging.getLogger(__name__)
@ -31,6 +31,7 @@ class QueryController(object):
@api.enforce_policy("mdb:query")
@probe.Probe(__name__)
@request_context_decorator.request_type("query")
def query(self, req, body, project_id, table_name):
with probe.Probe(__name__ + '.validation'):
validation.validate_object(body, "body")

View File

@ -18,6 +18,7 @@ from magnetodb import api
from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb.openstack.common import log as logging
from magnetodb import storage
from magnetodb.storage import models
@ -32,6 +33,7 @@ class ScanController(object):
@api.enforce_policy("mdb:scan")
@probe.Probe(__name__)
@request_context_decorator.request_type("scan")
def scan(self, req, body, project_id, table_name):
with probe.Probe(__name__ + '.validation'):
validation.validate_object(body, "body")

View File

@ -18,6 +18,7 @@ from magnetodb.api.openstack.v1 import parser
from magnetodb.api import validation
from magnetodb.common import exception
from magnetodb.common import probe
from magnetodb.common.utils import request_context_decorator
from magnetodb import storage
from magnetodb.storage import models
@ -29,6 +30,7 @@ class UpdateItemController(object):
@api.enforce_policy("mdb:update_item")
@probe.Probe(__name__)
@request_context_decorator.request_type("update_item")
def process_request(self, req, body, project_id, table_name):
with probe.Probe(__name__ + '.validation'):
validation.validate_object(body, "body")

View File

@ -0,0 +1,82 @@
# Copyright 2015 Symantec Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import webob
from magnetodb.common import wsgi
from magnetodb import notifier
from magnetodb.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class RequestNotificationsMiddleware(wsgi.Middleware):
    """Middleware that enables request-based notifications.

    Put this filter in the pipeline of api-paste.ini to turn on request
    metrics collecting.

    Note that only data api requests will participate in request metrics
    collection.
    """

    def __init__(self, app, options):
        # api_type ("mdb" or "ddb") is supplied by the paste filter
        # configuration; a missing key is a deployment error, so let the
        # KeyError propagate.
        self.api_type = options["api_type"]
        self._notifier = notifier.get_notifier()
        super(RequestNotificationsMiddleware, self).__init__(app)

    @webob.dec.wsgify
    def __call__(self, req):
        start_time = time.time()
        response = req.get_response(self.application)

        # Endpoint controllers annotate the request context with
        # request_type/request_args; fall back to defaults when absent
        # (e.g. requests rejected before reaching a controller).
        request_type = "unknown"
        request_args = {}
        context = getattr(req, 'context', None)
        if context is not None:
            if getattr(context, 'request_type', None):
                request_type = context.request_type
            if getattr(context, 'request_args', None):
                request_args = context.request_args

        event_type = notifier.create_request_event_type(
            self.api_type, request_type, response.status_code
        )
        payload = dict(
            value=start_time,
            request_content_length=req.content_length or 0,
            # content_length may be None for chunked/streamed responses;
            # normalize to 0 for consistency with request_content_length.
            response_content_length=response.content_length or 0
        )
        if request_args:
            payload.update(request_args)

        if response.status_code >= 400:
            payload.update(error=response.body)
            self._notifier.error(context, event_type, payload)
        else:
            self._notifier.debug(context, event_type, payload)
        return response

    @classmethod
    def factory_method(cls, global_config, **local_config):
        # Paste deploy filter factory: returns a callable that wraps a
        # WSGI application with this middleware.
        return lambda application: cls(application, local_config)

View File

@ -0,0 +1,35 @@
# Copyright 2015 Symantec Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
def request_type(event, **dec_kwargs):
    """Decorator factory that tags API requests with a request type.

    Returns a decorator that sets request.context.request_type in
    MagnetoDB API endpoint controllers. The request_type value will be
    used by Notifier to look up the corresponding event in
    Event_Registry.  (dec_kwargs is accepted for forward compatibility
    and currently unused.)
    """
    def _decorator(controller_method):
        @functools.wraps(controller_method)
        def _tag_and_call(ctrl, req, *args, **kwargs):
            # Record the logical request type before dispatching so the
            # notification middleware can read it after the response.
            req.context.request_type = event
            return controller_method(ctrl, req, *args, **kwargs)
        return _tag_and_call
    return _decorator

View File

@ -1,7 +1,7 @@
# Copyright 2014 Symantec Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
@ -16,9 +16,9 @@
import socket
from oslo.config import cfg
from oslo import messaging
from oslo.messaging import notify
from oslo.messaging import serializer
from oslo.messaging import transport
from oslo_serialization import jsonutils
from magnetodb import common as mdb_common
@ -36,39 +36,23 @@ extra_notifier_opts = [
cfg.CONF.register_opts(extra_notifier_opts)
EVENT_TYPE_TABLE_CREATE_START = 'magnetodb.table.create.start'
EVENT_TYPE_TABLE_CREATE_END = 'magnetodb.table.create.end'
EVENT_TYPE_TABLE_CREATE = 'magnetodb.table.create'
EVENT_TYPE_TABLE_CREATE_ERROR = 'magnetodb.table.create.error'
EVENT_TYPE_TABLE_DELETE_START = 'magnetodb.table.delete.start'
EVENT_TYPE_TABLE_DELETE_END = 'magnetodb.table.delete.end'
EVENT_TYPE_TABLE_DELETE = 'magnetodb.table.delete'
EVENT_TYPE_TABLE_DELETE_ERROR = 'magnetodb.table.delete.error'
EVENT_TYPE_TABLE_DESCRIBE = 'magnetodb.table.describe'
EVENT_TYPE_TABLE_LIST = 'magnetodb.table.list'
EVENT_TYPE_DATA_PUTITEM = 'magnetodb.data.putitem'
EVENT_TYPE_DATA_PUTITEM_START = 'magnetodb.data.putitem.start'
EVENT_TYPE_DATA_PUTITEM_END = 'magnetodb.data.putitem.end'
EVENT_TYPE_DATA_DELETEITEM = 'magnetodb.data.deleteitem'
EVENT_TYPE_DATA_DELETEITEM_START = 'magnetodb.data.deleteitem.start'
EVENT_TYPE_DATA_DELETEITEM_END = 'magnetodb.data.deleteitem.end'
EVENT_TYPE_DATA_DELETEITEM_ERROR = 'magnetodb.data.deleteitem.error'
EVENT_TYPE_DATA_BATCHWRITE_START = 'magnetodb.data.batchwrite.start'
EVENT_TYPE_DATA_BATCHWRITE_END = 'magnetodb.data.batchwrite.end'
EVENT_TYPE_DATA_BATCHREAD_START = 'magnetodb.data.batchread.start'
EVENT_TYPE_DATA_BATCHREAD_END = 'magnetodb.data.batchread.end'
EVENT_TYPE_DATA_UPDATEITEM = 'magnetodb.data.updateitem'
EVENT_TYPE_DATA_GETITEM = 'magnetodb.data.getitem'
EVENT_TYPE_DATA_GETITEM_START = 'magnetodb.data.getitem.start'
EVENT_TYPE_DATA_GETITEM_END = 'magnetodb.data.getitem.end'
EVENT_TYPE_DATA_QUERY = 'magnetodb.data.query'
EVENT_TYPE_DATA_QUERY_START = 'magnetodb.data.query.start'
EVENT_TYPE_DATA_QUERY_END = 'magnetodb.data.query.end'
EVENT_TYPE_DATA_SCAN_START = 'magnetodb.data.scan.start'
EVENT_TYPE_DATA_SCAN_END = 'magnetodb.data.scan.end'
EVENT_TYPE_STREAMING_PATH_ERROR = 'magnetodb.streaming.path.error'
EVENT_TYPE_STREAMING_DATA_START = 'magnetodb.streaming.data.start'
EVENT_TYPE_STREAMING_DATA_END = 'magnetodb.streaming.data.end'
EVENT_TYPE_STREAMING_DATA_ERROR = 'magnetodb.streaming.data.error'
EVENT_TYPE_REQUEST_RATE_LIMITED = 'magnetodb.request.rate.limited'
def create_request_event_type(api_type, request_type, status_code):
    """Build the notification event type string for an API request.

    Success (status_code < 400) yields
    "magnetodb.req.<api_type>.<request_type>"; failures append
    ".error.<status_code>" to that base.
    """
    base = "magnetodb.req.{}.{}".format(api_type, request_type)
    if status_code >= 400:
        return "{}.error.{}".format(base, status_code)
    return base
__NOTIFIER = None
@ -90,7 +74,7 @@ def get_notifier():
publisher_id = '{}.{}'.format(service, host)
__NOTIFIER = notify.Notifier(
messaging.get_transport(cfg.CONF),
transport.get_transport(cfg.CONF),
publisher_id,
serializer=RequestContextSerializer(JsonPayloadSerializer())
)

View File

@ -117,6 +117,9 @@ def create_table(context, table_name, table_schema):
:raises: BackendInteractionException
"""
context.request_args = dict(
table_name=table_name, table_schema=table_schema
)
return __STORAGE_MANAGER_IMPL.create_table(context, table_name,
table_schema)
@ -132,6 +135,7 @@ def delete_table(context, table_name):
:raises: BackendInteractionException
"""
context.request_args = dict(table_name=table_name)
return __STORAGE_MANAGER_IMPL.delete_table(context, table_name)
@ -146,6 +150,7 @@ def describe_table(context, table_name):
:raises: BackendInteractionException
"""
context.request_args = dict(table_name=table_name)
return __STORAGE_MANAGER_IMPL.describe_table(context, table_name)
@ -158,6 +163,9 @@ def list_tables(context, exclusive_start_table_name=None, limit=None):
:raises: BackendInteractionException
"""
context.request_args = dict(
exclusive_start_table_name=exclusive_start_table_name, limit=limit
)
return __STORAGE_MANAGER_IMPL.list_tables(
context, exclusive_start_table_name, limit
)
@ -198,6 +206,11 @@ def put_item(context, table_name, attribute_map, return_values=None,
:raises: BackendInteractionException
"""
context.request_args = dict(
table_name=table_name, attribute_map=attribute_map,
return_values=return_values, if_not_exist=if_not_exist,
expected_condition_map=expected_condition_map
)
return __STORAGE_MANAGER_IMPL.put_item(
context, table_name, attribute_map, return_values,
if_not_exist, expected_condition_map
@ -249,6 +262,10 @@ def delete_item(context, table_name, key_attribute_map,
:raises: BackendInteractionException
"""
context.request_args = dict(
table_name=table_name, key_attribute_map=key_attribute_map,
expected_condition_map=expected_condition_map
)
return __STORAGE_MANAGER_IMPL.delete_item(
context, table_name, key_attribute_map, expected_condition_map
)
@ -284,6 +301,7 @@ def execute_write_batch(context, write_request_map):
:returns: Unprocessed request list
"""
return __STORAGE_MANAGER_IMPL.execute_write_batch(context,
write_request_map)
@ -296,6 +314,7 @@ def execute_get_batch(context, get_request_list):
:returns: tuple of items list and unprocessed request list
"""
context.request_args = dict(get_request_list=get_request_list)
return __STORAGE_MANAGER_IMPL.execute_get_batch(context, get_request_list)
@ -317,6 +336,11 @@ def update_item(context, table_name, key_attribute_map,
:raises: BackendInteractionException
"""
context.request_args = dict(
table_name=table_name, key_attribute_map=key_attribute_map,
attribute_action_map=attribute_action_map,
expected_condition_map=expected_condition_map
)
return __STORAGE_MANAGER_IMPL.update_item(
context, table_name, key_attribute_map, attribute_action_map,
expected_condition_map
@ -351,6 +375,12 @@ def query(context, table_name, indexed_condition_map=None,
:raises: BackendInteractionException
"""
context.request_args = dict(
table_name=table_name, indexed_condition_map=indexed_condition_map,
select_type=select_type, index_name=index_name, limit=limit,
exclusive_start_key=exclusive_start_key, consistent=consistent,
order_type=order_type
)
return __STORAGE_MANAGER_IMPL.query(
context, table_name, indexed_condition_map, select_type, index_name,
limit, exclusive_start_key, consistent, order_type
@ -401,6 +431,11 @@ def scan(context, table_name, condition_map, attributes_to_get=None,
:raises: BackendInteractionException
"""
context.request_args = dict(
table_name=table_name, condition_map=condition_map,
attributes_to_get=attributes_to_get, limit=limit,
exclusive_start_key=exclusive_start_key, consistent=consistent,
)
return __STORAGE_MANAGER_IMPL.scan(
context, table_name, condition_map, attributes_to_get, limit,
exclusive_start_key, consistent=False

View File

@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import time
from magnetodb import notifier
@ -33,6 +34,7 @@ class AsyncSimpleStorageManager(manager.SimpleStorageManager):
)
def _do_create_table(self, context, table_info):
start_time = time.time()
future = self._execute_async(self._storage_driver.create_table,
context, table_info)
@ -45,8 +47,13 @@ class AsyncSimpleStorageManager(manager.SimpleStorageManager):
)
self._notifier.info(
context,
notifier.EVENT_TYPE_TABLE_CREATE_END,
table_info.schema)
notifier.EVENT_TYPE_TABLE_CREATE,
dict(
table_name=table_info.name,
table_uuid=str(table_info.id),
schema=table_info.schema,
value=start_time
))
else:
table_info.status = models.TableMeta.TABLE_STATUS_CREATE_FAILED
self._table_info_repo.update(
@ -55,12 +62,17 @@ class AsyncSimpleStorageManager(manager.SimpleStorageManager):
self._notifier.error(
context,
notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
future.exception()
)
dict(
table_name=table_info.name,
table_uuid=str(table_info.id),
message=future.exception(),
value=start_time
))
future.add_done_callback(callback)
def _do_delete_table(self, context, table_info):
start_time = time.time()
future = self._execute_async(self._storage_driver.delete_table,
context, table_info)
@ -70,8 +82,12 @@ class AsyncSimpleStorageManager(manager.SimpleStorageManager):
context, table_info.name
)
self._notifier.info(
context, notifier.EVENT_TYPE_TABLE_DELETE_END,
table_info.name)
context, notifier.EVENT_TYPE_TABLE_DELETE,
dict(
table_name=table_info.name,
table_uuid=str(table_info.id),
value=start_time
))
else:
table_info.status = models.TableMeta.TABLE_STATUS_DELETE_FAILED
self._table_info_repo.update(
@ -79,7 +95,11 @@ class AsyncSimpleStorageManager(manager.SimpleStorageManager):
)
self._notifier.error(
context, notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
future.exception()
)
dict(
message=future.exception(),
table_name=table_info.name,
table_uuid=str(table_info.id),
value=start_time
))
future.add_done_callback(callback)

View File

@ -17,6 +17,7 @@
from concurrent import futures
import logging
import threading
import time
import weakref
import uuid
@ -45,38 +46,44 @@ class SimpleStorageManager(manager.StorageManager):
self._notifier = notifier.get_notifier()
def _do_create_table(self, context, table_info):
start_time = time.time()
try:
table_info.internal_name = self._storage_driver.create_table(
context, table_info
)
table_info.status = models.TableMeta.TABLE_STATUS_ACTIVE
self._table_info_repo.update(
context, table_info, ["status", "internal_name"]
)
except exception.BackendInteractionError as ex:
table_info.status = models.TableMeta.TABLE_STATUS_CREATE_FAILED
self._table_info_repo.update(
context, table_info, ["status"]
)
self._notifier.error(
context,
notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
dict(
table_name=table_info.name,
message=ex.message
message=ex.message,
value=start_time
))
raise
self._notifier.info(
table_info.status = models.TableMeta.TABLE_STATUS_ACTIVE
self._table_info_repo.update(
context, table_info, ["status", "internal_name"]
)
self._notifier.audit(
context,
notifier.EVENT_TYPE_TABLE_CREATE_END,
table_info.schema)
notifier.EVENT_TYPE_TABLE_CREATE,
dict(
schema=table_info.schema,
value=start_time
))
def _get_table_id(self, table_name):
return uuid.uuid1()
def create_table(self, context, table_name, table_schema):
self._notifier.info(
context,
notifier.EVENT_TYPE_TABLE_CREATE_START,
table_schema)
table_id = self._get_table_id(table_name)
table_info = table_info_repo.TableInfo(
table_name, table_id, table_schema,
@ -85,14 +92,7 @@ class SimpleStorageManager(manager.StorageManager):
try:
self._table_info_repo.save(context, table_info)
except exception.TableAlreadyExistsException as e:
self._notifier.error(
context,
notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
dict(
table_name=table_name,
message=e.message
))
except exception.TableAlreadyExistsException:
raise
self._do_create_table(context, table_info)
@ -104,53 +104,50 @@ class SimpleStorageManager(manager.StorageManager):
table_info.creation_date_time)
def _do_delete_table(self, context, table_info):
self._storage_driver.delete_table(context, table_info)
start_time = time.time()
try:
self._storage_driver.delete_table(context, table_info)
except exception.BackendInteractionError as ex:
table_info.status = models.TableMeta.TABLE_STATUS_DELETE_FAILED
self._table_info_repo.update(context, table_info, ["status"])
self._notifier.error(
context,
notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
dict(
table_name=table_info.name,
message=ex.message,
value=start_time
))
raise
self._table_info_repo.delete(context, table_info.name)
self._notifier.info(
self._notifier.audit(
context,
notifier.EVENT_TYPE_TABLE_DELETE_END,
table_info.name)
notifier.EVENT_TYPE_TABLE_DELETE,
dict(
table_name=table_info.name,
value=start_time
)
)
def delete_table(self, context, table_name):
self._notifier.info(
context,
notifier.EVENT_TYPE_TABLE_DELETE_START,
table_name)
try:
table_info = self._table_info_repo.get(context,
table_name,
['status'])
except exception.TableNotExistsException as e:
self._notifier.error(
context,
notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
dict(
table_name=table_name,
message=e.message
))
except exception.TableNotExistsException:
raise
if table_info.status == models.TableMeta.TABLE_STATUS_DELETING:
# table is already being deleted, just return immediately
self._notifier.info(
context,
notifier.EVENT_TYPE_TABLE_DELETE_END,
table_name)
return models.TableMeta(table_info.id, table_info.schema,
table_info.status,
table_info.creation_date_time)
elif table_info.in_use:
e = exception.ResourceInUseException()
self._notifier.error(
context,
notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
dict(
table_name=table_name,
message=e.message
))
raise e
raise exception.ResourceInUseException()
table_info.status = models.TableMeta.TABLE_STATUS_DELETING
@ -160,15 +157,20 @@ class SimpleStorageManager(manager.StorageManager):
# if table internal name is missing, table is not actually created
# just remove the table_info entry for the table and
# send notification
LOG.info(("Table '{}' with tenant id '{}', id '{}' does not have "
"valid internal name. Unable or no need to delete.").
format(table_info.name, context.tenant, table_info.id))
msg = ("Table '{}' with tenant id '{}', id '{}' does not have "
"valid internal name. Unable or no need to delete."
).format(table_info.name, context.tenant, table_info.id)
LOG.info(msg)
self._table_info_repo.delete(context, table_info.name)
self._notifier.info(
context,
notifier.EVENT_TYPE_TABLE_DELETE_END,
table_info.name)
notifier.EVENT_TYPE_TABLE_DELETE,
dict(
table_name=table_name,
message=msg,
value=time.time()
))
else:
self._do_delete_table(context, table_info)
@ -181,10 +183,6 @@ class SimpleStorageManager(manager.StorageManager):
def describe_table(self, context, table_name):
table_info = self._table_info_repo.get(
context, table_name, ['status', 'last_update_date_time'])
self._notifier.info(
context,
notifier.EVENT_TYPE_TABLE_DESCRIBE,
table_name)
if timeutils.is_older_than(table_info.last_update_date_time,
self._schema_operation_timeout):
@ -197,14 +195,6 @@ class SimpleStorageManager(manager.StorageManager):
table_info.name,
models.TableMeta.TABLE_STATUS_CREATE_FAILED)
)
self._notifier.error(
context,
notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
dict(
table_name=table_name,
message='Operation timed out'
)
)
if table_info.status == models.TableMeta.TABLE_STATUS_DELETING:
table_info.status = models.TableMeta.TABLE_STATUS_DELETE_FAILED
@ -215,14 +205,6 @@ class SimpleStorageManager(manager.StorageManager):
table_info.name,
models.TableMeta.TABLE_STATUS_DELETE_FAILED)
)
self._notifier.error(
context,
notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
dict(
table_name=table_name,
message='Operation timed out'
)
)
return models.TableMeta(
table_info.id,
@ -232,19 +214,9 @@ class SimpleStorageManager(manager.StorageManager):
def list_tables(self, context, exclusive_start_table_name=None,
limit=None):
tnames = self._table_info_repo.get_tenant_table_names(
return self._table_info_repo.get_tenant_table_names(
context, exclusive_start_table_name, limit
)
self._notifier.info(
context,
notifier.EVENT_TYPE_TABLE_LIST,
dict(
exclusive_start_table_name=exclusive_start_table_name,
limit=limit
)
)
return tnames
def list_tenant_tables(self, last_evaluated_project=None,
last_evaluated_table=None, limit=None):
@ -330,57 +302,18 @@ class SimpleStorageManager(manager.StorageManager):
context, table_info, attribute_map, return_values,
if_not_exist, expected_condition_map
)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_PUTITEM,
dict(
table_name=table_name,
attribute_map=attribute_map,
return_values=return_values,
if_not_exist=if_not_exist,
expected_condition_map=expected_condition_map
)
)
return result
def _put_item_async(self, context, table_info, attribute_map,
return_values=None, if_not_exist=False,
expected_condition_map=None):
payload = dict(
table_name=table_info.name,
attribute_map=attribute_map,
return_values=return_values,
if_not_exist=if_not_exist,
expected_condition_map=expected_condition_map
)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_PUTITEM_START,
payload)
put_future = self._execute_async(
self._storage_driver.put_item,
context, table_info, attribute_map, return_values,
if_not_exist, expected_condition_map
)
weak_self = weakref.proxy(self)
def callback(future):
if not future.exception():
weak_self._notifier.info(
context, notifier.EVENT_TYPE_DATA_PUTITEM_END,
payload
)
else:
weak_self._notifier.error(
context,
notifier.EVENT_TYPE_DATA_DELETEITEM_ERROR,
payload=future.exception()
)
put_future.add_done_callback(callback)
return put_future
def put_item_async(self, context, table_name, attribute_map, return_values,
@ -404,52 +337,15 @@ class SimpleStorageManager(manager.StorageManager):
result = self._storage_driver.delete_item(
context, table_info, key_attribute_map, expected_condition_map
)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_DELETEITEM,
dict(
table_name=table_name,
key_attribute_map=key_attribute_map,
expected_condition_map=expected_condition_map
)
)
return result
def _delete_item_async(self, context, table_info, key_attribute_map,
expected_condition_map=None):
payload = dict(
table_name=table_info.name,
key_attribute_map=key_attribute_map,
expected_condition_map=expected_condition_map
)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_DELETEITEM_START,
payload)
del_future = self._execute_async(
self._storage_driver.delete_item,
context, table_info, key_attribute_map, expected_condition_map
)
weak_self = weakref.proxy(self)
def callback(future):
if not future.exception():
weak_self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_DELETEITEM_END,
payload
)
else:
weak_self._notifier.error(
context,
notifier.EVENT_TYPE_DATA_DELETEITEM_ERROR,
future.exception()
)
del_future.add_done_callback(callback)
return del_future
def delete_item_async(self, context, table_name, key_attribute_map,
@ -469,10 +365,6 @@ class SimpleStorageManager(manager.StorageManager):
]
def execute_write_batch(self, context, write_request_map):
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_BATCHWRITE_START,
write_request_map)
write_request_list_to_send = []
for table_name, write_request_list in write_request_map.iteritems():
table_info = self._table_info_repo.get(context, table_name)
@ -534,15 +426,6 @@ class SimpleStorageManager(manager.StorageManager):
tables_unprocessed_items.append(write_request)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_BATCHWRITE_END,
dict(
write_request_map=write_request_map,
unprocessed_items=unprocessed_items
)
)
return unprocessed_items
def _batch_write_async(self, context, write_request_list):
@ -655,25 +538,11 @@ class SimpleStorageManager(manager.StorageManager):
return executor
prepared_batch.append(make_request_executor())
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_BATCHREAD_START,
read_request_list)
for request_executor in prepared_batch:
request_executor()
done_event.wait()
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_BATCHREAD_END,
dict(
read_request_list=read_request_list,
unprocessed_items=unprocessed_items
)
)
return items, unprocessed_items
def update_item(self, context, table_name, key_attribute_map,
@ -687,16 +556,6 @@ class SimpleStorageManager(manager.StorageManager):
context, table_info, key_attribute_map, attribute_action_map,
expected_condition_map
)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_UPDATEITEM,
dict(
table_name=table_name,
key_attribute_map=key_attribute_map,
attribute_action_map=attribute_action_map,
expected_condition_map=expected_condition_map
)
)
return result
@ -786,20 +645,6 @@ class SimpleStorageManager(manager.StorageManager):
range_condition_list, select_type,
index_name, limit, exclusive_start_key, consistent, order_type
)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_QUERY,
dict(
table_name=table_name,
indexed_condition_map=indexed_condition_map,
select_type=select_type,
index_name=index_name,
limit=limit,
exclusive_start_key=exclusive_start_key,
consistent=consistent,
order_type=order_type
)
)
return result
@ -833,31 +678,11 @@ class SimpleStorageManager(manager.StorageManager):
range_condition_list, select_type,
consistent=consistent
)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_GETITEM,
dict(
table_name=table_name,
hash_key=hash_key_value,
range_key=range_key_value,
select_type=select_type,
consistent=consistent
)
)
return result
def _get_item_async(self, context, table_info, hash_key, range_key,
attributes_to_get, consistent=True):
payload = dict(table_name=table_info.name,
hash_key=hash_key,
range_key=range_key,
attributes_to_get=attributes_to_get,
consistent=consistent)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_GETITEM_START,
payload)
select_type = (
models.SelectType.all() if attributes_to_get is None else
models.SelectType.specific_attributes(attributes_to_get)
@ -873,10 +698,6 @@ class SimpleStorageManager(manager.StorageManager):
context, table_info, hash_key_condition_list,
range_key_condition_list, select_type, consistent=consistent
)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_GETITEM_END,
payload)
return result
def scan(self, context, table_name, condition_map, attributes_to_get=None,
@ -888,26 +709,11 @@ class SimpleStorageManager(manager.StorageManager):
if exclusive_start_key is not None:
self._validate_table_schema(table_info, exclusive_start_key)
payload = dict(table_name=table_name,
condition_map=condition_map,
attributes_to_get=attributes_to_get,
limit=limit,
exclusive_start_key=exclusive_start_key,
consistent=consistent)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_SCAN_START,
payload)
with self.__task_semaphore:
result = self._storage_driver.scan(
context, table_info, condition_map, attributes_to_get,
limit, exclusive_start_key, consistent
)
self._notifier.info(
context,
notifier.EVENT_TYPE_DATA_SCAN_END,
payload)
return result

View File

@ -222,7 +222,7 @@ class CassandraTableInfoRepository(table_info_repo.TableInfoRepository):
if not result[0]['[applied]']:
raise exception.TableNotExistsException(
"Table {} is not exists".format(table_info.name)
"Table {} does not exist".format(table_info.name)
)
self._remove_table_info_from_cache(context, table_info.name)
return True

View File

@ -13,18 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
from concurrent import futures
import mock
import time
from oslo_utils import timeutils
from magnetodb import notifier
from magnetodb.storage import driver
from magnetodb.storage.manager import simple_impl
from magnetodb.storage.manager import async_simple_impl
from magnetodb.storage import models
from magnetodb.storage import table_info_repo
from magnetodb.tests.unittests.common.notifier import test_notification
DATETIMEFORMAT = test_notification.DATETIMEFORMAT
@ -51,7 +45,7 @@ class TestNotifyStorageManager(test_notification.TestNotify):
# wait for async create table call to finish
for i in range(10):
if (mock_table_info_repo.update.called and
len(self.get_notifications()) == 2):
len(self.get_notifications()) == 1):
break
else:
time.sleep(1)
@ -62,27 +56,14 @@ class TestNotifyStorageManager(test_notification.TestNotify):
self.assertTrue(mock_storage_driver.create_table.called)
# check notification queue
self.assertEqual(len(self.get_notifications()), 2)
self.assertEqual(len(self.get_notifications()), 1)
start_event = self.get_notifications()[0]
end_event = self.get_notifications()[1]
self.assertEqual(start_event['priority'], 'INFO')
self.assertEqual(start_event['event_type'],
notifier.EVENT_TYPE_TABLE_CREATE_START)
self.assertEqual(start_event['payload'], table_schema)
end_event = self.get_notifications()[0]
self.assertEqual(end_event['priority'], 'INFO')
self.assertEqual(end_event['event_type'],
notifier.EVENT_TYPE_TABLE_CREATE_END)
self.assertEqual(end_event['payload'], table_schema)
time_start = timeutils.parse_strtime(
start_event['timestamp'], DATETIMEFORMAT)
time_end = timeutils.parse_strtime(
end_event['timestamp'], DATETIMEFORMAT)
self.assertTrue(time_start < time_end,
"start event is later than end event")
notifier.EVENT_TYPE_TABLE_CREATE)
self.assertEqual(end_event['payload']['schema'], table_schema)
@mock.patch('magnetodb.storage.table_info_repo')
def test_notify_delete_table_async(self, mock_table_info_repo):
@ -113,7 +94,7 @@ class TestNotifyStorageManager(test_notification.TestNotify):
# wait for async delete table call to finish
for i in range(10):
if (mock_table_info_repo.delete.called and
len(self.get_notifications()) == 2):
len(self.get_notifications()) == 1):
# delete_table method of mock_storage_driver has been called
break
else:
@ -125,109 +106,11 @@ class TestNotifyStorageManager(test_notification.TestNotify):
self.assertTrue(mock_table_info_repo.delete.called)
# check notification queue
self.assertEqual(len(self.get_notifications()), 2)
self.assertEqual(len(self.get_notifications()), 1)
start_event = self.get_notifications()[0]
end_event = self.get_notifications()[1]
self.assertEqual(start_event['priority'], 'INFO')
self.assertEqual(start_event['event_type'],
notifier.EVENT_TYPE_TABLE_DELETE_START)
self.assertEqual(start_event['payload'], table_name)
self.assertEqual(end_event['priority'], 'INFO')
self.assertEqual(end_event['event_type'],
notifier.EVENT_TYPE_TABLE_DELETE_END)
self.assertEqual(end_event['payload'], table_name)
time_start = timeutils.parse_strtime(
start_event['timestamp'], DATETIMEFORMAT)
time_end = timeutils.parse_strtime(
end_event['timestamp'], DATETIMEFORMAT)
self.assertTrue(time_start < time_end,
"start event is later than end event")
@mock.patch('magnetodb.storage.driver.StorageDriver.batch_write')
@mock.patch('magnetodb.storage.manager.simple_impl.SimpleStorageManager.'
'_validate_table_schema')
@mock.patch('magnetodb.storage.manager.simple_impl.SimpleStorageManager.'
'_validate_table_is_active')
@mock.patch('magnetodb.storage.table_info_repo.TableInfoRepository.get')
@mock.patch('magnetodb.storage.manager.simple_impl.SimpleStorageManager.'
'_delete_item_async')
@mock.patch('magnetodb.storage.manager.simple_impl.SimpleStorageManager.'
'_put_item_async')
def test_notify_batch_write(self, mock_put_item, mock_delete_item,
mock_repo_get, mock_validate_table_is_active,
mock_validate_table_schema, mock_batch_write):
self.cleanup_test_notifier()
future = futures.Future()
future.set_result(True)
mock_put_item.return_value = future
mock_delete_item.return_value = future
table_info = mock.Mock()
table_info.schema.key_attributes = ['id', 'range']
mock_repo_get.return_value = table_info
mock_batch_write.side_effect = NotImplementedError()
context = mock.Mock(tenant='fake_tenant')
table_name = 'fake_table'
request_map = {
table_name: [
models.WriteItemRequest.put(
{
'id': models.AttributeValue('N', 1),
'range': models.AttributeValue('S', '1'),
'str': models.AttributeValue('S', 'str1'),
}
),
models.WriteItemRequest.put(
{
'id': models.AttributeValue('N', 2),
'range': models.AttributeValue('S', '1'),
'str': models.AttributeValue('S', 'str1')
}
),
models.WriteItemRequest.delete(
{
'id': models.AttributeValue('N', 3),
'range': models.AttributeValue('S', '3')
}
)
]
}
storage_manager = simple_impl.SimpleStorageManager(
driver.StorageDriver(), table_info_repo.TableInfoRepository()
)
storage_manager.execute_write_batch(context, request_map)
# check notification queue
self.assertEqual(len(self.get_notifications()), 2)
start_event = self.get_notifications()[0]
end_event = self.get_notifications()[1]
self.assertEqual(start_event['priority'], 'INFO')
self.assertEqual(start_event['event_type'],
notifier.EVENT_TYPE_DATA_BATCHWRITE_START)
self.assertEqual(len(start_event['payload']), len(request_map))
self.assertEqual(end_event['priority'], 'INFO')
self.assertEqual(end_event['event_type'],
notifier.EVENT_TYPE_DATA_BATCHWRITE_END)
self.assertEqual(len(end_event['payload']['write_request_map']),
len(request_map))
self.assertEqual(len(end_event['payload']['unprocessed_items']), 0)
time_start = timeutils.parse_strtime(
start_event['timestamp'], DATETIMEFORMAT)
time_end = timeutils.parse_strtime(
end_event['timestamp'], DATETIMEFORMAT)
self.assertTrue(time_start < time_end,
"start event is later than end event")
notifier.EVENT_TYPE_TABLE_DELETE)
self.assertEqual(start_event['payload']['table_name'], table_name)

View File

@ -46,7 +46,7 @@ commands =
ignore = H305,H306,H307,H402,H404,H405,H904,E226,E241
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,*egg,tools,etc,build,doc,*openstack/common*
exclude=.venv,.git,.tox,dist,*egg,tools,etc,build,doc,*openstack/common*,*contrib/cassandra*
filename=*.py,magnetodb-*
[hacking]