Switch to oslo_db retry decorator

Our sqlalchemy code uses a custom retry decorator that does not
limit the number of retries. Since this code was written, oslo_db
now has a retry decorator better used for this purpose that limits
the number of retry attempts.

Closes-bug: #1738292
(cherry picked from commit 3aae507a2a)

Conflicts:
  cinder/db/sqlalchemy/api.py

Change-Id: If39a81bbea26c14604b151a51d8e2bbe1bb2f110
This commit is contained in:
Sean McGinnis 2017-12-14 16:28:28 -06:00
parent c709fd13be
commit 82c9d00160
1 changed file with 23 additions and 41 deletions

View File

@ -26,10 +26,10 @@ import itertools
import re
import sys
import threading
import time
import uuid
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db import options
from oslo_db.sqlalchemy import session as db_session
@ -241,24 +241,6 @@ def require_backup_exists(f):
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warning("Deadlock detected when running "
"'%(func_name)s': Retrying...",
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def handle_db_data_error(f):
def wrapper(*args, **kwargs):
try:
@ -569,7 +551,7 @@ def service_create(context, values):
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def service_update(context, service_id, values):
if 'disabled' in values:
values = values.copy()
@ -713,7 +695,7 @@ def cluster_create(context, values):
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def cluster_update(context, id, values):
"""Set the given properties on an cluster and update it.
@ -1130,7 +1112,7 @@ def _get_quota_usages_by_resource(context, session, resource):
@require_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_usage_update_resource(context, old_res, new_res):
session = get_session()
with session.begin():
@ -1141,7 +1123,7 @@ def quota_usage_update_resource(context, old_res, new_res):
@require_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None,
is_allocated_reserve=False):
@ -1322,7 +1304,7 @@ def _dict_with_usage_id(usages):
@require_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_commit(context, reservations, project_id=None):
session = get_session()
with session.begin():
@ -1341,7 +1323,7 @@ def reservation_commit(context, reservations, project_id=None):
@require_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_rollback(context, reservations, project_id=None):
session = get_session()
with session.begin():
@ -1367,7 +1349,7 @@ def quota_destroy_by_project(*args, **kwargs):
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_destroy_all_by_project(context, project_id, only_quotas=False):
"""Destroy all quotas associated with a project.
@ -1409,7 +1391,7 @@ def quota_destroy_all_by_project(context, project_id, only_quotas=False):
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_expire(context):
session = get_session()
with session.begin():
@ -1586,7 +1568,7 @@ def volume_data_get_for_project(context, project_id, volume_type_id=None):
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_destroy(context, volume_id):
session = get_session()
now = timeutils.utcnow()
@ -1961,7 +1943,7 @@ def volume_attachment_get_all_by_project(context, project_id, filters=None,
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def attachment_destroy(context, attachment_id):
"""Destroy the specified attachment record."""
utcnow = timeutils.utcnow()
@ -2780,7 +2762,7 @@ def volume_metadata_get(context, volume_id):
@require_context
@require_volume_exists
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_metadata_delete(context, volume_id, key, meta_type):
if meta_type == common.METADATA_TYPES.user:
(_volume_user_metadata_get_query(context, volume_id).
@ -2803,7 +2785,7 @@ def volume_metadata_delete(context, volume_id, key, meta_type):
@require_context
@handle_db_data_error
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_metadata_update(context, volume_id, metadata, delete, meta_type):
if meta_type == common.METADATA_TYPES.user:
return _volume_user_metadata_update(context,
@ -2852,7 +2834,7 @@ def volume_admin_metadata_get(context, volume_id):
@require_admin_context
@require_volume_exists
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_admin_metadata_delete(context, volume_id, key):
_volume_admin_metadata_get_query(context, volume_id).\
filter_by(key=key).\
@ -2862,7 +2844,7 @@ def volume_admin_metadata_delete(context, volume_id, key):
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_admin_metadata_update(context, volume_id, metadata, delete,
add=True, update=True):
return _volume_admin_metadata_update(context, volume_id, metadata, delete,
@ -2890,7 +2872,7 @@ def snapshot_create(context, values):
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def snapshot_destroy(context, snapshot_id):
utcnow = timeutils.utcnow()
session = get_session()
@ -3233,7 +3215,7 @@ def snapshot_metadata_get(context, snapshot_id):
@require_context
@require_snapshot_exists
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def snapshot_metadata_delete(context, snapshot_id, key):
_snapshot_metadata_get_query(context, snapshot_id).\
filter_by(key=key).\
@ -3259,7 +3241,7 @@ def _snapshot_metadata_get_item(context, snapshot_id, key, session=None):
@require_context
@require_snapshot_exists
@handle_db_data_error
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
session = get_session()
with session.begin():
@ -3967,7 +3949,7 @@ def volume_type_qos_specs_get(context, type_id):
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_type_destroy(context, id):
utcnow = timeutils.utcnow()
session = get_session()
@ -4005,7 +3987,7 @@ def volume_type_destroy(context, id):
@require_admin_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def group_type_destroy(context, id):
session = get_session()
with session.begin():
@ -5171,7 +5153,7 @@ def _backup_metadata_get_item(context, backup_id, key, session=None):
@require_context
@require_backup_exists
@handle_db_data_error
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def backup_metadata_update(context, backup_id, metadata, delete):
session = get_session()
with session.begin():
@ -5284,7 +5266,7 @@ def transfer_create(context, values):
@require_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def transfer_destroy(context, transfer_id):
utcnow = timeutils.utcnow()
session = get_session()
@ -6878,7 +6860,7 @@ def _check_is_not_multitable(values, model):
@require_context
@_retry_on_deadlock
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def conditional_update(context, model, values, expected_values, filters=(),
include_deleted='no', project_only=False, order=None):
"""Compare-and-swap conditional update SQLAlchemy implementation."""