Properly compute resource_properties pickle size

When checking the size of resource_properties before storing an event,
we use the regular pickle module with the default protocol. SQLAlchemy
uses cPickle if available, with the highest protocol, which can result
in a fairly different stored size. Use SQLAlchemy's pickle module and
protocol for the check instead; this should remove the last occurrence
of the storage bug.
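
For illustration only (not part of the original commit message): a
minimal sketch of why the protocol matters. The payload dict below is
made up; on Python 2, where the default pickle protocol is 0 and
HIGHEST_PROTOCOL is 2, the two measurements can differ substantially.

    import pickle

    # Hypothetical stand-in for an event's resource_properties dict.
    props = {'prop-%d' % i: 'x' * 64 for i in range(1000)}

    # What the pre-store check measured before this change
    # (default protocol).
    default_size = len(pickle.dumps(props))

    # Closer to what SQLAlchemy's PickleType actually writes
    # (highest protocol); on Python 2 this is typically much smaller.
    highest_size = len(pickle.dumps(props, pickle.HIGHEST_PROTOCOL))

    print(default_size, highest_size)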

Change-Id: Ia417e221f7097a388a6b36959064b12cb818206f
Related-Bug: #1493858
(cherry picked from commit 1e92462ae6)
Thomas Herve 2016-10-04 13:18:11 +02:00 committed by Steven Hardy
parent 6533b3d874
commit 33c5eee231
1 changed file with 6 additions and 3 deletions

@@ -11,9 +11,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-import pickle
 import six
+from sqlalchemy.util.compat import pickle
 import oslo_db.exception
 from oslo_log import log as logging
@@ -75,7 +76,8 @@ class Event(object):
         # event.resource_properties column if the data is too large
         # (greater than permitted by BLOB). Otherwise, we end up with
         # an unsightly log message.
-        rp_size = len(pickle.dumps(ev['resource_properties']))
+        rp_size = len(pickle.dumps(ev['resource_properties'],
+                                   pickle.HIGHEST_PROTOCOL))
         if rp_size > MAX_EVENT_RESOURCE_PROPERTIES_SIZE:
             LOG.debug('event\'s resource_properties too large to store at '
                       '%d bytes', rp_size)
@@ -86,7 +88,8 @@ class Event(object):
             err = 'Resource properties are too large to store fully'
             ev['resource_properties'].update({'Error': err})
             ev['resource_properties'][max_key] = '<Deleted, too large>'
-            rp_size = len(pickle.dumps(ev['resource_properties']))
+            rp_size = len(pickle.dumps(ev['resource_properties'],
+                                       pickle.HIGHEST_PROTOCOL))
             if rp_size > MAX_EVENT_RESOURCE_PROPERTIES_SIZE:
                 LOG.debug('event\'s resource_properties STILL too large '
                           'after truncating largest key at %d bytes', rp_size)
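
Usage note (a sketch, not code from the commit; the helper name is
hypothetical): the point of the change is to measure the payload with
the same pickle module and protocol that SQLAlchemy's PickleType uses
by default, so the pre-store size check agrees with the bytes that
actually reach the column.

    from sqlalchemy.util.compat import pickle

    def resource_properties_size(props):
        # PickleType defaults to pickle.HIGHEST_PROTOCOL, so measuring
        # with the same module and protocol mirrors what SQLAlchemy
        # will actually serialize into the BLOB column.
        return len(pickle.dumps(props, pickle.HIGHEST_PROTOCOL))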