follow oslo
move to oslo.db and some other oslo libraries Change-Id: I57e4f01bd46339cdfe3ed2e4748eb1e923cea584
This commit is contained in:
parent
71ef91bdc2
commit
67eae1069d
|
@ -18,7 +18,9 @@ Starting point for routing EC2 requests.
|
||||||
import hashlib
|
import hashlib
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
from oslo_serialization import jsonutils
|
||||||
from oslo_utils import timeutils
|
from oslo_utils import timeutils
|
||||||
import requests
|
import requests
|
||||||
import six
|
import six
|
||||||
|
@ -31,9 +33,7 @@ from ec2api.api import ec2utils
|
||||||
from ec2api.api import faults
|
from ec2api.api import faults
|
||||||
from ec2api import context
|
from ec2api import context
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import jsonutils
|
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
from ec2api import wsgi
|
from ec2api import wsgi
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -17,7 +17,7 @@ try:
|
||||||
except ImportError:
|
except ImportError:
|
||||||
pass # clients will log absense of neutronclient in this case
|
pass # clients will log absense of neutronclient in this case
|
||||||
from novaclient import exceptions as nova_exception
|
from novaclient import exceptions as nova_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.api import common
|
from ec2api.api import common
|
||||||
|
@ -25,7 +25,7 @@ from ec2api.api import ec2utils
|
||||||
from ec2api.api import internet_gateway as internet_gateway_api
|
from ec2api.api import internet_gateway as internet_gateway_api
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
|
@ -21,15 +21,15 @@ import datetime
|
||||||
from xml.dom import minidom
|
from xml.dom import minidom
|
||||||
|
|
||||||
from lxml import etree
|
from lxml import etree
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
from oslo_utils import encodeutils
|
from oslo_utils import encodeutils
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from ec2api.api import cloud
|
from ec2api.api import cloud
|
||||||
from ec2api.api import ec2utils
|
from ec2api.api import ec2utils
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
|
@ -17,9 +17,8 @@ Common Auth Middleware.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
auth_opts = [
|
auth_opts = [
|
||||||
|
|
|
@ -12,12 +12,12 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.api import common
|
from ec2api.api import common
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
from ec2api import utils
|
from ec2api import utils
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -16,12 +16,12 @@
|
||||||
from keystoneclient.v2_0 import client as kc
|
from keystoneclient.v2_0 import client as kc
|
||||||
from novaclient import client as novaclient
|
from novaclient import client as novaclient
|
||||||
from novaclient import shell as novashell
|
from novaclient import shell as novashell
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
from oslo import messaging
|
from oslo_log import log as logging
|
||||||
|
import oslo_messaging as messaging
|
||||||
|
|
||||||
from ec2api import context as ec2_context
|
from ec2api import context as ec2_context
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
|
@ -22,7 +22,8 @@ datastore.
|
||||||
import collections
|
import collections
|
||||||
import itertools
|
import itertools
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import address
|
from ec2api.api import address
|
||||||
from ec2api.api import availability_zone
|
from ec2api.api import availability_zone
|
||||||
|
@ -40,7 +41,6 @@ from ec2api.api import tag
|
||||||
from ec2api.api import volume
|
from ec2api.api import volume
|
||||||
from ec2api.api import vpc
|
from ec2api.api import vpc
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
|
@ -16,14 +16,14 @@ import collections
|
||||||
import fnmatch
|
import fnmatch
|
||||||
import inspect
|
import inspect
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import ec2utils
|
from ec2api.api import ec2utils
|
||||||
from ec2api.api import validator
|
from ec2api.api import validator
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common import gettextutils as textutils
|
from ec2api.i18n import _LW
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
ec2_opts = [
|
ec2_opts = [
|
||||||
|
@ -86,7 +86,7 @@ class OnCrashCleaner(object):
|
||||||
formatted_args += ', '
|
formatted_args += ', '
|
||||||
formatted_args += kwargs_string
|
formatted_args += kwargs_string
|
||||||
LOG.warning(
|
LOG.warning(
|
||||||
textutils._LW('Error cleaning up %(name)s(%(args)s)') %
|
_LW('Error cleaning up %(name)s(%(args)s)') %
|
||||||
{'name': name, 'args': formatted_args},
|
{'name': name, 'args': formatted_args},
|
||||||
exc_info=True)
|
exc_info=True)
|
||||||
pass
|
pass
|
||||||
|
|
|
@ -14,15 +14,15 @@
|
||||||
|
|
||||||
|
|
||||||
import netaddr
|
import netaddr
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.api import common
|
from ec2api.api import common
|
||||||
from ec2api.api import ec2utils
|
from ec2api.api import ec2utils
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
|
@ -15,13 +15,13 @@
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from glanceclient.common import exceptions as glance_exception
|
from glanceclient.common import exceptions as glance_exception
|
||||||
|
from oslo_log import log as logging
|
||||||
from oslo_utils import timeutils
|
from oslo_utils import timeutils
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -146,7 +146,7 @@ def is_ec2_timestamp_expired(request, expires=None):
|
||||||
timeutils.is_newer_than(query_time, expires))
|
timeutils.is_newer_than(query_time, expires))
|
||||||
return False
|
return False
|
||||||
except ValueError:
|
except ValueError:
|
||||||
LOG.audit(_("Timestamp is invalid."))
|
LOG.exception(_("Timestamp is invalid: "))
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -14,14 +14,13 @@
|
||||||
|
|
||||||
from xml.sax import saxutils
|
from xml.sax import saxutils
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
import webob.dec
|
import webob.dec
|
||||||
import webob.exc
|
import webob.exc
|
||||||
|
|
||||||
import ec2api.api
|
import ec2api.api
|
||||||
from ec2api import context
|
from ec2api import context
|
||||||
from ec2api.openstack.common import gettextutils
|
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
@ -43,8 +42,6 @@ def utf8(value):
|
||||||
"""
|
"""
|
||||||
if isinstance(value, unicode):
|
if isinstance(value, unicode):
|
||||||
return value.encode('utf-8')
|
return value.encode('utf-8')
|
||||||
elif isinstance(value, gettextutils.Message):
|
|
||||||
return unicode(value).encode('utf-8')
|
|
||||||
assert isinstance(value, str)
|
assert isinstance(value, str)
|
||||||
return value
|
return value
|
||||||
|
|
||||||
|
|
|
@ -27,8 +27,8 @@ import boto.s3.connection
|
||||||
import eventlet
|
import eventlet
|
||||||
from glanceclient.common import exceptions as glance_exception
|
from glanceclient.common import exceptions as glance_exception
|
||||||
from lxml import etree
|
from lxml import etree
|
||||||
from oslo.config import cfg
|
|
||||||
from oslo_concurrency import processutils
|
from oslo_concurrency import processutils
|
||||||
|
from oslo_config import cfg
|
||||||
from oslo_utils import timeutils
|
from oslo_utils import timeutils
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
|
@ -38,7 +38,7 @@ from ec2api.api import instance as instance_api
|
||||||
from ec2api import context as ec2_context
|
from ec2api import context as ec2_context
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
|
|
||||||
|
|
||||||
s3_opts = [
|
s3_opts = [
|
||||||
|
|
|
@ -20,7 +20,7 @@ import random
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from novaclient import exceptions as nova_exception
|
from novaclient import exceptions as nova_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_utils import timeutils
|
from oslo_utils import timeutils
|
||||||
|
|
||||||
from ec2api.api import address as address_api
|
from ec2api.api import address as address_api
|
||||||
|
@ -31,8 +31,8 @@ from ec2api.api import network_interface as network_interface_api
|
||||||
from ec2api.api import security_group as security_group_api
|
from ec2api.api import security_group as security_group_api
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
|
from ec2api.i18n import _
|
||||||
from ec2api import novadb
|
from ec2api import novadb
|
||||||
from ec2api.openstack.common.gettextutils import _
|
|
||||||
|
|
||||||
|
|
||||||
ec2_opts = [
|
ec2_opts = [
|
||||||
|
|
|
@ -19,15 +19,15 @@ datastore.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from neutronclient.common import exceptions as neutron_exception
|
from neutronclient.common import exceptions as neutron_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.api import common
|
from ec2api.api import common
|
||||||
from ec2api.api import ec2utils
|
from ec2api.api import ec2utils
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
|
@ -15,13 +15,13 @@
|
||||||
import base64
|
import base64
|
||||||
|
|
||||||
from novaclient import exceptions as nova_exception
|
from novaclient import exceptions as nova_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.api import common
|
from ec2api.api import common
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
|
@ -17,7 +17,8 @@ import collections
|
||||||
|
|
||||||
import netaddr
|
import netaddr
|
||||||
from neutronclient.common import exceptions as neutron_exception
|
from neutronclient.common import exceptions as neutron_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
from oslo_utils import timeutils
|
from oslo_utils import timeutils
|
||||||
|
|
||||||
from ec2api.api import address as address_api
|
from ec2api.api import address as address_api
|
||||||
|
@ -28,8 +29,7 @@ from ec2api.api import ec2utils
|
||||||
from ec2api.api import security_group as security_group_api
|
from ec2api.api import security_group as security_group_api
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
|
@ -23,7 +23,7 @@ from ec2api.api import common
|
||||||
from ec2api.api import ec2utils
|
from ec2api.api import ec2utils
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
|
|
||||||
|
|
||||||
Validator = common.Validator
|
Validator = common.Validator
|
||||||
|
|
|
@ -20,7 +20,8 @@ try:
|
||||||
except ImportError:
|
except ImportError:
|
||||||
pass # clients will log absense of neutronclient in this case
|
pass # clients will log absense of neutronclient in this case
|
||||||
from novaclient import exceptions as nova_exception
|
from novaclient import exceptions as nova_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.api import common
|
from ec2api.api import common
|
||||||
|
@ -28,8 +29,7 @@ from ec2api.api import ec2utils
|
||||||
from ec2api.api import validator
|
from ec2api.api import validator
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
|
@ -19,7 +19,7 @@ from ec2api.api import common
|
||||||
from ec2api.api import ec2utils
|
from ec2api.api import ec2utils
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
|
|
||||||
|
|
||||||
"""Snapshot related API implementation
|
"""Snapshot related API implementation
|
||||||
|
|
|
@ -15,7 +15,8 @@
|
||||||
|
|
||||||
import netaddr
|
import netaddr
|
||||||
from neutronclient.common import exceptions as neutron_exception
|
from neutronclient.common import exceptions as neutron_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.api import common
|
from ec2api.api import common
|
||||||
|
@ -24,8 +25,7 @@ from ec2api.api import network_interface as network_interface_api
|
||||||
from ec2api.api import route_table as route_table_api
|
from ec2api.api import route_table as route_table_api
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
|
@ -16,7 +16,7 @@ from ec2api.api import common
|
||||||
from ec2api.api import ec2utils
|
from ec2api.api import ec2utils
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
|
|
||||||
|
|
||||||
"""Tag related API implementation
|
"""Tag related API implementation
|
||||||
|
|
|
@ -15,10 +15,10 @@
|
||||||
import re
|
import re
|
||||||
|
|
||||||
import netaddr
|
import netaddr
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
|
@ -20,7 +20,7 @@ from ec2api.api import common
|
||||||
from ec2api.api import ec2utils
|
from ec2api.api import ec2utils
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
|
|
||||||
|
|
||||||
"""Volume related API implementation
|
"""Volume related API implementation
|
||||||
|
|
|
@ -14,7 +14,8 @@
|
||||||
|
|
||||||
|
|
||||||
from neutronclient.common import exceptions as neutron_exception
|
from neutronclient.common import exceptions as neutron_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.api import common
|
from ec2api.api import common
|
||||||
|
@ -25,8 +26,7 @@ from ec2api.api import security_group as security_group_api
|
||||||
from ec2api.api import subnet as subnet_api
|
from ec2api.api import subnet as subnet_api
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
|
@ -1,16 +0,0 @@
|
||||||
# Copyright 2014
|
|
||||||
# The Cloudscaling Group, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from ec2api.openstack.common import gettextutils
|
|
||||||
gettextutils.install('ec2api')
|
|
|
@ -18,14 +18,18 @@ EC2api API Server
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api import config
|
from ec2api import config
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
from ec2api import service
|
from ec2api import service
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
config.parse_args(sys.argv)
|
config.parse_args(sys.argv)
|
||||||
logging.setup('ec2api')
|
logging.setup(CONF, 'ec2api')
|
||||||
|
|
||||||
server = service.WSGIService('ec2api', max_url_len=16384)
|
server = service.WSGIService('ec2api', max_url_len=16384)
|
||||||
service.serve(server)
|
service.serve(server)
|
||||||
|
|
|
@ -18,14 +18,18 @@ EC2api API Metadata Server
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api import config
|
from ec2api import config
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
from ec2api import service
|
from ec2api import service
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
config.parse_args(sys.argv)
|
config.parse_args(sys.argv)
|
||||||
logging.setup("ec2api")
|
logging.setup(CONF, "ec2api")
|
||||||
|
|
||||||
server = service.WSGIService('metadata')
|
server = service.WSGIService('metadata')
|
||||||
service.serve(server, workers=server.workers)
|
service.serve(server, workers=server.workers)
|
||||||
|
|
|
@ -17,13 +17,15 @@
|
||||||
CLI interface for EC2 API management.
|
CLI interface for EC2 API management.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
from ec2api import config
|
||||||
from ec2api.db import migration
|
from ec2api.db import migration
|
||||||
from ec2api.openstack.common import log
|
from ec2api.i18n import _
|
||||||
from ec2api import version
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
@ -61,13 +63,20 @@ command_opt = cfg.SubCommandOpt('command',
|
||||||
def main():
|
def main():
|
||||||
CONF.register_cli_opt(command_opt)
|
CONF.register_cli_opt(command_opt)
|
||||||
try:
|
try:
|
||||||
default_config_files = cfg.find_config_files('ec2api')
|
config.parse_args(sys.argv)
|
||||||
CONF(sys.argv[1:], project='ec2api', prog='ec2-api-manage',
|
log.setup(CONF, "ec2api")
|
||||||
version=version.version_info.version_string(),
|
except cfg.ConfigFilesNotFoundError:
|
||||||
default_config_files=default_config_files)
|
cfgfile = CONF.config_file[-1] if CONF.config_file else None
|
||||||
log.setup("ec2api")
|
if cfgfile and not os.access(cfgfile, os.R_OK):
|
||||||
except RuntimeError as e:
|
st = os.stat(cfgfile)
|
||||||
sys.exit("ERROR: %s" % e)
|
print(_("Could not read %s. Re-running with sudo") % cfgfile)
|
||||||
|
try:
|
||||||
|
os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
|
||||||
|
except Exception:
|
||||||
|
print(_('sudo failed, continuing as if nothing happened'))
|
||||||
|
|
||||||
|
print(_('Please re-run ec2-api-manage as root.'))
|
||||||
|
return(2)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
CONF.command.func()
|
CONF.command.func()
|
||||||
|
|
|
@ -12,18 +12,38 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_db import options
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from ec2api.openstack.common.db import options
|
|
||||||
from ec2api import paths
|
from ec2api import paths
|
||||||
from ec2api import version
|
from ec2api import version
|
||||||
|
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
|
||||||
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ec2api.sqlite')
|
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ec2api.sqlite')
|
||||||
|
|
||||||
|
_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
|
||||||
|
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
|
||||||
|
'oslo.messaging=INFO', 'iso8601=WARN',
|
||||||
|
'requests.packages.urllib3.connectionpool=WARN',
|
||||||
|
'urllib3.connectionpool=WARN', 'websocket=WARN',
|
||||||
|
'keystonemiddleware=WARN', 'routes.middleware=WARN',
|
||||||
|
'stevedore=WARN', 'glanceclient=WARN']
|
||||||
|
|
||||||
|
_DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d '
|
||||||
|
'%(levelname)s %(name)s [%(request_id)s '
|
||||||
|
'%(user_identity)s] %(instance)s'
|
||||||
|
'%(message)s')
|
||||||
|
|
||||||
|
|
||||||
def parse_args(argv, default_config_files=None):
|
def parse_args(argv, default_config_files=None):
|
||||||
options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
|
log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
|
||||||
|
log.register_options(CONF)
|
||||||
|
options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
|
||||||
sqlite_db='ec2api.sqlite')
|
sqlite_db='ec2api.sqlite')
|
||||||
|
|
||||||
cfg.CONF(argv[1:],
|
cfg.CONF(argv[1:],
|
||||||
project='ec2api',
|
project='ec2api',
|
||||||
version=version.version_info.version_string(),
|
version=version.version_info.version_string(),
|
||||||
|
|
|
@ -16,13 +16,13 @@
|
||||||
|
|
||||||
import uuid
|
import uuid
|
||||||
|
|
||||||
|
from oslo_log import log as logging
|
||||||
from oslo_utils import timeutils
|
from oslo_utils import timeutils
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import local
|
from ec2api.openstack.common import local
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
|
@ -28,10 +28,9 @@ functions from ec2api.db namespace, not the ec2api.db.api namespace.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from eventlet import tpool
|
from eventlet import tpool
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_db import api as db_api
|
||||||
from ec2api.openstack.common.db import api as db_api
|
from oslo_log import log as logging
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
tpool_opts = [
|
tpool_opts = [
|
||||||
|
@ -45,8 +44,6 @@ tpool_opts = [
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.register_opts(tpool_opts, 'database')
|
CONF.register_opts(tpool_opts, 'database')
|
||||||
CONF.import_opt('backend', 'ec2api.openstack.common.db.options',
|
|
||||||
group='database')
|
|
||||||
|
|
||||||
_BACKEND_MAPPING = {'sqlalchemy': 'ec2api.db.sqlalchemy.api'}
|
_BACKEND_MAPPING = {'sqlalchemy': 'ec2api.db.sqlalchemy.api'}
|
||||||
|
|
||||||
|
|
|
@ -14,10 +14,10 @@
|
||||||
|
|
||||||
"""Database setup and migration commands."""
|
"""Database setup and migration commands."""
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
|
|
@ -20,43 +20,37 @@ import json
|
||||||
import random
|
import random
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_db import exception as db_exception
|
||||||
|
from oslo_db.sqlalchemy import session as db_session
|
||||||
from sqlalchemy import and_
|
from sqlalchemy import and_
|
||||||
from sqlalchemy import or_
|
from sqlalchemy import or_
|
||||||
from sqlalchemy.sql import bindparam
|
from sqlalchemy.sql import bindparam
|
||||||
|
|
||||||
import ec2api.context
|
import ec2api.context
|
||||||
from ec2api.db.sqlalchemy import models
|
from ec2api.db.sqlalchemy import models
|
||||||
from ec2api.openstack.common.db import exception as db_exception
|
|
||||||
from ec2api.openstack.common.db.sqlalchemy import session as db_session
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.import_opt('connection',
|
|
||||||
'ec2api.openstack.common.db.sqlalchemy.session',
|
|
||||||
group='database')
|
|
||||||
|
|
||||||
|
|
||||||
_MASTER_FACADE = None
|
_MASTER_FACADE = None
|
||||||
|
|
||||||
|
|
||||||
def _create_facade_lazily(use_slave=False):
|
def _create_facade_lazily():
|
||||||
global _MASTER_FACADE
|
global _MASTER_FACADE
|
||||||
|
|
||||||
if _MASTER_FACADE is None:
|
if _MASTER_FACADE is None:
|
||||||
_MASTER_FACADE = db_session.EngineFacade(
|
_MASTER_FACADE = db_session.EngineFacade.from_config(CONF)
|
||||||
CONF.database.connection,
|
|
||||||
**dict(CONF.database.iteritems())
|
|
||||||
)
|
|
||||||
return _MASTER_FACADE
|
return _MASTER_FACADE
|
||||||
|
|
||||||
|
|
||||||
def get_engine(use_slave=False):
|
def get_engine():
|
||||||
facade = _create_facade_lazily(use_slave)
|
facade = _create_facade_lazily()
|
||||||
return facade.get_engine()
|
return facade.get_engine()
|
||||||
|
|
||||||
|
|
||||||
def get_session(use_slave=False, **kwargs):
|
def get_session(**kwargs):
|
||||||
facade = _create_facade_lazily(use_slave)
|
facade = _create_facade_lazily()
|
||||||
return facade.get_session(**kwargs)
|
return facade.get_session(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -22,7 +22,7 @@ import sqlalchemy
|
||||||
|
|
||||||
from ec2api.db.sqlalchemy import api as db_session
|
from ec2api.db.sqlalchemy import api as db_session
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
|
|
||||||
INIT_VERSION = 0
|
INIT_VERSION = 0
|
||||||
_REPOSITORY = None
|
_REPOSITORY = None
|
||||||
|
|
|
@ -16,12 +16,11 @@
|
||||||
SQLAlchemy models for ec2api data.
|
SQLAlchemy models for ec2api data.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from oslo_db.sqlalchemy import models
|
||||||
from sqlalchemy.ext.declarative import declarative_base
|
from sqlalchemy.ext.declarative import declarative_base
|
||||||
from sqlalchemy import Column, PrimaryKeyConstraint, String, Text
|
from sqlalchemy import Column, PrimaryKeyConstraint, String, Text
|
||||||
from sqlalchemy import UniqueConstraint
|
from sqlalchemy import UniqueConstraint
|
||||||
|
|
||||||
from ec2api.openstack.common.db.sqlalchemy import models
|
|
||||||
|
|
||||||
BASE = declarative_base()
|
BASE = declarative_base()
|
||||||
|
|
||||||
ITEMS_OS_ID_INDEX_NAME = 'items_os_id_idx'
|
ITEMS_OS_ID_INDEX_NAME = 'items_os_id_idx'
|
||||||
|
|
|
@ -22,11 +22,11 @@ SHOULD include dedicated exception logging.
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,46 @@
|
||||||
|
# Copyright 2014 IBM Corp.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""oslo.i18n integration module.
|
||||||
|
|
||||||
|
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import oslo_i18n
|
||||||
|
|
||||||
|
DOMAIN = 'ec2-api'
|
||||||
|
|
||||||
|
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
|
||||||
|
|
||||||
|
# The primary translation function using the well-known name "_"
|
||||||
|
_ = _translators.primary
|
||||||
|
|
||||||
|
# Translators for log levels.
|
||||||
|
#
|
||||||
|
# The abbreviated names are meant to reflect the usual use of a short
|
||||||
|
# name like '_'. The "L" is for "log" and the other letter comes from
|
||||||
|
# the level.
|
||||||
|
_LI = _translators.log_info
|
||||||
|
_LW = _translators.log_warning
|
||||||
|
_LE = _translators.log_error
|
||||||
|
_LC = _translators.log_critical
|
||||||
|
|
||||||
|
|
||||||
|
def translate(value, user_locale):
|
||||||
|
return oslo_i18n.translate(value, user_locale)
|
||||||
|
|
||||||
|
|
||||||
|
def get_available_languages():
|
||||||
|
return oslo_i18n.get_available_languages(DOMAIN)
|
|
@ -19,16 +19,15 @@ import urlparse
|
||||||
|
|
||||||
import httplib2
|
import httplib2
|
||||||
from keystoneclient.v2_0 import client as keystone_client
|
from keystoneclient.v2_0 import client as keystone_client
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
import six
|
import six
|
||||||
import webob
|
import webob
|
||||||
|
|
||||||
from ec2api import context as ec2context
|
from ec2api import context as ec2context
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
|
from ec2api.i18n import _, _LE, _LW
|
||||||
from ec2api.metadata import api
|
from ec2api.metadata import api
|
||||||
from ec2api.openstack.common import gettextutils as textutils
|
|
||||||
from ec2api.openstack.common.gettextutils import _
|
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
from ec2api import utils
|
from ec2api import utils
|
||||||
from ec2api import wsgi
|
from ec2api import wsgi
|
||||||
|
|
||||||
|
@ -103,7 +102,7 @@ class MetadataRequestHandler(wsgi.Application):
|
||||||
except exception.EC2MetadataNotFound:
|
except exception.EC2MetadataNotFound:
|
||||||
return webob.exc.HTTPNotFound()
|
return webob.exc.HTTPNotFound()
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(textutils._LE("Unexpected error."))
|
LOG.exception(_LE("Unexpected error."))
|
||||||
msg = _('An unknown error has occurred. '
|
msg = _('An unknown error has occurred. '
|
||||||
'Please try your request again.')
|
'Please try your request again.')
|
||||||
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
|
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
|
||||||
|
@ -138,7 +137,7 @@ class MetadataRequestHandler(wsgi.Application):
|
||||||
req.response.body = content
|
req.response.body = content
|
||||||
return req.response
|
return req.response
|
||||||
elif resp.status == 403:
|
elif resp.status == 403:
|
||||||
LOG.warn(textutils._LW(
|
LOG.warn(_LW(
|
||||||
'The remote metadata server responded with Forbidden. This '
|
'The remote metadata server responded with Forbidden. This '
|
||||||
'response usually occurs when shared secrets do not match.'
|
'response usually occurs when shared secrets do not match.'
|
||||||
))
|
))
|
||||||
|
@ -249,7 +248,7 @@ class MetadataRequestHandler(wsgi.Application):
|
||||||
hashlib.sha256).hexdigest()
|
hashlib.sha256).hexdigest()
|
||||||
|
|
||||||
if not utils.constant_time_compare(expected_signature, signature):
|
if not utils.constant_time_compare(expected_signature, signature):
|
||||||
LOG.warning(textutils._LW(
|
LOG.warning(_LW(
|
||||||
'X-Instance-ID-Signature: %(signature)s does '
|
'X-Instance-ID-Signature: %(signature)s does '
|
||||||
'not match the expected value: '
|
'not match the expected value: '
|
||||||
'%(expected_signature)s for id: '
|
'%(expected_signature)s for id: '
|
||||||
|
|
|
@ -15,14 +15,14 @@
|
||||||
import itertools
|
import itertools
|
||||||
|
|
||||||
from novaclient import exceptions as nova_exception
|
from novaclient import exceptions as nova_exception
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.api import clients
|
from ec2api.api import clients
|
||||||
from ec2api.api import ec2utils
|
from ec2api.api import ec2utils
|
||||||
from ec2api.api import instance as instance_api
|
from ec2api.api import instance as instance_api
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
|
from ec2api.i18n import _
|
||||||
from ec2api.novadb import api as novadb
|
from ec2api.novadb import api as novadb
|
||||||
from ec2api.openstack.common.gettextutils import _
|
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
|
@ -29,17 +29,14 @@ these objects be simple dictionaries.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from eventlet import tpool
|
from eventlet import tpool
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_db import api as db_api
|
||||||
from ec2api.openstack.common.db import api as db_api
|
from oslo_log import log as logging
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.import_opt('use_tpool', 'ec2api.db.api',
|
CONF.import_opt('use_tpool', 'ec2api.db.api',
|
||||||
group='database')
|
group='database')
|
||||||
CONF.import_opt('backend', 'ec2api.openstack.common.db.options',
|
|
||||||
group='database')
|
|
||||||
|
|
||||||
_BACKEND_MAPPING = {'sqlalchemy': 'ec2api.novadb.sqlalchemy.api'}
|
_BACKEND_MAPPING = {'sqlalchemy': 'ec2api.novadb.sqlalchemy.api'}
|
||||||
|
|
||||||
|
@ -80,15 +77,12 @@ MAX_INT = 0x7FFFFFFF
|
||||||
####################
|
####################
|
||||||
|
|
||||||
|
|
||||||
def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
|
def instance_get_by_uuid(context, uuid, columns_to_join=None):
|
||||||
"""Get an instance or raise if it does not exist."""
|
"""Get an instance or raise if it does not exist."""
|
||||||
return IMPL.instance_get_by_uuid(context, uuid,
|
return IMPL.instance_get_by_uuid(context, uuid, columns_to_join)
|
||||||
columns_to_join, use_slave=use_slave)
|
|
||||||
|
|
||||||
|
|
||||||
def block_device_mapping_get_all_by_instance(context, instance_uuid,
|
def block_device_mapping_get_all_by_instance(context, instance_uuid):
|
||||||
use_slave=False):
|
|
||||||
"""Get all block device mapping belonging to an instance."""
|
"""Get all block device mapping belonging to an instance."""
|
||||||
return IMPL.block_device_mapping_get_all_by_instance(context,
|
return IMPL.block_device_mapping_get_all_by_instance(context,
|
||||||
instance_uuid,
|
instance_uuid)
|
||||||
use_slave)
|
|
||||||
|
|
|
@ -20,25 +20,21 @@
|
||||||
import functools
|
import functools
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_db.sqlalchemy import session as db_session
|
||||||
|
from oslo_log import log as logging
|
||||||
from sqlalchemy import or_
|
from sqlalchemy import or_
|
||||||
|
|
||||||
import ec2api.context
|
import ec2api.context
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
|
from ec2api.i18n import _
|
||||||
from ec2api.novadb.sqlalchemy import models
|
from ec2api.novadb.sqlalchemy import models
|
||||||
from ec2api.openstack.common.db.sqlalchemy import session as db_session
|
|
||||||
from ec2api.openstack.common.gettextutils import _
|
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
connection_opts = [
|
connection_opts = [
|
||||||
cfg.StrOpt('connection_nova',
|
cfg.StrOpt('connection_nova',
|
||||||
secret=True,
|
secret=True,
|
||||||
help='The SQLAlchemy connection string used to connect to the '
|
help='The SQLAlchemy connection string used to connect to the '
|
||||||
'nova database'),
|
'nova database'),
|
||||||
cfg.StrOpt('slave_connection',
|
|
||||||
secret=True,
|
|
||||||
help='The SQLAlchemy connection string used to connect to the '
|
|
||||||
'slave database'),
|
|
||||||
]
|
]
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
@ -48,37 +44,26 @@ LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
_MASTER_FACADE = None
|
_MASTER_FACADE = None
|
||||||
_SLAVE_FACADE = None
|
|
||||||
|
|
||||||
|
|
||||||
def _create_facade_lazily(use_slave=False):
|
def _create_facade_lazily():
|
||||||
global _MASTER_FACADE
|
global _MASTER_FACADE
|
||||||
global _SLAVE_FACADE
|
|
||||||
|
|
||||||
return_slave = use_slave and CONF.database.slave_connection
|
if _MASTER_FACADE is None:
|
||||||
if not return_slave:
|
_MASTER_FACADE = db_session.EngineFacade(
|
||||||
if _MASTER_FACADE is None:
|
CONF.database.connection_nova,
|
||||||
_MASTER_FACADE = db_session.EngineFacade(
|
**dict(CONF.database.iteritems())
|
||||||
CONF.database.connection_nova,
|
)
|
||||||
**dict(CONF.database.iteritems())
|
return _MASTER_FACADE
|
||||||
)
|
|
||||||
return _MASTER_FACADE
|
|
||||||
else:
|
|
||||||
if _SLAVE_FACADE is None:
|
|
||||||
_SLAVE_FACADE = db_session.EngineFacade(
|
|
||||||
CONF.database.slave_connection,
|
|
||||||
**dict(CONF.database.iteritems())
|
|
||||||
)
|
|
||||||
return _SLAVE_FACADE
|
|
||||||
|
|
||||||
|
|
||||||
def get_engine(use_slave=False):
|
def get_engine():
|
||||||
facade = _create_facade_lazily(use_slave)
|
facade = _create_facade_lazily()
|
||||||
return facade.get_engine()
|
return facade.get_engine()
|
||||||
|
|
||||||
|
|
||||||
def get_session(use_slave=False, **kwargs):
|
def get_session(**kwargs):
|
||||||
facade = _create_facade_lazily(use_slave)
|
facade = _create_facade_lazily()
|
||||||
return facade.get_session(**kwargs)
|
return facade.get_session(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
@ -109,7 +94,6 @@ def model_query(context, model, *args, **kwargs):
|
||||||
"""Query helper that accounts for context's `read_deleted` field.
|
"""Query helper that accounts for context's `read_deleted` field.
|
||||||
|
|
||||||
:param context: context to query under
|
:param context: context to query under
|
||||||
:param use_slave: If true, use slave_connection
|
|
||||||
:param session: if present, the session to use
|
:param session: if present, the session to use
|
||||||
:param read_deleted: if present, overrides context's read_deleted field.
|
:param read_deleted: if present, overrides context's read_deleted field.
|
||||||
:param project_only: if present and context is user-type, then restrict
|
:param project_only: if present and context is user-type, then restrict
|
||||||
|
@ -121,11 +105,7 @@ def model_query(context, model, *args, **kwargs):
|
||||||
model parameter.
|
model parameter.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
use_slave = kwargs.get('use_slave') or False
|
session = kwargs.get('session') or get_session()
|
||||||
if CONF.database.slave_connection == '':
|
|
||||||
use_slave = False
|
|
||||||
|
|
||||||
session = kwargs.get('session') or get_session(use_slave=use_slave)
|
|
||||||
read_deleted = kwargs.get('read_deleted') or context.read_deleted
|
read_deleted = kwargs.get('read_deleted') or context.read_deleted
|
||||||
project_only = kwargs.get('project_only', False)
|
project_only = kwargs.get('project_only', False)
|
||||||
|
|
||||||
|
@ -167,16 +147,15 @@ def model_query(context, model, *args, **kwargs):
|
||||||
|
|
||||||
|
|
||||||
@require_context
|
@require_context
|
||||||
def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
|
def instance_get_by_uuid(context, uuid, columns_to_join=None):
|
||||||
return _instance_get_by_uuid(context, uuid,
|
return _instance_get_by_uuid(context, uuid,
|
||||||
columns_to_join=columns_to_join, use_slave=use_slave)
|
columns_to_join=columns_to_join)
|
||||||
|
|
||||||
|
|
||||||
def _instance_get_by_uuid(context, uuid, session=None,
|
def _instance_get_by_uuid(context, uuid, session=None,
|
||||||
columns_to_join=None, use_slave=False):
|
columns_to_join=None):
|
||||||
result = (_build_instance_get(context, session=session,
|
result = (_build_instance_get(context, session=session,
|
||||||
columns_to_join=columns_to_join,
|
columns_to_join=columns_to_join).
|
||||||
use_slave=use_slave).
|
|
||||||
filter_by(uuid=uuid).
|
filter_by(uuid=uuid).
|
||||||
first())
|
first())
|
||||||
|
|
||||||
|
@ -188,28 +167,25 @@ def _instance_get_by_uuid(context, uuid, session=None,
|
||||||
|
|
||||||
|
|
||||||
def _build_instance_get(context, session=None,
|
def _build_instance_get(context, session=None,
|
||||||
columns_to_join=None, use_slave=False):
|
columns_to_join=None):
|
||||||
query = model_query(context, models.Instance, session=session,
|
query = model_query(context, models.Instance, session=session,
|
||||||
project_only=True, use_slave=use_slave,
|
project_only=True, read_deleted="no")
|
||||||
read_deleted="no")
|
|
||||||
return query
|
return query
|
||||||
|
|
||||||
|
|
||||||
def _block_device_mapping_get_query(context, session=None,
|
def _block_device_mapping_get_query(context, session=None,
|
||||||
columns_to_join=None, use_slave=False):
|
columns_to_join=None):
|
||||||
if columns_to_join is None:
|
if columns_to_join is None:
|
||||||
columns_to_join = []
|
columns_to_join = []
|
||||||
|
|
||||||
query = model_query(context, models.BlockDeviceMapping,
|
query = model_query(context, models.BlockDeviceMapping,
|
||||||
session=session, use_slave=use_slave,
|
session=session, read_deleted="no")
|
||||||
read_deleted="no")
|
|
||||||
|
|
||||||
return query
|
return query
|
||||||
|
|
||||||
|
|
||||||
@require_context
|
@require_context
|
||||||
def block_device_mapping_get_all_by_instance(context, instance_uuid,
|
def block_device_mapping_get_all_by_instance(context, instance_uuid):
|
||||||
use_slave=False):
|
return (_block_device_mapping_get_query(context).
|
||||||
return (_block_device_mapping_get_query(context, use_slave=use_slave).
|
|
||||||
filter_by(instance_uuid=instance_uuid).
|
filter_by(instance_uuid=instance_uuid).
|
||||||
all())
|
all())
|
||||||
|
|
|
@ -19,7 +19,8 @@
|
||||||
SQLAlchemy models for nova data.
|
SQLAlchemy models for nova data.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_db.sqlalchemy import models
|
||||||
from sqlalchemy import Column, Index, Integer, Enum, String
|
from sqlalchemy import Column, Index, Integer, Enum, String
|
||||||
from sqlalchemy.dialects.mysql import MEDIUMTEXT
|
from sqlalchemy.dialects.mysql import MEDIUMTEXT
|
||||||
from sqlalchemy.ext.declarative import declarative_base
|
from sqlalchemy.ext.declarative import declarative_base
|
||||||
|
@ -27,7 +28,6 @@ from sqlalchemy import DateTime, Boolean, Text
|
||||||
from sqlalchemy.orm import object_mapper
|
from sqlalchemy.orm import object_mapper
|
||||||
|
|
||||||
from ec2api.novadb.sqlalchemy import types
|
from ec2api.novadb.sqlalchemy import types
|
||||||
from ec2api.openstack.common.db.sqlalchemy import models
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
BASE = declarative_base()
|
BASE = declarative_base()
|
||||||
|
|
|
@ -1,17 +0,0 @@
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
|
|
||||||
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
|
|
|
@ -0,0 +1,45 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""oslo.i18n integration module.
|
||||||
|
|
||||||
|
See http://docs.openstack.org/developer/oslo.i18n/usage.html
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
import oslo_i18n
|
||||||
|
|
||||||
|
# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
|
||||||
|
# application name when this module is synced into the separate
|
||||||
|
# repository. It is OK to have more than one translation function
|
||||||
|
# using the same domain, since there will still only be one message
|
||||||
|
# catalog.
|
||||||
|
_translators = oslo_i18n.TranslatorFactory(domain='ec2api')
|
||||||
|
|
||||||
|
# The primary translation function using the well-known name "_"
|
||||||
|
_ = _translators.primary
|
||||||
|
|
||||||
|
# Translators for log levels.
|
||||||
|
#
|
||||||
|
# The abbreviated names are meant to reflect the usual use of a short
|
||||||
|
# name like '_'. The "L" is for "log" and the other letter comes from
|
||||||
|
# the level.
|
||||||
|
_LI = _translators.log_info
|
||||||
|
_LW = _translators.log_warning
|
||||||
|
_LE = _translators.log_error
|
||||||
|
_LC = _translators.log_critical
|
||||||
|
except ImportError:
|
||||||
|
# NOTE(dims): Support for cases where a project wants to use
|
||||||
|
# code from oslo-incubator, but is not ready to be internationalized
|
||||||
|
# (like tempest)
|
||||||
|
_ = _LI = _LW = _LE = _LC = lambda x: x
|
|
@ -1,126 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Simple class that stores security context information in the web request.
|
|
||||||
|
|
||||||
Projects should subclass this class if they wish to enhance the request
|
|
||||||
context or provide additional information in their specific WSGI pipeline.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import itertools
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
|
|
||||||
def generate_request_id():
|
|
||||||
return b'req-' + str(uuid.uuid4()).encode('ascii')
|
|
||||||
|
|
||||||
|
|
||||||
class RequestContext(object):
|
|
||||||
|
|
||||||
"""Helper class to represent useful information about a request context.
|
|
||||||
|
|
||||||
Stores information about the security context under which the user
|
|
||||||
accesses the system, as well as additional request information.
|
|
||||||
"""
|
|
||||||
|
|
||||||
user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
|
|
||||||
|
|
||||||
def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
|
|
||||||
user_domain=None, project_domain=None, is_admin=False,
|
|
||||||
read_only=False, show_deleted=False, request_id=None,
|
|
||||||
instance_uuid=None):
|
|
||||||
self.auth_token = auth_token
|
|
||||||
self.user = user
|
|
||||||
self.tenant = tenant
|
|
||||||
self.domain = domain
|
|
||||||
self.user_domain = user_domain
|
|
||||||
self.project_domain = project_domain
|
|
||||||
self.is_admin = is_admin
|
|
||||||
self.read_only = read_only
|
|
||||||
self.show_deleted = show_deleted
|
|
||||||
self.instance_uuid = instance_uuid
|
|
||||||
if not request_id:
|
|
||||||
request_id = generate_request_id()
|
|
||||||
self.request_id = request_id
|
|
||||||
|
|
||||||
def to_dict(self):
|
|
||||||
user_idt = (
|
|
||||||
self.user_idt_format.format(user=self.user or '-',
|
|
||||||
tenant=self.tenant or '-',
|
|
||||||
domain=self.domain or '-',
|
|
||||||
user_domain=self.user_domain or '-',
|
|
||||||
p_domain=self.project_domain or '-'))
|
|
||||||
|
|
||||||
return {'user': self.user,
|
|
||||||
'tenant': self.tenant,
|
|
||||||
'domain': self.domain,
|
|
||||||
'user_domain': self.user_domain,
|
|
||||||
'project_domain': self.project_domain,
|
|
||||||
'is_admin': self.is_admin,
|
|
||||||
'read_only': self.read_only,
|
|
||||||
'show_deleted': self.show_deleted,
|
|
||||||
'auth_token': self.auth_token,
|
|
||||||
'request_id': self.request_id,
|
|
||||||
'instance_uuid': self.instance_uuid,
|
|
||||||
'user_identity': user_idt}
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def from_dict(cls, ctx):
|
|
||||||
return cls(
|
|
||||||
auth_token=ctx.get("auth_token"),
|
|
||||||
user=ctx.get("user"),
|
|
||||||
tenant=ctx.get("tenant"),
|
|
||||||
domain=ctx.get("domain"),
|
|
||||||
user_domain=ctx.get("user_domain"),
|
|
||||||
project_domain=ctx.get("project_domain"),
|
|
||||||
is_admin=ctx.get("is_admin", False),
|
|
||||||
read_only=ctx.get("read_only", False),
|
|
||||||
show_deleted=ctx.get("show_deleted", False),
|
|
||||||
request_id=ctx.get("request_id"),
|
|
||||||
instance_uuid=ctx.get("instance_uuid"))
|
|
||||||
|
|
||||||
|
|
||||||
def get_admin_context(show_deleted=False):
|
|
||||||
context = RequestContext(None,
|
|
||||||
tenant=None,
|
|
||||||
is_admin=True,
|
|
||||||
show_deleted=show_deleted)
|
|
||||||
return context
|
|
||||||
|
|
||||||
|
|
||||||
def get_context_from_function_and_args(function, args, kwargs):
|
|
||||||
"""Find an arg of type RequestContext and return it.
|
|
||||||
|
|
||||||
This is useful in a couple of decorators where we don't
|
|
||||||
know much about the function we're wrapping.
|
|
||||||
"""
|
|
||||||
|
|
||||||
for arg in itertools.chain(kwargs.values(), args):
|
|
||||||
if isinstance(arg, RequestContext):
|
|
||||||
return arg
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def is_user_context(context):
|
|
||||||
"""Indicates if the request context is a normal user."""
|
|
||||||
if not context:
|
|
||||||
return False
|
|
||||||
if context.is_admin:
|
|
||||||
return False
|
|
||||||
if not context.user_id or not context.project_id:
|
|
||||||
return False
|
|
||||||
return True
|
|
|
@ -1,162 +0,0 @@
|
||||||
# Copyright (c) 2013 Rackspace Hosting
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Multiple DB API backend support.
|
|
||||||
|
|
||||||
A DB backend module should implement a method named 'get_backend' which
|
|
||||||
takes no arguments. The method can return any object that implements DB
|
|
||||||
API methods.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import logging
|
|
||||||
import threading
|
|
||||||
import time
|
|
||||||
|
|
||||||
from ec2api.openstack.common.db import exception
|
|
||||||
from ec2api.openstack.common.gettextutils import _LE
|
|
||||||
from ec2api.openstack.common import importutils
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def safe_for_db_retry(f):
|
|
||||||
"""Enable db-retry for decorated function, if config option enabled."""
|
|
||||||
f.__dict__['enable_retry'] = True
|
|
||||||
return f
|
|
||||||
|
|
||||||
|
|
||||||
class wrap_db_retry(object):
|
|
||||||
"""Retry db.api methods, if DBConnectionError() raised
|
|
||||||
|
|
||||||
Retry decorated db.api methods. If we enabled `use_db_reconnect`
|
|
||||||
in config, this decorator will be applied to all db.api functions,
|
|
||||||
marked with @safe_for_db_retry decorator.
|
|
||||||
Decorator catchs DBConnectionError() and retries function in a
|
|
||||||
loop until it succeeds, or until maximum retries count will be reached.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, retry_interval, max_retries, inc_retry_interval,
|
|
||||||
max_retry_interval):
|
|
||||||
super(wrap_db_retry, self).__init__()
|
|
||||||
|
|
||||||
self.retry_interval = retry_interval
|
|
||||||
self.max_retries = max_retries
|
|
||||||
self.inc_retry_interval = inc_retry_interval
|
|
||||||
self.max_retry_interval = max_retry_interval
|
|
||||||
|
|
||||||
def __call__(self, f):
|
|
||||||
@functools.wraps(f)
|
|
||||||
def wrapper(*args, **kwargs):
|
|
||||||
next_interval = self.retry_interval
|
|
||||||
remaining = self.max_retries
|
|
||||||
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
return f(*args, **kwargs)
|
|
||||||
except exception.DBConnectionError as e:
|
|
||||||
if remaining == 0:
|
|
||||||
LOG.exception(_LE('DB exceeded retry limit.'))
|
|
||||||
raise exception.DBError(e)
|
|
||||||
if remaining != -1:
|
|
||||||
remaining -= 1
|
|
||||||
LOG.exception(_LE('DB connection error.'))
|
|
||||||
# NOTE(vsergeyev): We are using patched time module, so
|
|
||||||
# this effectively yields the execution
|
|
||||||
# context to another green thread.
|
|
||||||
time.sleep(next_interval)
|
|
||||||
if self.inc_retry_interval:
|
|
||||||
next_interval = min(
|
|
||||||
next_interval * 2,
|
|
||||||
self.max_retry_interval
|
|
||||||
)
|
|
||||||
return wrapper
|
|
||||||
|
|
||||||
|
|
||||||
class DBAPI(object):
|
|
||||||
def __init__(self, backend_name, backend_mapping=None, lazy=False,
|
|
||||||
**kwargs):
|
|
||||||
"""Initialize the chosen DB API backend.
|
|
||||||
|
|
||||||
:param backend_name: name of the backend to load
|
|
||||||
:type backend_name: str
|
|
||||||
|
|
||||||
:param backend_mapping: backend name -> module/class to load mapping
|
|
||||||
:type backend_mapping: dict
|
|
||||||
|
|
||||||
:param lazy: load the DB backend lazily on the first DB API method call
|
|
||||||
:type lazy: bool
|
|
||||||
|
|
||||||
Keyword arguments:
|
|
||||||
|
|
||||||
:keyword use_db_reconnect: retry DB transactions on disconnect or not
|
|
||||||
:type use_db_reconnect: bool
|
|
||||||
|
|
||||||
:keyword retry_interval: seconds between transaction retries
|
|
||||||
:type retry_interval: int
|
|
||||||
|
|
||||||
:keyword inc_retry_interval: increase retry interval or not
|
|
||||||
:type inc_retry_interval: bool
|
|
||||||
|
|
||||||
:keyword max_retry_interval: max interval value between retries
|
|
||||||
:type max_retry_interval: int
|
|
||||||
|
|
||||||
:keyword max_retries: max number of retries before an error is raised
|
|
||||||
:type max_retries: int
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
self._backend = None
|
|
||||||
self._backend_name = backend_name
|
|
||||||
self._backend_mapping = backend_mapping or {}
|
|
||||||
self._lock = threading.Lock()
|
|
||||||
|
|
||||||
if not lazy:
|
|
||||||
self._load_backend()
|
|
||||||
|
|
||||||
self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
|
|
||||||
self.retry_interval = kwargs.get('retry_interval', 1)
|
|
||||||
self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
|
|
||||||
self.max_retry_interval = kwargs.get('max_retry_interval', 10)
|
|
||||||
self.max_retries = kwargs.get('max_retries', 20)
|
|
||||||
|
|
||||||
def _load_backend(self):
|
|
||||||
with self._lock:
|
|
||||||
if not self._backend:
|
|
||||||
# Import the untranslated name if we don't have a mapping
|
|
||||||
backend_path = self._backend_mapping.get(self._backend_name,
|
|
||||||
self._backend_name)
|
|
||||||
backend_mod = importutils.import_module(backend_path)
|
|
||||||
self._backend = backend_mod.get_backend()
|
|
||||||
|
|
||||||
def __getattr__(self, key):
|
|
||||||
if not self._backend:
|
|
||||||
self._load_backend()
|
|
||||||
|
|
||||||
attr = getattr(self._backend, key)
|
|
||||||
if not hasattr(attr, '__call__'):
|
|
||||||
return attr
|
|
||||||
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
|
|
||||||
# DB API methods, decorated with @safe_for_db_retry
|
|
||||||
# on disconnect.
|
|
||||||
if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
|
|
||||||
attr = wrap_db_retry(
|
|
||||||
retry_interval=self.retry_interval,
|
|
||||||
max_retries=self.max_retries,
|
|
||||||
inc_retry_interval=self.inc_retry_interval,
|
|
||||||
max_retry_interval=self.max_retry_interval)(attr)
|
|
||||||
|
|
||||||
return attr
|
|
|
@ -1,56 +0,0 @@
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""DB related custom exceptions."""
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from ec2api.openstack.common.gettextutils import _
|
|
||||||
|
|
||||||
|
|
||||||
class DBError(Exception):
    """Wrap a driver/implementation specific database exception.

    The original exception stays reachable via ``inner_exception``;
    its text representation becomes this exception's message.
    """

    def __init__(self, inner_exception=None):
        super(DBError, self).__init__(six.text_type(inner_exception))
        self.inner_exception = inner_exception
|
|
||||||
|
|
||||||
|
|
||||||
class DBDuplicateEntry(DBError):
    """Raised when a uniqueness constraint is violated.

    :param columns: names of the columns involved in the duplicate entry;
                    exposed on the instance for callers to inspect.
    :param inner_exception: the original driver exception, if any.
    """

    def __init__(self, columns=None, inner_exception=None):
        # NOTE: the previous signature used ``columns=[]`` — a shared
        # mutable default argument, so mutating ``exc.columns`` on one
        # instance leaked into every later instance. Default to a fresh
        # list instead; the observable default value is unchanged.
        self.columns = columns if columns is not None else []
        super(DBDuplicateEntry, self).__init__(inner_exception)
|
|
||||||
|
|
||||||
|
|
||||||
class DBDeadlock(DBError):
    """Raised when the database reports a deadlock condition."""

    def __init__(self, inner_exception=None):
        super(DBDeadlock, self).__init__(inner_exception)
|
|
||||||
|
|
||||||
|
|
||||||
class DBInvalidUnicodeParameter(Exception):
    # Raised when a unicode parameter is passed to a database that
    # cannot handle unicode values.
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
|
|
||||||
|
|
||||||
|
|
||||||
class DbMigrationError(DBError):
    """Raised for schema-migration specific failures.

    :param message: human readable description of the migration error.
    """

    def __init__(self, message=None):
        super(DbMigrationError, self).__init__(message)
|
|
||||||
|
|
||||||
|
|
||||||
class DBConnectionError(DBError):
    """Wraps connection specific exception."""
    # Raised when a connection to the database cannot be established
    # or is lost; carries the driver exception via DBError.
    pass
|
|
|
@ -1,171 +0,0 @@
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import copy
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
|
|
||||||
# Options registered under the [database] section. Most options carry
# DeprecatedOpt aliases so values configured under the legacy DEFAULT,
# DATABASE, or sql groups keep working.
database_opts = [
    cfg.StrOpt('sqlite_db',
               deprecated_group='DEFAULT',
               default='ec2api.sqlite',
               help='The file name to use with SQLite'),
    cfg.BoolOpt('sqlite_synchronous',
                deprecated_group='DEFAULT',
                default=True,
                help='If True, SQLite uses synchronous mode'),
    cfg.StrOpt('backend',
               default='sqlalchemy',
               deprecated_name='db_backend',
               deprecated_group='DEFAULT',
               help='The backend to use for db'),
    cfg.StrOpt('connection',
               help='The SQLAlchemy connection string used to connect to the '
                    'database',
               # Connection strings may embed credentials; keep them out of
               # logs.
               secret=True,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_connection',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('connection',
                                                  group='sql'), ]),
    cfg.StrOpt('mysql_sql_mode',
               default='TRADITIONAL',
               help='The SQL mode to be used for MySQL sessions. '
                    'This option, including the default, overrides any '
                    'server-set SQL mode. To use whatever SQL mode '
                    'is set by the server configuration, '
                    'set this to no value. Example: mysql_sql_mode='),
    cfg.IntOpt('idle_timeout',
               default=3600,
               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('idle_timeout',
                                                  group='sql')],
               help='Timeout before idle sql connections are reaped'),
    cfg.IntOpt('min_pool_size',
               default=1,
               deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DATABASE')],
               help='Minimum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_pool_size',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DATABASE')],
               help='Maximum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_retries',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DATABASE')],
               help='Maximum db connection retries during startup. '
                    '(setting -1 implies an infinite retry count)'),
    cfg.IntOpt('retry_interval',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('reconnect_interval',
                                                  group='DATABASE')],
               help='Interval between retries of opening a sql connection'),
    cfg.IntOpt('max_overflow',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sqlalchemy_max_overflow',
                                                  group='DATABASE')],
               help='If set, use this value for max_overflow with sqlalchemy'),
    cfg.IntOpt('connection_debug',
               default=0,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
                                                  group='DEFAULT')],
               help='Verbosity of SQL debugging information. 0=None, '
                    '100=Everything'),
    cfg.BoolOpt('connection_trace',
                default=False,
                deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
                                                   group='DEFAULT')],
                help='Add python stack traces to SQL as comment strings'),
    cfg.IntOpt('pool_timeout',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
                                                  group='DATABASE')],
               help='If set, use this value for pool_timeout with sqlalchemy'),
    # The db_* options below drive the experimental reconnect/retry
    # machinery used by wrap_db_retry.
    cfg.BoolOpt('use_db_reconnect',
                default=False,
                help='Enable the experimental use of database reconnect '
                     'on connection lost'),
    cfg.IntOpt('db_retry_interval',
               default=1,
               help='seconds between db connection retries'),
    cfg.BoolOpt('db_inc_retry_interval',
                default=True,
                help='Whether to increase interval between db connection '
                     'retries, up to db_max_retry_interval'),
    cfg.IntOpt('db_max_retry_interval',
               default=10,
               help='max seconds between db connection retries, if '
                    'db_inc_retry_interval is enabled'),
    cfg.IntOpt('db_max_retries',
               default=20,
               help='maximum db connection retries before error is raised. '
                    '(setting -1 implies an infinite retry count)'),
]

# Register everything under the [database] group at import time.
CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')
|
|
||||||
|
|
||||||
|
|
||||||
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
                 max_overflow=None, pool_timeout=None):
    """Set defaults for configuration variables.

    :param sql_connection: default SQLAlchemy connection string
    :param sqlite_db: default SQLite database file name
    :param max_pool_size: optional default for the connection pool size
    :param max_overflow: optional default for pool overflow
    :param pool_timeout: optional default for the pool timeout
    """
    defaults = {'connection': sql_connection, 'sqlite_db': sqlite_db}
    # QueuePool tuning knobs: only override a default when the caller
    # actually supplied a value.
    for name, value in (('max_pool_size', max_pool_size),
                        ('max_overflow', max_overflow),
                        ('pool_timeout', pool_timeout)):
        if value is not None:
            defaults[name] = value
    cfg.set_defaults(database_opts, **defaults)
|
|
||||||
|
|
||||||
|
|
||||||
def list_opts():
    """Returns a list of oslo.config options available in the library.

    The returned list includes all oslo.config options which may be registered
    at runtime by the library.

    Each element of the list is a tuple. The first element is the name of the
    group under which the list of elements in the second element will be
    registered. A group name of None corresponds to the [DEFAULT] group in
    config files.

    The purpose of this is to allow tools like the Oslo sample config file
    generator to discover the options exposed to users by this library.

    :returns: a list of (group_name, opts) tuples
    """
    # Deep-copy so consumers cannot mutate the module-level option list.
    opts = copy.deepcopy(database_opts)
    return [('database', opts)]
|
|
|
@ -1,278 +0,0 @@
|
||||||
# coding: utf-8
|
|
||||||
#
|
|
||||||
# Copyright (c) 2013 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# Base on code in migrate/changeset/databases/sqlite.py which is under
|
|
||||||
# the following license:
|
|
||||||
#
|
|
||||||
# The MIT License
|
|
||||||
#
|
|
||||||
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
|
|
||||||
#
|
|
||||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
# of this software and associated documentation files (the "Software"), to deal
|
|
||||||
# in the Software without restriction, including without limitation the rights
|
|
||||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
# copies of the Software, and to permit persons to whom the Software is
|
|
||||||
# furnished to do so, subject to the following conditions:
|
|
||||||
# The above copyright notice and this permission notice shall be included in
|
|
||||||
# all copies or substantial portions of the Software.
|
|
||||||
#
|
|
||||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
# THE SOFTWARE.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
|
|
||||||
from migrate.changeset import ansisql
|
|
||||||
from migrate.changeset.databases import sqlite
|
|
||||||
from migrate import exceptions as versioning_exceptions
|
|
||||||
from migrate.versioning import api as versioning_api
|
|
||||||
from migrate.versioning.repository import Repository
|
|
||||||
import sqlalchemy
|
|
||||||
from sqlalchemy.schema import UniqueConstraint
|
|
||||||
|
|
||||||
from ec2api.openstack.common.db import exception
|
|
||||||
from ec2api.openstack.common.gettextutils import _
|
|
||||||
|
|
||||||
|
|
||||||
def _get_unique_constraints(self, table):
    """Retrieve information about existing unique constraints of the table

    This feature is needed for _recreate_table() to work properly.
    Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.

    :param table: SQLAlchemy Table bound to a SQLite engine
    :returns: list of UniqueConstraint objects parsed from the table's DDL
    """

    # SQLite keeps the verbatim CREATE TABLE statement in sqlite_master,
    # so the constraints are recovered by parsing that DDL text.
    data = table.metadata.bind.execute(
        """SELECT sql
           FROM sqlite_master
           WHERE
               type='table' AND
               name=:table_name""",
        table_name=table.name
    ).fetchone()[0]

    # NOTE: the pattern must be a raw string — the original non-raw
    # literal relied on "\w" / "\(" being passed through unchanged, which
    # is a deprecated invalid escape sequence in Python 3.6+ and a
    # SyntaxError in future versions.
    UNIQUE_PATTERN = r"CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
    return [
        UniqueConstraint(
            *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
            name=name
        )
        for name, cols in re.findall(UNIQUE_PATTERN, data)
    ]
|
|
||||||
|
|
||||||
|
|
||||||
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
    """Recreate the table properly

    Unlike the corresponding original method of sqlalchemy-migrate this one
    doesn't drop existing unique constraints when creating a new one.

    :param table: SQLAlchemy Table to rebuild
    :param column: column being added/dropped, passed to _modify_table
    :param delta: column change description, passed to _modify_table
    :param omit_uniques: names of unique constraints to leave out of the
        rebuilt table (used to "drop" a constraint on SQLite)
    """

    table_name = self.preparer.format_table(table)

    # we remove all indexes so as not to have
    # problems during copy and re-create
    for index in table.indexes:
        index.drop()

    # reflect existing unique constraints
    for uc in self._get_unique_constraints(table):
        table.append_constraint(uc)
    # omit given unique constraints when creating a new table if required
    table.constraints = set([
        cons for cons in table.constraints
        if omit_uniques is None or cons.name not in omit_uniques
    ])

    # SQLite cannot ALTER in place: rename the old table aside, create the
    # new one, copy the rows over, then drop the renamed original. The
    # statement order below is significant.
    self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
    self.execute()

    insertion_string = self._modify_table(table, column, delta)

    table.create(bind=self.connection)
    self.append(insertion_string % {'table_name': table_name})
    self.execute()
    self.append('DROP TABLE migration_tmp')
    self.execute()
|
|
||||||
|
|
||||||
|
|
||||||
def _visit_migrate_unique_constraint(self, *p, **k):
    """Drop the given unique constraint

    The corresponding original method of sqlalchemy-migrate just
    raises NotImplemented error

    """
    constraint = p[0]
    # SQLite cannot drop a constraint directly; rebuild the table
    # without it instead.
    self.recreate_table(constraint.table, omit_uniques=[constraint.name])
|
|
||||||
|
|
||||||
|
|
||||||
def patch_migrate():
    """A workaround for SQLite's inability to alter things

    SQLite abilities to alter tables are very limited (please read
    http://www.sqlite.org/lang_altertable.html for more details).
    E. g. one can't drop a column or a constraint in SQLite. The
    workaround for this is to recreate the original table omitting
    the corresponding constraint (or column).

    sqlalchemy-migrate library has recreate_table() method that
    implements this workaround, but it does it wrong:

    - information about unique constraints of a table
      is not retrieved. So if you have a table with one
      unique constraint and a migration adding another one
      you will end up with a table that has only the
      latter unique constraint, and the former will be lost

    - dropping of unique constraints is not supported at all

    The proper way to fix this is to provide a pull-request to
    sqlalchemy-migrate, but the project seems to be dead. So we
    can go on with monkey-patching of the lib at least for now.

    """

    # this patch is needed to ensure that recreate_table() doesn't drop
    # existing unique constraints of the table when creating a new one
    helper_cls = sqlite.SQLiteHelper
    helper_cls.recreate_table = _recreate_table
    helper_cls._get_unique_constraints = _get_unique_constraints

    # this patch is needed to be able to drop existing unique constraints
    constraint_cls = sqlite.SQLiteConstraintDropper
    constraint_cls.visit_migrate_unique_constraint = \
        _visit_migrate_unique_constraint
    # Re-parent the dropper so it inherits the ANSI column dropper plus
    # the SQLite constraint generator used by the patched visitor.
    constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
                                sqlite.SQLiteConstraintGenerator)
|
|
||||||
|
|
||||||
|
|
||||||
def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository.
    :param version: Database will upgrade/downgrade until this version.
                    If None - database will update to the latest
                    available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current = db_version(engine, abs_path, init_version)
    repo = _find_migrate_repo(abs_path)
    if sanity_check:
        _db_schema_sanity_check(engine)

    # A target of None means "latest available", which is always an upgrade.
    if version is not None and version <= current:
        return versioning_api.downgrade(engine, repo, version)
    return versioning_api.upgrade(engine, repo, version)
|
|
||||||
|
|
||||||
|
|
||||||
def _db_schema_sanity_check(engine):
    """Ensure all database tables were created with required parameters.

    :param engine: SQLAlchemy engine instance for a given database

    :raises ValueError: if any MySQL table uses a non-utf8 collation
    """
    # Only MySQL needs the collation check; other backends pass trivially.
    if engine.name != 'mysql':
        return

    onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                    'from information_schema.TABLES '
                    'where TABLE_SCHEMA=%s and '
                    'TABLE_COLLATION NOT LIKE "%%utf8%%"')

    # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic
    # versioning tables from the tables we need to verify utf8 status on.
    # Non-standard table names are not supported.
    EXCLUDED_TABLES = ['migrate_version', 'alembic_version']

    offenders = [row[0] for row in
                 engine.execute(onlyutf8_sql, engine.url.database)
                 if row[0].lower() not in EXCLUDED_TABLES]

    if offenders:
        raise ValueError(_('Tables "%s" have non utf8 collation, '
                           'please make sure all tables are CHARSET=utf8'
                           ) % ','.join(offenders))
|
|
||||||
|
|
||||||
|
|
||||||
def db_version(engine, abs_path, init_version):
    """Show the current version of the repository.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param init_version: Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(engine, repository)
    except versioning_exceptions.DatabaseNotControlledError:
        # The database has no migrate version table yet. An empty schema
        # (or one managed only by alembic) can safely be stamped with the
        # initial version; anything else is ambiguous and must be stamped
        # by the operator.
        meta = sqlalchemy.MetaData()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0 or 'alembic_version' in tables:
            db_version_control(engine, abs_path, version=init_version)
            return versioning_api.db_version(engine, repository)
        else:
            raise exception.DbMigrationError(
                message=_(
                    "The database is not under version control, but has "
                    "tables. Please stamp the current version of the schema "
                    "manually."))
|
|
||||||
|
|
||||||
|
|
||||||
def db_version_control(engine, abs_path, version=None):
    """Mark a database as under this repository's version control.

    Once a database is under version control, schema changes should
    only be done via change scripts in this repository.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version: Initial database version
    :returns: the version the database was stamped with
    """
    repo = _find_migrate_repo(abs_path)
    versioning_api.version_control(engine, repo, version)
    return version
|
|
||||||
|
|
||||||
|
|
||||||
def _find_migrate_repo(abs_path):
    """Get the project's change script repository

    :param abs_path: Absolute path to migrate repository
    :raises exception.DbMigrationError: when the path does not exist
    """
    if os.path.exists(abs_path):
        return Repository(abs_path)
    raise exception.DbMigrationError("Path %s not found" % abs_path)
|
|
|
@ -1,119 +0,0 @@
|
||||||
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# Copyright 2011 Piston Cloud Computing, Inc.
|
|
||||||
# Copyright 2012 Cloudscaling Group, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
"""
|
|
||||||
SQLAlchemy models.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from sqlalchemy import Column, Integer
|
|
||||||
from sqlalchemy import DateTime
|
|
||||||
from sqlalchemy.orm import object_mapper
|
|
||||||
|
|
||||||
from ec2api.openstack.common import timeutils
|
|
||||||
|
|
||||||
|
|
||||||
class ModelBase(six.Iterator):
    """Base class for models.

    Gives SQLAlchemy models dict-like access (``obj['key']``, ``get``,
    ``update``, iteration over (name, value) pairs) on both Python 2
    and Python 3 (via six.Iterator).
    """
    __table_initialized__ = False

    def save(self, session):
        """Save this object."""

        # NOTE(boris-42): This part of code should be look like:
        #   session.add(self)
        #   session.flush()
        # But there is a bug in sqlalchemy and eventlet that
        # raises NoneType exception if there is no running
        # transaction and rollback is called. As long as
        # sqlalchemy has this bug we have to create transaction
        # explicitly.
        with session.begin(subtransactions=True):
            session.add(self)
            session.flush()

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default=None):
        return getattr(self, key, default)

    @property
    def _extra_keys(self):
        """Specifies custom fields

        Subclasses can override this property to return a list
        of custom fields that should be included in their dict
        representation.

        For reference check tests/db/sqlalchemy/test_models.py
        """
        return []

    def __iter__(self):
        columns = list(dict(object_mapper(self).columns).keys())
        # NOTE(russellb): Allow models to specify other keys that can be looked
        # up, beyond the actual db columns. An example would be the 'name'
        # property for an Instance.
        columns.extend(self._extra_keys)
        # The iterator state lives on the instance; iteration yields
        # (column_name, value) pairs via __next__ below.
        self._i = iter(columns)
        return self

    # In Python 3, __next__() has replaced next().
    def __next__(self):
        n = six.advance_iterator(self._i)
        return n, getattr(self, n)

    def next(self):
        # Python 2 iterator protocol; delegates to __next__.
        return self.__next__()

    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in six.iteritems(values):
            setattr(self, k, v)

    def iteritems(self):
        """Make the model object behave like a dict.

        Includes attributes from joins.
        """
        local = dict(self)
        # Joined attributes live in __dict__; skip SQLAlchemy-internal
        # names (leading underscore).
        joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
                       if not k[0] == '_'])
        local.update(joined)
        return six.iteritems(local)
|
|
||||||
|
|
||||||
|
|
||||||
class TimestampMixin(object):
    # Set once at INSERT time; the lambda defers the utcnow() call until
    # the row is actually created.
    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
    # Refreshed by SQLAlchemy on every UPDATE of the row.
    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
|
|
||||||
|
|
||||||
|
|
||||||
class SoftDeleteMixin(object):
    deleted_at = Column(DateTime)
    deleted = Column(Integer, default=0)

    def soft_delete(self, session):
        """Mark this object as deleted."""
        # NOTE(review): ``deleted`` is set to the row id rather than 1 —
        # presumably so unique constraints that include ``deleted`` still
        # permit multiple soft-deleted rows; confirm against schema usage.
        self.deleted_at = timeutils.utcnow()
        self.deleted = self.id
        self.save(session=session)
|
|
|
@ -1,157 +0,0 @@
|
||||||
# Copyright 2013 Mirantis.inc
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Provision test environment for specific DB backends"""
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import random
|
|
||||||
import string
|
|
||||||
|
|
||||||
from six import moves
|
|
||||||
import sqlalchemy
|
|
||||||
|
|
||||||
from ec2api.openstack.common.db import exception as exc
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def get_engine(uri):
    """Engine creation

    Call the function without arguments to get admin connection. Admin
    connection required to create temporary user and database for each
    particular test. Otherwise use existing connection to recreate connection
    to the temporary database.
    """
    # NullPool: every test gets fresh connections, nothing is cached.
    engine = sqlalchemy.create_engine(uri,
                                      poolclass=sqlalchemy.pool.NullPool)
    return engine
|
|
||||||
|
|
||||||
|
|
||||||
def _execute_sql(engine, sql, driver):
    """Run each statement in *sql* on a fresh connection, then close it."""
    try:
        with engine.connect() as conn:
            if driver == 'postgresql':
                # Isolation level 0 is psycopg2's AUTOCOMMIT: CREATE/DROP
                # DATABASE cannot run inside a transaction block.
                conn.connection.set_isolation_level(0)
            for statement in sql:
                conn.execute(statement)
    except sqlalchemy.exc.OperationalError:
        msg = ('%s does not match database admin '
               'credentials or database does not exist.')
        LOG.exception(msg % engine.url)
        raise exc.DBConnectionError(msg % engine.url)
|
|
||||||
|
|
||||||
|
|
||||||
def create_database(engine):
    """Provide temporary user and database for each particular test."""
    driver = engine.name

    # Random 10-letter database name plus the admin credentials.
    auth = {
        'database': ''.join(random.choice(string.ascii_lowercase)
                            for _ in moves.range(10)),
        'user': engine.url.username,
        'passwd': engine.url.password,
    }

    sqls = [
        "drop database if exists %(database)s;",
        "create database %(database)s;"
    ]

    if driver == 'sqlite':
        return 'sqlite:////tmp/%s' % auth['database']

    if driver in ['mysql', 'postgresql']:
        statements = [template % auth for template in sqls]
        _execute_sql(engine, statements, driver)
    else:
        raise ValueError('Unsupported RDBMS %s' % driver)

    params = dict(auth, backend=driver)
    return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
|
|
||||||
|
|
||||||
|
|
||||||
def drop_database(admin_engine, current_uri):
    """Drop temporary database and user after each particular test."""
    engine = get_engine(current_uri)
    driver = engine.name
    auth = {'database': engine.url.database, 'user': engine.url.username}

    if driver == 'sqlite':
        # The "database" is just a file; it may already be gone.
        try:
            os.remove(auth['database'])
        except OSError:
            pass
        return

    if driver not in ['mysql', 'postgresql']:
        raise ValueError('Unsupported RDBMS %s' % driver)

    sql = "drop database if exists %(database)s;"
    _execute_sql(admin_engine, [sql % auth], driver)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Controller to handle commands

    ::create: Create test user and database with random names.
    ::drop: Drop user and database created by previous command.
    """
    parser = argparse.ArgumentParser(
        description='Controller to handle database creation and dropping'
                    ' commands.',
        epilog='Under normal circumstances is not used directly.'
               ' Used in .testr.conf to automate test database creation'
               ' and dropping processes.')
    subparsers = parser.add_subparsers(
        help='Subcommands to manipulate temporary test databases.')

    create = subparsers.add_parser(
        'create',
        help='Create temporary test '
             'databases and users.')
    create.set_defaults(which='create')
    create.add_argument(
        'instances_count',
        type=int,
        help='Number of databases to create.')

    drop = subparsers.add_parser(
        'drop',
        help='Drop temporary test databases and users.')
    drop.set_defaults(which='drop')
    drop.add_argument(
        'instances',
        nargs='+',
        help='List of databases uri to be dropped.')

    args = parser.parse_args()

    # Admin connection used to create/drop the per-test databases.
    connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION',
                                  'sqlite://')
    engine = get_engine(connection_string)
    which = args.which

    if which == "create":
        # NOTE: argparse already coerced instances_count via type=int, so
        # the previous int(...) re-conversion was redundant.
        for _ in range(args.instances_count):
            print(create_database(engine))
    elif which == "drop":
        for db in args.instances:
            drop_database(engine, db)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
|
@ -1,905 +0,0 @@
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Session Handling for SQLAlchemy backend.
|
|
||||||
|
|
||||||
Recommended ways to use sessions within this framework:
|
|
||||||
|
|
||||||
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
|
|
||||||
`model_query()` will implicitly use a session when called without one
|
|
||||||
supplied. This is the ideal situation because it will allow queries
|
|
||||||
to be automatically retried if the database connection is interrupted.
|
|
||||||
|
|
||||||
.. note:: Automatic retry will be enabled in a future patch.
|
|
||||||
|
|
||||||
It is generally fine to issue several queries in a row like this. Even though
|
|
||||||
they may be run in separate transactions and/or separate sessions, each one
|
|
||||||
will see the data from the prior calls. If needed, undo- or rollback-like
|
|
||||||
functionality should be handled at a logical level. For an example, look at
|
|
||||||
the code around quotas and `reservation_rollback()`.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
def get_foo(context, foo):
|
|
||||||
return (model_query(context, models.Foo).
|
|
||||||
filter_by(foo=foo).
|
|
||||||
first())
|
|
||||||
|
|
||||||
def update_foo(context, id, newfoo):
|
|
||||||
(model_query(context, models.Foo).
|
|
||||||
filter_by(id=id).
|
|
||||||
update({'foo': newfoo}))
|
|
||||||
|
|
||||||
def create_foo(context, values):
|
|
||||||
foo_ref = models.Foo()
|
|
||||||
foo_ref.update(values)
|
|
||||||
foo_ref.save()
|
|
||||||
return foo_ref
|
|
||||||
|
|
||||||
|
|
||||||
* Within the scope of a single method, keep all the reads and writes within
|
|
||||||
the context managed by a single session. In this way, the session's
|
|
||||||
`__exit__` handler will take care of calling `flush()` and `commit()` for
|
|
||||||
you. If using this approach, you should not explicitly call `flush()` or
|
|
||||||
`commit()`. Any error within the context of the session will cause the
|
|
||||||
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
|
|
||||||
raised in `session`'s `__exit__` handler, and any try/except within the
|
|
||||||
context managed by `session` will not be triggered. And catching other
|
|
||||||
non-database errors in the session will not trigger the ROLLBACK, so
|
|
||||||
exception handlers should always be outside the session, unless the
|
|
||||||
developer wants to do a partial commit on purpose. If the connection is
|
|
||||||
dropped before this is possible, the database will implicitly roll back the
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
.. note:: Statements in the session scope will not be automatically retried.
|
|
||||||
|
|
||||||
If you create models within the session, they need to be added, but you
|
|
||||||
do not need to call `model.save()`:
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
def create_many_foo(context, foos):
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin():
|
|
||||||
for foo in foos:
|
|
||||||
foo_ref = models.Foo()
|
|
||||||
foo_ref.update(foo)
|
|
||||||
session.add(foo_ref)
|
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin():
|
|
||||||
foo_ref = (model_query(context, models.Foo, session).
|
|
||||||
filter_by(id=foo_id).
|
|
||||||
first())
|
|
||||||
(model_query(context, models.Bar, session).
|
|
||||||
filter_by(id=foo_ref['bar_id']).
|
|
||||||
update({'bar': newbar}))
|
|
||||||
|
|
||||||
.. note:: `update_bar` is a trivially simple example of using
|
|
||||||
``with session.begin``. Whereas `create_many_foo` is a good example of
|
|
||||||
when a transaction is needed, it is always best to use as few queries as
|
|
||||||
possible.
|
|
||||||
|
|
||||||
The two queries in `update_bar` can be better expressed using a single query
|
|
||||||
which avoids the need for an explicit transaction. It can be expressed like
|
|
||||||
so:
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
|
||||||
subq = (model_query(context, models.Foo.id).
|
|
||||||
filter_by(id=foo_id).
|
|
||||||
limit(1).
|
|
||||||
subquery())
|
|
||||||
(model_query(context, models.Bar).
|
|
||||||
filter_by(id=subq.as_scalar()).
|
|
||||||
update({'bar': newbar}))
|
|
||||||
|
|
||||||
For reference, this emits approximately the following SQL statement:
|
|
||||||
|
|
||||||
.. code-block:: sql
|
|
||||||
|
|
||||||
UPDATE bar SET bar = ${newbar}
|
|
||||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
|
||||||
|
|
||||||
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
|
|
||||||
exception while using ``with session.begin``. Here create two duplicate
|
|
||||||
instances with same primary key, must catch the exception out of context
|
|
||||||
managed by a single session:
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
def create_duplicate_foo(context):
|
|
||||||
foo1 = models.Foo()
|
|
||||||
foo2 = models.Foo()
|
|
||||||
foo1.id = foo2.id = 1
|
|
||||||
session = sessionmaker()
|
|
||||||
try:
|
|
||||||
with session.begin():
|
|
||||||
session.add(foo1)
|
|
||||||
session.add(foo2)
|
|
||||||
except exception.DBDuplicateEntry as e:
|
|
||||||
handle_error(e)
|
|
||||||
|
|
||||||
* Passing an active session between methods. Sessions should only be passed
|
|
||||||
to private methods. The private method must use a subtransaction; otherwise
|
|
||||||
SQLAlchemy will throw an error when you call `session.begin()` on an existing
|
|
||||||
transaction. Public methods should not accept a session parameter and should
|
|
||||||
not be involved in sessions within the caller's scope.
|
|
||||||
|
|
||||||
Note that this incurs more overhead in SQLAlchemy than the above means
|
|
||||||
due to nesting transactions, and it is not possible to implicitly retry
|
|
||||||
failed database operations when using this approach.
|
|
||||||
|
|
||||||
This also makes code somewhat more difficult to read and debug, because a
|
|
||||||
single database transaction spans more than one method. Error handling
|
|
||||||
becomes less clear in this situation. When this is needed for code clarity,
|
|
||||||
it should be clearly documented.
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
def myfunc(foo):
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin():
|
|
||||||
# do some database things
|
|
||||||
bar = _private_func(foo, session)
|
|
||||||
return bar
|
|
||||||
|
|
||||||
def _private_func(foo, session=None):
|
|
||||||
if not session:
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin(subtransaction=True):
|
|
||||||
# do some other database things
|
|
||||||
return bar
|
|
||||||
|
|
||||||
|
|
||||||
There are some things which it is best to avoid:
|
|
||||||
|
|
||||||
* Don't keep a transaction open any longer than necessary.
|
|
||||||
|
|
||||||
This means that your ``with session.begin()`` block should be as short
|
|
||||||
as possible, while still containing all the related calls for that
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
* Avoid ``with_lockmode('UPDATE')`` when possible.
|
|
||||||
|
|
||||||
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
|
|
||||||
any rows, it will take a gap-lock. This is a form of write-lock on the
|
|
||||||
"gap" where no rows exist, and prevents any other writes to that space.
|
|
||||||
This can effectively prevent any INSERT into a table by locking the gap
|
|
||||||
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
|
|
||||||
has an overly broad WHERE clause, or doesn't properly use an index.
|
|
||||||
|
|
||||||
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
|
|
||||||
number of rows matching a query, and if only one row is returned,
|
|
||||||
then issue the SELECT FOR UPDATE.
|
|
||||||
|
|
||||||
The better long-term solution is to use
|
|
||||||
``INSERT .. ON DUPLICATE KEY UPDATE``.
|
|
||||||
However, this can not be done until the "deleted" columns are removed and
|
|
||||||
proper UNIQUE constraints are added to the tables.
|
|
||||||
|
|
||||||
|
|
||||||
Enabling soft deletes:
|
|
||||||
|
|
||||||
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
|
|
||||||
to your model class. For example:
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
Efficient use of soft deletes:
|
|
||||||
|
|
||||||
* There are two possible ways to mark a record as deleted:
|
|
||||||
`model.soft_delete()` and `query.soft_delete()`.
|
|
||||||
|
|
||||||
The `model.soft_delete()` method works with a single already-fetched entry.
|
|
||||||
`query.soft_delete()` makes only one db request for all entries that
|
|
||||||
correspond to the query.
|
|
||||||
|
|
||||||
* In almost all cases you should use `query.soft_delete()`. Some examples:
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
def soft_delete_bar():
|
|
||||||
count = model_query(BarModel).find(some_condition).soft_delete()
|
|
||||||
if count == 0:
|
|
||||||
raise Exception("0 entries were soft deleted")
|
|
||||||
|
|
||||||
def complex_soft_delete_with_synchronization_bar(session=None):
|
|
||||||
if session is None:
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
count = (model_query(BarModel).
|
|
||||||
find(some_condition).
|
|
||||||
soft_delete(synchronize_session=True))
|
|
||||||
# Here synchronize_session is required, because we
|
|
||||||
# don't know what is going on in outer session.
|
|
||||||
if count == 0:
|
|
||||||
raise Exception("0 entries were soft deleted")
|
|
||||||
|
|
||||||
* There is only one situation where `model.soft_delete()` is appropriate: when
|
|
||||||
you fetch a single record, work with it, and mark it as deleted in the same
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
def soft_delete_bar_model():
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin():
|
|
||||||
bar_ref = model_query(BarModel).find(some_condition).first()
|
|
||||||
# Work with bar_ref
|
|
||||||
bar_ref.soft_delete(session=session)
|
|
||||||
|
|
||||||
However, if you need to work with all entries that correspond to query and
|
|
||||||
then soft delete them you should use the `query.soft_delete()` method:
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
def soft_delete_multi_models():
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin():
|
|
||||||
query = (model_query(BarModel, session=session).
|
|
||||||
find(some_condition))
|
|
||||||
model_refs = query.all()
|
|
||||||
# Work with model_refs
|
|
||||||
query.soft_delete(synchronize_session=False)
|
|
||||||
# synchronize_session=False should be set if there is no outer
|
|
||||||
# session and these entries are not used after this.
|
|
||||||
|
|
||||||
When working with many rows, it is very important to use query.soft_delete,
|
|
||||||
which issues a single query. Using `model.soft_delete()`, as in the following
|
|
||||||
example, is very inefficient.
|
|
||||||
|
|
||||||
.. code-block:: python
|
|
||||||
|
|
||||||
for bar_ref in bar_refs:
|
|
||||||
bar_ref.soft_delete(session=session)
|
|
||||||
# This will produce count(bar_refs) db requests.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import logging
|
|
||||||
import re
|
|
||||||
import time
|
|
||||||
|
|
||||||
import six
|
|
||||||
from sqlalchemy import exc as sqla_exc
|
|
||||||
from sqlalchemy.interfaces import PoolListener
|
|
||||||
import sqlalchemy.orm
|
|
||||||
from sqlalchemy.pool import NullPool, StaticPool
|
|
||||||
from sqlalchemy.sql.expression import literal_column
|
|
||||||
|
|
||||||
from ec2api.openstack.common.db import exception
|
|
||||||
from ec2api.openstack.common.gettextutils import _LE, _LW
|
|
||||||
from ec2api.openstack.common import timeutils
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class SqliteForeignKeysListener(PoolListener):
    """Ensures that the foreign key constraints are enforced in SQLite.

    The foreign key constraints are disabled by default in SQLite,
    so the foreign key constraints will be enabled here for every
    database connection
    """
    def connect(self, dbapi_con, con_record):
        # Invoked by the pool for each new DBAPI connection; the pragma
        # only affects this single connection, hence a listener is needed
        # rather than a one-time setup call.
        dbapi_con.execute('pragma foreign_keys=ON')
|
|
||||||
|
|
||||||
|
|
||||||
# note(boris-42): In current versions of DB backends unique constraint
|
|
||||||
# violation messages follow the structure:
|
|
||||||
#
|
|
||||||
# sqlite:
|
|
||||||
# 1 column - (IntegrityError) column c1 is not unique
|
|
||||||
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
|
|
||||||
#
|
|
||||||
# sqlite since 3.7.16:
|
|
||||||
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
|
|
||||||
#
|
|
||||||
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
|
|
||||||
#
|
|
||||||
# postgres:
|
|
||||||
# 1 column - (IntegrityError) duplicate key value violates unique
|
|
||||||
# constraint "users_c1_key"
|
|
||||||
# N columns - (IntegrityError) duplicate key value violates unique
|
|
||||||
# constraint "name_of_our_constraint"
|
|
||||||
#
|
|
||||||
# mysql:
|
|
||||||
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
|
|
||||||
# 'c1'")
|
|
||||||
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
|
|
||||||
# with -' for key 'name_of_our_constraint'")
|
|
||||||
#
|
|
||||||
# ibm_db_sa:
|
|
||||||
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
|
|
||||||
# statement, UPDATE statement, or foreign key update caused by a
|
|
||||||
# DELETE statement are not valid because the primary key, unique
|
|
||||||
# constraint or unique index identified by "2" constrains table
|
|
||||||
# "NOVA.KEY_PAIRS" from having duplicate values for the index
|
|
||||||
# key.
|
|
||||||
# Per-backend regexes matching unique-constraint violation messages
# (message formats are documented in the comment block above). Each value
# is a tuple so a backend may have several historical formats (sqlite
# changed its wording in 3.7.16). Group 1, where present, captures the
# column/constraint text.
_DUP_KEY_RE_DB = {
    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
    "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}
|
|
||||||
|
|
||||||
|
|
||||||
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    """Raise exception if two entries are duplicated.

    In this function will be raised DBDuplicateEntry exception if integrity
    error wrap unique constraint violation.

    :param integrity_error: the IntegrityError raised by SQLAlchemy.
    :param engine_name: dialect name ('sqlite', 'mysql', 'postgresql',
                        'ibm_db_sa'); other engines are ignored.
    """

    def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        #                  where `t` it is table name and columns `c1`, `c2`
        #                  are in UniqueConstraint.
        uniqbase = "uniq_"
        if not columns.startswith(uniqbase):
            if engine_name == "postgresql":
                # Postgres default constraint name is "<table>_<column>_key";
                # slice out the middle (column) part.
                return [columns[columns.index("_") + 1:columns.rindex("_")]]
            return [columns]
        # "uniq_t0c10c2" -> split on "0"; drop the leading table-name element.
        return columns[len(uniqbase):].split("0")[1:]

    if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"):
        return

    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    for pattern in _DUP_KEY_RE_DB[engine_name]:
        match = pattern.match(integrity_error.message)
        if match:
            break
    else:
        # No known duplicate-key message format matched: not a unique
        # constraint violation, let the caller handle the error.
        return

    # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
    # columns so we have to omit that from the DBDuplicateEntry error.
    columns = ''

    if engine_name != 'ibm_db_sa':
        columns = match.group(1)

    if engine_name == "sqlite":
        # sqlite reports "tbl.col1, tbl.col2" — strip the table prefixes.
        columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)
|
|
||||||
|
|
||||||
|
|
||||||
# NOTE(comstud): In current versions of DB backends, Deadlock violation
|
|
||||||
# messages follow the structure:
|
|
||||||
#
|
|
||||||
# mysql:
|
|
||||||
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
|
|
||||||
# 'restarting transaction') <query_str> <query_args>
|
|
||||||
_DEADLOCK_RE_DB = {
|
|
||||||
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def _raise_if_deadlock_error(operational_error, engine_name):
|
|
||||||
"""Raise exception on deadlock condition.
|
|
||||||
|
|
||||||
Raise DBDeadlock exception if OperationalError contains a Deadlock
|
|
||||||
condition.
|
|
||||||
"""
|
|
||||||
re = _DEADLOCK_RE_DB.get(engine_name)
|
|
||||||
if re is None:
|
|
||||||
return
|
|
||||||
# FIXME(johannes): The usage of the .message attribute has been
|
|
||||||
# deprecated since Python 2.6. However, the exceptions raised by
|
|
||||||
# SQLAlchemy can differ when using unicode() and accessing .message.
|
|
||||||
# An audit across all three supported engines will be necessary to
|
|
||||||
# ensure there are no regressions.
|
|
||||||
m = re.match(operational_error.message)
|
|
||||||
if not m:
|
|
||||||
return
|
|
||||||
raise exception.DBDeadlock(operational_error)
|
|
||||||
|
|
||||||
|
|
||||||
def _wrap_db_error(f):
    """Decorator translating SQLAlchemy/driver errors into DB* exceptions.

    Intended only for methods of sqlalchemy.orm.session.Session subclasses
    (asserted below). The ordering of the except clauses matters:
    OperationalError is inspected for lost-connection and deadlock cases but
    otherwise re-raised unwrapped; IntegrityError is inspected for duplicate
    keys; everything else becomes a generic DBError.
    """
    @functools.wraps(f)
    def _wrap(self, *args, **kwargs):
        try:
            assert issubclass(
                self.__class__, sqlalchemy.orm.session.Session
            ), ('_wrap_db_error() can only be applied to methods of '
                'subclasses of sqlalchemy.orm.session.Session.')

            return f(self, *args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        except sqla_exc.OperationalError as e:
            _raise_if_db_connection_lost(e, self.bind)
            _raise_if_deadlock_error(e, self.bind.dialect.name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        # note(boris-42): We should catch unique constraint violation and
        #                 wrap it by our own DBDuplicateEntry exception.
        #                 Unique constraint violation is wrapped by
        #                 IntegrityError.
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            #                 DBs so we must do this. Also in some tables
            #                 (for example instance_types) there are more
            #                 than one unique constraint. This means we
            #                 should get names of columns, which values
            #                 violate unique constraint, from error message.
            _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
            raise exception.DBError(e)
        except Exception as e:
            # Log with traceback: the original exception type is hidden from
            # callers behind the generic DBError wrapper.
            LOG.exception(_LE('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap
|
|
||||||
|
|
||||||
|
|
||||||
def _synchronous_switch_listener(dbapi_conn, connection_rec):
|
|
||||||
"""Switch sqlite connections to non-synchronous mode."""
|
|
||||||
dbapi_conn.execute("PRAGMA synchronous = OFF")
|
|
||||||
|
|
||||||
|
|
||||||
def _add_regexp_listener(dbapi_con, con_record):
    """Register a two-argument REGEXP() SQL function on a sqlite connection.

    sqlite has no built-in REGEXP operator; this backs one with Python's
    ``re`` module so queries using REGEXP work against sqlite.
    """
    def _regexp_impl(pattern_text, value):
        compiled = re.compile(pattern_text)
        return compiled.search(six.text_type(value)) is not None

    dbapi_con.create_function('regexp', 2, _regexp_impl)
|
|
||||||
|
|
||||||
|
|
||||||
def _thread_yield(dbapi_con, con_record):
    """Ensure other greenthreads get a chance to be executed.

    If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
    execute instead of time.sleep(0).
    Force a context switch. With common database backends (eg MySQLdb and
    sqlite), there is no implicit yield caused by network I/O since they are
    implemented by C libraries that eventlet cannot monkey patch.

    Both arguments are required by the pool 'checkin' listener signature;
    neither is used.
    """
    time.sleep(0)
|
|
||||||
|
|
||||||
|
|
||||||
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL, PostgreSQL or DB2 connections are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f

    Registered as a 'checkout' pool listener (with ``engine`` bound via
    functools.partial); issues a trivial query and, if it fails with a
    disconnect-type error, drops the whole pool and tells SQLAlchemy to
    retry with a fresh connection.
    """
    cursor = dbapi_conn.cursor()
    try:
        ping_sql = 'select 1'
        if engine.name == 'ibm_db_sa':
            # DB2 requires a table expression
            ping_sql = 'select 1 from (values (1)) AS t1'
        cursor.execute(ping_sql)
    except Exception as ex:
        if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
            msg = _LW('Database server has gone away: %s') % ex
            LOG.warning(msg)

            # if the database server has gone away, all connections in the
            # pool have become invalid and we can safely close all of them
            # here, rather than waste time on checking of every single
            # connection
            engine.dispose()

            # this will be handled by SQLAlchemy and will force it to create
            # a new connection and retry the original action
            raise sqla_exc.DisconnectionError(msg)
        else:
            # Not a disconnect: propagate the original error untouched.
            raise
|
|
||||||
|
|
||||||
|
|
||||||
def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None):
|
|
||||||
"""Set the sql_mode session variable.
|
|
||||||
|
|
||||||
MySQL supports several server modes. The default is None, but sessions
|
|
||||||
may choose to enable server modes like TRADITIONAL, ANSI,
|
|
||||||
several STRICT_* modes and others.
|
|
||||||
|
|
||||||
Note: passing in '' (empty string) for sql_mode clears
|
|
||||||
the SQL mode for the session, overriding a potentially set
|
|
||||||
server default.
|
|
||||||
"""
|
|
||||||
|
|
||||||
cursor = dbapi_con.cursor()
|
|
||||||
cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
|
|
||||||
|
|
||||||
|
|
||||||
def _mysql_get_effective_sql_mode(engine):
|
|
||||||
"""Returns the effective SQL mode for connections from the engine pool.
|
|
||||||
|
|
||||||
Returns ``None`` if the mode isn't available, otherwise returns the mode.
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Get the real effective SQL mode. Even when unset by
|
|
||||||
# our own config, the server may still be operating in a specific
|
|
||||||
# SQL mode as set by the server configuration.
|
|
||||||
# Also note that the checkout listener will be called on execute to
|
|
||||||
# set the mode if it's registered.
|
|
||||||
row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
|
|
||||||
if row is None:
|
|
||||||
return
|
|
||||||
return row[1]
|
|
||||||
|
|
||||||
|
|
||||||
def _mysql_check_effective_sql_mode(engine):
    """Logs a message based on the effective SQL mode for MySQL connections."""
    effective = _mysql_get_effective_sql_mode(engine)
    if effective is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return

    LOG.debug('MySQL server mode set to %s', effective)
    # 'TRADITIONAL' mode enables several other modes, so a substring
    # match (rather than equality) is needed here.
    upper_mode = effective.upper()
    strict_enough = ('TRADITIONAL' in upper_mode or
                     'STRICT_ALL_TABLES' in upper_mode)
    if not strict_enough:
        LOG.warning(_LW("MySQL SQL mode is '%s', "
                        "consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
                    effective)
|
|
||||||
|
|
||||||
|
|
||||||
def _mysql_set_mode_callback(engine, sql_mode):
    """Arrange for ``sql_mode`` on every new connection, then log the result.

    When ``sql_mode`` is None no listener is attached; the effective mode
    (possibly the server's own default) is checked and logged either way.
    """
    if sql_mode is not None:
        # Each new DBAPI connection from this engine gets the requested mode.
        sqlalchemy.event.listen(
            engine, 'connect',
            functools.partial(_set_session_sql_mode, sql_mode=sql_mode))
    _mysql_check_effective_sql_mode(engine)
|
|
||||||
|
|
||||||
|
|
||||||
def _is_db_connection_error(args):
|
|
||||||
"""Return True if error in connecting to db."""
|
|
||||||
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
|
|
||||||
# to support Postgres and others.
|
|
||||||
# For the db2, the error code is -30081 since the db2 is still not ready
|
|
||||||
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
|
|
||||||
for err_code in conn_err_codes:
|
|
||||||
if args.find(err_code) != -1:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def _raise_if_db_connection_lost(error, engine):
|
|
||||||
# NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
|
|
||||||
# requires connection and cursor in incoming parameters,
|
|
||||||
# but we have no possibility to create connection if DB
|
|
||||||
# is not available, so in such case reconnect fails.
|
|
||||||
# But is_disconnect() ignores these parameters, so it
|
|
||||||
# makes sense to pass to function None as placeholder
|
|
||||||
# instead of connection and cursor.
|
|
||||||
if engine.dialect.is_disconnect(error, None, None):
|
|
||||||
raise exception.DBConnectionError(error)
|
|
||||||
|
|
||||||
|
|
||||||
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
                  idle_timeout=3600,
                  connection_debug=0, max_pool_size=None, max_overflow=None,
                  pool_timeout=None, sqlite_synchronous=True,
                  connection_trace=False, max_retries=10, retry_interval=10):
    """Return a new SQLAlchemy engine.

    :param sql_connection: database URL.
    :param sqlite_fk: enforce foreign keys on sqlite connections.
    :param mysql_sql_mode: MySQL sql_mode to set per connection, if any.
    :param idle_timeout: pool_recycle value (seconds).
    :param connection_debug: >=100 -> DEBUG SQL logging, >=50 -> INFO,
        otherwise WARNING.
    :param max_pool_size/max_overflow/pool_timeout: pool tuning for
        non-sqlite backends (only applied when not None).
    :param sqlite_synchronous: if False, sqlite runs with synchronous OFF.
    :param connection_trace: add Python stack traces as SQL comments
        (MySQLdb driver only).
    :param max_retries: initial-connect retries; -1 means retry forever.
    :param retry_interval: seconds between connect retries.
    """

    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }

    logger = logging.getLogger('sqlalchemy.engine')

    # Map SQL debug level to Python log level
    if connection_debug >= 100:
        logger.setLevel(logging.DEBUG)
    elif connection_debug >= 50:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        # In-memory sqlite: one shared connection usable across threads,
        # otherwise each pool checkout would see an empty database.
        if sql_connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if max_pool_size is not None:
            engine_args['pool_size'] = max_pool_size
        if max_overflow is not None:
            engine_args['max_overflow'] = max_overflow
        if pool_timeout is not None:
            engine_args['pool_timeout'] = pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    # Let greenthreads switch on every pool checkin (see _thread_yield).
    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'):
        # Verify liveness of every connection handed out by the pool.
        ping_callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', ping_callback)
        if engine.name == 'mysql':
            if mysql_sql_mode:
                _mysql_set_mode_callback(engine, mysql_sql_mode)
    elif 'sqlite' in connection_dict.drivername:
        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()

    # Initial connect with retry: only connection-type OperationalErrors
    # are retried; anything else propagates immediately.
    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = max_retries
        if remaining == -1:
            # Sentinel string: the loop below never decrements it, so we
            # retry forever.
            remaining = 'infinite'
        while True:
            msg = _LW('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine
|
|
||||||
|
|
||||||
|
|
||||||
class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""
    def soft_delete(self, synchronize_session='evaluate'):
        # Mark all rows matched by this query as deleted with a single
        # UPDATE: 'deleted' is set to the row's own id value,
        # 'updated_at' is reassigned its current value (left unchanged),
        # and 'deleted_at' records the deletion time. Returns the number
        # of rows updated (per Query.update()).
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)
|
|
||||||
|
|
||||||
|
|
||||||
class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching.

    Each overridden method is wrapped with _wrap_db_error so that
    SQLAlchemy/driver exceptions surface as this library's DB* exceptions.
    """
    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine."""
    # Uses the custom Session (error wrapping) and Query (soft_delete)
    # classes defined in this module.
    return sqlalchemy.orm.sessionmaker(bind=engine,
                                       class_=Session,
                                       autocommit=autocommit,
                                       expire_on_commit=expire_on_commit,
                                       query_cls=Query)
|
|
||||||
|
|
||||||
|
|
||||||
def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries.

    Patches MySQLdb.cursors.BaseCursor._do_query.
    """
    # Imported lazily: MySQLdb is only present when the MySQLdb driver is in
    # use (the caller checks this before invoking us).
    import traceback

    import MySQLdb.cursors

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    def _do_query(self, q):
        stack = ''
        # traceback.extract_stack() yields (filename, lineno, name, text);
        # the local names here are historical: 'method' is the function
        # name and 'function' is the source text of the frame's line.
        for filename, line, method, function in traceback.extract_stack():
            # exclude various common things from trace
            if filename.endswith('session.py') and method == '_do_query':
                continue
            if filename.endswith('api.py') and method == 'wrapper':
                continue
            if filename.endswith('utils.py') and method == '_inner':
                continue
            if filename.endswith('exception.py') and method == '_wrap':
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if filename.endswith('db/api.py'):
                continue
            # only trace inside ec2api
            index = filename.rfind('ec2api')
            if index == -1:
                continue
            stack += "File:%s:%s Method:%s() Line:%s | " \
                     % (filename[index:], line, method, function)

        # strip trailing " | " from stack
        if stack:
            stack = stack[:-3]
            # Append the trace as a SQL comment so it shows up in the
            # server's query log / SHOW PROCESSLIST.
            qq = "%s /* %s */" % (q, stack)
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
|
|
||||||
|
|
||||||
|
|
||||||
class EngineFacade(object):
    """A helper class for removing of global engine instances from ec2api.db.

    As a library, ec2api.db can't decide where to store/when to create engine
    and sessionmaker instances, so this must be left for a target application.

    On the other hand, in order to simplify the adoption of ec2api.db changes,
    we'll provide a helper class, which creates engine and sessionmaker
    on its instantiation and provides get_engine()/get_session() methods
    that are compatible with corresponding utility functions that currently
    exist in target projects, e.g. in Nova.

    engine/sessionmaker instances will still be global (and they are meant to
    be global), but they will be stored in the app context, rather that in the
    ec2api.db context.

    Note: using of this helper is completely optional and you are encouraged to
    integrate engine/sessionmaker instances into your apps any way you like
    (e.g. one might want to bind a session to a request context). Two important
    things to remember:

    1. An Engine instance is effectively a pool of DB connections, so it's
       meant to be shared (and it's thread-safe).
    2. A Session instance is not meant to be shared and represents a DB
       transactional context (i.e. it's not thread-safe). sessionmaker is
       a factory of sessions.

    """

    def __init__(self, sql_connection,
                 sqlite_fk=False, autocommit=True,
                 expire_on_commit=False, **kwargs):
        """Initialize engine and sessionmaker instances.

        :param sqlite_fk: enable foreign keys in SQLite
        :type sqlite_fk: bool

        :param autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :param expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        Keyword arguments:

        :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions.
                                 (defaults to TRADITIONAL)
        :keyword idle_timeout: timeout before idle sql connections are reaped
                               (defaults to 3600)
        :keyword connection_debug: verbosity of SQL debugging information.
                                   0=None, 100=Everything (defaults to 0)
        :keyword max_pool_size: maximum number of SQL connections to keep open
                                in a pool (defaults to SQLAlchemy settings)
        :keyword max_overflow: if set, use this value for max_overflow with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword pool_timeout: if set, use this value for pool_timeout with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword sqlite_synchronous: if True, SQLite uses synchronous mode
                                     (defaults to True)
        :keyword connection_trace: add python stack traces to SQL as comment
                                   strings (defaults to False)
        :keyword max_retries: maximum db connection retries during startup.
                              (setting -1 implies an infinite retry count)
                              (defaults to 10)
        :keyword retry_interval: interval between retries of opening a sql
                                 connection (defaults to 10)

        """

        super(EngineFacade, self).__init__()

        self._engine = create_engine(
            sql_connection=sql_connection,
            sqlite_fk=sqlite_fk,
            mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'),
            idle_timeout=kwargs.get('idle_timeout', 3600),
            connection_debug=kwargs.get('connection_debug', 0),
            max_pool_size=kwargs.get('max_pool_size'),
            max_overflow=kwargs.get('max_overflow'),
            pool_timeout=kwargs.get('pool_timeout'),
            sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
            connection_trace=kwargs.get('connection_trace', False),
            max_retries=kwargs.get('max_retries', 10),
            retry_interval=kwargs.get('retry_interval', 10))
        self._session_maker = get_maker(
            engine=self._engine,
            autocommit=autocommit,
            expire_on_commit=expire_on_commit)

    def get_engine(self):
        """Get the engine instance (note, that it's shared)."""

        return self._engine

    def get_session(self, **kwargs):
        """Get a Session instance.

        If passed, keyword arguments values override the ones used when the
        sessionmaker instance was created.

        :keyword autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :keyword expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        """

        # NOTE: filter into a fresh dict instead of deleting keys while
        # iterating over `kwargs` -- mutating a dict during iteration
        # raises "RuntimeError: dictionary changed size during iteration".
        kwargs = dict((arg, value) for arg, value in kwargs.items()
                      if arg in ('autocommit', 'expire_on_commit'))

        return self._session_maker(**kwargs)

    @classmethod
    def from_config(cls, connection_string, conf,
                    sqlite_fk=False, autocommit=True, expire_on_commit=False):
        """Initialize EngineFacade using oslo.config config instance options.

        :param connection_string: SQLAlchemy connection string
        :type connection_string: string

        :param conf: oslo.config config instance
        :type conf: oslo.config.cfg.ConfigOpts

        :param sqlite_fk: enable foreign keys in SQLite
        :type sqlite_fk: bool

        :param autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :param expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        """

        return cls(sql_connection=connection_string,
                   sqlite_fk=sqlite_fk,
                   autocommit=autocommit,
                   expire_on_commit=expire_on_commit,
                   **dict(conf.database.items()))
|
|
|
@ -1,167 +0,0 @@
|
||||||
# Copyright (c) 2013 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import abc
|
|
||||||
import functools
|
|
||||||
import os
|
|
||||||
|
|
||||||
import fixtures
|
|
||||||
from oslotest import base as test_base
|
|
||||||
import six
|
|
||||||
|
|
||||||
from ec2api.openstack.common.db.sqlalchemy import provision
|
|
||||||
from ec2api.openstack.common.db.sqlalchemy import session
|
|
||||||
from ec2api.openstack.common.db.sqlalchemy import utils
|
|
||||||
|
|
||||||
|
|
||||||
class DbFixture(fixtures.Fixture):
    """Basic database fixture.

    Allows to run tests on various db backends, such as SQLite, MySQL and
    PostgreSQL. By default use sqlite backend. To override default backend
    uri set env variable OS_TEST_DBAPI_CONNECTION with database admin
    credentials for specific backend.
    """

    def _get_uri(self):
        # Connection URI for the engine; defaults to in-memory SQLite.
        return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')

    def __init__(self, test):
        super(DbFixture, self).__init__()

        # The test case this fixture serves; setUp() attaches an engine
        # and a sessionmaker onto it.
        self.test = test

    def cleanUp(self):
        # Close all pooled connections when the test is done.
        self.test.engine.dispose()

    def setUp(self):
        super(DbFixture, self).setUp()

        self.test.engine = session.create_engine(self._get_uri())
        self.test.sessionmaker = session.get_maker(self.test.engine)
|
|
||||||
|
|
||||||
|
|
||||||
class DbTestCase(test_base.BaseTestCase):
    """Base class for testing of DB code.

    Using `DbFixture`. Intended to be the main database test case to use all
    the tests on a given backend with user defined uri. Backend specific
    tests should be decorated with `backend_specific` decorator.
    """

    # Fixture class instantiated per-test; subclasses may override.
    FIXTURE = DbFixture

    def setUp(self):
        super(DbTestCase, self).setUp()
        # The fixture wires self.engine / self.sessionmaker for the test.
        self.useFixture(self.FIXTURE(self))
|
|
||||||
|
|
||||||
|
|
||||||
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']


def backend_specific(*dialects):
    """Decorator to skip backend specific tests on inappropriate engines.

    ::dialects: list of dialects names under which the test will be launched.
    """
    def decorate(test_func):
        @functools.wraps(test_func)
        def guarded(self):
            # Reject typos in the requested dialect names outright.
            if not set(dialects).issubset(ALLOWED_DIALECTS):
                raise ValueError(
                    "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
            if self.engine.name in dialects:
                return test_func(self)
            # Engine in use is not one of the requested ones: skip.
            msg = ('The test "%s" can be run '
                   'only on %s. Current engine is %s.')
            args = (test_func.__name__, ' '.join(dialects), self.engine.name)
            self.skip(msg % args)
        return guarded
    return decorate
|
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
class OpportunisticFixture(DbFixture):
    """Base fixture to use default CI databases.

    The databases exist in OpenStack CI infrastructure. But for the
    correct functioning in local environment the databases must be
    created manually.
    """

    # Backend driver name; concrete subclasses must supply it.
    DRIVER = abc.abstractproperty(lambda: None)
    # Default credentials used by the OpenStack CI databases.
    DBNAME = PASSWORD = USERNAME = 'openstack_citest'

    def setUp(self):
        # Provision a throwaway database first, then let the base
        # fixture connect to it via the overridden _get_uri().
        self._provisioning_engine = provision.get_engine(
            utils.get_connect_string(backend=self.DRIVER,
                                     user=self.USERNAME,
                                     passwd=self.PASSWORD,
                                     database=self.DBNAME)
        )
        self._uri = provision.create_database(self._provisioning_engine)

        super(OpportunisticFixture, self).setUp()

    def cleanUp(self):
        super(OpportunisticFixture, self).cleanUp()

        # Drop the database created in setUp().
        provision.drop_database(self._provisioning_engine, self._uri)

    def _get_uri(self):
        # URI of the database provisioned in setUp().
        return self._uri
|
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
class OpportunisticTestCase(DbTestCase):
    """Base test case to use default CI databases.

    The subclasses of the test case are running only when openstack_citest
    database is available otherwise a tests will be skipped.
    """

    # Opportunistic fixture class; concrete subclasses must supply it.
    FIXTURE = abc.abstractproperty(lambda: None)

    def setUp(self):
        credentials = {
            'backend': self.FIXTURE.DRIVER,
            'user': self.FIXTURE.USERNAME,
            'passwd': self.FIXTURE.PASSWORD,
            'database': self.FIXTURE.DBNAME}

        # Skip (rather than fail) when the backend isn't reachable.
        if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials):
            msg = '%s backend is not available.' % self.FIXTURE.DRIVER
            return self.skip(msg)

        super(OpportunisticTestCase, self).setUp()
|
|
||||||
|
|
||||||
|
|
||||||
class MySQLOpportunisticFixture(OpportunisticFixture):
    """Opportunistic fixture for the MySQL CI database."""

    DRIVER = 'mysql'
    DBNAME = ''  # connect to MySQL server, but not to the openstack_citest db
|
|
||||||
|
|
||||||
|
|
||||||
class PostgreSQLOpportunisticFixture(OpportunisticFixture):
    """Opportunistic fixture for the PostgreSQL CI database."""

    DRIVER = 'postgresql'
    DBNAME = 'postgres'  # PostgreSQL requires the db name here,use service one
|
|
||||||
|
|
||||||
|
|
||||||
class MySQLOpportunisticTestCase(OpportunisticTestCase):
    """Opportunistic test case bound to the MySQL CI fixture."""

    FIXTURE = MySQLOpportunisticFixture
|
|
||||||
|
|
||||||
|
|
||||||
class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
    """Opportunistic test case bound to the PostgreSQL CI fixture."""

    FIXTURE = PostgreSQLOpportunisticFixture
|
|
|
@ -1,270 +0,0 @@
|
||||||
# Copyright 2010-2011 OpenStack Foundation
|
|
||||||
# Copyright 2012-2013 IBM Corp.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
import lockfile
|
|
||||||
from oslotest import base as test_base
|
|
||||||
from six import moves
|
|
||||||
from six.moves.urllib import parse
|
|
||||||
import sqlalchemy
|
|
||||||
import sqlalchemy.exc
|
|
||||||
|
|
||||||
from ec2api.openstack.common.db.sqlalchemy import utils
|
|
||||||
from ec2api.openstack.common.gettextutils import _LE
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def _have_mysql(user, passwd, database):
    """Report whether a MySQL test backend is available.

    The TEST_MYSQL_PRESENT environment variable, when set, short-circuits
    the live connectivity probe: '' or 'true' (case-insensitive) counts
    as present, anything else as absent.
    """
    override = os.environ.get('TEST_MYSQL_PRESENT')
    if override is not None:
        return override.lower() in ('', 'true')
    return utils.is_backend_avail(backend='mysql',
                                  user=user,
                                  passwd=passwd,
                                  database=database)
|
|
||||||
|
|
||||||
|
|
||||||
def _have_postgresql(user, passwd, database):
    """Report whether a PostgreSQL test backend is available.

    The TEST_POSTGRESQL_PRESENT environment variable, when set,
    short-circuits the live connectivity probe: '' or 'true'
    (case-insensitive) counts as present, anything else as absent.
    """
    override = os.environ.get('TEST_POSTGRESQL_PRESENT')
    if override is not None:
        return override.lower() in ('', 'true')
    return utils.is_backend_avail(backend='postgres',
                                  user=user,
                                  passwd=passwd,
                                  database=database)
|
|
||||||
|
|
||||||
|
|
||||||
def _set_db_lock(lock_path=None, lock_prefix=None):
    """Decorator factory serializing the wrapped call under a file lock.

    :param lock_path: directory for the lock file; falls back to the
        EC2API_LOCK_PATH environment variable when not given
    :param lock_prefix: name of the lock file within that directory
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                path = lock_path or os.environ.get("EC2API_LOCK_PATH")
                # NOTE(review): if neither lock_path nor the env var is
                # set, `path` is None and os.path.join raises here; the
                # "released" message below is then logged even though no
                # lock was ever acquired (try/finally has no except).
                lock = lockfile.FileLock(os.path.join(path, lock_prefix))
                with lock:
                    LOG.debug('Got lock "%s"' % f.__name__)
                    return f(*args, **kwargs)
            finally:
                LOG.debug('Lock released "%s"' % f.__name__)
        return wrapper
    return decorator
|
|
||||||
|
|
||||||
|
|
||||||
class BaseMigrationTestCase(test_base.BaseTestCase):
    """Base class for testing of migration utils."""

    def __init__(self, *args, **kwargs):
        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)

        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                                'test_migrations.conf')
        # Test machines can set the TEST_MIGRATIONS_CONF variable
        # to override the location of the config file for migration testing
        self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
                                               self.DEFAULT_CONFIG_FILE)
        # Mapping of config key -> connection string, filled in setUp().
        self.test_databases = {}
        # Migration API object; expected to be set by subclasses.
        self.migration_api = None

    def setUp(self):
        super(BaseMigrationTestCase, self).setUp()

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
        if os.path.exists(self.CONFIG_FILE_PATH):
            cp = moves.configparser.RawConfigParser()
            try:
                cp.read(self.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    self.test_databases[key] = value
            except moves.configparser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config "
                      "file.")

        # One engine per configured connection string.
        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(BaseMigrationTestCase, self).tearDown()

    def execute_cmd(self, cmd=None):
        """Run a shell command, failing the test on a non-zero exit."""
        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        output = process.communicate()[0]
        LOG.debug(output)
        self.assertEqual(0, process.returncode,
                         "Failed to run: %s\n%s" % (cmd, output))

    def _reset_pg(self, conn_pieces):
        """Drop and re-create a PostgreSQL database via the psql CLI."""
        (user,
         password,
         database,
         host) = utils.get_db_connection_info(conn_pieces)
        # psql picks credentials up from the environment.
        os.environ['PGPASSWORD'] = password
        os.environ['PGUSER'] = user
        # note(boris-42): We must create and drop database, we can't
        # drop database which we have connected to, so for such
        # operations there is a special database template1.
        sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                  " '%(sql)s' -d template1")

        sql = ("drop database if exists %s;") % database
        droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(droptable)

        sql = ("create database %s;") % database
        createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(createtable)

        os.unsetenv('PGPASSWORD')
        os.unsetenv('PGUSER')

    @_set_db_lock(lock_prefix='migration_tests-')
    def _reset_databases(self):
        """Wipe every configured test database back to an empty state."""
        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
            conn_pieces = parse.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                (user, password, database, host) = \
                    utils.get_db_connection_info(conn_pieces)
                sql = ("drop database if exists %(db)s; "
                       "create database %(db)s;") % {'db': database}
                cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
                       "-e \"%(sql)s\"") % {'user': user, 'password': password,
                                            'host': host, 'sql': sql}
                self.execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                self._reset_pg(conn_pieces)
|
|
||||||
|
|
||||||
|
|
||||||
class WalkVersionsMixin(object):
    """Mixin that exercises every migration script up (and optionally down).

    Expects the host test case to provide `migration_api`, `REPOSITORY`
    and `INIT_VERSION` attributes, plus assertEqual().
    """

    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        # Place the database under version control
        self.migration_api.version_control(engine, self.REPOSITORY,
                                           self.INIT_VERSION)
        self.assertEqual(self.INIT_VERSION,
                         self.migration_api.db_version(engine,
                                                       self.REPOSITORY))

        LOG.debug('latest version is %s' % self.REPOSITORY.latest)
        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)

        for version in versions:
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version, with_data=True)
            if snake_walk:
                downgraded = self._migrate_down(
                    engine, version - 1, with_data=True)
                if downgraded:
                    self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(versions):
                # downgrade -> upgrade -> downgrade
                downgraded = self._migrate_down(engine, version - 1)

                if snake_walk and downgraded:
                    self._migrate_up(engine, version)
                    self._migrate_down(engine, version - 1)

    def _migrate_down(self, engine, version, with_data=False):
        """Downgrade to `version`; return False if downgrade unsupported."""
        try:
            self.migration_api.downgrade(engine, self.REPOSITORY, version)
        except NotImplementedError:
            # NOTE(sirp): some migrations, namely release-level
            # migrations, don't support a downgrade.
            return False

        self.assertEqual(
            version, self.migration_api.db_version(engine, self.REPOSITORY))

        # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
        # version). So if we have any downgrade checks, they need to be run for
        # the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(
                self, "_post_downgrade_%03d" % (version + 1), None)
            if post_downgrade:
                post_downgrade(engine)

        return True

    def _migrate_up(self, engine, version, with_data=False):
        """migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _pre_upgrade_### and
        _check_### functions in the main test.
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        try:
            if with_data:
                data = None
                pre_upgrade = getattr(
                    self, "_pre_upgrade_%03d" % version, None)
                if pre_upgrade:
                    data = pre_upgrade(engine)

            self.migration_api.upgrade(engine, self.REPOSITORY, version)
            self.assertEqual(version,
                             self.migration_api.db_version(engine,
                                                           self.REPOSITORY))
            if with_data:
                check = getattr(self, "_check_%03d" % version, None)
                if check:
                    check(engine, data)
        except Exception:
            LOG.error(_LE("Failed to migrate to version %(version)s "
                          "on engine %(engine)s") % {'version': version,
                                                     'engine': engine})
            raise
|
|
|
@ -1,655 +0,0 @@
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# Copyright 2010-2011 OpenStack Foundation.
|
|
||||||
# Copyright 2012 Justin Santa Barbara
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import re
|
|
||||||
|
|
||||||
import sqlalchemy
|
|
||||||
from sqlalchemy import Boolean
|
|
||||||
from sqlalchemy import CheckConstraint
|
|
||||||
from sqlalchemy import Column
|
|
||||||
from sqlalchemy.engine import reflection
|
|
||||||
from sqlalchemy.ext.compiler import compiles
|
|
||||||
from sqlalchemy import func
|
|
||||||
from sqlalchemy import Index
|
|
||||||
from sqlalchemy import Integer
|
|
||||||
from sqlalchemy import MetaData
|
|
||||||
from sqlalchemy import or_
|
|
||||||
from sqlalchemy.sql.expression import literal_column
|
|
||||||
from sqlalchemy.sql.expression import UpdateBase
|
|
||||||
from sqlalchemy import String
|
|
||||||
from sqlalchemy import Table
|
|
||||||
from sqlalchemy.types import NullType
|
|
||||||
|
|
||||||
from ec2api.openstack.common import context as request_context
|
|
||||||
from ec2api.openstack.common.db.sqlalchemy import models
|
|
||||||
from ec2api.openstack.common.gettextutils import _, _LI, _LW
|
|
||||||
from ec2api.openstack.common import timeutils
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")


def sanitize_db_url(url):
    """Mask the username and password in a database connection URL.

    Returns *url* unchanged when it does not look like a URL carrying
    credentials (``scheme://user:password@host/...``).
    """
    creds = _DBURL_REGEX.match(url)
    if creds is None:
        return url
    prefix = url[:creds.start(1)]
    suffix = url[creds.end(2):]
    return '%s****:****%s' % (prefix, suffix)
|
|
||||||
|
|
||||||
|
|
||||||
class InvalidSortKey(Exception):
    """Raised by paginate_query() when a sort key is not a model attribute."""

    message = _("Sort key supplied was not valid.")
|
|
||||||
|
|
||||||
|
|
||||||
# copy from glance/db/sqlalchemy/api.py
|
|
||||||
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we returns the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    # sort_dir (single) and sort_dirs (per-column) are mutually exclusive.
    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        # Values of the sort keys on the marker row, in sort_keys order.
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            # Equality on all keys before i, then strict inequality on
            # key i in the direction of its sort order.
            crit_attrs = []
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
|
|
||||||
|
|
||||||
|
|
||||||
def _read_deleted_filter(query, db_model, read_deleted):
    """Apply a soft-delete filter to ``query`` for ``db_model``.

    ``read_deleted`` must be 'no' (active rows only), 'yes' (all rows)
    or 'only' (soft-deleted rows only).
    """
    if 'deleted' not in db_model.__table__.columns:
        raise ValueError(_("There is no `deleted` column in `%s` table. "
                           "Project doesn't use soft-deleted feature.")
                         % db_model.__name__)

    # A row is "active" while its `deleted` column still holds the column
    # default; soft deletion overwrites that value.
    active_value = db_model.__table__.c.deleted.default.arg

    if read_deleted == 'yes':
        return query  # no filtering: deleted and active rows alike
    if read_deleted == 'no':
        return query.filter(db_model.deleted == active_value)
    if read_deleted == 'only':
        return query.filter(db_model.deleted != active_value)
    raise ValueError(_("Unrecognized read_deleted value '%s'")
                     % read_deleted)
|
|
||||||
|
|
||||||
|
|
||||||
def _project_filter(query, db_model, context, project_only):
    """Optionally restrict ``query`` to the context's project.

    ``project_only`` may be truthy, or the string 'allow_none', in which
    case rows whose project_id is NULL are matched as well.
    """
    if project_only and 'project_id' not in db_model.__table__.columns:
        raise ValueError(_("There is no `project_id` column in `%s` table.")
                         % db_model.__name__)

    # Only user-type contexts get project scoping.
    if not (project_only and request_context.is_user_context(context)):
        return query

    own_project = db_model.project_id == context.project_id
    if project_only == 'allow_none':
        is_none = None  # avoids `== None` lint warnings
        return query.filter(or_(own_project, db_model.project_id == is_none))
    return query.filter(own_project)
|
|
||||||
|
|
||||||
|
|
||||||
def model_query(context, model, session, args=None, project_only=False,
                read_deleted=None):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under

    :param model: Model to query. Must be a subclass of ModelBase.
    :type model: models.ModelBase

    :param session: The session to use.
    :type session: sqlalchemy.orm.session.Session

    :param args: Arguments to query. If None - model is used.
    :type args: tuple

    :param project_only: If present and context is user-type, then restrict
            query to match the context's project_id. If set to
            'allow_none', restriction includes project_id = None.
    :type project_only: bool

    :param read_deleted: If present, overrides context's read_deleted field.
    :type read_deleted: bool

    Usage:

    ..code:: python

        result = (utils.model_query(context, models.Instance, session=session)
                       .filter_by(uuid=instance_uuid)
                       .all())

        query = utils.model_query(
                    context, Node,
                    session=session,
                    args=(func.count(Node.id), func.sum(Node.ram))
                    ).filter_by(project_id=project_id)

    """
    if not read_deleted:
        # NOTE(viktors): some projects use `read_deleted` attribute in
        #                their contexts instead of `show_deleted`.
        read_deleted = (context.read_deleted
                        if hasattr(context, 'read_deleted')
                        else context.show_deleted)

    if not issubclass(model, models.ModelBase):
        raise TypeError(_("model should be a subclass of ModelBase"))

    query = session.query(*args) if args else session.query(model)
    query = _read_deleted_filter(query, model, read_deleted)
    return _project_filter(query, model, context, project_only)
|
|
||||||
|
|
||||||
|
|
||||||
def get_table(engine, name):
    """Returns an sqlalchemy table dynamically from db.

    Needed because the models don't work for us in migrations
    as models will be far out of sync with the current data.

    .. warning::

       Do not use this method when creating ForeignKeys in database
       migrations because sqlalchemy needs the same MetaData object to hold
       information about the parent table and the reference table in the
       ForeignKey. This method uses a unique MetaData object per table
       object so it won't work with ForeignKey creation.
    """
    metadata = MetaData()
    metadata.bind = engine
    # autoload=True reflects the table's current schema from the live DB.
    return Table(name, metadata, autoload=True)
|
|
||||||
|
|
||||||
|
|
||||||
class InsertFromSelect(UpdateBase):
    """Form the base for `INSERT INTO table (SELECT ... )` statement."""

    def __init__(self, table, select):
        # Destination table the rows are inserted into.
        self.table = table
        # Source SELECT statement providing the rows.
        self.select = select
|
|
||||||
|
|
||||||
|
|
||||||
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
    """Form the `INSERT INTO table (SELECT ... )` statement."""
    # Render the destination table (asfrom=True yields the bare table
    # reference) followed by the fully compiled SELECT.
    return "INSERT INTO %s %s" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.select))
|
|
||||||
|
|
||||||
|
|
||||||
class ColumnError(Exception):
    """Error raised when no column or an invalid column is found."""
|
|
||||||
|
|
||||||
|
|
||||||
def _get_not_supported_column(col_name_col_instance, column_name):
    """Return the caller-supplied Column for a type sqlite can't reflect.

    :param col_name_col_instance: mapping of column name -> sqlalchemy.Column
    :param column_name: name of the column to look up
    :raises ColumnError: if the column is missing from the mapping or the
        mapped value is not a sqlalchemy.Column instance
    """
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        # Fixed stray ')' that terminated the original message.
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite.")
        raise ColumnError(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s It should be instance "
                "of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column
|
|
||||||
|
|
||||||
|
|
||||||
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop unique constraint from table.

    DEPRECATED: this function is deprecated and will be removed from ec2api.db
    in a few releases. Please use UniqueConstraint.drop() method directly for
    sqlalchemy-migrate migration scripts.

    This method drops UC from table and works for mysql, postgresql and sqlite.
    In mysql and postgresql we are able to use "alter table" construction.
    Sqlalchemy doesn't support some sqlite column types and replaces their
    type with NullType in metadata. We process these columns and replace
    NullType with the correct column type.

    :param migrate_engine: sqlalchemy engine
    :param table_name: name of table that contains uniq constraint.
    :param uc_name: name of uniq constraint that will be dropped.
    :param columns: columns that are in uniq constraint.
    :param col_name_col_instance: contains pair column_name=column_instance.
                            column_instance is instance of Column. These params
                            are required only for columns that have unsupported
                            types by sqlite. For example BigInteger.
    """
    from migrate.changeset import UniqueConstraint

    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)

    if migrate_engine.name == "sqlite":
        # sqlalchemy reflects sqlite-unsupported column types as NullType;
        # swap in the caller-supplied real column definitions first.
        for col in list(t.columns):
            if isinstance(col.type, NullType):
                replacement = _get_not_supported_column(
                    col_name_col_instance, col.name)
                t.columns.replace(replacement)

    uc = UniqueConstraint(*columns, table=t, name=uc_name)
    uc.drop()
|
|
||||||
|
|
||||||
|
|
||||||
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from table with name `table_name`.

    :param migrate_engine: Sqlalchemy engine
    :param table_name: Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    # Groups of rows sharing the same unique-constraint values, with the
    # max id (the row to keep) selected first.
    duplicated_rows_select = sqlalchemy.sql.select(
        columns_for_select, group_by=columns_for_group_by,
        having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = sqlalchemy.sql.select(
            [table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            # Lazy %-args (oslo logging guideline) instead of eager `%`.
            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                         "%(table)s"), dict(id=row[0], table=table_name))

        if use_soft_delete:
            # Soft delete: keep the row, set `deleted` to the row's id and
            # stamp deleted_at; updated_at is deliberately left unchanged.
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
|
|
||||||
|
|
||||||
|
|
||||||
def _get_default_deleted_value(table):
    """Return the "not deleted" marker matching the id column's type."""
    id_type = table.c.id.type
    if isinstance(id_type, Integer):
        return 0
    if isinstance(id_type, String):
        return ""
    raise ColumnError(_("Unsupported id columns type"))
|
|
||||||
|
|
||||||
|
|
||||||
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
    """Recreate the given indexes that involve the `deleted` column.

    Any pre-existing index with the same name is dropped first so the new
    definition takes effect.
    """
    table = get_table(migrate_engine, table_name)

    inspector = reflection.Inspector.from_engine(migrate_engine)
    current = dict((idx['name'], idx['column_names'])
                   for idx in inspector.get_indexes(table_name))

    # NOTE(boris-42): Restore indexes on `deleted` column
    for index in indexes:
        if 'deleted' not in index['column_names']:
            continue
        name = index['name']
        if name in current:
            cols = [table.c[c] for c in current[name]]
            Index(name, *cols, unique=index["unique"]).drop(migrate_engine)

        cols = [table.c[c] for c in index['column_names']]
        Index(name, *cols, unique=index["unique"]).create(migrate_engine)
|
|
||||||
|
|
||||||
|
|
||||||
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
                                          **col_name_col_instance):
    """Migrate the `deleted` column from id-typed values to a Boolean flag.

    Rows whose `deleted` equals their `id` are treated as soft-deleted.
    """
    if migrate_engine.name == "sqlite":
        # sqlite cannot ALTER a column in place; use the rebuild-table path.
        return _change_deleted_column_type_to_boolean_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    # Snapshot index definitions before the column shuffle invalidates them.
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    old_deleted = Column('old_deleted', Boolean, default=False)
    old_deleted.create(table, populate_default=False)

    # Soft-deleted rows store their own id in `deleted`.
    table.update().\
        where(table.c.deleted == table.c.id).\
        values(old_deleted=True).\
        execute()

    # Replace the old column with the freshly populated boolean one.
    table.c.deleted.drop()
    table.c.old_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
|
|
||||||
|
|
||||||
|
|
||||||
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    """sqlite variant: rebuild the table with a Boolean `deleted` column.

    sqlite cannot alter column types, so a `<table>__tmp__` copy is created,
    rows are copied over (translating `deleted == id` into True), the
    original table is dropped and the copy renamed back.
    """
    insp = reflection.Inspector.from_engine(migrate_engine)
    table = get_table(migrate_engine, table_name)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # Type unsupported by sqlite: the caller must supply it.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)

    constraints = [constraint.copy() for constraint in table.constraints]

    meta = table.metadata
    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    # Select every column as-is, but emit the boolean expression
    # `deleted == id` in place of the old id-typed `deleted` value.
    c_select = []
    for c in table.c:
        if c.name != "deleted":
            c_select.append(c)
        else:
            c_select.append(table.c.deleted == table.c.id)

    ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
    migrate_engine.execute(ins)

    table.drop()
    # Indexes can only be created once the old table (same index names)
    # is gone.
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    new_table.update().\
        where(new_table.c.deleted == new_table.c.id).\
        values(deleted=True).\
        execute()
|
|
||||||
|
|
||||||
|
|
||||||
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
                                          **col_name_col_instance):
    """Migrate the `deleted` column from Boolean to the id column's type.

    Soft-deleted rows get `deleted = id`; active rows keep the type's
    default "not deleted" value.
    """
    if migrate_engine.name == "sqlite":
        # sqlite cannot ALTER a column in place; use the rebuild-table path.
        return _change_deleted_column_type_to_id_type_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    # Snapshot index definitions before the column shuffle invalidates them.
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    new_deleted.create(table, populate_default=True)

    deleted = True  # workaround for pyflakes
    table.update().\
        where(table.c.deleted == deleted).\
        values(new_deleted=table.c.id).\
        execute()
    # Replace the old boolean column with the freshly populated one.
    table.c.deleted.drop()
    table.c.new_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
|
|
||||||
|
|
||||||
|
|
||||||
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    """sqlite variant: rebuild the table with an id-typed `deleted` column."""
    # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check
    #                 constraints in sqlite DB and our `deleted` column has
    #                 2 check constraints. So there is only one way to remove
    #                 these constraints:
    #                 1) Create new table with the same columns, constraints
    #                 and indexes. (except deleted column).
    #                 2) Copy all data from old to new table.
    #                 3) Drop old table.
    #                 4) Rename new table to old table name.
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)
    default_deleted_value = _get_default_deleted_value(table)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # Type unsupported by sqlite: the caller must supply it.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)

    def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check is CheckConstraint
        #                 associated with deleted column.
        if not isinstance(constraint, CheckConstraint):
            return False
        sqltext = str(constraint.sqltext)
        # Matches both the literal and bound-parameter renderings of the
        # old boolean check constraint.
        return (sqltext.endswith("deleted in (0, 1)") or
                sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))

    constraints = []
    for constraint in table.constraints:
        if not is_deleted_column_constraint(constraint):
            constraints.append(constraint.copy())

    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)

    table.drop()
    # Indexes can only be created once the old table (same index names)
    # is gone.
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    deleted = True  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=new_table.c.id).\
        execute()

    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
    deleted = False  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=default_deleted_value).\
        execute()
|
|
||||||
|
|
||||||
|
|
||||||
def get_connect_string(backend, database, user=None, passwd=None):
    """Get database connection

    Try to get a connection with a very specific set of values, if we get
    these then we'll run the tests, otherwise they are skipped
    """
    # sqlite URIs carry no credentials or host component.
    if backend == 'sqlite':
        return '%s:///%s' % (backend, database)
    return '%s://%s:%s@localhost/%s' % (backend, user, passwd, database)
|
|
||||||
|
|
||||||
|
|
||||||
def is_backend_avail(backend, database, user=None, passwd=None):
    """Return True if a test connection to the backend can be opened."""
    try:
        uri = get_connect_string(backend=backend,
                                 database=database,
                                 user=user,
                                 passwd=passwd)
        engine = sqlalchemy.create_engine(uri)
        connection = engine.connect()
    except Exception:
        # intentionally catch all to handle exceptions even if we don't
        # have any backend code loaded.
        return False
    connection.close()
    engine.dispose()
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def get_db_connection_info(conn_pieces):
    """Extract ``(user, password, database, host)`` from a parsed DB URI.

    :param conn_pieces: a ``urlparse()`` result for a connection string of
        the form ``backend://user[:password]@host/database``
    """
    database = conn_pieces.path.strip('/')

    netloc_parts = conn_pieces.netloc.split('@')
    host = netloc_parts[1]

    auth_parts = netloc_parts[0].split(':')
    user = auth_parts[0]
    password = auth_parts[1].strip() if len(auth_parts) > 1 else ""

    return (user, password, database, host)
|
|
|
@ -16,21 +16,21 @@
|
||||||
|
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import copy
|
||||||
import errno
|
import errno
|
||||||
import gc
|
import gc
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import pprint
|
import pprint
|
||||||
import socket
|
import socket
|
||||||
import sys
|
import sys
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import eventlet.backdoor
|
import eventlet.backdoor
|
||||||
import greenlet
|
import greenlet
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
|
||||||
from ec2api.openstack.common.gettextutils import _LI
|
from ec2api.openstack.common._i18n import _LI
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
help_for_backdoor_port = (
|
help_for_backdoor_port = (
|
||||||
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
|
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
|
||||||
|
@ -49,6 +49,12 @@ CONF.register_opts(eventlet_backdoor_opts)
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
"""Entry point for oslo-config-generator.
|
||||||
|
"""
|
||||||
|
return [(None, copy.deepcopy(eventlet_backdoor_opts))]
|
||||||
|
|
||||||
|
|
||||||
class EventletBackdoorConfigValueError(Exception):
|
class EventletBackdoorConfigValueError(Exception):
|
||||||
def __init__(self, port_range, help_msg, ex):
|
def __init__(self, port_range, help_msg, ex):
|
||||||
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
|
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
|
||||||
|
|
|
@ -1,479 +0,0 @@
|
||||||
# Copyright 2012 Red Hat, Inc.
|
|
||||||
# Copyright 2013 IBM Corp.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
gettext for openstack-common modules.
|
|
||||||
|
|
||||||
Usual usage in an openstack.common module:
|
|
||||||
|
|
||||||
from ec2api.openstack.common.gettextutils import _
|
|
||||||
"""
|
|
||||||
|
|
||||||
import copy
|
|
||||||
import gettext
|
|
||||||
import locale
|
|
||||||
from logging import handlers
|
|
||||||
import os
|
|
||||||
|
|
||||||
from babel import localedata
|
|
||||||
import six
|
|
||||||
|
|
||||||
_AVAILABLE_LANGUAGES = {}
|
|
||||||
|
|
||||||
# FIXME(dhellmann): Remove this when moving to oslo.i18n.
|
|
||||||
USE_LAZY = False
|
|
||||||
|
|
||||||
|
|
||||||
class TranslatorFactory(object):
    """Create translator functions for a gettext domain."""

    def __init__(self, domain, localedir=None):
        """Establish a set of translation functions for the domain.

        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param localedir: Directory with translation catalogs.
        :type localedir: str
        """
        self.domain = domain
        if localedir is None:
            # e.g. EC2API_LOCALEDIR for the 'ec2api' domain.
            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
        self.localedir = localedir

    def _make_translation_func(self, domain=None):
        """Return a new translation function ready for use.

        Takes into account whether or not lazy translation is being
        done.

        The domain can be specified to override the default from the
        factory, but the localedir from the factory is always used
        because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.

        """
        if domain is None:
            domain = self.domain
        t = gettext.translation(domain,
                                localedir=self.localedir,
                                fallback=True)
        # Use the appropriate method of the translation object based
        # on the python version.
        m = t.gettext if six.PY3 else t.ugettext

        def f(msg):
            """oslo.i18n.gettextutils translation function."""
            # USE_LAZY is a module-level flag toggled by enable_lazy().
            if USE_LAZY:
                return Message(msg, domain=domain)
            return m(msg)
        return f

    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()

    def _make_log_translation_func(self, level):
        # Log-level catalogs live in '<domain>-log-<level>' domains.
        return self._make_translation_func(self.domain + '-log-' + level)

    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')

    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')

    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')

    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')
|
|
||||||
|
|
||||||
|
|
||||||
# NOTE(dhellmann): When this module moves out of the incubator into
|
|
||||||
# oslo.i18n, these global variables can be moved to an integration
|
|
||||||
# module within each application.
|
|
||||||
|
|
||||||
# Create the global translation functions.
_translators = TranslatorFactory('ec2api')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
|
|
||||||
|
|
||||||
# NOTE(dhellmann): End of globals that will move to the application's
|
|
||||||
# integration module.
|
|
||||||
|
|
||||||
|
|
||||||
def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    # Flips the module-level flag read by the translation functions
    # produced in TranslatorFactory._make_translation_func.
    global USE_LAZY
    USE_LAZY = True
|
|
||||||
|
|
||||||
|
|
||||||
def install(domain):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    Note that to enable lazy translation, enable_lazy must be
    called.

    :param domain: the translation domain
    """
    from six import moves
    tf = TranslatorFactory(domain)
    # Make `_` available as a builtin in every module, like
    # gettext.install() does.
    moves.builtins.__dict__['_'] = tf.primary
|
|
||||||
|
|
||||||
|
|
||||||
class Message(six.text_type):
|
|
||||||
"""A Message object is a unicode object that can be translated.
|
|
||||||
|
|
||||||
Translation of Message is done explicitly using the translate() method.
|
|
||||||
For all non-translation intents and purposes, a Message is simply unicode,
|
|
||||||
and can be treated as such.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __new__(cls, msgid, msgtext=None, params=None,
|
|
||||||
domain='ec2api', *args):
|
|
||||||
"""Create a new Message object.
|
|
||||||
|
|
||||||
In order for translation to work gettext requires a message ID, this
|
|
||||||
msgid will be used as the base unicode text. It is also possible
|
|
||||||
for the msgid and the base unicode text to be different by passing
|
|
||||||
the msgtext parameter.
|
|
||||||
"""
|
|
||||||
# If the base msgtext is not given, we use the default translation
|
|
||||||
# of the msgid (which is in English) just in case the system locale is
|
|
||||||
# not English, so that the base text will be in that locale by default.
|
|
||||||
if not msgtext:
|
|
||||||
msgtext = Message._translate_msgid(msgid, domain)
|
|
||||||
# We want to initialize the parent unicode with the actual object that
|
|
||||||
# would have been plain unicode if 'Message' was not enabled.
|
|
||||||
msg = super(Message, cls).__new__(cls, msgtext)
|
|
||||||
msg.msgid = msgid
|
|
||||||
msg.domain = domain
|
|
||||||
msg.params = params
|
|
||||||
return msg
|
|
||||||
|
|
||||||
def translate(self, desired_locale=None):
|
|
||||||
"""Translate this message to the desired locale.
|
|
||||||
|
|
||||||
:param desired_locale: The desired locale to translate the message to,
|
|
||||||
if no locale is provided the message will be
|
|
||||||
translated to the system's default locale.
|
|
||||||
|
|
||||||
:returns: the translated message in unicode
|
|
||||||
"""
|
|
||||||
|
|
||||||
translated_message = Message._translate_msgid(self.msgid,
|
|
||||||
self.domain,
|
|
||||||
desired_locale)
|
|
||||||
if self.params is None:
|
|
||||||
# No need for more translation
|
|
||||||
return translated_message
|
|
||||||
|
|
||||||
# This Message object may have been formatted with one or more
|
|
||||||
# Message objects as substitution arguments, given either as a single
|
|
||||||
# argument, part of a tuple, or as one or more values in a dictionary.
|
|
||||||
# When translating this Message we need to translate those Messages too
|
|
||||||
translated_params = _translate_args(self.params, desired_locale)
|
|
||||||
|
|
||||||
translated_message = translated_message % translated_params
|
|
||||||
|
|
||||||
return translated_message
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _translate_msgid(msgid, domain, desired_locale=None):
|
|
||||||
if not desired_locale:
|
|
||||||
system_locale = locale.getdefaultlocale()
|
|
||||||
# If the system locale is not available to the runtime use English
|
|
||||||
if not system_locale[0]:
|
|
||||||
desired_locale = 'en_US'
|
|
||||||
else:
|
|
||||||
desired_locale = system_locale[0]
|
|
||||||
|
|
||||||
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
|
|
||||||
lang = gettext.translation(domain,
|
|
||||||
localedir=locale_dir,
|
|
||||||
languages=[desired_locale],
|
|
||||||
fallback=True)
|
|
||||||
if six.PY3:
|
|
||||||
translator = lang.gettext
|
|
||||||
else:
|
|
||||||
translator = lang.ugettext
|
|
||||||
|
|
||||||
translated_message = translator(msgid)
|
|
||||||
return translated_message
|
|
||||||
|
|
||||||
def __mod__(self, other):
|
|
||||||
# When we mod a Message we want the actual operation to be performed
|
|
||||||
# by the parent class (i.e. unicode()), the only thing we do here is
|
|
||||||
# save the original msgid and the parameters in case of a translation
|
|
||||||
params = self._sanitize_mod_params(other)
|
|
||||||
unicode_mod = super(Message, self).__mod__(params)
|
|
||||||
modded = Message(self.msgid,
|
|
||||||
msgtext=unicode_mod,
|
|
||||||
params=params,
|
|
||||||
domain=self.domain)
|
|
||||||
return modded
|
|
||||||
|
|
||||||
def _sanitize_mod_params(self, other):
|
|
||||||
"""Sanitize the object being modded with this Message.
|
|
||||||
|
|
||||||
- Add support for modding 'None' so translation supports it
|
|
||||||
- Trim the modded object, which can be a large dictionary, to only
|
|
||||||
those keys that would actually be used in a translation
|
|
||||||
- Snapshot the object being modded, in case the message is
|
|
||||||
translated, it will be used as it was when the Message was created
|
|
||||||
"""
|
|
||||||
if other is None:
|
|
||||||
params = (other,)
|
|
||||||
elif isinstance(other, dict):
|
|
||||||
# Merge the dictionaries
|
|
||||||
# Copy each item in case one does not support deep copy.
|
|
||||||
params = {}
|
|
||||||
if isinstance(self.params, dict):
|
|
||||||
for key, val in self.params.items():
|
|
||||||
params[key] = self._copy_param(val)
|
|
||||||
for key, val in other.items():
|
|
||||||
params[key] = self._copy_param(val)
|
|
||||||
else:
|
|
||||||
params = self._copy_param(other)
|
|
||||||
return params
|
|
||||||
|
|
||||||
def _copy_param(self, param):
|
|
||||||
try:
|
|
||||||
return copy.deepcopy(param)
|
|
||||||
except Exception:
|
|
||||||
# Fallback to casting to unicode this will handle the
|
|
||||||
# python code-like objects that can't be deep-copied
|
|
||||||
return six.text_type(param)
|
|
||||||
|
|
||||||
def __add__(self, other):
|
|
||||||
msg = _('Message objects do not support addition.')
|
|
||||||
raise TypeError(msg)
|
|
||||||
|
|
||||||
def __radd__(self, other):
|
|
||||||
return self.__add__(other)
|
|
||||||
|
|
||||||
if six.PY2:
|
|
||||||
def __str__(self):
|
|
||||||
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
|
|
||||||
# and it expects specifically a UnicodeError in order to proceed.
|
|
||||||
msg = _('Message objects do not support str() because they may '
|
|
||||||
'contain non-ascii characters. '
|
|
||||||
'Please use unicode() or translate() instead.')
|
|
||||||
raise UnicodeError(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def get_available_languages(domain):
|
|
||||||
"""Lists the available languages for the given translation domain.
|
|
||||||
|
|
||||||
:param domain: the domain to get languages for
|
|
||||||
"""
|
|
||||||
if domain in _AVAILABLE_LANGUAGES:
|
|
||||||
return copy.copy(_AVAILABLE_LANGUAGES[domain])
|
|
||||||
|
|
||||||
localedir = '%s_LOCALEDIR' % domain.upper()
|
|
||||||
find = lambda x: gettext.find(domain,
|
|
||||||
localedir=os.environ.get(localedir),
|
|
||||||
languages=[x])
|
|
||||||
|
|
||||||
# NOTE(mrodden): en_US should always be available (and first in case
|
|
||||||
# order matters) since our in-line message strings are en_US
|
|
||||||
language_list = ['en_US']
|
|
||||||
# NOTE(luisg): Babel <1.0 used a function called list(), which was
|
|
||||||
# renamed to locale_identifiers() in >=1.0, the requirements master list
|
|
||||||
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
|
|
||||||
# this check when the master list updates to >=1.0, and update all projects
|
|
||||||
list_identifiers = (getattr(localedata, 'list', None) or
|
|
||||||
getattr(localedata, 'locale_identifiers'))
|
|
||||||
locale_identifiers = list_identifiers()
|
|
||||||
|
|
||||||
for i in locale_identifiers:
|
|
||||||
if find(i) is not None:
|
|
||||||
language_list.append(i)
|
|
||||||
|
|
||||||
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
|
|
||||||
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
|
|
||||||
# are perfectly legitimate locales:
|
|
||||||
# https://github.com/mitsuhiko/babel/issues/37
|
|
||||||
# In Babel 1.3 they fixed the bug and they support these locales, but
|
|
||||||
# they are still not explicitly "listed" by locale_identifiers().
|
|
||||||
# That is why we add the locales here explicitly if necessary so that
|
|
||||||
# they are listed as supported.
|
|
||||||
aliases = {'zh': 'zh_CN',
|
|
||||||
'zh_Hant_HK': 'zh_HK',
|
|
||||||
'zh_Hant': 'zh_TW',
|
|
||||||
'fil': 'tl_PH'}
|
|
||||||
for (locale_, alias) in six.iteritems(aliases):
|
|
||||||
if locale_ in language_list and alias not in language_list:
|
|
||||||
language_list.append(alias)
|
|
||||||
|
|
||||||
_AVAILABLE_LANGUAGES[domain] = language_list
|
|
||||||
return copy.copy(language_list)
|
|
||||||
|
|
||||||
|
|
||||||
def translate(obj, desired_locale=None):
|
|
||||||
"""Gets the translated unicode representation of the given object.
|
|
||||||
|
|
||||||
If the object is not translatable it is returned as-is.
|
|
||||||
If the locale is None the object is translated to the system locale.
|
|
||||||
|
|
||||||
:param obj: the object to translate
|
|
||||||
:param desired_locale: the locale to translate the message to, if None the
|
|
||||||
default system locale will be used
|
|
||||||
:returns: the translated object in unicode, or the original object if
|
|
||||||
it could not be translated
|
|
||||||
"""
|
|
||||||
message = obj
|
|
||||||
if not isinstance(message, Message):
|
|
||||||
# If the object to translate is not already translatable,
|
|
||||||
# let's first get its unicode representation
|
|
||||||
message = six.text_type(obj)
|
|
||||||
if isinstance(message, Message):
|
|
||||||
# Even after unicoding() we still need to check if we are
|
|
||||||
# running with translatable unicode before translating
|
|
||||||
return message.translate(desired_locale)
|
|
||||||
return obj
|
|
||||||
|
|
||||||
|
|
||||||
def _translate_args(args, desired_locale=None):
|
|
||||||
"""Translates all the translatable elements of the given arguments object.
|
|
||||||
|
|
||||||
This method is used for translating the translatable values in method
|
|
||||||
arguments which include values of tuples or dictionaries.
|
|
||||||
If the object is not a tuple or a dictionary the object itself is
|
|
||||||
translated if it is translatable.
|
|
||||||
|
|
||||||
If the locale is None the object is translated to the system locale.
|
|
||||||
|
|
||||||
:param args: the args to translate
|
|
||||||
:param desired_locale: the locale to translate the args to, if None the
|
|
||||||
default system locale will be used
|
|
||||||
:returns: a new args object with the translated contents of the original
|
|
||||||
"""
|
|
||||||
if isinstance(args, tuple):
|
|
||||||
return tuple(translate(v, desired_locale) for v in args)
|
|
||||||
if isinstance(args, dict):
|
|
||||||
translated_dict = {}
|
|
||||||
for (k, v) in six.iteritems(args):
|
|
||||||
translated_v = translate(v, desired_locale)
|
|
||||||
translated_dict[k] = translated_v
|
|
||||||
return translated_dict
|
|
||||||
return translate(args, desired_locale)
|
|
||||||
|
|
||||||
|
|
||||||
class TranslationHandler(handlers.MemoryHandler):
|
|
||||||
"""Handler that translates records before logging them.
|
|
||||||
|
|
||||||
The TranslationHandler takes a locale and a target logging.Handler object
|
|
||||||
to forward LogRecord objects to after translating them. This handler
|
|
||||||
depends on Message objects being logged, instead of regular strings.
|
|
||||||
|
|
||||||
The handler can be configured declaratively in the logging.conf as follows:
|
|
||||||
|
|
||||||
[handlers]
|
|
||||||
keys = translatedlog, translator
|
|
||||||
|
|
||||||
[handler_translatedlog]
|
|
||||||
class = handlers.WatchedFileHandler
|
|
||||||
args = ('/var/log/api-localized.log',)
|
|
||||||
formatter = context
|
|
||||||
|
|
||||||
[handler_translator]
|
|
||||||
class = openstack.common.log.TranslationHandler
|
|
||||||
target = translatedlog
|
|
||||||
args = ('zh_CN',)
|
|
||||||
|
|
||||||
If the specified locale is not available in the system, the handler will
|
|
||||||
log in the default locale.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, locale=None, target=None):
|
|
||||||
"""Initialize a TranslationHandler
|
|
||||||
|
|
||||||
:param locale: locale to use for translating messages
|
|
||||||
:param target: logging.Handler object to forward
|
|
||||||
LogRecord objects to after translation
|
|
||||||
"""
|
|
||||||
# NOTE(luisg): In order to allow this handler to be a wrapper for
|
|
||||||
# other handlers, such as a FileHandler, and still be able to
|
|
||||||
# configure it using logging.conf, this handler has to extend
|
|
||||||
# MemoryHandler because only the MemoryHandlers' logging.conf
|
|
||||||
# parsing is implemented such that it accepts a target handler.
|
|
||||||
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
|
|
||||||
self.locale = locale
|
|
||||||
|
|
||||||
def setFormatter(self, fmt):
|
|
||||||
self.target.setFormatter(fmt)
|
|
||||||
|
|
||||||
def emit(self, record):
|
|
||||||
# We save the message from the original record to restore it
|
|
||||||
# after translation, so other handlers are not affected by this
|
|
||||||
original_msg = record.msg
|
|
||||||
original_args = record.args
|
|
||||||
|
|
||||||
try:
|
|
||||||
self._translate_and_log_record(record)
|
|
||||||
finally:
|
|
||||||
record.msg = original_msg
|
|
||||||
record.args = original_args
|
|
||||||
|
|
||||||
def _translate_and_log_record(self, record):
|
|
||||||
record.msg = translate(record.msg, self.locale)
|
|
||||||
|
|
||||||
# In addition to translating the message, we also need to translate
|
|
||||||
# arguments that were passed to the log method that were not part
|
|
||||||
# of the main message e.g., log.info(_('Some message %s'), this_one))
|
|
||||||
record.args = _translate_args(record.args, self.locale)
|
|
||||||
|
|
||||||
self.target.emit(record)
|
|
|
@ -1,73 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Import related utilities and helper functions.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
|
|
||||||
def import_class(import_str):
|
|
||||||
"""Returns a class from a string including module and class."""
|
|
||||||
mod_str, _sep, class_str = import_str.rpartition('.')
|
|
||||||
__import__(mod_str)
|
|
||||||
try:
|
|
||||||
return getattr(sys.modules[mod_str], class_str)
|
|
||||||
except AttributeError:
|
|
||||||
raise ImportError('Class %s cannot be found (%s)' %
|
|
||||||
(class_str,
|
|
||||||
traceback.format_exception(*sys.exc_info())))
|
|
||||||
|
|
||||||
|
|
||||||
def import_object(import_str, *args, **kwargs):
|
|
||||||
"""Import a class and return an instance of it."""
|
|
||||||
return import_class(import_str)(*args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def import_object_ns(name_space, import_str, *args, **kwargs):
|
|
||||||
"""Tries to import object from default namespace.
|
|
||||||
|
|
||||||
Imports a class and return an instance of it, first by trying
|
|
||||||
to find the class in a default namespace, then failing back to
|
|
||||||
a full path if not found in the default namespace.
|
|
||||||
"""
|
|
||||||
import_value = "%s.%s" % (name_space, import_str)
|
|
||||||
try:
|
|
||||||
return import_class(import_value)(*args, **kwargs)
|
|
||||||
except ImportError:
|
|
||||||
return import_class(import_str)(*args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def import_module(import_str):
|
|
||||||
"""Import a module."""
|
|
||||||
__import__(import_str)
|
|
||||||
return sys.modules[import_str]
|
|
||||||
|
|
||||||
|
|
||||||
def import_versioned_module(version, submodule=None):
|
|
||||||
module = 'ec2api.v%s' % version
|
|
||||||
if submodule:
|
|
||||||
module = '.'.join((module, submodule))
|
|
||||||
return import_module(module)
|
|
||||||
|
|
||||||
|
|
||||||
def try_import(import_str, default=None):
|
|
||||||
"""Try to import a module and if it fails return default."""
|
|
||||||
try:
|
|
||||||
return import_module(import_str)
|
|
||||||
except ImportError:
|
|
||||||
return default
|
|
|
@ -1,190 +0,0 @@
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# Copyright 2011 Justin Santa Barbara
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
'''
|
|
||||||
JSON related utilities.
|
|
||||||
|
|
||||||
This module provides a few things:
|
|
||||||
|
|
||||||
1) A handy function for getting an object down to something that can be
|
|
||||||
JSON serialized. See to_primitive().
|
|
||||||
|
|
||||||
2) Wrappers around loads() and dumps(). The dumps() wrapper will
|
|
||||||
automatically use to_primitive() for you if needed.
|
|
||||||
|
|
||||||
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
|
|
||||||
is available.
|
|
||||||
'''
|
|
||||||
|
|
||||||
|
|
||||||
import codecs
|
|
||||||
import datetime
|
|
||||||
import functools
|
|
||||||
import inspect
|
|
||||||
import itertools
|
|
||||||
import sys
|
|
||||||
|
|
||||||
if sys.version_info < (2, 7):
|
|
||||||
# On Python <= 2.6, json module is not C boosted, so try to use
|
|
||||||
# simplejson module if available
|
|
||||||
try:
|
|
||||||
import simplejson as json
|
|
||||||
except ImportError:
|
|
||||||
import json
|
|
||||||
else:
|
|
||||||
import json
|
|
||||||
|
|
||||||
import six
|
|
||||||
import six.moves.xmlrpc_client as xmlrpclib
|
|
||||||
|
|
||||||
from ec2api.openstack.common import gettextutils
|
|
||||||
from ec2api.openstack.common import importutils
|
|
||||||
from ec2api.openstack.common import strutils
|
|
||||||
from ec2api.openstack.common import timeutils
|
|
||||||
|
|
||||||
netaddr = importutils.try_import("netaddr")
|
|
||||||
|
|
||||||
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
|
|
||||||
inspect.isfunction, inspect.isgeneratorfunction,
|
|
||||||
inspect.isgenerator, inspect.istraceback, inspect.isframe,
|
|
||||||
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
|
|
||||||
inspect.isabstract]
|
|
||||||
|
|
||||||
_simple_types = (six.string_types + six.integer_types
|
|
||||||
+ (type(None), bool, float))
|
|
||||||
|
|
||||||
|
|
||||||
def to_primitive(value, convert_instances=False, convert_datetime=True,
|
|
||||||
level=0, max_depth=3):
|
|
||||||
"""Convert a complex object into primitives.
|
|
||||||
|
|
||||||
Handy for JSON serialization. We can optionally handle instances,
|
|
||||||
but since this is a recursive function, we could have cyclical
|
|
||||||
data structures.
|
|
||||||
|
|
||||||
To handle cyclical data structures we could track the actual objects
|
|
||||||
visited in a set, but not all objects are hashable. Instead we just
|
|
||||||
track the depth of the object inspections and don't go too deep.
|
|
||||||
|
|
||||||
Therefore, convert_instances=True is lossy ... be aware.
|
|
||||||
|
|
||||||
"""
|
|
||||||
# handle obvious types first - order of basic types determined by running
|
|
||||||
# full tests on nova project, resulting in the following counts:
|
|
||||||
# 572754 <type 'NoneType'>
|
|
||||||
# 460353 <type 'int'>
|
|
||||||
# 379632 <type 'unicode'>
|
|
||||||
# 274610 <type 'str'>
|
|
||||||
# 199918 <type 'dict'>
|
|
||||||
# 114200 <type 'datetime.datetime'>
|
|
||||||
# 51817 <type 'bool'>
|
|
||||||
# 26164 <type 'list'>
|
|
||||||
# 6491 <type 'float'>
|
|
||||||
# 283 <type 'tuple'>
|
|
||||||
# 19 <type 'long'>
|
|
||||||
if isinstance(value, _simple_types):
|
|
||||||
return value
|
|
||||||
|
|
||||||
if isinstance(value, datetime.datetime):
|
|
||||||
if convert_datetime:
|
|
||||||
return timeutils.strtime(value)
|
|
||||||
else:
|
|
||||||
return value
|
|
||||||
|
|
||||||
# value of itertools.count doesn't get caught by nasty_type_tests
|
|
||||||
# and results in infinite loop when list(value) is called.
|
|
||||||
if type(value) == itertools.count:
|
|
||||||
return six.text_type(value)
|
|
||||||
|
|
||||||
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
|
|
||||||
# tests that raise an exception in a mocked method that
|
|
||||||
# has a @wrap_exception with a notifier will fail. If
|
|
||||||
# we up the dependency to 0.5.4 (when it is released) we
|
|
||||||
# can remove this workaround.
|
|
||||||
if getattr(value, '__module__', None) == 'mox':
|
|
||||||
return 'mock'
|
|
||||||
|
|
||||||
if level > max_depth:
|
|
||||||
return '?'
|
|
||||||
|
|
||||||
# The try block may not be necessary after the class check above,
|
|
||||||
# but just in case ...
|
|
||||||
try:
|
|
||||||
recursive = functools.partial(to_primitive,
|
|
||||||
convert_instances=convert_instances,
|
|
||||||
convert_datetime=convert_datetime,
|
|
||||||
level=level,
|
|
||||||
max_depth=max_depth)
|
|
||||||
if isinstance(value, dict):
|
|
||||||
return dict((k, recursive(v)) for k, v in six.iteritems(value))
|
|
||||||
elif isinstance(value, (list, tuple)):
|
|
||||||
return [recursive(lv) for lv in value]
|
|
||||||
|
|
||||||
# It's not clear why xmlrpclib created their own DateTime type, but
|
|
||||||
# for our purposes, make it a datetime type which is explicitly
|
|
||||||
# handled
|
|
||||||
if isinstance(value, xmlrpclib.DateTime):
|
|
||||||
value = datetime.datetime(*tuple(value.timetuple())[:6])
|
|
||||||
|
|
||||||
if convert_datetime and isinstance(value, datetime.datetime):
|
|
||||||
return timeutils.strtime(value)
|
|
||||||
elif isinstance(value, gettextutils.Message):
|
|
||||||
return value.data
|
|
||||||
elif hasattr(value, 'iteritems'):
|
|
||||||
return recursive(dict(value.iteritems()), level=level + 1)
|
|
||||||
elif hasattr(value, '__iter__'):
|
|
||||||
return recursive(list(value))
|
|
||||||
elif convert_instances and hasattr(value, '__dict__'):
|
|
||||||
# Likely an instance of something. Watch for cycles.
|
|
||||||
# Ignore class member vars.
|
|
||||||
return recursive(value.__dict__, level=level + 1)
|
|
||||||
elif netaddr and isinstance(value, netaddr.IPAddress):
|
|
||||||
return six.text_type(value)
|
|
||||||
else:
|
|
||||||
if any(test(value) for test in _nasty_type_tests):
|
|
||||||
return six.text_type(value)
|
|
||||||
return value
|
|
||||||
except TypeError:
|
|
||||||
# Class objects are tricky since they may define something like
|
|
||||||
# __iter__ defined but it isn't callable as list().
|
|
||||||
return six.text_type(value)
|
|
||||||
|
|
||||||
|
|
||||||
def dumps(value, default=to_primitive, **kwargs):
|
|
||||||
return json.dumps(value, default=default, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def dump(obj, fp, *args, **kwargs):
|
|
||||||
return json.dump(obj, fp, *args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def loads(s, encoding='utf-8', **kwargs):
|
|
||||||
return json.loads(strutils.safe_decode(s, encoding), **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def load(fp, encoding='utf-8', **kwargs):
|
|
||||||
return json.load(codecs.getreader(encoding)(fp), **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
import anyjson
|
|
||||||
except ImportError:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
anyjson._modules.append((__name__, 'dumps', TypeError,
|
|
||||||
'loads', ValueError, 'load'))
|
|
||||||
anyjson.force_implementation(__name__)
|
|
|
@ -1,689 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""OpenStack logging handler.
|
|
||||||
|
|
||||||
This module adds to logging functionality by adding the option to specify
|
|
||||||
a context object when calling the various log methods. If the context object
|
|
||||||
is not specified, default formatting is used. Additionally, an instance uuid
|
|
||||||
may be passed as part of the log message, which is intended to make it easier
|
|
||||||
for admins to find messages related to a specific instance.
|
|
||||||
|
|
||||||
It also allows setting of formatting information through conf.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
import inspect
|
|
||||||
import itertools
|
|
||||||
import logging
|
|
||||||
import logging.config
|
|
||||||
import logging.handlers
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
from six import moves
|
|
||||||
|
|
||||||
from ec2api.openstack.common.gettextutils import _
|
|
||||||
from ec2api.openstack.common import importutils
|
|
||||||
from ec2api.openstack.common import jsonutils
|
|
||||||
from ec2api.openstack.common import local
|
|
||||||
|
|
||||||
|
|
||||||
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
|
|
||||||
|
|
||||||
|
|
||||||
common_cli_opts = [
|
|
||||||
cfg.BoolOpt('debug',
|
|
||||||
short='d',
|
|
||||||
default=False,
|
|
||||||
help='Print debugging output (set logging level to '
|
|
||||||
'DEBUG instead of default WARNING level).'),
|
|
||||||
cfg.BoolOpt('verbose',
|
|
||||||
short='v',
|
|
||||||
default=False,
|
|
||||||
help='Print more verbose output (set logging level to '
|
|
||||||
'INFO instead of default WARNING level).'),
|
|
||||||
]
|
|
||||||
|
|
||||||
logging_cli_opts = [
|
|
||||||
cfg.StrOpt('log-config-append',
|
|
||||||
metavar='PATH',
|
|
||||||
deprecated_name='log-config',
|
|
||||||
help='The name of a logging configuration file. This file '
|
|
||||||
'is appended to any existing logging configuration '
|
|
||||||
'files. For details about logging configuration files, '
|
|
||||||
'see the Python logging module documentation.'),
|
|
||||||
cfg.StrOpt('log-format',
|
|
||||||
metavar='FORMAT',
|
|
||||||
help='DEPRECATED. '
|
|
||||||
'A logging.Formatter log message format string which may '
|
|
||||||
'use any of the available logging.LogRecord attributes. '
|
|
||||||
'This option is deprecated. Please use '
|
|
||||||
'logging_context_format_string and '
|
|
||||||
'logging_default_format_string instead.'),
|
|
||||||
cfg.StrOpt('log-date-format',
|
|
||||||
default=_DEFAULT_LOG_DATE_FORMAT,
|
|
||||||
metavar='DATE_FORMAT',
|
|
||||||
help='Format string for %%(asctime)s in log records. '
|
|
||||||
'Default: %(default)s .'),
|
|
||||||
cfg.StrOpt('log-file',
|
|
||||||
metavar='PATH',
|
|
||||||
deprecated_name='logfile',
|
|
||||||
help='(Optional) Name of log file to output to. '
|
|
||||||
'If no default is set, logging will go to stdout.'),
|
|
||||||
cfg.StrOpt('log-dir',
|
|
||||||
deprecated_name='logdir',
|
|
||||||
help='(Optional) The base directory used for relative '
|
|
||||||
'--log-file paths.'),
|
|
||||||
cfg.BoolOpt('use-syslog',
|
|
||||||
default=False,
|
|
||||||
help='Use syslog for logging. '
|
|
||||||
'Existing syslog format is DEPRECATED during I, '
|
|
||||||
'and will change in J to honor RFC5424.'),
|
|
||||||
cfg.BoolOpt('use-syslog-rfc-format',
|
|
||||||
# TODO(bogdando) remove or use True after existing
|
|
||||||
# syslog format deprecation in J
|
|
||||||
default=False,
|
|
||||||
help='(Optional) Enables or disables syslog rfc5424 format '
|
|
||||||
'for logging. If enabled, prefixes the MSG part of the '
|
|
||||||
'syslog message with APP-NAME (RFC5424). The '
|
|
||||||
'format without the APP-NAME is deprecated in I, '
|
|
||||||
'and will be removed in J.'),
|
|
||||||
cfg.StrOpt('syslog-log-facility',
|
|
||||||
default='LOG_USER',
|
|
||||||
help='Syslog facility to receive log lines.')
|
|
||||||
]
|
|
||||||
|
|
||||||
generic_log_opts = [
|
|
||||||
cfg.BoolOpt('use_stderr',
|
|
||||||
default=True,
|
|
||||||
help='Log output to standard error.')
|
|
||||||
]
|
|
||||||
|
|
||||||
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
|
|
||||||
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
|
|
||||||
'oslo.messaging=INFO', 'iso8601=WARN',
|
|
||||||
'requests.packages.urllib3.connectionpool=WARN',
|
|
||||||
'urllib3.connectionpool=WARN', 'websocket=WARN']
|
|
||||||
|
|
||||||
log_opts = [
|
|
||||||
cfg.StrOpt('logging_context_format_string',
|
|
||||||
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
|
|
||||||
'%(name)s [%(request_id)s %(user_identity)s] '
|
|
||||||
'%(instance)s%(message)s',
|
|
||||||
help='Format string to use for log messages with context.'),
|
|
||||||
cfg.StrOpt('logging_default_format_string',
|
|
||||||
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
|
|
||||||
'%(name)s [-] %(instance)s%(message)s',
|
|
||||||
help='Format string to use for log messages without context.'),
|
|
||||||
cfg.StrOpt('logging_debug_format_suffix',
|
|
||||||
default='%(funcName)s %(pathname)s:%(lineno)d',
|
|
||||||
help='Data to append to log format when level is DEBUG.'),
|
|
||||||
cfg.StrOpt('logging_exception_prefix',
|
|
||||||
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
|
|
||||||
'%(instance)s',
|
|
||||||
help='Prefix each line of exception output with this format.'),
|
|
||||||
cfg.ListOpt('default_log_levels',
|
|
||||||
default=DEFAULT_LOG_LEVELS,
|
|
||||||
help='List of logger=LEVEL pairs.'),
|
|
||||||
cfg.BoolOpt('publish_errors',
|
|
||||||
default=False,
|
|
||||||
help='Enables or disables publication of error events.'),
|
|
||||||
cfg.BoolOpt('fatal_deprecations',
|
|
||||||
default=False,
|
|
||||||
help='Enables or disables fatal status of deprecations.'),
|
|
||||||
|
|
||||||
# NOTE(mikal): there are two options here because sometimes we are handed
|
|
||||||
# a full instance (and could include more information), and other times we
|
|
||||||
# are just handed a UUID for the instance.
|
|
||||||
cfg.StrOpt('instance_format',
|
|
||||||
default='[instance: %(uuid)s] ',
|
|
||||||
help='The format for an instance that is passed with the log '
|
|
||||||
'message.'),
|
|
||||||
cfg.StrOpt('instance_uuid_format',
|
|
||||||
default='[instance: %(uuid)s] ',
|
|
||||||
help='The format for an instance UUID that is passed with the '
|
|
||||||
'log message.'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_cli_opts(common_cli_opts)
|
|
||||||
CONF.register_cli_opts(logging_cli_opts)
|
|
||||||
CONF.register_opts(generic_log_opts)
|
|
||||||
CONF.register_opts(log_opts)
|
|
||||||
|
|
||||||
# our new audit level
|
|
||||||
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
|
|
||||||
# module aware of it so it acts like other levels.
|
|
||||||
logging.AUDIT = logging.INFO + 1
|
|
||||||
logging.addLevelName(logging.AUDIT, 'AUDIT')
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
NullHandler = logging.NullHandler
|
|
||||||
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
|
|
||||||
class NullHandler(logging.Handler):
|
|
||||||
def handle(self, record):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def emit(self, record):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def createLock(self):
|
|
||||||
self.lock = None
|
|
||||||
|
|
||||||
|
|
||||||
def _dictify_context(context):
|
|
||||||
if context is None:
|
|
||||||
return None
|
|
||||||
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
|
|
||||||
context = context.to_dict()
|
|
||||||
return context
|
|
||||||
|
|
||||||
|
|
||||||
def _get_binary_name():
|
|
||||||
return os.path.basename(inspect.stack()[-1][1])
|
|
||||||
|
|
||||||
|
|
||||||
def _get_log_file_path(binary=None):
|
|
||||||
logfile = CONF.log_file
|
|
||||||
logdir = CONF.log_dir
|
|
||||||
|
|
||||||
if logfile and not logdir:
|
|
||||||
return logfile
|
|
||||||
|
|
||||||
if logfile and logdir:
|
|
||||||
return os.path.join(logdir, logfile)
|
|
||||||
|
|
||||||
if logdir:
|
|
||||||
binary = binary or _get_binary_name()
|
|
||||||
return '%s.log' % (os.path.join(logdir, binary),)
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
class BaseLoggerAdapter(logging.LoggerAdapter):
|
|
||||||
|
|
||||||
def audit(self, msg, *args, **kwargs):
|
|
||||||
self.log(logging.AUDIT, msg, *args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class LazyAdapter(BaseLoggerAdapter):
|
|
||||||
def __init__(self, name='unknown', version='unknown'):
|
|
||||||
self._logger = None
|
|
||||||
self.extra = {}
|
|
||||||
self.name = name
|
|
||||||
self.version = version
|
|
||||||
|
|
||||||
@property
|
|
||||||
def logger(self):
|
|
||||||
if not self._logger:
|
|
||||||
self._logger = getLogger(self.name, self.version)
|
|
||||||
if six.PY3:
|
|
||||||
# In Python 3, the code fails because the 'manager' attribute
|
|
||||||
# cannot be found when using a LoggerAdapter as the
|
|
||||||
# underlying logger. Work around this issue.
|
|
||||||
self._logger.manager = self._logger.logger.manager
|
|
||||||
return self._logger
|
|
||||||
|
|
||||||
|
|
||||||
class ContextAdapter(BaseLoggerAdapter):
    """Adapter that injects request-context data into each log record."""

    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # Remembers which (msg, args) pairs were already warned about so
        # each deprecation is logged only once.
        self._deprecated_messages_sent = dict()

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.

        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        # NOTE(mrodden): catch any Message/other object and
        # coerce to unicode before they can get
        # to the python logging and possibly
        # cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)

        extra = kwargs.setdefault('extra', {})

        # Prefer an explicit context kwarg; fall back to thread-local store.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        else:
            instance_extra = ''
        extra['instance'] = instance_extra

        extra.setdefault('user_identity', kwargs.pop('user_identity', None))

        extra['project'] = self.project
        extra['version'] = self.version
        extra['extra'] = extra.copy()
        return msg, kwargs
|
|
||||||
|
|
||||||
|
|
||||||
class JSONFormatter(logging.Formatter):
    """Formatter that serializes every log record as a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        # since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Return the traceback for *ei* as a list of text lines."""
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            # Split each chunk into its own lines and drop empty ones.
            split = [moves.filter(lambda x: x,
                                  line.rstrip().splitlines())
                     for line in lines]
            lines = list(itertools.chain(*split))
        return lines

    def format(self, record):
        """Serialize *record*'s attributes into a JSON string."""
        message = {
            'message': record.getMessage(),
            'asctime': self.formatTime(record, self.datefmt),
            'name': record.name,
            'msg': record.msg,
            'args': record.args,
            'levelname': record.levelname,
            'levelno': record.levelno,
            'pathname': record.pathname,
            'filename': record.filename,
            'module': record.module,
            'lineno': record.lineno,
            'funcname': record.funcName,
            'created': record.created,
            'msecs': record.msecs,
            'relative_created': record.relativeCreated,
            'thread': record.thread,
            'thread_name': record.threadName,
            'process_name': record.processName,
            'process': record.process,
            'traceback': None,
        }

        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)
|
|
||||||
|
|
||||||
|
|
||||||
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as critical."""
    def logging_excepthook(exc_type, value, tb):
        extra = {'exc_info': (exc_type, value, tb)}
        summary = "".join(traceback.format_exception_only(exc_type, value))
        getLogger(product_name).critical(summary, **extra)
    return logging_excepthook
|
|
||||||
|
|
||||||
|
|
||||||
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
|
|
||||||
|
|
||||||
|
|
||||||
def _load_log_config(log_config_append):
    """Load a fileConfig-style logging config, wrapping parse failures.

    :raises: LogConfigError on any config parsing problem.
    """
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except (moves.configparser.Error, KeyError) as exc:
        raise LogConfigError(log_config_append, six.text_type(exc))
|
|
||||||
|
|
||||||
|
|
||||||
def setup(product_name, version='unknown'):
    """Setup logging."""
    # An explicit config file takes precedence over CONF-driven setup.
    if CONF.log_config_append:
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf(product_name, version)
    sys.excepthook = _create_logging_excepthook(product_name)
|
|
||||||
|
|
||||||
|
|
||||||
def set_defaults(logging_context_format_string=None,
                 default_log_levels=None):
    """Override this module's option defaults for the calling service."""
    # Just in case the caller is not setting the
    # default_log_level. This is insurance because
    # we introduced the default_log_level parameter
    # later in a backwards in-compatible change
    if default_log_levels is not None:
        cfg.set_defaults(log_opts,
                         default_log_levels=default_log_levels)
    if logging_context_format_string is not None:
        cfg.set_defaults(
            log_opts,
            logging_context_format_string=logging_context_format_string)
|
|
||||||
|
|
||||||
|
|
||||||
def _find_facility_from_conf():
    """Translate CONF.syslog_log_facility into a SysLogHandler facility.

    :raises: TypeError if the configured name is not a known facility.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    # First try the value as an attribute constant (e.g. LOG_USER) ...
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    # ... then as a lowercase facility name (e.g. 'user').
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
|
|
||||||
|
|
||||||
|
|
||||||
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler variant that prefixes each message with the binary name."""

    def __init__(self, *args, **kwargs):
        self.binary_name = _get_binary_name()
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)

    def format(self, record):
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        formatted = logging.handlers.SysLogHandler.format(self, record)
        return self.binary_name + ' ' + formatted
|
|
||||||
|
|
||||||
|
|
||||||
def _setup_logging_from_conf(project, version):
    """(Re)build the root logger's handlers, formatters and levels from CONF."""
    log_root = getLogger(None).logger
    # Start from a clean slate: drop any previously installed handlers.
    for existing in log_root.handlers:
        log_root.removeHandler(existing)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        # TODO(bogdando) use the format provided by RFCSysLogHandler
        # after existing syslog format deprecation in J
        if CONF.use_syslog_rfc_format:
            syslog = RFCSysLogHandler(address='/dev/log',
                                      facility=facility)
        else:
            syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        log_root.addHandler(logging.handlers.WatchedFileHandler(logpath))

    if CONF.use_stderr:
        log_root.addHandler(ColorHandler())
    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        log_root.addHandler(logging.StreamHandler(sys.stdout))

    if CONF.publish_errors:
        try:
            handler = importutils.import_object(
                "ec2api.openstack.common.log_handler.PublishErrorsHandler",
                logging.ERROR)
        except ImportError:
            handler = importutils.import_object(
                "oslo.messaging.notify.log_handler.PublishErrorsHandler",
                logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            logger.setLevel(logging.getLevelName(level_name))
        else:
            logger.setLevel(level_name)
|
|
||||||
|
|
||||||
|
|
||||||
_loggers = {}
|
|
||||||
|
|
||||||
|
|
||||||
def getLogger(name='unknown', version='unknown'):
    """Return a cached ContextAdapter wrapping the named stdlib logger."""
    adapter = _loggers.get(name)
    if adapter is None:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
    return adapter
|
|
||||||
|
|
||||||
|
|
||||||
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)
|
|
||||||
|
|
||||||
|
|
||||||
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Strip trailing whitespace/newlines before logging the message.
        self.logger.log(self.level, msg.rstrip())
|
|
||||||
|
|
||||||
|
|
||||||
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    If available, uses the context value stored in TLS - local.store.context

    """

    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance

        Takes additional keyword arguments which can be used in the message
        format string.

        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string

        """
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')
        logging.Formatter.__init__(self, *args, **kwargs)

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # store project info
        record.project = self.project
        record.version = self.version

        # store request info
        context = getattr(local.store, 'context', None)
        if context:
            ctx_dict = _dictify_context(context)
            for key, value in ctx_dict.items():
                setattr(record, key, value)

        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for attr in ('instance', 'color', 'user_identity'):
            record.__dict__.setdefault(attr, '')

        if record.__dict__.get('request_id'):
            fmt = CONF.logging_context_format_string
        else:
            fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            fmt += " " + CONF.logging_debug_format_suffix

        if sys.version_info < (3, 2):
            self._fmt = fmt
        else:
            self._style = logging.PercentStyle(fmt)
            self._fmt = self._style._fmt
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        buf = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, buf)
        lines = buf.getvalue().split('\n')
        buf.close()

        # The prefix may reference %(asctime); make sure it is populated.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        formatted_lines = []
        for line in lines:
            prefix = CONF.logging_exception_prefix % record.__dict__
            formatted_lines.append('%s%s' % (prefix, line))
        return '\n'.join(formatted_lines)
|
|
||||||
|
|
||||||
|
|
||||||
class ColorHandler(logging.StreamHandler):
    """Stream handler that exposes an ANSI color code as ``record.color``."""

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
|
|
||||||
|
|
||||||
|
|
||||||
class DeprecatedConfig(Exception):
    """Raised when a fatally deprecated config option is exercised."""

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        super(Exception, self).__init__(self.message % dict(msg=msg))
|
|
|
@ -15,14 +15,14 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
import logging
|
||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from eventlet import event
|
from eventlet import event
|
||||||
from eventlet import greenthread
|
from eventlet import greenthread
|
||||||
|
|
||||||
from ec2api.openstack.common.gettextutils import _LE, _LW
|
from ec2api.openstack.common._i18n import _LE, _LW
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -84,9 +84,9 @@ class FixedIntervalLoopingCall(LoopingCallBase):
|
||||||
break
|
break
|
||||||
delay = end - start - interval
|
delay = end - start - interval
|
||||||
if delay > 0:
|
if delay > 0:
|
||||||
LOG.warn(_LW('task %(func_name)s run outlasted '
|
LOG.warn(_LW('task %(func_name)r run outlasted '
|
||||||
'interval by %(delay).2f sec'),
|
'interval by %(delay).2f sec'),
|
||||||
{'func_name': repr(self.f), 'delay': delay})
|
{'func_name': self.f, 'delay': delay})
|
||||||
greenthread.sleep(-delay if delay < 0 else 0)
|
greenthread.sleep(-delay if delay < 0 else 0)
|
||||||
except LoopingCallDone as e:
|
except LoopingCallDone as e:
|
||||||
self.stop()
|
self.stop()
|
||||||
|
@ -127,9 +127,9 @@ class DynamicLoopingCall(LoopingCallBase):
|
||||||
|
|
||||||
if periodic_interval_max is not None:
|
if periodic_interval_max is not None:
|
||||||
idle = min(idle, periodic_interval_max)
|
idle = min(idle, periodic_interval_max)
|
||||||
LOG.debug('Dynamic looping call %(func_name)s sleeping '
|
LOG.debug('Dynamic looping call %(func_name)r sleeping '
|
||||||
'for %(idle).02f seconds',
|
'for %(idle).02f seconds',
|
||||||
{'func_name': repr(self.f), 'idle': idle})
|
{'func_name': self.f, 'idle': idle})
|
||||||
greenthread.sleep(idle)
|
greenthread.sleep(idle)
|
||||||
except LoopingCallDone as e:
|
except LoopingCallDone as e:
|
||||||
self.stop()
|
self.stop()
|
||||||
|
|
|
@ -18,7 +18,7 @@
|
||||||
"""Generic Node base class for all workers that run on hosts."""
|
"""Generic Node base class for all workers that run on hosts."""
|
||||||
|
|
||||||
import errno
|
import errno
|
||||||
import logging as std_logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import random
|
import random
|
||||||
import signal
|
import signal
|
||||||
|
@ -35,17 +35,14 @@ except ImportError:
|
||||||
|
|
||||||
import eventlet
|
import eventlet
|
||||||
from eventlet import event
|
from eventlet import event
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
|
||||||
from ec2api.openstack.common import eventlet_backdoor
|
from ec2api.openstack.common import eventlet_backdoor
|
||||||
from ec2api.openstack.common.gettextutils import _LE, _LI, _LW
|
from ec2api.openstack.common._i18n import _LE, _LI, _LW
|
||||||
from ec2api.openstack.common import importutils
|
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
from ec2api.openstack.common import systemd
|
from ec2api.openstack.common import systemd
|
||||||
from ec2api.openstack.common import threadgroup
|
from ec2api.openstack.common import threadgroup
|
||||||
|
|
||||||
|
|
||||||
rpc = importutils.try_import('ec2api.openstack.common.rpc')
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -165,7 +162,7 @@ class ServiceLauncher(Launcher):
|
||||||
signo = 0
|
signo = 0
|
||||||
|
|
||||||
LOG.debug('Full set of CONF:')
|
LOG.debug('Full set of CONF:')
|
||||||
CONF.log_opt_values(LOG, std_logging.DEBUG)
|
CONF.log_opt_values(LOG, logging.DEBUG)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if ready_callback:
|
if ready_callback:
|
||||||
|
@ -180,12 +177,6 @@ class ServiceLauncher(Launcher):
|
||||||
status = exc.code
|
status = exc.code
|
||||||
finally:
|
finally:
|
||||||
self.stop()
|
self.stop()
|
||||||
if rpc:
|
|
||||||
try:
|
|
||||||
rpc.cleanup()
|
|
||||||
except Exception:
|
|
||||||
# We're shutting down, so it doesn't matter at this point.
|
|
||||||
LOG.exception(_LE('Exception during rpc cleanup.'))
|
|
||||||
|
|
||||||
return status, signo
|
return status, signo
|
||||||
|
|
||||||
|
@ -385,7 +376,7 @@ class ProcessLauncher(object):
|
||||||
|
|
||||||
systemd.notify_once()
|
systemd.notify_once()
|
||||||
LOG.debug('Full set of CONF:')
|
LOG.debug('Full set of CONF:')
|
||||||
CONF.log_opt_values(LOG, std_logging.DEBUG)
|
CONF.log_opt_values(LOG, logging.DEBUG)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
while True:
|
while True:
|
||||||
|
@ -405,7 +396,7 @@ class ProcessLauncher(object):
|
||||||
self.running = True
|
self.running = True
|
||||||
self.sigcaught = None
|
self.sigcaught = None
|
||||||
except eventlet.greenlet.GreenletExit:
|
except eventlet.greenlet.GreenletExit:
|
||||||
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
|
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
|
||||||
|
|
||||||
self.stop()
|
self.stop()
|
||||||
|
|
||||||
|
@ -442,8 +433,8 @@ class Service(object):
|
||||||
def start(self):
|
def start(self):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def stop(self):
|
def stop(self, graceful=False):
|
||||||
self.tg.stop()
|
self.tg.stop(graceful)
|
||||||
self.tg.wait()
|
self.tg.wait()
|
||||||
# Signal that service cleanup is done:
|
# Signal that service cleanup is done:
|
||||||
if not self._done.ready():
|
if not self._done.ready():
|
||||||
|
|
|
@ -1,295 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
System-level utilities and helper functions.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import math
|
|
||||||
import re
|
|
||||||
import sys
|
|
||||||
import unicodedata
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from ec2api.openstack.common.gettextutils import _
|
|
||||||
|
|
||||||
|
|
||||||
UNIT_PREFIX_EXPONENT = {
|
|
||||||
'k': 1,
|
|
||||||
'K': 1,
|
|
||||||
'Ki': 1,
|
|
||||||
'M': 2,
|
|
||||||
'Mi': 2,
|
|
||||||
'G': 3,
|
|
||||||
'Gi': 3,
|
|
||||||
'T': 4,
|
|
||||||
'Ti': 4,
|
|
||||||
}
|
|
||||||
UNIT_SYSTEM_INFO = {
|
|
||||||
'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
|
|
||||||
'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
|
|
||||||
}
|
|
||||||
|
|
||||||
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
|
|
||||||
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
|
|
||||||
|
|
||||||
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
|
|
||||||
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
|
|
||||||
|
|
||||||
|
|
||||||
# NOTE(flaper87): The following 3 globals are used by `mask_password`
|
|
||||||
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
|
|
||||||
|
|
||||||
# NOTE(ldbragst): Let's build a list of regex objects using the list of
|
|
||||||
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
|
|
||||||
# to the list of _SANITIZE_KEYS and we can generate regular expressions
|
|
||||||
# for XML and JSON automatically.
|
|
||||||
_SANITIZE_PATTERNS = []
|
|
||||||
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
|
|
||||||
r'(<%(key)s>).*?(</%(key)s>)',
|
|
||||||
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
|
|
||||||
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
|
|
||||||
r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
|
|
||||||
'.*?([\'"])',
|
|
||||||
r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']
|
|
||||||
|
|
||||||
for key in _SANITIZE_KEYS:
|
|
||||||
for pattern in _FORMAT_PATTERNS:
|
|
||||||
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
|
|
||||||
_SANITIZE_PATTERNS.append(reg_ex)
|
|
||||||
|
|
||||||
|
|
||||||
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    return 1 if bool_from_string(subject) else 0
|
|
||||||
|
|
||||||
|
|
||||||
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    if lowered in FALSE_STRINGS:
        return False
    if strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    return default
|
|
||||||
|
|
||||||
|
|
||||||
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
    already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode: nothing to do.
    if isinstance(text, six.text_type):
        return text

    encoding = incoming or (sys.stdin.encoding or
                            sys.getdefaultencoding())

    try:
        return text.decode(encoding, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)
|
|
||||||
|
|
||||||
|
|
||||||
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming text/bytes string using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    if text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)
    return text
|
|
||||||
|
|
||||||
|
|
||||||
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into an float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.

    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        raise ValueError(_('Invalid unit system: "%s"') % unit_system)

    match = reg_ex.match(text)
    if not match:
        raise ValueError(_('Invalid string format: %s') % text)

    magnitude = float(match.group(1))
    unit_prefix = match.group(2)
    # A bit suffix ('b'/'bit') means the magnitude is in bits, not bytes.
    if match.group(3) in ['b', 'bit']:
        magnitude /= 8

    if unit_prefix:
        result = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    else:
        result = magnitude

    if return_int:
        return int(math.ceil(result))
    return result
|
|
||||||
|
|
||||||
|
|
||||||
def to_slug(value, incoming=None, errors="strict"):
|
|
||||||
"""Normalize string.
|
|
||||||
|
|
||||||
Convert to lowercase, remove non-word characters, and convert spaces
|
|
||||||
to hyphens.
|
|
||||||
|
|
||||||
Inspired by Django's `slugify` filter.
|
|
||||||
|
|
||||||
:param value: Text to slugify
|
|
||||||
:param incoming: Text's current encoding
|
|
||||||
:param errors: Errors handling policy. See here for valid
|
|
||||||
values http://docs.python.org/2/library/codecs.html
|
|
||||||
:returns: slugified unicode representation of `value`
|
|
||||||
:raises TypeError: If text is not an instance of str
|
|
||||||
"""
|
|
||||||
value = safe_decode(value, incoming, errors)
|
|
||||||
# NOTE(aababilov): no need to use safe_(encode|decode) here:
|
|
||||||
# encodings are always "ascii", error handling is always "ignore"
|
|
||||||
# and types are always known (first: unicode; second: str)
|
|
||||||
value = unicodedata.normalize("NFKD", value).encode(
|
|
||||||
"ascii", "ignore").decode("ascii")
|
|
||||||
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
|
|
||||||
return SLUGIFY_HYPHENATE_RE.sub("-", value)
|
|
||||||
|
|
||||||
|
|
||||||
def mask_password(message, secret="***"):
|
|
||||||
"""Replace password with 'secret' in message.
|
|
||||||
|
|
||||||
:param message: The string which includes security information.
|
|
||||||
:param secret: value with which to replace passwords.
|
|
||||||
:returns: The unicode value of message with the password fields masked.
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
>>> mask_password("'adminPass' : 'aaaaa'")
|
|
||||||
"'adminPass' : '***'"
|
|
||||||
>>> mask_password("'admin_pass' : 'aaaaa'")
|
|
||||||
"'admin_pass' : '***'"
|
|
||||||
>>> mask_password('"password" : "aaaaa"')
|
|
||||||
'"password" : "***"'
|
|
||||||
>>> mask_password("'original_password' : 'aaaaa'")
|
|
||||||
"'original_password' : '***'"
|
|
||||||
>>> mask_password("u'original_password' : u'aaaaa'")
|
|
||||||
"u'original_password' : u'***'"
|
|
||||||
"""
|
|
||||||
message = six.text_type(message)
|
|
||||||
|
|
||||||
# NOTE(ldbragst): Check to see if anything in message contains any key
|
|
||||||
# specified in _SANITIZE_KEYS, if not then just return the message since
|
|
||||||
# we don't have to mask any passwords.
|
|
||||||
if not any(key in message for key in _SANITIZE_KEYS):
|
|
||||||
return message
|
|
||||||
|
|
||||||
secret = r'\g<1>' + secret + r'\g<2>'
|
|
||||||
for pattern in _SANITIZE_PATTERNS:
|
|
||||||
message = re.sub(pattern, secret, message)
|
|
||||||
return message
|
|
|
@ -16,12 +16,11 @@
|
||||||
Helper module for systemd service readiness notification.
|
Helper module for systemd service readiness notification.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import socket
|
import socket
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
|
@ -11,12 +11,12 @@
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
import logging
|
||||||
import threading
|
import threading
|
||||||
|
|
||||||
import eventlet
|
import eventlet
|
||||||
from eventlet import greenpool
|
from eventlet import greenpool
|
||||||
|
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
from ec2api.openstack.common import loopingcall
|
from ec2api.openstack.common import loopingcall
|
||||||
|
|
||||||
|
|
||||||
|
@ -96,6 +96,8 @@ class ThreadGroup(object):
|
||||||
continue
|
continue
|
||||||
try:
|
try:
|
||||||
x.stop()
|
x.stop()
|
||||||
|
except eventlet.greenlet.GreenletExit:
|
||||||
|
pass
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.exception(ex)
|
LOG.exception(ex)
|
||||||
|
|
||||||
|
|
|
@ -1,210 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Time related utilities and helper functions.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import calendar
|
|
||||||
import datetime
|
|
||||||
import time
|
|
||||||
|
|
||||||
import iso8601
|
|
||||||
import six
|
|
||||||
|
|
||||||
|
|
||||||
# ISO 8601 extended time format with microseconds
|
|
||||||
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
|
|
||||||
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
|
|
||||||
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
|
|
||||||
|
|
||||||
|
|
||||||
def isotime(at=None, subsecond=False):
|
|
||||||
"""Stringify time in ISO 8601 format."""
|
|
||||||
if not at:
|
|
||||||
at = utcnow()
|
|
||||||
st = at.strftime(_ISO8601_TIME_FORMAT
|
|
||||||
if not subsecond
|
|
||||||
else _ISO8601_TIME_FORMAT_SUBSECOND)
|
|
||||||
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
|
|
||||||
st += ('Z' if tz == 'UTC' else tz)
|
|
||||||
return st
|
|
||||||
|
|
||||||
|
|
||||||
def parse_isotime(timestr):
|
|
||||||
"""Parse time from ISO 8601 format."""
|
|
||||||
try:
|
|
||||||
return iso8601.parse_date(timestr)
|
|
||||||
except iso8601.ParseError as e:
|
|
||||||
raise ValueError(six.text_type(e))
|
|
||||||
except TypeError as e:
|
|
||||||
raise ValueError(six.text_type(e))
|
|
||||||
|
|
||||||
|
|
||||||
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
|
|
||||||
"""Returns formatted utcnow."""
|
|
||||||
if not at:
|
|
||||||
at = utcnow()
|
|
||||||
return at.strftime(fmt)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
|
|
||||||
"""Turn a formatted time back into a datetime."""
|
|
||||||
return datetime.datetime.strptime(timestr, fmt)
|
|
||||||
|
|
||||||
|
|
||||||
def normalize_time(timestamp):
|
|
||||||
"""Normalize time in arbitrary timezone to UTC naive object."""
|
|
||||||
offset = timestamp.utcoffset()
|
|
||||||
if offset is None:
|
|
||||||
return timestamp
|
|
||||||
return timestamp.replace(tzinfo=None) - offset
|
|
||||||
|
|
||||||
|
|
||||||
def is_older_than(before, seconds):
|
|
||||||
"""Return True if before is older than seconds."""
|
|
||||||
if isinstance(before, six.string_types):
|
|
||||||
before = parse_strtime(before).replace(tzinfo=None)
|
|
||||||
else:
|
|
||||||
before = before.replace(tzinfo=None)
|
|
||||||
|
|
||||||
return utcnow() - before > datetime.timedelta(seconds=seconds)
|
|
||||||
|
|
||||||
|
|
||||||
def is_newer_than(after, seconds):
|
|
||||||
"""Return True if after is newer than seconds."""
|
|
||||||
if isinstance(after, six.string_types):
|
|
||||||
after = parse_strtime(after).replace(tzinfo=None)
|
|
||||||
else:
|
|
||||||
after = after.replace(tzinfo=None)
|
|
||||||
|
|
||||||
return after - utcnow() > datetime.timedelta(seconds=seconds)
|
|
||||||
|
|
||||||
|
|
||||||
def utcnow_ts():
|
|
||||||
"""Timestamp version of our utcnow function."""
|
|
||||||
if utcnow.override_time is None:
|
|
||||||
# NOTE(kgriffs): This is several times faster
|
|
||||||
# than going through calendar.timegm(...)
|
|
||||||
return int(time.time())
|
|
||||||
|
|
||||||
return calendar.timegm(utcnow().timetuple())
|
|
||||||
|
|
||||||
|
|
||||||
def utcnow():
|
|
||||||
"""Overridable version of utils.utcnow."""
|
|
||||||
if utcnow.override_time:
|
|
||||||
try:
|
|
||||||
return utcnow.override_time.pop(0)
|
|
||||||
except AttributeError:
|
|
||||||
return utcnow.override_time
|
|
||||||
return datetime.datetime.utcnow()
|
|
||||||
|
|
||||||
|
|
||||||
def iso8601_from_timestamp(timestamp):
|
|
||||||
"""Returns an iso8601 formatted date from timestamp."""
|
|
||||||
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
|
|
||||||
|
|
||||||
|
|
||||||
utcnow.override_time = None
|
|
||||||
|
|
||||||
|
|
||||||
def set_time_override(override_time=None):
|
|
||||||
"""Overrides utils.utcnow.
|
|
||||||
|
|
||||||
Make it return a constant time or a list thereof, one at a time.
|
|
||||||
|
|
||||||
:param override_time: datetime instance or list thereof. If not
|
|
||||||
given, defaults to the current UTC time.
|
|
||||||
"""
|
|
||||||
utcnow.override_time = override_time or datetime.datetime.utcnow()
|
|
||||||
|
|
||||||
|
|
||||||
def advance_time_delta(timedelta):
|
|
||||||
"""Advance overridden time using a datetime.timedelta."""
|
|
||||||
assert utcnow.override_time is not None
|
|
||||||
try:
|
|
||||||
for dt in utcnow.override_time:
|
|
||||||
dt += timedelta
|
|
||||||
except TypeError:
|
|
||||||
utcnow.override_time += timedelta
|
|
||||||
|
|
||||||
|
|
||||||
def advance_time_seconds(seconds):
|
|
||||||
"""Advance overridden time by seconds."""
|
|
||||||
advance_time_delta(datetime.timedelta(0, seconds))
|
|
||||||
|
|
||||||
|
|
||||||
def clear_time_override():
|
|
||||||
"""Remove the overridden time."""
|
|
||||||
utcnow.override_time = None
|
|
||||||
|
|
||||||
|
|
||||||
def marshall_now(now=None):
|
|
||||||
"""Make an rpc-safe datetime with microseconds.
|
|
||||||
|
|
||||||
Note: tzinfo is stripped, but not required for relative times.
|
|
||||||
"""
|
|
||||||
if not now:
|
|
||||||
now = utcnow()
|
|
||||||
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
|
|
||||||
minute=now.minute, second=now.second,
|
|
||||||
microsecond=now.microsecond)
|
|
||||||
|
|
||||||
|
|
||||||
def unmarshall_time(tyme):
|
|
||||||
"""Unmarshall a datetime dict."""
|
|
||||||
return datetime.datetime(day=tyme['day'],
|
|
||||||
month=tyme['month'],
|
|
||||||
year=tyme['year'],
|
|
||||||
hour=tyme['hour'],
|
|
||||||
minute=tyme['minute'],
|
|
||||||
second=tyme['second'],
|
|
||||||
microsecond=tyme['microsecond'])
|
|
||||||
|
|
||||||
|
|
||||||
def delta_seconds(before, after):
|
|
||||||
"""Return the difference between two timing objects.
|
|
||||||
|
|
||||||
Compute the difference in seconds between two date, time, or
|
|
||||||
datetime objects (as a float, to microsecond resolution).
|
|
||||||
"""
|
|
||||||
delta = after - before
|
|
||||||
return total_seconds(delta)
|
|
||||||
|
|
||||||
|
|
||||||
def total_seconds(delta):
|
|
||||||
"""Return the total seconds of datetime.timedelta object.
|
|
||||||
|
|
||||||
Compute total seconds of datetime.timedelta, datetime.timedelta
|
|
||||||
doesn't have method total_seconds in Python2.6, calculate it manually.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
return delta.total_seconds()
|
|
||||||
except AttributeError:
|
|
||||||
return ((delta.days * 24 * 3600) + delta.seconds +
|
|
||||||
float(delta.microseconds) / (10 ** 6))
|
|
||||||
|
|
||||||
|
|
||||||
def is_soon(dt, window):
|
|
||||||
"""Determines if time is going to happen in the next window seconds.
|
|
||||||
|
|
||||||
:param dt: the time
|
|
||||||
:param window: minimum seconds to remain to consider the time not soon
|
|
||||||
|
|
||||||
:return: True if expiration is within the given duration
|
|
||||||
"""
|
|
||||||
soon = (utcnow() + datetime.timedelta(seconds=window))
|
|
||||||
return normalize_time(dt) <= soon
|
|
|
@ -15,7 +15,7 @@
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
|
||||||
path_opts = [
|
path_opts = [
|
||||||
cfg.StrOpt('pybasedir',
|
cfg.StrOpt('pybasedir',
|
||||||
|
|
|
@ -16,12 +16,12 @@
|
||||||
|
|
||||||
import multiprocessing
|
import multiprocessing
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
from ec2api.openstack.common import service
|
from ec2api.openstack.common import service
|
||||||
from ec2api import wsgi
|
from ec2api import wsgi
|
||||||
|
|
||||||
|
|
|
@ -18,7 +18,7 @@ from __future__ import print_function
|
||||||
import logging as std_logging
|
import logging as std_logging
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -15,7 +15,7 @@
|
||||||
import itertools
|
import itertools
|
||||||
|
|
||||||
import mock
|
import mock
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
from oslotest import base as test_base
|
from oslotest import base as test_base
|
||||||
|
|
||||||
import ec2api.api.apirequest
|
import ec2api.api.apirequest
|
||||||
|
|
|
@ -17,7 +17,7 @@ import copy
|
||||||
import mock
|
import mock
|
||||||
from neutronclient.common import exceptions as neutron_exception
|
from neutronclient.common import exceptions as neutron_exception
|
||||||
from novaclient import exceptions as nova_exception
|
from novaclient import exceptions as nova_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
|
||||||
from ec2api.api import address
|
from ec2api.api import address
|
||||||
from ec2api.tests.unit import base
|
from ec2api.tests.unit import base
|
||||||
|
|
|
@ -12,12 +12,13 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
from oslotest import base as test_base
|
from oslotest import base as test_base
|
||||||
from sqlalchemy import event
|
from sqlalchemy import event
|
||||||
from sqlalchemy.orm import exc as orm_exception
|
from sqlalchemy.orm import exc as orm_exception
|
||||||
|
|
||||||
from ec2api.api import validator
|
from ec2api.api import validator
|
||||||
|
from ec2api import config
|
||||||
from ec2api import context as ec2_context
|
from ec2api import context as ec2_context
|
||||||
from ec2api.db import api as db_api
|
from ec2api.db import api as db_api
|
||||||
from ec2api.db import migration
|
from ec2api.db import migration
|
||||||
|
@ -35,6 +36,7 @@ class DbApiTestCase(test_base.BaseTestCase):
|
||||||
super(DbApiTestCase, cls).setUpClass()
|
super(DbApiTestCase, cls).setUpClass()
|
||||||
conf = cfg.CONF
|
conf = cfg.CONF
|
||||||
try:
|
try:
|
||||||
|
config.parse_args([], default_config_files=[])
|
||||||
conf.set_override('connection', 'sqlite://', group='database')
|
conf.set_override('connection', 'sqlite://', group='database')
|
||||||
conf.set_override('sqlite_synchronous', False, group='database')
|
conf.set_override('sqlite_synchronous', False, group='database')
|
||||||
|
|
||||||
|
@ -49,11 +51,6 @@ class DbApiTestCase(test_base.BaseTestCase):
|
||||||
# also stops it from emitting COMMIT before any DDL.
|
# also stops it from emitting COMMIT before any DDL.
|
||||||
dbapi_connection.isolation_level = None
|
dbapi_connection.isolation_level = None
|
||||||
|
|
||||||
@event.listens_for(engine, "begin")
|
|
||||||
def do_begin(conn):
|
|
||||||
# emit our own BEGIN
|
|
||||||
conn.execute("BEGIN")
|
|
||||||
|
|
||||||
conn = engine.connect()
|
conn = engine.connect()
|
||||||
migration.db_sync()
|
migration.db_sync()
|
||||||
cls.DB_SCHEMA = "".join(line
|
cls.DB_SCHEMA = "".join(line
|
||||||
|
|
|
@ -19,7 +19,7 @@ import tempfile
|
||||||
|
|
||||||
import eventlet
|
import eventlet
|
||||||
import mock
|
import mock
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
from oslotest import base as test_base
|
from oslotest import base as test_base
|
||||||
|
|
||||||
from ec2api.api import image as image_api
|
from ec2api.api import image as image_api
|
||||||
|
|
|
@ -15,7 +15,7 @@
|
||||||
|
|
||||||
import mock
|
import mock
|
||||||
from neutronclient.common import exceptions as neutron_exception
|
from neutronclient.common import exceptions as neutron_exception
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
|
||||||
from ec2api.tests.unit import base
|
from ec2api.tests.unit import base
|
||||||
from ec2api.tests.unit import fakes
|
from ec2api.tests.unit import fakes
|
||||||
|
|
|
@ -13,7 +13,7 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import mock
|
import mock
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
from oslotest import base as test_base
|
from oslotest import base as test_base
|
||||||
import testtools
|
import testtools
|
||||||
import webob
|
import webob
|
||||||
|
|
|
@ -16,7 +16,7 @@
|
||||||
|
|
||||||
from lxml import etree
|
from lxml import etree
|
||||||
import mock
|
import mock
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
from oslotest import base as test_base
|
from oslotest import base as test_base
|
||||||
import requests
|
import requests
|
||||||
import webob.dec
|
import webob.dec
|
||||||
|
|
|
@ -21,10 +21,10 @@ import shutil
|
||||||
import socket
|
import socket
|
||||||
import tempfile
|
import tempfile
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
utils_opts = [
|
utils_opts = [
|
||||||
cfg.StrOpt('tempdir',
|
cfg.StrOpt('tempdir',
|
||||||
|
|
|
@ -23,7 +23,9 @@ import sys
|
||||||
|
|
||||||
import eventlet.wsgi
|
import eventlet.wsgi
|
||||||
import greenlet
|
import greenlet
|
||||||
from oslo.config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
from oslo_log import loggers
|
||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
from paste import deploy
|
from paste import deploy
|
||||||
import routes.middleware
|
import routes.middleware
|
||||||
|
@ -31,8 +33,7 @@ import webob.dec
|
||||||
import webob.exc
|
import webob.exc
|
||||||
|
|
||||||
from ec2api import exception
|
from ec2api import exception
|
||||||
from ec2api.openstack.common.gettextutils import _
|
from ec2api.i18n import _
|
||||||
from ec2api.openstack.common import log as logging
|
|
||||||
|
|
||||||
wsgi_opts = [
|
wsgi_opts = [
|
||||||
cfg.StrOpt('api_paste_config',
|
cfg.StrOpt('api_paste_config',
|
||||||
|
@ -101,7 +102,7 @@ class Server(object):
|
||||||
self._protocol = protocol
|
self._protocol = protocol
|
||||||
self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
|
self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
|
||||||
self._logger = logging.getLogger("ec2api.wsgi.server")
|
self._logger = logging.getLogger("ec2api.wsgi.server")
|
||||||
self._wsgi_logger = logging.WritableLogger(self._logger)
|
self._wsgi_logger = loggers.WritableLogger(self._logger)
|
||||||
self._use_ssl = use_ssl
|
self._use_ssl = use_ssl
|
||||||
self._max_url_len = max_url_len
|
self._max_url_len = max_url_len
|
||||||
|
|
||||||
|
|
|
@ -1,191 +1,5 @@
|
||||||
[DEFAULT]
|
[DEFAULT]
|
||||||
|
|
||||||
#
|
|
||||||
# Options defined in oslo.messaging
|
|
||||||
#
|
|
||||||
|
|
||||||
# Use durable queues in AMQP. (boolean value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
|
|
||||||
#amqp_durable_queues=false
|
|
||||||
|
|
||||||
# Auto-delete queues in AMQP. (boolean value)
|
|
||||||
#amqp_auto_delete=false
|
|
||||||
|
|
||||||
# Size of RPC connection pool. (integer value)
|
|
||||||
#rpc_conn_pool_size=30
|
|
||||||
|
|
||||||
# Qpid broker hostname. (string value)
|
|
||||||
#qpid_hostname=nova
|
|
||||||
|
|
||||||
# Qpid broker port. (integer value)
|
|
||||||
#qpid_port=5672
|
|
||||||
|
|
||||||
# Qpid HA cluster host:port pairs. (list value)
|
|
||||||
#qpid_hosts=$qpid_hostname:$qpid_port
|
|
||||||
|
|
||||||
# Username for Qpid connection. (string value)
|
|
||||||
#qpid_username=
|
|
||||||
|
|
||||||
# Password for Qpid connection. (string value)
|
|
||||||
#qpid_password=
|
|
||||||
|
|
||||||
# Space separated list of SASL mechanisms to use for auth.
|
|
||||||
# (string value)
|
|
||||||
#qpid_sasl_mechanisms=
|
|
||||||
|
|
||||||
# Seconds between connection keepalive heartbeats. (integer
|
|
||||||
# value)
|
|
||||||
#qpid_heartbeat=60
|
|
||||||
|
|
||||||
# Transport to use, either 'tcp' or 'ssl'. (string value)
|
|
||||||
#qpid_protocol=tcp
|
|
||||||
|
|
||||||
# Whether to disable the Nagle algorithm. (boolean value)
|
|
||||||
#qpid_tcp_nodelay=true
|
|
||||||
|
|
||||||
# The number of prefetched messages held by receiver. (integer
|
|
||||||
# value)
|
|
||||||
#qpid_receiver_capacity=1
|
|
||||||
|
|
||||||
# The qpid topology version to use. Version 1 is what was
|
|
||||||
# originally used by impl_qpid. Version 2 includes some
|
|
||||||
# backwards-incompatible changes that allow broker federation
|
|
||||||
# to work. Users should update to version 2 when they are
|
|
||||||
# able to take everything down, as it requires a clean break.
|
|
||||||
# (integer value)
|
|
||||||
#qpid_topology_version=1
|
|
||||||
|
|
||||||
# SSL version to use (valid only if SSL enabled). valid values
|
|
||||||
# are TLSv1 and SSLv23. SSLv2 and SSLv3 may be available on
|
|
||||||
# some distributions. (string value)
|
|
||||||
#kombu_ssl_version=
|
|
||||||
|
|
||||||
# SSL key file (valid only if SSL enabled). (string value)
|
|
||||||
#kombu_ssl_keyfile=
|
|
||||||
|
|
||||||
# SSL cert file (valid only if SSL enabled). (string value)
|
|
||||||
#kombu_ssl_certfile=
|
|
||||||
|
|
||||||
# SSL certification authority file (valid only if SSL
|
|
||||||
# enabled). (string value)
|
|
||||||
#kombu_ssl_ca_certs=
|
|
||||||
|
|
||||||
# How long to wait before reconnecting in response to an AMQP
|
|
||||||
# consumer cancel notification. (floating point value)
|
|
||||||
#kombu_reconnect_delay=1.0
|
|
||||||
|
|
||||||
# The RabbitMQ broker address where a single node is used.
|
|
||||||
# (string value)
|
|
||||||
#rabbit_host=nova
|
|
||||||
|
|
||||||
# The RabbitMQ broker port where a single node is used.
|
|
||||||
# (integer value)
|
|
||||||
#rabbit_port=5672
|
|
||||||
|
|
||||||
# RabbitMQ HA cluster host:port pairs. (list value)
|
|
||||||
#rabbit_hosts=$rabbit_host:$rabbit_port
|
|
||||||
|
|
||||||
# Connect over SSL for RabbitMQ. (boolean value)
|
|
||||||
#rabbit_use_ssl=false
|
|
||||||
|
|
||||||
# The RabbitMQ userid. (string value)
|
|
||||||
#rabbit_userid=guest
|
|
||||||
|
|
||||||
# The RabbitMQ password. (string value)
|
|
||||||
#rabbit_password=guest
|
|
||||||
|
|
||||||
# The RabbitMQ login method. (string value)
|
|
||||||
#rabbit_login_method=AMQPLAIN
|
|
||||||
|
|
||||||
# The RabbitMQ virtual host. (string value)
|
|
||||||
#rabbit_virtual_host=/
|
|
||||||
|
|
||||||
# How frequently to retry connecting with RabbitMQ. (integer
|
|
||||||
# value)
|
|
||||||
#rabbit_retry_interval=1
|
|
||||||
|
|
||||||
# How long to backoff for between retries when connecting to
|
|
||||||
# RabbitMQ. (integer value)
|
|
||||||
#rabbit_retry_backoff=2
|
|
||||||
|
|
||||||
# Maximum number of RabbitMQ connection retries. Default is 0
|
|
||||||
# (infinite retry count). (integer value)
|
|
||||||
#rabbit_max_retries=0
|
|
||||||
|
|
||||||
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
|
|
||||||
# this option, you must wipe the RabbitMQ database. (boolean
|
|
||||||
# value)
|
|
||||||
#rabbit_ha_queues=false
|
|
||||||
|
|
||||||
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
|
|
||||||
# (boolean value)
|
|
||||||
#fake_rabbit=false
|
|
||||||
|
|
||||||
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
|
|
||||||
# interface, or IP. The "host" option should point or resolve
|
|
||||||
# to this address. (string value)
|
|
||||||
#rpc_zmq_bind_address=*
|
|
||||||
|
|
||||||
# MatchMaker driver. (string value)
|
|
||||||
#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
|
|
||||||
|
|
||||||
# ZeroMQ receiver listening port. (integer value)
|
|
||||||
#rpc_zmq_port=9501
|
|
||||||
|
|
||||||
# Number of ZeroMQ contexts, defaults to 1. (integer value)
|
|
||||||
#rpc_zmq_contexts=1
|
|
||||||
|
|
||||||
# Maximum number of ingress messages to locally buffer per
|
|
||||||
# topic. Default is unlimited. (integer value)
|
|
||||||
#rpc_zmq_topic_backlog=<None>
|
|
||||||
|
|
||||||
# Directory for holding IPC sockets. (string value)
|
|
||||||
#rpc_zmq_ipc_dir=/var/run/openstack
|
|
||||||
|
|
||||||
# Name of this node. Must be a valid hostname, FQDN, or IP
|
|
||||||
# address. Must match "host" option, if running Nova. (string
|
|
||||||
# value)
|
|
||||||
#rpc_zmq_host=nova
|
|
||||||
|
|
||||||
# Seconds to wait before a cast expires (TTL). Only supported
|
|
||||||
# by impl_zmq. (integer value)
|
|
||||||
#rpc_cast_timeout=30
|
|
||||||
|
|
||||||
# Heartbeat frequency. (integer value)
|
|
||||||
#matchmaker_heartbeat_freq=300
|
|
||||||
|
|
||||||
# Heartbeat time-to-live. (integer value)
|
|
||||||
#matchmaker_heartbeat_ttl=600
|
|
||||||
|
|
||||||
# Size of RPC greenthread pool. (integer value)
|
|
||||||
#rpc_thread_pool_size=64
|
|
||||||
|
|
||||||
# Driver or drivers to handle sending notifications. (multi
|
|
||||||
# valued)
|
|
||||||
#notification_driver=
|
|
||||||
|
|
||||||
# AMQP topic used for OpenStack notifications. (list value)
|
|
||||||
# Deprecated group/name - [rpc_notifier2]/topics
|
|
||||||
#notification_topics=notifications
|
|
||||||
|
|
||||||
# Seconds to wait for a response from a call. (integer value)
|
|
||||||
#rpc_response_timeout=60
|
|
||||||
|
|
||||||
# A URL representing the messaging driver to use and its full
|
|
||||||
# configuration. If not set, we fall back to the rpc_backend
|
|
||||||
# option and driver specific configuration. (string value)
|
|
||||||
#transport_url=<None>
|
|
||||||
|
|
||||||
# The messaging driver to use, defaults to rabbit. Other
|
|
||||||
# drivers include qpid and zmq. (string value)
|
|
||||||
#rpc_backend=rabbit
|
|
||||||
|
|
||||||
# The default exchange under which topics are scoped. May be
|
|
||||||
# overridden by an exchange name specified in the
|
|
||||||
# transport_url option. (string value)
|
|
||||||
#control_exchange=openstack
|
|
||||||
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Options defined in ec2api.exception
|
# Options defined in ec2api.exception
|
||||||
#
|
#
|
||||||
|
@ -430,100 +244,6 @@
|
||||||
#backdoor_port=<None>
|
#backdoor_port=<None>
|
||||||
|
|
||||||
|
|
||||||
#
|
|
||||||
# Options defined in ec2api.openstack.common.log
|
|
||||||
#
|
|
||||||
|
|
||||||
# Print debugging output (set logging level to DEBUG instead
|
|
||||||
# of default WARNING level). (boolean value)
|
|
||||||
#debug=false
|
|
||||||
|
|
||||||
# Print more verbose output (set logging level to INFO instead
|
|
||||||
# of default WARNING level). (boolean value)
|
|
||||||
#verbose=false
|
|
||||||
|
|
||||||
# Log output to standard error. (boolean value)
|
|
||||||
#use_stderr=true
|
|
||||||
|
|
||||||
# Format string to use for log messages with context. (string
|
|
||||||
# value)
|
|
||||||
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
|
|
||||||
|
|
||||||
# Format string to use for log messages without context.
|
|
||||||
# (string value)
|
|
||||||
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
|
|
||||||
|
|
||||||
# Data to append to log format when level is DEBUG. (string
|
|
||||||
# value)
|
|
||||||
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
|
|
||||||
|
|
||||||
# Prefix each line of exception output with this format.
|
|
||||||
# (string value)
|
|
||||||
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
|
|
||||||
|
|
||||||
# List of logger=LEVEL pairs. (list value)
|
|
||||||
#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN
|
|
||||||
|
|
||||||
# Enables or disables publication of error events. (boolean
|
|
||||||
# value)
|
|
||||||
#publish_errors=false
|
|
||||||
|
|
||||||
# Enables or disables fatal status of deprecations. (boolean
|
|
||||||
# value)
|
|
||||||
#fatal_deprecations=false
|
|
||||||
|
|
||||||
# The format for an instance that is passed with the log
|
|
||||||
# message. (string value)
|
|
||||||
#instance_format="[instance: %(uuid)s] "
|
|
||||||
|
|
||||||
# The format for an instance UUID that is passed with the log
|
|
||||||
# message. (string value)
|
|
||||||
#instance_uuid_format="[instance: %(uuid)s] "
|
|
||||||
|
|
||||||
# The name of a logging configuration file. This file is
|
|
||||||
# appended to any existing logging configuration files. For
|
|
||||||
# details about logging configuration files, see the Python
|
|
||||||
# logging module documentation. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/log_config
|
|
||||||
#log_config_append=<None>
|
|
||||||
|
|
||||||
# DEPRECATED. A logging.Formatter log message format string
|
|
||||||
# which may use any of the available logging.LogRecord
|
|
||||||
# attributes. This option is deprecated. Please use
|
|
||||||
# logging_context_format_string and
|
|
||||||
# logging_default_format_string instead. (string value)
|
|
||||||
#log_format=<None>
|
|
||||||
|
|
||||||
# Format string for %%(asctime)s in log records. Default:
|
|
||||||
# %(default)s . (string value)
|
|
||||||
#log_date_format=%Y-%m-%d %H:%M:%S
|
|
||||||
|
|
||||||
# (Optional) Name of log file to output to. If no default is
|
|
||||||
# set, logging will go to stdout. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/logfile
|
|
||||||
#log_file=<None>
|
|
||||||
|
|
||||||
# (Optional) The base directory used for relative --log-file
|
|
||||||
# paths. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/logdir
|
|
||||||
#log_dir=<None>
|
|
||||||
|
|
||||||
# Use syslog for logging. Existing syslog format is DEPRECATED
|
|
||||||
# during I, and will change in J to honor RFC5424. (boolean
|
|
||||||
# value)
|
|
||||||
#use_syslog=false
|
|
||||||
|
|
||||||
# (Optional) Enables or disables syslog rfc5424 format for
|
|
||||||
# logging. If enabled, prefixes the MSG part of the syslog
|
|
||||||
# message with APP-NAME (RFC5424). The format without the APP-
|
|
||||||
# NAME is deprecated in I, and will be removed in J. (boolean
|
|
||||||
# value)
|
|
||||||
#use_syslog_rfc_format=false
|
|
||||||
|
|
||||||
# Syslog facility to receive log lines. (string value)
|
|
||||||
#syslog_log_facility=LOG_USER
|
|
||||||
|
|
||||||
|
|
||||||
[database]
|
[database]
|
||||||
|
|
||||||
#
|
#
|
||||||
|
@ -544,110 +264,6 @@
|
||||||
# database (string value)
|
# database (string value)
|
||||||
#connection_nova=<None>
|
#connection_nova=<None>
|
||||||
|
|
||||||
# The SQLAlchemy connection string used to connect to the
|
|
||||||
# slave database (string value)
|
|
||||||
#slave_connection=<None>
|
|
||||||
|
|
||||||
|
|
||||||
#
|
|
||||||
# Options defined in ec2api.openstack.common.db.options
|
|
||||||
#
|
|
||||||
|
|
||||||
# The file name to use with SQLite (string value)
|
|
||||||
#sqlite_db=ec2api.sqlite
|
|
||||||
|
|
||||||
# If True, SQLite uses synchronous mode (boolean value)
|
|
||||||
#sqlite_synchronous=true
|
|
||||||
|
|
||||||
# The backend to use for db (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/db_backend
|
|
||||||
#backend=sqlalchemy
|
|
||||||
|
|
||||||
# The SQLAlchemy connection string used to connect to the
|
|
||||||
# database (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_connection
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_connection
|
|
||||||
# Deprecated group/name - [sql]/connection
|
|
||||||
#connection=<None>
|
|
||||||
|
|
||||||
# The SQL mode to be used for MySQL sessions. This option,
|
|
||||||
# including the default, overrides any server-set SQL mode. To
|
|
||||||
# use whatever SQL mode is set by the server configuration,
|
|
||||||
# set this to no value. Example: mysql_sql_mode= (string
|
|
||||||
# value)
|
|
||||||
#mysql_sql_mode=TRADITIONAL
|
|
||||||
|
|
||||||
# Timeout before idle sql connections are reaped (integer
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_idle_timeout
|
|
||||||
# Deprecated group/name - [sql]/idle_timeout
|
|
||||||
#idle_timeout=3600
|
|
||||||
|
|
||||||
# Minimum number of SQL connections to keep open in a pool
|
|
||||||
# (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_min_pool_size
|
|
||||||
#min_pool_size=1
|
|
||||||
|
|
||||||
# Maximum number of SQL connections to keep open in a pool
|
|
||||||
# (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_max_pool_size
|
|
||||||
#max_pool_size=<None>
|
|
||||||
|
|
||||||
# Maximum db connection retries during startup. (setting -1
|
|
||||||
# implies an infinite retry count) (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_max_retries
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_max_retries
|
|
||||||
#max_retries=10
|
|
||||||
|
|
||||||
# Interval between retries of opening a sql connection
|
|
||||||
# (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_retry_interval
|
|
||||||
# Deprecated group/name - [DATABASE]/reconnect_interval
|
|
||||||
#retry_interval=10
|
|
||||||
|
|
||||||
# If set, use this value for max_overflow with sqlalchemy
|
|
||||||
# (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_max_overflow
|
|
||||||
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
|
|
||||||
#max_overflow=<None>
|
|
||||||
|
|
||||||
# Verbosity of SQL debugging information. 0=None,
|
|
||||||
# 100=Everything (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_connection_debug
|
|
||||||
#connection_debug=0
|
|
||||||
|
|
||||||
# Add python stack traces to SQL as comment strings (boolean
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_connection_trace
|
|
||||||
#connection_trace=false
|
|
||||||
|
|
||||||
# If set, use this value for pool_timeout with sqlalchemy
|
|
||||||
# (integer value)
|
|
||||||
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
|
|
||||||
#pool_timeout=<None>
|
|
||||||
|
|
||||||
# Enable the experimental use of database reconnect on
|
|
||||||
# connection lost (boolean value)
|
|
||||||
#use_db_reconnect=false
|
|
||||||
|
|
||||||
# seconds between db connection retries (integer value)
|
|
||||||
#db_retry_interval=1
|
|
||||||
|
|
||||||
# Whether to increase interval between db connection retries,
|
|
||||||
# up to db_max_retry_interval (boolean value)
|
|
||||||
#db_inc_retry_interval=true
|
|
||||||
|
|
||||||
# max seconds between db connection retries, if
|
|
||||||
# db_inc_retry_interval is enabled (integer value)
|
|
||||||
#db_max_retry_interval=10
|
|
||||||
|
|
||||||
# maximum db connection retries before error is raised.
|
|
||||||
# (setting -1 implies an infinite retry count) (integer value)
|
|
||||||
#db_max_retries=20
|
|
||||||
|
|
||||||
|
|
||||||
[keystone_authtoken]
|
[keystone_authtoken]
|
||||||
|
|
||||||
|
@ -804,33 +420,6 @@
|
||||||
#hash_algorithms=md5
|
#hash_algorithms=md5
|
||||||
|
|
||||||
|
|
||||||
[matchmaker_redis]
|
|
||||||
|
|
||||||
#
|
|
||||||
# Options defined in oslo.messaging
|
|
||||||
#
|
|
||||||
|
|
||||||
# Host to locate redis. (string value)
|
|
||||||
#host=127.0.0.1
|
|
||||||
|
|
||||||
# Use this port to connect to redis host. (integer value)
|
|
||||||
#port=6379
|
|
||||||
|
|
||||||
# Password for Redis server (optional). (string value)
|
|
||||||
#password=<None>
|
|
||||||
|
|
||||||
|
|
||||||
[matchmaker_ring]
|
|
||||||
|
|
||||||
#
|
|
||||||
# Options defined in oslo.messaging
|
|
||||||
#
|
|
||||||
|
|
||||||
# Matchmaker ring file (JSON). (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
|
|
||||||
#ringfile=/etc/oslo/matchmaker_ring.json
|
|
||||||
|
|
||||||
|
|
||||||
[metadata]
|
[metadata]
|
||||||
|
|
||||||
#
|
#
|
||||||
|
@ -875,51 +464,3 @@
|
||||||
#metadata_proxy_shared_secret=
|
#metadata_proxy_shared_secret=
|
||||||
|
|
||||||
|
|
||||||
[oslo_messaging_amqp]
|
|
||||||
|
|
||||||
#
|
|
||||||
# Options defined in oslo.messaging
|
|
||||||
#
|
|
||||||
|
|
||||||
# address prefix used when sending to a specific server
|
|
||||||
# (string value)
|
|
||||||
#server_request_prefix=exclusive
|
|
||||||
|
|
||||||
# address prefix used when broadcasting to all servers (string
|
|
||||||
# value)
|
|
||||||
#broadcast_prefix=broadcast
|
|
||||||
|
|
||||||
# address prefix when sending to any server in group (string
|
|
||||||
# value)
|
|
||||||
#group_request_prefix=unicast
|
|
||||||
|
|
||||||
# Name for the AMQP container (string value)
|
|
||||||
#container_name=<None>
|
|
||||||
|
|
||||||
# Timeout for inactive connections (in seconds) (integer
|
|
||||||
# value)
|
|
||||||
#idle_timeout=0
|
|
||||||
|
|
||||||
# Debug: dump AMQP frames to stdout (boolean value)
|
|
||||||
#trace=false
|
|
||||||
|
|
||||||
# CA certificate PEM file for verifing server certificate
|
|
||||||
# (string value)
|
|
||||||
#ssl_ca_file=
|
|
||||||
|
|
||||||
# Identifying certificate PEM file to present to clients
|
|
||||||
# (string value)
|
|
||||||
#ssl_cert_file=
|
|
||||||
|
|
||||||
# Private key PEM file used to sign cert_file certificate
|
|
||||||
# (string value)
|
|
||||||
#ssl_key_file=
|
|
||||||
|
|
||||||
# Password for decrypting ssl_key_file (if encrypted) (string
|
|
||||||
# value)
|
|
||||||
#ssl_key_password=<None>
|
|
||||||
|
|
||||||
# Accept clients using either SSL or plain TCP (boolean value)
|
|
||||||
#allow_insecure_clients=false
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
[DEFAULT]
|
[DEFAULT]
|
||||||
|
|
||||||
# The list of modules to copy from openstack-common
|
# The list of modules to copy from openstack-common
|
||||||
modules=db,db.sqlalchemy,eventlet_backdoor,gettextutils,jsonutils,local,timeutils,service
|
modules=eventlet_backdoor,local,service
|
||||||
|
|
||||||
# The base module to hold the copy of openstack.common
|
# The base module to hold the copy of openstack.common
|
||||||
base=ec2api
|
base=ec2api
|
||||||
|
|
|
@ -11,7 +11,10 @@ lxml>=2.3
|
||||||
oslo.concurrency>=1.4.1 # Apache-2.0
|
oslo.concurrency>=1.4.1 # Apache-2.0
|
||||||
oslo.config>=1.6.0 # Apache-2.0
|
oslo.config>=1.6.0 # Apache-2.0
|
||||||
oslo.messaging>=1.6.0 # Apache-2.0
|
oslo.messaging>=1.6.0 # Apache-2.0
|
||||||
|
oslo.log>=0.1.0 # Apache-2.0
|
||||||
oslo.utils>=1.2.0 # Apache-2.0
|
oslo.utils>=1.2.0 # Apache-2.0
|
||||||
|
oslo.serialization>=1.2.0 # Apache-2.0
|
||||||
|
oslo.db>=1.4.1 # Apache-2.0
|
||||||
Paste
|
Paste
|
||||||
PasteDeploy>=1.5.0
|
PasteDeploy>=1.5.0
|
||||||
pbr>=0.6,!=0.7,<1.0
|
pbr>=0.6,!=0.7,<1.0
|
||||||
|
|
|
@ -12,4 +12,3 @@ sphinx>=1.1.2,!=1.2.0,<1.3
|
||||||
tempest-lib>=0.2.0
|
tempest-lib>=0.2.0
|
||||||
testrepository>=0.0.18
|
testrepository>=0.0.18
|
||||||
testtools>=0.9.34
|
testtools>=0.9.34
|
||||||
oslo.log>=0.1.0 # Apache-2.0
|
|
||||||
|
|
|
@ -59,7 +59,7 @@ def main(argv):
|
||||||
pip_requires = os.path.join(root, 'requirements.txt')
|
pip_requires = os.path.join(root, 'requirements.txt')
|
||||||
test_requires = os.path.join(root, 'test-requirements.txt')
|
test_requires = os.path.join(root, 'test-requirements.txt')
|
||||||
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
|
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
|
||||||
project = 'ec2-api'
|
project = 'ec2api'
|
||||||
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
|
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
|
||||||
py_version, project)
|
py_version, project)
|
||||||
options = install.parse_args(argv)
|
options = install.parse_args(argv)
|
||||||
|
|
|
@ -1,199 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright (c) 2012, AT&T Labs, Yun Mao <yunmao@gmail.com>
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""pylint error checking."""
|
|
||||||
|
|
||||||
import cStringIO as StringIO
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from pylint import lint
|
|
||||||
from pylint.reporters import text
|
|
||||||
|
|
||||||
# Note(maoy): E1103 is error code related to partial type inference
|
|
||||||
ignore_codes = ["E1103"]
|
|
||||||
# Note(maoy): the error message is the pattern of E0202. It should be ignored
|
|
||||||
# for ec2api.tests modules
|
|
||||||
ignore_messages = ["An attribute affected in ec2api.tests"]
|
|
||||||
# Note(maoy): we ignore all errors in openstack.common because it should be
|
|
||||||
# checked elsewhere. We also ignore ec2api.tests for now due to high false
|
|
||||||
# positive rate.
|
|
||||||
ignore_modules = ["ec2api/openstack/common/", "ec2api/tests/"]
|
|
||||||
|
|
||||||
KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"
|
|
||||||
|
|
||||||
|
|
||||||
class LintOutput(object):
|
|
||||||
|
|
||||||
_cached_filename = None
|
|
||||||
_cached_content = None
|
|
||||||
|
|
||||||
def __init__(self, filename, lineno, line_content, code, message,
|
|
||||||
lintoutput):
|
|
||||||
self.filename = filename
|
|
||||||
self.lineno = lineno
|
|
||||||
self.line_content = line_content
|
|
||||||
self.code = code
|
|
||||||
self.message = message
|
|
||||||
self.lintoutput = lintoutput
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def from_line(cls, line):
|
|
||||||
m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
|
|
||||||
matched = m.groups()
|
|
||||||
filename, lineno, code, message = (matched[0], int(matched[1]),
|
|
||||||
matched[2], matched[-1])
|
|
||||||
if cls._cached_filename != filename:
|
|
||||||
with open(filename) as f:
|
|
||||||
cls._cached_content = list(f.readlines())
|
|
||||||
cls._cached_filename = filename
|
|
||||||
line_content = cls._cached_content[lineno - 1].rstrip()
|
|
||||||
return cls(filename, lineno, line_content, code, message,
|
|
||||||
line.rstrip())
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def from_msg_to_dict(cls, msg):
|
|
||||||
"""From the output of pylint msg, to a dict, where each key
|
|
||||||
is a unique error identifier, value is a list of LintOutput
|
|
||||||
"""
|
|
||||||
result = {}
|
|
||||||
for line in msg.splitlines():
|
|
||||||
obj = cls.from_line(line)
|
|
||||||
if obj.is_ignored():
|
|
||||||
continue
|
|
||||||
key = obj.key()
|
|
||||||
if key not in result:
|
|
||||||
result[key] = []
|
|
||||||
result[key].append(obj)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def is_ignored(self):
|
|
||||||
if self.code in ignore_codes:
|
|
||||||
return True
|
|
||||||
if any(self.filename.startswith(name) for name in ignore_modules):
|
|
||||||
return True
|
|
||||||
if any(msg in self.message for msg in ignore_messages):
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def key(self):
|
|
||||||
if self.code in ["E1101", "E1103"]:
|
|
||||||
# These two types of errors are like Foo class has no member bar.
|
|
||||||
# We discard the source code so that the error will be ignored
|
|
||||||
# next time another Foo.bar is encountered.
|
|
||||||
return self.message, ""
|
|
||||||
return self.message, self.line_content.strip()
|
|
||||||
|
|
||||||
def json(self):
|
|
||||||
return json.dumps(self.__dict__)
|
|
||||||
|
|
||||||
def review_str(self):
|
|
||||||
return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
|
|
||||||
"%(code)s: %(message)s" % self.__dict__)
|
|
||||||
|
|
||||||
|
|
||||||
class ErrorKeys(object):
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def print_json(cls, errors, output=sys.stdout):
|
|
||||||
print >>output, "# automatically generated by tools/lintstack.py"
|
|
||||||
for i in sorted(errors.keys()):
|
|
||||||
print >>output, json.dumps(i)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def from_file(cls, filename):
|
|
||||||
keys = set()
|
|
||||||
for line in open(filename):
|
|
||||||
if line and line[0] != "#":
|
|
||||||
d = json.loads(line)
|
|
||||||
keys.add(tuple(d))
|
|
||||||
return keys
|
|
||||||
|
|
||||||
|
|
||||||
def run_pylint():
|
|
||||||
buff = StringIO.StringIO()
|
|
||||||
reporter = text.ParseableTextReporter(output=buff)
|
|
||||||
args = ["--include-ids=y", "-E", "ec2api"]
|
|
||||||
lint.Run(args, reporter=reporter, exit=False)
|
|
||||||
val = buff.getvalue()
|
|
||||||
buff.close()
|
|
||||||
return val
|
|
||||||
|
|
||||||
|
|
||||||
def generate_error_keys(msg=None):
|
|
||||||
print "Generating", KNOWN_PYLINT_EXCEPTIONS_FILE
|
|
||||||
if msg is None:
|
|
||||||
msg = run_pylint()
|
|
||||||
errors = LintOutput.from_msg_to_dict(msg)
|
|
||||||
with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f:
|
|
||||||
ErrorKeys.print_json(errors, output=f)
|
|
||||||
|
|
||||||
|
|
||||||
def validate(newmsg=None):
|
|
||||||
print "Loading", KNOWN_PYLINT_EXCEPTIONS_FILE
|
|
||||||
known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
|
|
||||||
if newmsg is None:
|
|
||||||
print "Running pylint. Be patient..."
|
|
||||||
newmsg = run_pylint()
|
|
||||||
errors = LintOutput.from_msg_to_dict(newmsg)
|
|
||||||
|
|
||||||
print "Unique errors reported by pylint: was %d, now %d." \
|
|
||||||
% (len(known), len(errors))
|
|
||||||
passed = True
|
|
||||||
for err_key, err_list in errors.items():
|
|
||||||
for err in err_list:
|
|
||||||
if err_key not in known:
|
|
||||||
print err.lintoutput
|
|
||||||
print
|
|
||||||
passed = False
|
|
||||||
if passed:
|
|
||||||
print "Congrats! pylint check passed."
|
|
||||||
redundant = known - set(errors.keys())
|
|
||||||
if redundant:
|
|
||||||
print "Extra credit: some known pylint exceptions disappeared."
|
|
||||||
for i in sorted(redundant):
|
|
||||||
print json.dumps(i)
|
|
||||||
print "Consider regenerating the exception file if you will."
|
|
||||||
else:
|
|
||||||
print ("Please fix the errors above. If you believe they are false"
|
|
||||||
" positives, run 'tools/lintstack.py generate' to overwrite.")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
def usage():
|
|
||||||
print """Usage: tools/lintstack.py [generate|validate]
|
|
||||||
To generate pylint_exceptions file: tools/lintstack.py generate
|
|
||||||
To validate the current commit: tools/lintstack.py
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
option = "validate"
|
|
||||||
if len(sys.argv) > 1:
|
|
||||||
option = sys.argv[1]
|
|
||||||
if option == "generate":
|
|
||||||
generate_error_keys()
|
|
||||||
elif option == "validate":
|
|
||||||
validate()
|
|
||||||
else:
|
|
||||||
usage()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
|
@ -1,59 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# Copyright (c) 2012-2013, AT&T Labs, Yun Mao <yunmao@gmail.com>
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
# Use lintstack.py to compare pylint errors.
|
|
||||||
# We run pylint twice, once on HEAD, once on the code before the latest
|
|
||||||
# commit for review.
|
|
||||||
set -e
|
|
||||||
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
|
|
||||||
# Get the current branch name.
|
|
||||||
GITHEAD=`git rev-parse --abbrev-ref HEAD`
|
|
||||||
if [[ "$GITHEAD" == "HEAD" ]]; then
|
|
||||||
# In detached head mode, get revision number instead
|
|
||||||
GITHEAD=`git rev-parse HEAD`
|
|
||||||
echo "Currently we are at commit $GITHEAD"
|
|
||||||
else
|
|
||||||
echo "Currently we are at branch $GITHEAD"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py
|
|
||||||
|
|
||||||
if git rev-parse HEAD^2 2>/dev/null; then
|
|
||||||
# The HEAD is a Merge commit. Here, the patch to review is
|
|
||||||
# HEAD^2, the master branch is at HEAD^1, and the patch was
|
|
||||||
# written based on HEAD^2~1.
|
|
||||||
PREV_COMMIT=`git rev-parse HEAD^2~1`
|
|
||||||
git checkout HEAD~1
|
|
||||||
# The git merge is necessary for reviews with a series of patches.
|
|
||||||
# If not, this is a no-op so won't hurt either.
|
|
||||||
git merge $PREV_COMMIT
|
|
||||||
else
|
|
||||||
# The HEAD is not a merge commit. This won't happen on gerrit.
|
|
||||||
# Most likely you are running against your own patch locally.
|
|
||||||
# We assume the patch to examine is HEAD, and we compare it against
|
|
||||||
# HEAD~1
|
|
||||||
git checkout HEAD~1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# First generate tools/pylint_exceptions from HEAD~1
|
|
||||||
$TOOLS_DIR/lintstack.head.py generate
|
|
||||||
# Then use that as a reference to compare against HEAD
|
|
||||||
git checkout $GITHEAD
|
|
||||||
$TOOLS_DIR/lintstack.head.py
|
|
||||||
echo "Check passed. FYI: the pylint exceptions are:"
|
|
||||||
cat $TOOLS_DIR/pylint_exceptions
|
|
||||||
|
|
|
@ -40,7 +40,7 @@ def main(argv):
|
||||||
os.path.join(root, 'tools', 'test-requires'),
|
os.path.join(root, 'tools', 'test-requires'),
|
||||||
])
|
])
|
||||||
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
|
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
|
||||||
project = 'ec2-api'
|
project = 'ec2api'
|
||||||
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
|
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
|
||||||
py_version, project)
|
py_version, project)
|
||||||
#NOTE(dprince): For Tox we only run post_process (which patches files, etc)
|
#NOTE(dprince): For Tox we only run post_process (which patches files, etc)
|
||||||
|
|
21
tox.ini
21
tox.ini
|
@ -4,9 +4,9 @@ envlist = py26,py27,py33,py34,pep8
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
|
|
||||||
[testenv]
|
[testenv]
|
||||||
sitepackages = True
|
sitepackages = False
|
||||||
usedevelop = True
|
usedevelop = True
|
||||||
install_command = pip install -U {opts} {packages}
|
install_command = pip install -U --force-reinstall {opts} {packages}
|
||||||
setenv = VIRTUAL_ENV={envdir}
|
setenv = VIRTUAL_ENV={envdir}
|
||||||
LANG=en_US.UTF-8
|
LANG=en_US.UTF-8
|
||||||
LANGUAGE=en_US:en
|
LANGUAGE=en_US:en
|
||||||
|
@ -18,7 +18,7 @@ commands =
|
||||||
python setup.py testr --slowest --testr-args='{posargs}'
|
python setup.py testr --slowest --testr-args='{posargs}'
|
||||||
|
|
||||||
[tox:jenkins]
|
[tox:jenkins]
|
||||||
sitepackages = True
|
sitepackages = False
|
||||||
downloadcache = ~/cache/pip
|
downloadcache = ~/cache/pip
|
||||||
|
|
||||||
[testenv:pep8]
|
[testenv:pep8]
|
||||||
|
@ -26,18 +26,14 @@ sitepackages = False
|
||||||
commands =
|
commands =
|
||||||
flake8 {posargs}
|
flake8 {posargs}
|
||||||
|
|
||||||
[testenv:pylint]
|
|
||||||
setenv = VIRTUAL_ENV={envdir}
|
|
||||||
commands = bash tools/lintstack.sh
|
|
||||||
|
|
||||||
[testenv:cover]
|
[testenv:cover]
|
||||||
# Also do not run test_coverage_ext tests while gathering coverage as those
|
# Also do not run test_coverage_ext tests while gathering coverage as those
|
||||||
# tests conflict with coverage.
|
# tests conflict with coverage.
|
||||||
setenv = VIRTUAL_ENV={envdir}
|
|
||||||
commands =
|
commands =
|
||||||
python tools/patch_tox_venv.py
|
coverage erase
|
||||||
python setup.py testr --coverage \
|
python setup.py test --coverage --testr-args='{posargs}'
|
||||||
--testr-args='^(?!.*test.*coverage).*$'
|
coverage combine
|
||||||
|
coverage html --include='ec2api/*' --omit='ec2api/openstack/common/*' --omit='ec2api/tests/*' -d covhtml -i
|
||||||
|
|
||||||
[testenv:venv]
|
[testenv:venv]
|
||||||
commands = {posargs}
|
commands = {posargs}
|
||||||
|
@ -51,6 +47,7 @@ commands = {posargs}
|
||||||
|
|
||||||
ignore = E121,E122,E123,E124,E126,E127,E128,E711,E712,H102,H303,H404,F403,F811,F841,H803
|
ignore = E121,E122,E123,E124,E126,E127,E128,E711,E712,H102,H303,H404,F403,F811,F841,H803
|
||||||
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools
|
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools
|
||||||
|
max-complexity=25
|
||||||
|
|
||||||
[hacking]
|
[hacking]
|
||||||
import_exceptions = ec2api.openstack.common.gettextutils._
|
import_exceptions = ec2api.i18n
|
||||||
|
|
Loading…
Reference in New Issue