Allow using different database backends
Abstract the storage configuration section to load different database backend drivers, so we can keep supporting Elasticsearch for a while after moving to oslo.db. This will also help in adding API v2, as the Elasticsearch driver will be changed. Each db driver will register its configuration at runtime. Change-Id: Ie861e569f4add783c6f580cd0c08ed0fb3440151
This commit is contained in:
parent
7579e59f20
commit
528f3234e9
|
@ -115,10 +115,10 @@ function configure_freezer_api {
|
|||
sudo chown -R $USER $FREEZER_API_CONF_DIR
|
||||
|
||||
#set elasticsearch configuration
|
||||
iniset $FREEZER_API_CONF 'storage' db elasticsearch
|
||||
iniset $FREEZER_API_CONF 'storage' index freezer
|
||||
iniset $FREEZER_API_CONF 'storage' number_of_replicas 0
|
||||
iniset $FREEZER_API_CONF 'storage' hosts http://$SERVICE_HOST:9200
|
||||
iniset $FREEZER_API_CONF 'storage' backend elasticsearch
|
||||
iniset $FREEZER_API_CONF 'elasticsearch' index freezer
|
||||
iniset $FREEZER_API_CONF 'elasticsearch' number_of_replicas 0
|
||||
iniset $FREEZER_API_CONF 'elasticsearch' hosts http://$SERVICE_HOST:9200
|
||||
|
||||
# set keystone configuration
|
||||
iniset $FREEZER_API_CONF 'keystone_authtoken' auth_protocol $KEYSTONE_AUTH_PROTOCOL
|
||||
|
|
|
@ -71,7 +71,7 @@
|
|||
|
||||
# Log output to standard error. This option is ignored if log_config_append is
|
||||
# set. (boolean value)
|
||||
#use_stderr = true
|
||||
#use_stderr = false
|
||||
|
||||
# Format string to use for log messages with context. (string value)
|
||||
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
|
||||
|
@ -106,6 +106,18 @@
|
|||
# value)
|
||||
#instance_uuid_format = "[instance: %(uuid)s] "
|
||||
|
||||
# Interval, number of seconds, of log rate limiting. (integer value)
|
||||
#rate_limit_interval = 0
|
||||
|
||||
# Maximum number of logged messages per rate_limit_interval. (integer value)
|
||||
#rate_limit_burst = 0
|
||||
|
||||
# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
|
||||
# or empty string. Logs with level greater or equal to rate_limit_except_level
|
||||
# are not filtered. An empty string means that all levels are filtered. (string
|
||||
# value)
|
||||
#rate_limit_except_level = CRITICAL
|
||||
|
||||
# Enables or disables fatal status of deprecations. (boolean value)
|
||||
#fatal_deprecations = false
|
||||
|
||||
|
@ -168,13 +180,81 @@
|
|||
#allow_headers =
|
||||
|
||||
|
||||
[elasticsearch]
|
||||
|
||||
#
|
||||
# From freezer-api
|
||||
#
|
||||
|
||||
# specify the storage hosts (string value)
|
||||
# Deprecated group/name - [elasticsearch]/endpoint
|
||||
#hosts = http://localhost:9200
|
||||
|
||||
# specify the name of the elasticsearch index (string value)
|
||||
#index = freezer
|
||||
|
||||
# specify the connection timeout (integer value)
|
||||
#timeout = 60
|
||||
|
||||
# number of retries to allow before raising an error (integer value)
|
||||
#retries = 20
|
||||
|
||||
# explicitly turn on SSL (boolean value)
|
||||
#use_ssl = false
|
||||
|
||||
# turn on SSL certs verification (boolean value)
|
||||
#verify_certs = false
|
||||
|
||||
# path to CA certs on disk (string value)
|
||||
#ca_certs = <None>
|
||||
|
||||
# Number of replicas for elk cluster. Default is 0. Use 0 for no replicas. This
|
||||
# should be set to (number of nodes in the ES cluster - 1). (integer value)
|
||||
#number_of_replicas = 0
|
||||
|
||||
|
||||
[healthcheck]
|
||||
|
||||
#
|
||||
# From oslo.middleware
|
||||
#
|
||||
|
||||
# DEPRECATED: The path to respond to healthcheck requests on. (string value)
|
||||
# This option is deprecated for removal.
|
||||
# Its value may be silently ignored in the future.
|
||||
#path = /healthcheck
|
||||
|
||||
# Show more detailed information as part of the response (boolean value)
|
||||
#detailed = false
|
||||
|
||||
# Additional backends that can perform health checks and report that
|
||||
# information back as part of a request. (list value)
|
||||
#backends =
|
||||
|
||||
# Check the presence of a file to determine if an application is running on a
|
||||
# port. Used by DisableByFileHealthcheck plugin. (string value)
|
||||
#disable_by_file_path = <None>
|
||||
|
||||
# Check the presence of a file based on a port to determine if an application
|
||||
# is running on a port. Expects a "port:path" list of strings. Used by
|
||||
# DisableByFilesPortsHealthcheck plugin. (list value)
|
||||
#disable_by_file_paths =
|
||||
|
||||
|
||||
[keystone_authtoken]
|
||||
|
||||
#
|
||||
# From freezer-api
|
||||
#
|
||||
|
||||
# Complete public Identity API endpoint. (string value)
|
||||
# Complete "public" Identity API endpoint. This endpoint should not be an
|
||||
# "admin" endpoint, as it should be accessible by all end users.
|
||||
# Unauthenticated clients are redirected to this endpoint to authenticate.
|
||||
# Although this endpoint should ideally be unversioned, client support in the
|
||||
# wild varies. If you're using a versioned v2 endpoint here, then this should
|
||||
# *not* be the same endpoint the service user utilizes for validating tokens,
|
||||
# because normal end users may not be able to reach that endpoint. (string
|
||||
# value)
|
||||
#auth_uri = <None>
|
||||
|
||||
# API version of the admin Identity API endpoint. (string value)
|
||||
|
@ -192,7 +272,10 @@
|
|||
# API Server. (integer value)
|
||||
#http_request_max_retries = 3
|
||||
|
||||
# Env key for the swift cache. (string value)
|
||||
# Request environment key where the Swift cache object is stored. When
|
||||
# auth_token middleware is deployed with a Swift cache, use this option to have
|
||||
# the middleware share a caching backend with swift. Otherwise, use the
|
||||
# ``memcached_servers`` option instead. (string value)
|
||||
#cache = <None>
|
||||
|
||||
# Required if identity server requires client certificate (string value)
|
||||
|
@ -211,7 +294,12 @@
|
|||
# The region in which the identity server can be found. (string value)
|
||||
#region_name = <None>
|
||||
|
||||
# Directory used to cache files related to PKI tokens. (string value)
|
||||
# DEPRECATED: Directory used to cache files related to PKI tokens. This option
|
||||
# has been deprecated in the Ocata release and will be removed in the P
|
||||
# release. (string value)
|
||||
# This option is deprecated for removal since Ocata.
|
||||
# Its value may be silently ignored in the future.
|
||||
# Reason: PKI token format is no longer supported.
|
||||
#signing_dir = <None>
|
||||
|
||||
# Optionally specify a list of memcached server(s) to use for caching. If left
|
||||
|
@ -224,10 +312,14 @@
|
|||
# to -1 to disable caching completely. (integer value)
|
||||
#token_cache_time = 300
|
||||
|
||||
# Determines the frequency at which the list of revoked tokens is retrieved
|
||||
# from the Identity service (in seconds). A high number of revocation events
|
||||
# combined with a low cache duration may significantly reduce performance.
|
||||
# (integer value)
|
||||
# DEPRECATED: Determines the frequency at which the list of revoked tokens is
|
||||
# retrieved from the Identity service (in seconds). A high number of revocation
|
||||
# events combined with a low cache duration may significantly reduce
|
||||
# performance. Only valid for PKI tokens. This option has been deprecated in
|
||||
# the Ocata release and will be removed in the P release. (integer value)
|
||||
# This option is deprecated for removal since Ocata.
|
||||
# Its value may be silently ignored in the future.
|
||||
# Reason: PKI token format is no longer supported.
|
||||
#revocation_cache_time = 10
|
||||
|
||||
# (Optional) If defined, indicate whether token data should be authenticated or
|
||||
|
@ -280,20 +372,78 @@
|
|||
# (string value)
|
||||
#enforce_token_bind = permissive
|
||||
|
||||
# If true, the revocation list will be checked for cached tokens. This requires
|
||||
# that PKI tokens are configured on the identity server. (boolean value)
|
||||
# DEPRECATED: If true, the revocation list will be checked for cached tokens.
|
||||
# This requires that PKI tokens are configured on the identity server. (boolean
|
||||
# value)
|
||||
# This option is deprecated for removal since Ocata.
|
||||
# Its value may be silently ignored in the future.
|
||||
# Reason: PKI token format is no longer supported.
|
||||
#check_revocations_for_cached = false
|
||||
|
||||
# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
|
||||
# or multiple. The algorithms are those supported by Python standard
|
||||
# hashlib.new(). The hashes will be tried in the order given, so put the
|
||||
# preferred one first for performance. The result of the first hash will be
|
||||
# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may be a
|
||||
# single algorithm or multiple. The algorithms are those supported by Python
|
||||
# standard hashlib.new(). The hashes will be tried in the order given, so put
|
||||
# the preferred one first for performance. The result of the first hash will be
|
||||
# stored in the cache. This will typically be set to multiple values only while
|
||||
# migrating from a less secure algorithm to a more secure one. Once all the old
|
||||
# tokens are expired this option should be set to a single value for better
|
||||
# performance. (list value)
|
||||
# This option is deprecated for removal since Ocata.
|
||||
# Its value may be silently ignored in the future.
|
||||
# Reason: PKI token format is no longer supported.
|
||||
#hash_algorithms = md5
|
||||
|
||||
# A choice of roles that must be present in a service token. Service tokens are
|
||||
# allowed to request that an expired token can be used and so this check should
|
||||
# tightly control that only actual services should be sending this token. Roles
|
||||
# here are applied as an ANY check so any role in this list must be present.
|
||||
# For backwards compatibility reasons this currently only affects the
|
||||
# allow_expired check. (list value)
|
||||
#service_token_roles = service
|
||||
|
||||
# For backwards compatibility reasons we must let valid service tokens pass
|
||||
# that don't pass the service_token_roles check as valid. Setting this true
|
||||
# will become the default in a future release and should be enabled if
|
||||
# possible. (boolean value)
|
||||
#service_token_roles_required = false
|
||||
|
||||
# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
|
||||
# (string value)
|
||||
#auth_admin_prefix =
|
||||
|
||||
# Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
|
||||
# (string value)
|
||||
#auth_host = 127.0.0.1
|
||||
|
||||
# Port of the admin Identity API endpoint. Deprecated, use identity_uri.
|
||||
# (integer value)
|
||||
#auth_port = 35357
|
||||
|
||||
# Protocol of the admin Identity API endpoint. Deprecated, use identity_uri.
|
||||
# (string value)
|
||||
# Allowed values: http, https
|
||||
#auth_protocol = https
|
||||
|
||||
# Complete admin Identity API endpoint. This should specify the unversioned
|
||||
# root endpoint e.g. https://localhost:35357/ (string value)
|
||||
#identity_uri = <None>
|
||||
|
||||
# This option is deprecated and may be removed in a future release. Single
|
||||
# shared secret with the Keystone configuration used for bootstrapping a
|
||||
# Keystone installation, or otherwise bypassing the normal authentication
|
||||
# process. This option should not be used, use `admin_user` and
|
||||
# `admin_password` instead. (string value)
|
||||
#admin_token = <None>
|
||||
|
||||
# Service username. (string value)
|
||||
#admin_user = <None>
|
||||
|
||||
# Service user password. (string value)
|
||||
#admin_password = <None>
|
||||
|
||||
# Service tenant name. (string value)
|
||||
#admin_tenant_name = admin
|
||||
|
||||
# Authentication type to load (string value)
|
||||
# Deprecated group/name - [keystone_authtoken]/auth_plugin
|
||||
#auth_type = <None>
|
||||
|
@ -365,33 +515,9 @@
|
|||
# From freezer-api
|
||||
#
|
||||
|
||||
# specify the storage db to use (default: elasticsearch) (string value)
|
||||
#db = elasticsearch
|
||||
# Database backend section name. This section will be loaded by the proper
|
||||
# driver to connect to the database. (string value)
|
||||
#backend = <None>
|
||||
|
||||
# specify the storage hosts (deprecated, use "hosts") (string value)
|
||||
#endpoint =
|
||||
|
||||
# specify the storage hosts (string value)
|
||||
#hosts = http://localhost:9200
|
||||
|
||||
# specify the name of the elasticsearch index (string value)
|
||||
#index = freezer
|
||||
|
||||
# specify the connection timeout (integer value)
|
||||
#timeout = 60
|
||||
|
||||
# number of retries to allow before raising an error (integer value)
|
||||
#retries = 20
|
||||
|
||||
# explicitly turn on SSL (boolean value)
|
||||
#use_ssl = false
|
||||
|
||||
# turn on SSL certs verification (boolean value)
|
||||
#verify_certs = false
|
||||
|
||||
# path to CA certs on disk (string value)
|
||||
#ca_certs = <None>
|
||||
|
||||
# Number of replicas for elk cluster. Default is 0. Use 0 for no replicas. This
|
||||
# should be set to (number of nodes in the ES cluster - 1). (integer value)
|
||||
#number_of_replicas = 0
|
||||
# Database driver to be used. (string value)
|
||||
#driver = freezer_api.storage.elastic.ElasticSearchEngine
|
||||
|
|
|
@ -35,5 +35,5 @@
|
|||
"clients:get_all": "",
|
||||
"clients:create": "",
|
||||
"clients:get": "",
|
||||
"clients:delete": "",
|
||||
"clients:delete": ""
|
||||
}
|
||||
|
|
|
@ -82,7 +82,6 @@ def build_app_v0():
|
|||
before_hooks = utils.before_hooks() + [
|
||||
middleware.RequireJSON().as_before_hook()]
|
||||
after_hooks = [middleware.JSONTranslator().as_after_hook()]
|
||||
|
||||
# The signature of falcon.API() differs between versions, suppress pylint:
|
||||
# pylint: disable=unexpected-keyword-arg
|
||||
app = falcon.API(before=before_hooks, after=after_hooks)
|
||||
|
|
|
@ -94,9 +94,7 @@ def parse_config(mapping_choices):
|
|||
)
|
||||
|
||||
]
|
||||
opt_group = cfg.OptGroup(name='storage', title='Freezer Storage Engine')
|
||||
CONF.register_group(opt_group)
|
||||
CONF.register_opts(driver.get_elk_opts(), group=opt_group)
|
||||
driver.register_storage_opts()
|
||||
CONF.register_cli_opts(DB_INIT)
|
||||
log.register_options(CONF)
|
||||
default_config_files = cfg.find_config_files('freezer', 'freezer-api')
|
||||
|
@ -118,9 +116,18 @@ class ElasticSearchManager(object):
|
|||
|
||||
def __init__(self, mappings):
|
||||
self.mappings = mappings.copy()
|
||||
self.index = CONF.storage.index or DEFAULT_INDEX
|
||||
|
||||
grp = cfg.OptGroup(CONF.storage.backend)
|
||||
CONF.register_group(grp)
|
||||
backend_opts = driver._get_elastic_opts(backend=CONF.storage.backend)
|
||||
|
||||
CONF.register_opts(backend_opts[CONF.storage.backend],
|
||||
group=CONF.storage.backend)
|
||||
|
||||
self.conf = CONF.get(CONF.storage.backend)
|
||||
self.index = self.conf.index or DEFAULT_INDEX
|
||||
# initialize elk
|
||||
opts = dict(CONF.storage.items())
|
||||
opts = dict(self.conf.items())
|
||||
self.elk = elasticsearch.Elasticsearch(**opts)
|
||||
# check if the cluster is up or not !
|
||||
if not self.elk.ping():
|
||||
|
@ -206,7 +213,7 @@ class ElasticSearchManager(object):
|
|||
if not self._check_index_exists(index=self.index):
|
||||
body = {
|
||||
'number_of_replicas':
|
||||
CONF.storage.number_of_replicas or DEFAULT_REPLICAS
|
||||
self.conf.number_of_replicas or DEFAULT_REPLICAS
|
||||
}
|
||||
return self.elk.indices.create(index=self.index, body=body)
|
||||
|
||||
|
@ -303,7 +310,7 @@ class ElasticSearchManager(object):
|
|||
"""
|
||||
body = {
|
||||
'number_of_replicas':
|
||||
CONF.storage.number_of_replicas or DEFAULT_REPLICAS
|
||||
self.conf.number_of_replicas or DEFAULT_REPLICAS
|
||||
}
|
||||
return self.elk.indices.put_settings(body=body, index=self.index)
|
||||
|
||||
|
@ -327,6 +334,7 @@ def main():
|
|||
mappings = db_mappings.get_mappings()
|
||||
parse_config(mapping_choices=mappings.keys())
|
||||
config.setup_logging()
|
||||
|
||||
if not CONF.db:
|
||||
CONF.print_help()
|
||||
sys.exit(0)
|
||||
|
|
|
@ -54,7 +54,7 @@ def api_common_opts():
|
|||
|
||||
def parse_args(args=[]):
|
||||
CONF.register_cli_opts(api_common_opts())
|
||||
driver.register_elk_opts()
|
||||
driver.register_storage_opts()
|
||||
# register paste configuration
|
||||
paste_grp = cfg.OptGroup('paste_deploy',
|
||||
'Paste Configuration')
|
||||
|
@ -116,8 +116,9 @@ def find_paste_config():
|
|||
def list_opts():
|
||||
_OPTS = {
|
||||
None: api_common_opts(),
|
||||
'storage': driver.get_elk_opts(),
|
||||
'paste_deploy': paste_deploy,
|
||||
AUTH_GROUP: AUTH_OPTS
|
||||
}
|
||||
# update the current list of opts with db backend drivers opts
|
||||
_OPTS.update(driver.get_storage_opts())
|
||||
return _OPTS.items()
|
||||
|
|
|
@ -15,88 +15,59 @@ limitations under the License.
|
|||
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from freezer_api.common import _i18n
|
||||
from freezer_api.storage import elastic
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_utils import importutils
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def get_elk_opts():
|
||||
storage_opts = [
|
||||
cfg.StrOpt('db',
|
||||
default='elasticsearch',
|
||||
help='specify the storage db to use '
|
||||
'(default: elasticsearch)'),
|
||||
# use of 'endpoint' parameter name is deprecated, please use 'hosts'
|
||||
cfg.StrOpt('endpoint',
|
||||
default='',
|
||||
help='specify the storage hosts (deprecated, use "hosts"'),
|
||||
cfg.StrOpt('hosts',
|
||||
default='http://localhost:9200',
|
||||
help='specify the storage hosts'),
|
||||
cfg.StrOpt('index',
|
||||
default='freezer',
|
||||
help='specify the name of the elasticsearch index'),
|
||||
cfg.IntOpt('timeout',
|
||||
default=60,
|
||||
help='specify the connection timeout'),
|
||||
cfg.IntOpt('retries',
|
||||
default=20,
|
||||
help='number of retries to allow before raising and error'),
|
||||
cfg.BoolOpt('use_ssl',
|
||||
default=False,
|
||||
help='explicitly turn on SSL'),
|
||||
cfg.BoolOpt('verify_certs',
|
||||
default=False,
|
||||
help='turn on SSL certs verification'),
|
||||
cfg.StrOpt('ca_certs',
|
||||
help='path to CA certs on disk'),
|
||||
cfg.IntOpt('number_of_replicas',
|
||||
default=0,
|
||||
help='Number of replicas for elk cluster. Default is 0. '
|
||||
'Use 0 for no replicas. This should be set to (number '
|
||||
'of node in the ES cluter -1).')
|
||||
]
|
||||
return storage_opts
|
||||
# storage backend options to be registered
|
||||
_OPTS = [
|
||||
cfg.StrOpt("backend",
|
||||
help="Databse backend section name. This section "
|
||||
"will be loaded by the proper driver to connect to "
|
||||
"the database."
|
||||
),
|
||||
cfg.StrOpt('driver',
|
||||
default='freezer_api.storage.elastic.ElasticSearchEngine',
|
||||
help="Database driver to be used."
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
def register_elk_opts():
|
||||
def register_storage_opts():
|
||||
"""Register storage configuration options"""
|
||||
opt_group = cfg.OptGroup(name='storage',
|
||||
title='Freezer Storage Engine')
|
||||
CONF.register_group(opt_group)
|
||||
CONF.register_opts(get_elk_opts(), opt_group)
|
||||
|
||||
|
||||
def get_options():
|
||||
if CONF.storage.ca_certs:
|
||||
if not os.path.isfile(CONF.storage.ca_certs):
|
||||
raise Exception('ElasticSearch configuration error: '
|
||||
'CA_certs file not found ({0})'
|
||||
.format(CONF.storage.ca_certs))
|
||||
|
||||
hosts = CONF.storage.endpoint or CONF.storage.hosts
|
||||
if not hosts:
|
||||
raise Exception('Elasticsearch configuration error: no host specified')
|
||||
|
||||
opts = dict(CONF.storage)
|
||||
opts.pop('endpoint')
|
||||
opts['hosts'] = hosts.split(',')
|
||||
return opts
|
||||
CONF.register_opts(_OPTS, group=opt_group)
|
||||
|
||||
|
||||
def get_db():
|
||||
opts = get_options()
|
||||
db_engine = opts.pop('db')
|
||||
if db_engine == 'elasticsearch':
|
||||
logging.debug(
|
||||
_i18n._LI('ElasticSearch config options: %s') % str(opts))
|
||||
db = elastic.ElasticSearchEngine(**opts)
|
||||
else:
|
||||
raise Exception(
|
||||
_i18n._('Database Engine %s not supported') % db_engine)
|
||||
return db
|
||||
"""Automatically loads the database driver to be used."""
|
||||
storage = CONF.get('storage')
|
||||
driver_instance = importutils.import_object(
|
||||
storage['driver'],
|
||||
backend=storage['backend']
|
||||
)
|
||||
return driver_instance
|
||||
|
||||
|
||||
def get_storage_opts():
|
||||
"""Returns a dict that contains list of options for db backend"""
|
||||
opts = {"storage": _OPTS}
|
||||
opts.update(_get_elastic_opts())
|
||||
return opts
|
||||
|
||||
|
||||
def _get_elastic_opts(backend=None):
|
||||
"""Return Opts for elasticsearch driver"""
|
||||
if not backend:
|
||||
backend = "elasticsearch"
|
||||
es = elastic.ElasticSearchEngine(backend=backend)
|
||||
return {backend: es.get_opts()}
|
||||
|
|
|
@ -17,11 +17,18 @@ limitations under the License.
|
|||
|
||||
import elasticsearch
|
||||
import logging
|
||||
import os
|
||||
|
||||
from freezer_api.common import _i18n
|
||||
from freezer_api.common import exceptions as freezer_api_exc
|
||||
from freezer_api.common import utils
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class TypeManager(object):
|
||||
def __init__(self, es, doc_type, index):
|
||||
|
@ -272,7 +279,69 @@ class SessionTypeManager(TypeManager):
|
|||
|
||||
|
||||
class ElasticSearchEngine(object):
|
||||
def __init__(self, index='freezer', **kwargs):
|
||||
|
||||
_OPTS = [
|
||||
cfg.StrOpt('hosts',
|
||||
default='http://localhost:9200',
|
||||
deprecated_name='endpoint',
|
||||
help='specify the storage hosts'),
|
||||
cfg.StrOpt('index',
|
||||
default='freezer',
|
||||
help='specify the name of the elasticsearch index'),
|
||||
cfg.IntOpt('timeout',
|
||||
default=60,
|
||||
help='specify the connection timeout'),
|
||||
cfg.IntOpt('retries',
|
||||
default=20,
|
||||
help='number of retries to allow before raising and error'),
|
||||
cfg.BoolOpt('use_ssl',
|
||||
default=False,
|
||||
help='explicitly turn on SSL'),
|
||||
cfg.BoolOpt('verify_certs',
|
||||
default=False,
|
||||
help='turn on SSL certs verification'),
|
||||
cfg.StrOpt('ca_certs',
|
||||
help='path to CA certs on disk'),
|
||||
cfg.IntOpt('number_of_replicas',
|
||||
default=0,
|
||||
help='Number of replicas for elk cluster. Default is 0. '
|
||||
'Use 0 for no replicas. This should be set to (number '
|
||||
'of node in the ES cluter -1).')
|
||||
]
|
||||
|
||||
def __init__(self, backend):
|
||||
"""backend: name of the section in the config file to load
|
||||
elasticsearch opts
|
||||
"""
|
||||
self.index = None
|
||||
self.es = None
|
||||
self.backup_manager = None
|
||||
self.client_manager = None
|
||||
self.job_manager = None
|
||||
self.action_manager = None
|
||||
self.session_manager = None
|
||||
# register elasticsearch opts
|
||||
CONF.register_opts(self._OPTS, group=backend)
|
||||
self.conf = dict(CONF.get(backend))
|
||||
self.conf['hosts'] = self.conf['hosts'].split(',')
|
||||
self.backend = backend
|
||||
self._validate_opts()
|
||||
self.init(**self.conf)
|
||||
|
||||
def _validate_opts(self):
|
||||
if not 'hosts' or 'endpoint' in self.conf.keys():
|
||||
raise ValueError("Couldn't find hosts in {0} section".format(
|
||||
self.backend)
|
||||
)
|
||||
if self.conf.get('ca_certs'):
|
||||
if not os.path.isfile(self.conf.get('ca_certs')):
|
||||
raise Exception("File not found: ca_certs file ({0}) not "
|
||||
"found".format(self.conf.get('ca_certs')))
|
||||
|
||||
def get_opts(self):
|
||||
return self._OPTS
|
||||
|
||||
def init(self, index='freezer', **kwargs):
|
||||
self.index = index
|
||||
self.es = elasticsearch.Elasticsearch(**kwargs)
|
||||
logging.info(_i18n._LI('Storage backend: Elasticsearch '
|
||||
|
|
|
@ -16,56 +16,59 @@ limitations under the License.
|
|||
|
||||
"""
|
||||
|
||||
import unittest
|
||||
|
||||
import mock
|
||||
from mock import patch
|
||||
|
||||
from freezer_api.storage import driver
|
||||
from freezer_api.tests.unit import common
|
||||
|
||||
|
||||
class TestStorageDriver(unittest.TestCase):
|
||||
@patch('freezer_api.storage.driver.logging')
|
||||
def test_get_db_raises_when_db_not_supported(self, mock_logging):
|
||||
class TestStorageDriver(common.FreezerBaseTestCase):
|
||||
@patch('freezer_api.storage.driver.LOG')
|
||||
def test_get_db_raises_when_db_not_supported(self, mock_LOG):
|
||||
mock_CONF = mock.Mock()
|
||||
mock_CONF.storage.db = 'nodb'
|
||||
driver.CONF = mock_CONF
|
||||
self.assertRaises(Exception, driver.get_db)
|
||||
|
||||
@patch('freezer_api.storage.driver.elastic')
|
||||
@patch('freezer_api.storage.driver.logging')
|
||||
def test_get_db_elastic(self, mock_logging, mock_elastic):
|
||||
driver.register_elk_opts()
|
||||
@patch('freezer_api.storage.driver.LOG')
|
||||
@patch('freezer_api.storage.driver.get_db')
|
||||
def test_get_db_elastic(self, mock_LOG, mock_elastic, mock_get_db):
|
||||
mock_get_db.return_value = object()
|
||||
driver.register_storage_opts()
|
||||
driver.get_db()
|
||||
self.assertTrue(mock_elastic.ElasticSearchEngine)
|
||||
|
||||
@patch('freezer_api.storage.driver.elastic')
|
||||
@patch('freezer_api.storage.driver.logging')
|
||||
@patch('freezer_api.storage.driver.LOG')
|
||||
def test_get_db_elastic_raises_Exception_when_cert_file_not_found(
|
||||
self, mock_logging, mock_elastic):
|
||||
self, mock_LOG, mock_elastic):
|
||||
mock_CONF = mock.Mock()
|
||||
mock_CONF.storage.db = 'elasticsearch'
|
||||
mock_CONF.storage.hosts = 'es_server'
|
||||
mock_CONF.storage.verify_certs = 'False'
|
||||
mock_CONF.storage.ca_certs = 'not_existant'
|
||||
mock_CONF.storage.use_ssl = False
|
||||
mock_CONF.storage.timeout = 43
|
||||
mock_CONF.storage.retries = 37
|
||||
mock_CONF.storage.backend = 'elasticsearch'
|
||||
mock_CONF.storage.driver = 'freezer_api.storage.elastic.' \
|
||||
'ElasticSearchEngine'
|
||||
mock_CONF.elasticsearch.hosts = 'es_server'
|
||||
mock_CONF.elasticsearch.verify_certs = 'False'
|
||||
mock_CONF.elasticsearch.ca_certs = 'not_existant'
|
||||
mock_CONF.elasticsearch.use_ssl = False
|
||||
mock_CONF.elasticsearch.timeout = 43
|
||||
mock_CONF.elasticsearch.retries = 37
|
||||
driver.CONF = mock_CONF
|
||||
self.assertRaises(Exception, driver.get_db)
|
||||
|
||||
@patch('freezer_api.storage.driver.elastic')
|
||||
@patch('freezer_api.storage.driver.logging')
|
||||
@patch('freezer_api.storage.driver.LOG')
|
||||
def test_get_db_elastic_raises_Exception_when_hosts_not_defined(
|
||||
self, mock_logging, mock_elastic):
|
||||
self, mock_LOG, mock_elastic):
|
||||
mock_CONF = mock.Mock()
|
||||
mock_CONF.storage.db = 'elasticsearch'
|
||||
mock_CONF.storage.hosts = ''
|
||||
mock_CONF.storage.endpoint = ''
|
||||
mock_CONF.storage.verify_certs = 'False'
|
||||
mock_CONF.storage.ca_certs = ''
|
||||
mock_CONF.storage.use_ssl = False
|
||||
mock_CONF.storage.timeout = 43
|
||||
mock_CONF.storage.retries = 37
|
||||
mock_CONF.storage.backend = 'elasticsearch'
|
||||
mock_CONF.elasticsearch.hosts = ''
|
||||
mock_CONF.elasticsearch.endpoint = ''
|
||||
mock_CONF.elasticsearch.verify_certs = 'False'
|
||||
mock_CONF.elasticsearch.ca_certs = ''
|
||||
mock_CONF.elasticsearch.use_ssl = False
|
||||
mock_CONF.elasticsearch.timeout = 43
|
||||
mock_CONF.elasticsearch.retries = 37
|
||||
driver.CONF = mock_CONF
|
||||
self.assertRaises(Exception, driver.get_db)
|
||||
|
|
|
@ -628,7 +628,8 @@ class TestElasticSearchEngine_backup(unittest.TestCase):
|
|||
def setUp(self, mock_logging, mock_elasticsearch):
|
||||
mock_elasticsearch.Elasticsearch.return_value = mock.Mock()
|
||||
kwargs = {'hosts': 'http://elasticservaddr:1997'}
|
||||
self.eng = elastic.ElasticSearchEngine(index='freezer', **kwargs)
|
||||
self.eng = elastic.ElasticSearchEngine(backend='elasticsearch')
|
||||
self.eng.init(index='freezer', **kwargs)
|
||||
self.eng.backup_manager = mock.Mock()
|
||||
|
||||
def test_get_backup_userid_and_backup_id_return_ok(self):
|
||||
|
@ -730,7 +731,8 @@ class TestElasticSearchEngine_client(unittest.TestCase):
|
|||
def setUp(self, mock_logging, mock_elasticsearch):
|
||||
mock_elasticsearch.Elasticsearch.return_value = mock.Mock()
|
||||
kwargs = {'hosts': 'http://elasticservaddr:1997'}
|
||||
self.eng = elastic.ElasticSearchEngine(index='freezer', **kwargs)
|
||||
self.eng = elastic.ElasticSearchEngine(backend="elasticsearch")
|
||||
self.eng.init(index='freezer', **kwargs)
|
||||
self.eng.client_manager = mock.Mock()
|
||||
|
||||
def test_get_client_userid_and_client_id_return_1elem_list_(self):
|
||||
|
@ -839,7 +841,8 @@ class TestElasticSearchEngine_job(unittest.TestCase):
|
|||
def setUp(self, mock_elasticsearch, mock_logging):
|
||||
mock_elasticsearch.Elasticsearch.return_value = mock.Mock()
|
||||
kwargs = {'hosts': 'http://elasticservaddr:1997'}
|
||||
self.eng = elastic.ElasticSearchEngine(index='freezer', **kwargs)
|
||||
self.eng = elastic.ElasticSearchEngine(backend="elasticsearch")
|
||||
self.eng.init(index='freezer', **kwargs)
|
||||
self.eng.job_manager = mock.Mock()
|
||||
|
||||
def test_get_job_userid_and_job_id_return_doc(self):
|
||||
|
@ -986,7 +989,8 @@ class TestElasticSearchEngine_action(unittest.TestCase):
|
|||
def setUp(self, mock_elasticsearch, mock_logging):
|
||||
mock_elasticsearch.Elasticsearch.return_value = mock.Mock()
|
||||
kwargs = {'hosts': 'http://elasticservaddr:1997'}
|
||||
self.eng = elastic.ElasticSearchEngine(index='freezer', **kwargs)
|
||||
self.eng = elastic.ElasticSearchEngine(backend="elasticsearch")
|
||||
self.eng.init(index='freezer', **kwargs)
|
||||
self.eng.action_manager = mock.Mock()
|
||||
|
||||
def test_get_action_userid_and_action_id_return_doc(self):
|
||||
|
@ -1142,7 +1146,8 @@ class TestElasticSearchEngine_session(unittest.TestCase):
|
|||
def setUp(self, mock_elasticsearch, mock_logging):
|
||||
mock_elasticsearch.Elasticsearch.return_value = mock.Mock()
|
||||
kwargs = {'hosts': 'http://elasticservaddr:1997'}
|
||||
self.eng = elastic.ElasticSearchEngine(index='freezer', **kwargs)
|
||||
self.eng = elastic.ElasticSearchEngine(backend="elasticsearch")
|
||||
self.eng.init(index='freezer', **kwargs)
|
||||
self.eng.session_manager = mock.Mock()
|
||||
|
||||
def test_get_session_userid_and_session_id_return_doc(self):
|
||||
|
|
Loading…
Reference in New Issue