PostgreSQL configuration groups

Implement configuration groups for PostgreSQL.

Notes:

- Improved the PropertiesCodec to handle (strip) in-line comments.
  Also fix the codec so that it preserves quotes around string values.
- Registered min/max functions with JINJA environment.
  Python min() and max() can be used in configuration templates.
- Fixed the file-existence check in operating_system.read_file()
  to also work with files that are not readable by the Trove user.
- Extended the operating_system.list_files_in_directory() to handle
  paths not readable by the Trove user (i.e. add an 'as_root' flag).
- Pass 'requires_root' flag on the read_file() in the config manager.
- Improved the PropertiesCodec to remove white-spaces around the
  property name (first item).
  Also add a missing string conversion so that properties with
  just a single item get serialized to the proper string value.

Implements: blueprint postgres-configuration-groups
Change-Id: Ieff1669b0ae5542b72cd7dce8921ee0c01e0cd58
This commit is contained in:
Petr Malik 2015-12-04 22:35:24 -05:00
parent 5cb2a740cf
commit be6939fc30
24 changed files with 1960 additions and 198 deletions

View File

@ -955,6 +955,8 @@ postgresql_opts = [
help='List of UDP ports and/or port ranges to open '
'in the security group (only applicable '
'if trove_security_groups_support is True).'),
cfg.PortOpt('postgresql_port', default=5432,
help='The TCP port the server listens on.'),
cfg.StrOpt('backup_strategy', default='PgDump',
help='Default strategy to perform backups.'),
cfg.DictOpt('backup_incremental_strategy', default={},

View File

@ -51,3 +51,14 @@ class MongoDBConfParser(object):
def parse(self):
return self.CODEC.deserialize(self.config).items()
class PostgresqlConfParser(object):
    """Parser for PostgreSQL configuration files ("name = value" format)."""

    # PostgreSQL properties are '='-delimited (e.g. "port = 5432").
    CODEC = stream_codecs.PropertiesCodec(delimiter='=')

    def __init__(self, config):
        # 'config' is the raw configuration file contents as a string.
        self.config = config

    def parse(self):
        """Deserialize the configuration and return its (name, value) pairs."""
        return self.CODEC.deserialize(self.config).items()

View File

@ -17,6 +17,7 @@ import abc
import ast
import csv
import json
import re
import six
import StringIO
import yaml
@ -65,8 +66,12 @@ class StringConverter(object):
return str(value)
def _to_object(self, value):
# Return known mappings and quoted strings right away.
if value in self._object_mappings:
return self._object_mappings[value]
elif (isinstance(value, basestring) and
re.match("^'(.*)'|\"(.*)\"$", value)):
return value
try:
return ast.literal_eval(value)
@ -252,6 +257,7 @@ class PropertiesCodec(StreamCodec):
QUOTING_MODE = csv.QUOTE_MINIMAL
STRICT_MODE = False
SKIP_INIT_SPACE = True
def __init__(self, delimiter=' ', comment_markers=('#'),
unpack_singletons=True, string_mappings={}):
@ -282,9 +288,10 @@ class PropertiesCodec(StreamCodec):
output = StringIO.StringIO()
writer = csv.writer(output, delimiter=self._delimiter,
quoting=self.QUOTING_MODE,
strict=self.STRICT_MODE)
strict=self.STRICT_MODE,
skipinitialspace=self.SKIP_INIT_SPACE)
for key, value in sorted(dict_data.items()):
for key, value in dict_data.items():
writer.writerows(self._to_rows(key, value))
return output.getvalue()
@ -293,23 +300,27 @@ class PropertiesCodec(StreamCodec):
reader = csv.reader(StringIO.StringIO(stream),
delimiter=self._delimiter,
quoting=self.QUOTING_MODE,
strict=self.STRICT_MODE)
strict=self.STRICT_MODE,
skipinitialspace=self.SKIP_INIT_SPACE)
return self._to_dict(reader)
def _to_dict(self, reader):
data_dict = {}
for row in reader:
# Ignore comment lines.
if row and not row[0].startswith(self._comment_markers):
items = self._string_converter.to_objects(
[v if v else None for v in row[1:]])
current = data_dict.get(row[0])
if current is not None:
current.append(trove_utils.unpack_singleton(items)
if self._unpack_singletons else items)
else:
data_dict.update({row[0]: [items]})
if row:
key = row[0].strip()
# Ignore comment lines.
if not key.strip().startswith(self._comment_markers):
items = self._string_converter.to_objects(
[v if v else None for v in
map(self._strip_comments, row[1:])])
current = data_dict.get(key)
if current is not None:
current.append(trove_utils.unpack_singleton(items)
if self._unpack_singletons else items)
else:
data_dict.update({key: [items]})
if self._unpack_singletons:
# Unpack singleton values.
@ -318,6 +329,12 @@ class PropertiesCodec(StreamCodec):
return data_dict
def _strip_comments(self, value):
    """Drop any trailing in-line comment from a property value.

    The value is truncated at the first occurrence of each known
    comment marker; surrounding whitespace is then removed.
    """
    for comment_marker in self._comment_markers:
        marker_position = value.find(comment_marker)
        if marker_position >= 0:
            value = value[:marker_position]
    return value.strip()
def _to_rows(self, header, items):
rows = []
if trove_utils.is_collection(items):
@ -331,7 +348,9 @@ class PropertiesCodec(StreamCodec):
header, self._string_converter.to_strings(items)))
else:
# This is a single-row property with only one argument.
rows.append(self._to_list(header, items))
rows.append(
self._string_converter.to_strings(
self._to_list(header, items)))
return rows

View File

@ -34,6 +34,7 @@ SERVICE_PARSERS = {
'mongodb': configurations.MongoDBConfParser,
'mysql': configurations.MySQLConfParser,
'percona': configurations.MySQLConfParser,
'postgresql': configurations.PostgresqlConfParser,
'redis': configurations.RedisConfParser,
}

View File

@ -48,10 +48,18 @@ bool_from_string = strutils.bool_from_string
execute = processutils.execute
isotime = timeutils.isotime
ENV = jinja2.Environment(loader=jinja2.ChoiceLoader([
jinja2.FileSystemLoader(CONF.template_path),
jinja2.PackageLoader("trove", "templates")
]))
def build_jinja_environment():
    """Create the Jinja2 environment used to render Trove templates.

    Templates are looked up first on the operator-configured template
    path, then in the 'trove.templates' package.
    """
    template_loader = jinja2.ChoiceLoader([
        jinja2.FileSystemLoader(CONF.template_path),
        jinja2.PackageLoader("trove", "templates"),
    ])
    environment = jinja2.Environment(loader=template_loader)
    # Expose basic Python built-ins that Jinja2 does not provide on its
    # own, so that templates may use min()/max() in expressions.
    environment.globals['max'] = max
    environment.globals['min'] = min
    return environment

ENV = build_jinja_environment()
def pagination_limit(limit, default_limit):

View File

@ -108,7 +108,8 @@ class ConfigurationManager(object):
"""
base_options = operating_system.read_file(
self._base_config_path, codec=self._codec)
self._base_config_path, codec=self._codec,
as_root=self._requires_root)
updates = self._override_strategy.parse_updates()
guestagent_utils.update_dict(updates, base_options)
@ -266,7 +267,7 @@ class ImportOverrideStrategy(ConfigurationOverrideStrategy):
within their set got applied.
"""
FILE_NAME_PATTERN = '^%s-([0-9]+)-%s\.%s$'
FILE_NAME_PATTERN = '%s-([0-9]+)-%s\.%s$'
def __init__(self, revision_dir, revision_ext):
"""
@ -323,7 +324,7 @@ class ImportOverrideStrategy(ConfigurationOverrideStrategy):
else:
# Update the existing file.
current = operating_system.read_file(
revision_file, codec=self._codec)
revision_file, codec=self._codec, as_root=self._requires_root)
options = guestagent_utils.update_dict(options, current)
operating_system.write_file(
@ -361,7 +362,8 @@ class ImportOverrideStrategy(ConfigurationOverrideStrategy):
def parse_updates(self):
parsed_options = {}
for path in self._collect_revision_files():
options = operating_system.read_file(path, codec=self._codec)
options = operating_system.read_file(path, codec=self._codec,
as_root=self._requires_root)
guestagent_utils.update_dict(options, parsed_options)
return parsed_options
@ -370,7 +372,10 @@ class ImportOverrideStrategy(ConfigurationOverrideStrategy):
def has_revisions(self):
"""Return True if there currently are any revision files.
"""
return len(self._collect_revision_files()) > 0
return (operating_system.exists(
self._revision_dir, is_directory=True,
as_root=self._requires_root) and
(len(self._collect_revision_files()) > 0))
def _get_last_file_index(self, group_name):
"""Get the index of the most current file in a given group.
@ -392,12 +397,14 @@ class ImportOverrideStrategy(ConfigurationOverrideStrategy):
"""
name_pattern = self._build_rev_name_pattern(group_name=group_name)
return sorted(operating_system.list_files_in_directory(
self._revision_dir, recursive=False, pattern=name_pattern))
self._revision_dir, recursive=True, pattern=name_pattern,
as_root=self._requires_root))
def _find_revision_file(self, group_name, change_id):
name_pattern = self._build_rev_name_pattern(group_name, change_id)
found = operating_system.list_files_in_directory(
self._revision_dir, recursive=False, pattern=name_pattern)
self._revision_dir, recursive=True, pattern=name_pattern,
as_root=self._requires_root)
return next(iter(found), None)
def _build_rev_name_pattern(self, group_name='.+', change_id='.+'):
@ -488,7 +495,8 @@ class OneFileOverrideStrategy(ConfigurationOverrideStrategy):
force=True, preserve=True, as_root=self._requires_root)
base_revision = operating_system.read_file(
self._base_revision_file, codec=self._codec)
self._base_revision_file, codec=self._codec,
as_root=self._requires_root)
changes = self._import_strategy.parse_updates()
updated_revision = guestagent_utils.update_dict(changes, base_revision)
operating_system.write_file(

View File

@ -52,15 +52,41 @@ def read_file(path, codec=IdentityCodec(), as_root=False):
:raises: :class:`UnprocessableEntity` if file doesn't exist.
:raises: :class:`UnprocessableEntity` if codec not given.
"""
if path and os.path.exists(path):
if path and exists(path, is_directory=False, as_root=as_root):
if as_root:
return _read_file_as_root(path, codec)
with open(path, 'r') as fp:
return codec.deserialize(fp.read())
raise exception.UnprocessableEntity(_("File does not exist: %s") % path)
def exists(path, is_directory=False, as_root=False):
    """Check a given path exists.

    :param path:             Path to be checked.
    :type path:              string

    :param is_directory:     Check that the path exists and is a directory.
                             Check for a regular file otherwise.
    :type is_directory:      boolean

    :param as_root:          Execute as root.
    :type as_root:           boolean
    """
    if not as_root:
        # Plain check for paths readable by the current (Trove) user.
        if is_directory:
            return os.path.isdir(path)
        return os.path.isfile(path)

    # The path may not be readable by the Trove user; run 'test' via sudo.
    test_flag = '-d' if is_directory else '-f'
    cmd = 'test %s %s && echo 1 || echo 0' % (test_flag, path)
    stdout, _ = utils.execute_with_timeout(
        cmd, shell=True, check_exit_code=False,
        run_as_root=True, root_helper='sudo')
    return bool(int(stdout))
def _read_file_as_root(path, codec):
"""Read a file as root.
@ -91,8 +117,8 @@ def write_file(path, data, codec=IdentityCodec(), as_root=False):
:param codec: A codec used to serialize the data.
:type codec: StreamCodec
:param codec: Execute as root.
:type codec: boolean
:param as_root: Execute as root.
:type as_root: boolean
:raises: :class:`UnprocessableEntity` if path not given.
"""
@ -599,6 +625,42 @@ def get_bytes_free_on_fs(path):
return v.f_bsize * v.f_bavail
def list_files_in_directory(root_dir, recursive=False, pattern=None,
                            include_dirs=False, as_root=False):
    """
    Return absolute paths to all files in a given root directory.

    :param root_dir            Path to the root directory.
    :type root_dir             string

    :param recursive           Also descend into sub-directories if True.
    :type root_dir             boolean

    :param pattern             Return only names matching the pattern.
    :type pattern              string

    :param include_dirs        Include paths to individual sub-directories.
    :type include_dirs         boolean

    :param as_root             Run the search with root privileges, allowing
                               paths not readable by the Trove user.
    :type as_root              boolean
    """
    if as_root:
        # '-mindepth 1' excludes 'root_dir' itself from the results,
        # matching the os.walk() branch below (which never yields it).
        cmd_args = [root_dir, '-noleaf', '-mindepth', '1']
        if not recursive:
            # A depth of 1 lists the direct contents of 'root_dir'.
            # ('-maxdepth 0' would match only 'root_dir' itself and could
            # therefore never return any of the files inside it.)
            cmd_args.extend(['-maxdepth', '1'])
        if not include_dirs:
            cmd_args.extend(['-type', 'f'])
        if pattern:
            # NOTE(review): if 'pattern' already ends with '$' this appends
            # a second anchor ('$$') - confirm find(1) accepts such regexes.
            cmd_args.extend(['-regextype', 'posix-extended',
                             '-regex', os.path.join('.*', pattern) + '$'])
        files = _execute_shell_cmd('find', [], *cmd_args, as_root=True)
        return {fp for fp in files.splitlines()}

    return {os.path.abspath(os.path.join(root, name))
            for (root, dirs, files) in os.walk(root_dir, topdown=True)
            if recursive or (root == root_dir)
            for name in (files + (dirs if include_dirs else []))
            if not pattern or re.match(pattern, name)}
def _execute_shell_cmd(cmd, options, *args, **kwargs):
"""Execute a given shell command passing it
given options (flags) and arguments.
@ -628,7 +690,8 @@ def _execute_shell_cmd(cmd, options, *args, **kwargs):
cmd_flags = _build_command_options(options)
cmd_args = cmd_flags + list(args)
utils.execute_with_timeout(cmd, *cmd_args, **exec_args)
stdout, stderr = utils.execute_with_timeout(cmd, *cmd_args, **exec_args)
return stdout
def _build_command_options(options):
@ -638,23 +701,3 @@ def _build_command_options(options):
"""
return ['-' + item[0] for item in options if item[1]]
def list_files_in_directory(root_dir, recursive=False, pattern=None):
"""
Return absolute paths to all files in a given root directory.
:param root_dir Path to the root directory.
:type root_dir string
:param recursive Also probe subdirectories if True.
:type recursive boolean
:param pattern Return only files matching the pattern.
:type pattern string
"""
return {os.path.abspath(os.path.join(root, name))
for (root, _, files) in os.walk(root_dir, topdown=True)
if recursive or (root == root_dir)
for name in files
if not pattern or re.match(pattern, name)}

View File

@ -34,11 +34,11 @@ LOG = logging.getLogger(__name__)
class Manager(
manager.Manager,
PgSqlDatabase,
PgSqlRoot,
PgSqlConfig,
PgSqlInstall,
manager.Manager
):
PG_BUILTIN_ADMIN = 'postgres'
@ -50,6 +50,10 @@ class Manager(
def status(self):
return PgSqlAppStatus.get()
@property
def configuration_manager(self):
return self._configuration_manager
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info, config_contents,
root_password, overrides, cluster_config, snapshot):
@ -62,8 +66,8 @@ class Manager(
if os.path.exists(mount_point):
device.migrate_data(mount_point)
device.mount(mount_point)
self.reset_configuration(context, config_contents)
self.set_db_to_listen(context)
self.configuration_manager.save_configuration(config_contents)
self.apply_initial_guestagent_configuration()
self.start_db(context)
if backup_info:

View File

@ -13,25 +13,28 @@
# License for the specific language governing permissions and limitations
# under the License.
import re
from collections import OrderedDict
import os
from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.common import utils
from trove.common.stream_codecs import PropertiesCodec
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.postgresql.service.process import(
PgSqlProcess)
from trove.guestagent.datastore.experimental.postgresql.service.status import(
PgSqlAppStatus)
from trove.guestagent.datastore.experimental.postgresql import pgutil
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
PGSQL_CONFIG = "/etc/postgresql/{version}/main/postgresql.conf"
PGSQL_HBA_CONFIG = "/etc/postgresql/{version}/main/pg_hba.conf"
class PgSqlConfig(PgSqlProcess):
"""Mixin that implements the config API.
@ -39,98 +42,135 @@ class PgSqlConfig(PgSqlProcess):
This mixin has a dependency on the PgSqlProcess mixin.
"""
def _get_psql_version(self):
"""Poll PgSql for the version number.
OS = operating_system.get_os()
CONFIG_BASE = {
operating_system.DEBIAN: '/etc/postgresql/',
operating_system.REDHAT: '/var/lib/postgresql/',
operating_system.SUSE: '/var/lib/pgsql/'}[OS]
LISTEN_ADDRESSES = ['*'] # Listen on all available IP (v4/v6) interfaces.
Return value is a string representing the version number.
"""
LOG.debug(
"{guest_id}: Polling for postgresql version.".format(
guest_id=CONF.guest_id,
)
)
out, err = utils.execute('psql', '--version')
pattern = re.compile('\d\.\d')
return pattern.search(out).group(0)
def __init__(self, *args, **kwargs):
    """Set up the ConfigurationManager for postgresql.conf."""
    super(PgSqlConfig, self).__init__(*args, **kwargs)
    # Configuration revisions live in a sub-directory next to the main
    # configuration file.
    revision_dir = guestagent_utils.build_file_path(
        os.path.dirname(self.pgsql_config),
        ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
    # PostgreSQL properties use '=' as delimiter; 'on'/'off' map to
    # booleans and an empty quoted string maps to None.  Root access is
    # required because the files are owned by PGSQL_OWNER, not Trove.
    self._configuration_manager = ConfigurationManager(
        self.pgsql_config, self.PGSQL_OWNER, self.PGSQL_OWNER,
        PropertiesCodec(
            delimiter='=',
            string_mappings={'on': True, 'off': False, "''": None}),
        requires_root=True,
        override_strategy=OneFileOverrideStrategy(revision_dir))
@property
def pgsql_config(self):
return self._find_config_file('postgresql.conf')
@property
def pgsql_hba_config(self):
return self._find_config_file('pg_hba.conf')
@property
def pgsql_ident_config(self):
return self._find_config_file('pg_ident.conf')
def _find_config_file(self, name_pattern):
    """Locate a configuration file in the version-specific config tree.

    Searches (as root) under CONFIG_BASE/<version> and returns the
    matching path closest to the root, i.e. the shortest one.

    NOTE(review): raises IndexError when nothing matches - confirm
    callers can rely on the file always being present.
    """
    version_base = guestagent_utils.build_file_path(self.CONFIG_BASE,
                                                    self.pg_version[1])
    return sorted(operating_system.list_files_in_directory(
        version_base, recursive=True, pattern=name_pattern,
        as_root=True), key=len)[0]
def update_overrides(self, context, overrides, remove=False):
    """Apply or remove a user-level configuration override.

    :param overrides:  Configuration changes to apply (ignored if empty).
    :param remove:     Remove the existing user override instead of
                       applying 'overrides'.
    """
    if remove:
        self.configuration_manager.remove_user_override()
    elif overrides:
        self.configuration_manager.apply_user_override(overrides)
def apply_overrides(self, context, overrides):
# Send a signal to the server, causing configuration files to be
# reloaded by all server processes.
# Active queries or connections to the database will not be
# interrupted.
#
# NOTE: Do not use the 'SET' command as it only affects the current
# session.
pgutil.psql("SELECT pg_reload_conf()")
def reset_configuration(self, context, configuration):
"""Reset the PgSql configuration file to the one given.
The configuration parameter is a string containing the full
configuration file that should be used.
"""Reset the PgSql configuration to the one given.
"""
config_location = PGSQL_CONFIG.format(
version=self._get_psql_version(),
)
LOG.debug(
"{guest_id}: Writing configuration file to /tmp/pgsql_config."
.format(
guest_id=CONF.guest_id,
)
)
with open('/tmp/pgsql_config', 'w+') as config_file:
config_file.write(configuration)
operating_system.chown('/tmp/pgsql_config', 'postgres', None,
recursive=False, as_root=True)
operating_system.move('/tmp/pgsql_config', config_location, timeout=30,
as_root=True)
config_contents = configuration['config_contents']
self.configuration_manager.save_configuration(config_contents)
def set_db_to_listen(self, context):
"""Allow remote connections with encrypted passwords."""
LOG.debug(
"{guest_id}: Writing hba file to /tmp/pgsql_hba_config.".format(
guest_id=CONF.guest_id,
)
)
def start_db_with_conf_changes(self, context, config_contents):
    """Starts the PgSql instance with a new configuration.

    :param config_contents:  Full contents of the new configuration file.
    :raises: RuntimeError if the database service is already running.
    """
    if PgSqlAppStatus.get().is_running:
        raise RuntimeError(_("The service is still running."))
    self.configuration_manager.save_configuration(config_contents)
    # The configuration template has to be updated with
    # guestagent-controlled settings.
    self.apply_initial_guestagent_configuration()
    self.start_db(context)
def apply_initial_guestagent_configuration(self):
    """Update guestagent-controlled configuration properties.

    File locations, listen addresses and the port are managed by Trove
    and written as a system-level override; user-supplied values for
    these properties will be overwritten.
    """
    LOG.debug("Applying initial guestagent configuration.")
    # Paths and addresses must be single-quoted in postgresql.conf.
    file_locations = {
        'data_directory': self._quote(self.pgsql_data_dir),
        'hba_file': self._quote(self.pgsql_hba_config),
        'ident_file': self._quote(self.pgsql_ident_config),
        'external_pid_file': self._quote(self.PID_FILE),
        'unix_socket_directories': self._quote(self.UNIX_SOCKET_DIR),
        'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)),
        'port': CONF.postgresql.postgresql_port}
    self.configuration_manager.apply_system_override(file_locations)
    self._apply_access_rules()
@staticmethod
def _quote(value):
    # Return the value as a single-quoted string (postgresql.conf syntax).
    return "'%s'" % value
def _apply_access_rules(self):
LOG.debug("Applying database access rules.")
# Connections to all resources are granted.
#
# Local access from administrative users is implicitly trusted.
#
# Remote access from the Trove's account is always rejected as
# it is not needed and could be used by malicious users to hijack the
# instance.
#
# Connections from other accounts always require a hashed password.
with open('/tmp/pgsql_hba_config', 'w+') as config_file:
config_file.write(
"local all postgres,os_admin trust\n")
config_file.write(
"local all all md5\n")
config_file.write(
"host all postgres,os_admin 127.0.0.1/32 trust\n")
config_file.write(
"host all postgres,os_admin ::1/128 trust\n")
config_file.write(
"host all postgres,os_admin localhost trust\n")
config_file.write(
"host all os_admin 0.0.0.0/0 reject\n")
config_file.write(
"host all os_admin ::/0 reject\n")
config_file.write(
"host all all 0.0.0.0/0 md5\n")
config_file.write(
"host all all ::/0 md5\n")
operating_system.chown('/tmp/pgsql_hba_config',
'postgres', None, recursive=False, as_root=True)
operating_system.move('/tmp/pgsql_hba_config', PGSQL_HBA_CONFIG.format(
version=self._get_psql_version(),
), timeout=30, as_root=True)
def start_db_with_conf_changes(self, context, config_contents):
"""Restarts the PgSql instance with a new configuration."""
LOG.info(
_("{guest_id}: Going into restart mode for config file changes.")
.format(
guest_id=CONF.guest_id,
)
)
PgSqlAppStatus.get().begin_restart()
self.stop_db(context)
self.reset_configuration(context, config_contents)
self.start_db(context)
LOG.info(
_("{guest_id}: Ending restart mode for config file changes.")
.format(
guest_id=CONF.guest_id,
)
)
PgSqlAppStatus.get().end_restart()
# Connections from other accounts always require a double-MD5-hashed
# password.
#
# Make the rules readable only by the Postgres service.
#
# NOTE: The order of entries is important.
# The first failure to authenticate stops the lookup.
# That is why the 'local' connections validate first.
# The OrderedDict is necessary to guarantee the iteration order.
access_rules = OrderedDict(
[('local', [['all', 'postgres,os_admin', None, 'trust'],
['all', 'all', None, 'md5']]),
('host', [['all', 'postgres,os_admin', '127.0.0.1/32', 'trust'],
['all', 'postgres,os_admin', '::1/128', 'trust'],
['all', 'postgres,os_admin', 'localhost', 'trust'],
['all', 'os_admin', '0.0.0.0/0', 'reject'],
['all', 'os_admin', '::/0', 'reject'],
['all', 'all', '0.0.0.0/0', 'md5'],
['all', 'all', '::/0', 'md5']])
])
operating_system.write_file(self.pgsql_hba_config, access_rules,
PropertiesCodec(
string_mappings={'\t': None}),
as_root=True)
operating_system.chown(self.pgsql_hba_config,
self.PGSQL_OWNER, self.PGSQL_OWNER,
as_root=True)
operating_system.chmod(self.pgsql_hba_config, FileMode.SET_USR_RO,
as_root=True)

View File

@ -27,6 +27,9 @@ CONF = cfg.CONF
class PgSqlDatabase(object):
def __init__(self, *args, **kwargs):
super(PgSqlDatabase, self).__init__(*args, **kwargs)
def create_database(self, context, databases):
"""Create the list of specified databases.

View File

@ -31,6 +31,9 @@ class PgSqlInstall(PgSqlProcess):
This mixin has a dependency on the PgSqlProcess mixin.
"""
def __init__(self, *args, **kwargs):
super(PgSqlInstall, self).__init__(*args, **kwargs)
def install(self, context, packages):
"""Install one or more packages that postgresql needs to run.

View File

@ -13,30 +13,55 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
from trove.common import cfg
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.postgresql.service.status import (
PgSqlAppStatus)
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
SERVICE_CANDIDATES = ["postgresql"]
class PgSqlProcess(object):
"""Mixin that manages the PgSql process."""
SERVICE_CANDIDATES = ["postgresql"]
PGSQL_OWNER = 'postgres'
DATA_BASE = '/var/lib/postgresql/'
PID_FILE = '/var/run/postgresql/postgresql.pid'
UNIX_SOCKET_DIR = '/var/run/postgresql/'
@property
def pgsql_data_dir(self):
    # The data directory is the parent of the PG_VERSION file.
    return os.path.dirname(self.pg_version[0])
@property
def pg_version(self):
    """Find the database version file stored in the data directory.

    :returns: A tuple with the path to the version file
              (in the root of the data directory) and the version string.
    """
    version_files = operating_system.list_files_in_directory(
        self.DATA_BASE, recursive=True, pattern='PG_VERSION', as_root=True)
    # The shortest path is the PG_VERSION file closest to DATA_BASE,
    # i.e. the one in the root of the data directory.
    version_file = sorted(version_files, key=len)[0]
    version = operating_system.read_file(version_file, as_root=True)
    return version_file, version.strip()
def restart(self, context):
PgSqlAppStatus.get().restart_db_service(
SERVICE_CANDIDATES, CONF.state_change_wait_time)
self.SERVICE_CANDIDATES, CONF.state_change_wait_time)
def start_db(self, context, enable_on_boot=True, update_db=False):
PgSqlAppStatus.get().start_db_service(
SERVICE_CANDIDATES, CONF.state_change_wait_time,
self.SERVICE_CANDIDATES, CONF.state_change_wait_time,
enable_on_boot=enable_on_boot, update_db=update_db)
def stop_db(self, context, do_not_start_on_reboot=False, update_db=False):
PgSqlAppStatus.get().stop_db_service(
SERVICE_CANDIDATES, CONF.state_change_wait_time,
self.SERVICE_CANDIDATES, CONF.state_change_wait_time,
disable_on_boot=do_not_start_on_reboot, update_db=update_db)

View File

@ -28,6 +28,9 @@ CONF = cfg.CONF
class PgSqlRoot(PgSqlUsers):
"""Mixin that provides the root-enable API."""
def __init__(self, *args, **kwargs):
super(PgSqlRoot, self).__init__(*args, **kwargs)
def is_root_enabled(self, context):
"""Return True if there is a superuser account enabled.
"""

View File

@ -1,26 +1,648 @@
data_directory = '/var/lib/postgresql/{{datastore['version']}}/main'
# Pre-compute values used by the template expressions.
# Note: The variables have to be in lists due to how scoping works in JINJA templates.
#
# The recommended amount for 'shared_buffers' on a dedicated database server is 25% of RAM.
# Servers with less than 3GB of RAM require a more conservative value to save memory for other processes.
{% set shared_buffers_mb = [(0.25 if flavor['ram'] >= 3072 else 0.10) * flavor['ram']] %}
#
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, or use "pg_ctl reload". Some
# parameters, which are marked below, require a server shutdown and restart to
# take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#
# The properties marked as controlled by Trove are managed by the Trove
# guest-agent. Any changes to them will be overwritten.
hba_file = '/etc/postgresql/{{datastore['version']}}/main/pg_hba.conf'
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
ident_file = '/etc/postgresql/{{datastore['version']}}/main/pg_ident.conf'
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
external_pid_file = '/var/run/postgresql/postgresql.pid'
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
# (controlled by Trove)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
# (controlled by Trove)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)
# (controlled by Trove)
listen_addresses = '*'
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '' # write an extra PID file
# (change requires restart)
# (controlled by Trove)
port = 5432
max_connections = 100
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
shared_buffers = 24MB
# - Connection Settings -
log_line_prefix = '%t '
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
# (controlled by Trove)
#port = 5432 # (change requires restart)
# (controlled by Trove)
#max_connections = 100 # (change requires restart)
# Note: Increasing max_connections costs ~400 bytes of shared memory per
# connection slot, plus lock space (see max_locks_per_transaction).
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directories = '/tmp' # comma-separated list of directories
# (change requires restart)
# (controlled by Trove)
#unix_socket_group = '' # (change requires restart)
# (controlled by Trove)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
# (controlled by Trove)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
lc_messages = 'en_US.UTF-8'
lc_monetary = 'en_US.UTF-8'
lc_numeric = 'en_US.UTF-8'
lc_time = 'en_US.UTF-8'
# - Security and Authentication -
default_text_search_config = 'pg_catalog.english'
#authentication_timeout = 1min # 1s-600s
#ssl = off # (change requires restart)
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
# (change requires restart)
#ssl_prefer_server_ciphers = on # (change requires restart)
#ssl_ecdh_curve = 'prime256v1' # (change requires restart)
#ssl_renegotiation_limit = 0 # amount of data between renegotiations
#ssl_cert_file = 'server.crt' # (change requires restart)
#ssl_key_file = 'server.key' # (change requires restart)
#ssl_ca_file = '' # (change requires restart)
#ssl_crl_file = '' # (change requires restart)
#password_encryption = on
#db_user_namespace = off
unix_socket_directories = '/var/run/postgresql'
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
# - TCP Keepalives -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = {{ shared_buffers_mb[0]|int }}MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory
# per transaction slot, plus lock space (see max_locks_per_transaction).
# It is not advisable to set max_prepared_transactions nonzero unless you
# actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
max_stack_depth = 7MB # min 100kB
# The ideal value is the actual limit enforced
# by the OS (8MB on 64-bit flavors) less a safety
# margin of 1MB or so.
#dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# use none to disable dynamic shared memory
# - Disk -
#temp_file_limit = -1 # limits per-session temp file space
# in kB, or -1 for no limit
# - Kernel Resource Usage -
#max_files_per_process = 1000 # min 25
# (change requires restart)
#shared_preload_libraries = '' # (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
# - Asynchronous Behavior -
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8
#------------------------------------------------------------------------------
# WRITE AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
wal_level = minimal # minimal, archive, hot_standby, or logical
# (change requires restart)
# (controlled by Trove)
#fsync = on # turns forced synchronization on or off
#synchronous_commit = on # synchronization level;
# off, local, remote_write, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
checkpoint_segments = {{ (shared_buffers_mb[0] / 16 + 1)|int }} # in logfile segments, min 1, 16MB each
# Each segment is normally 16MB long.
# The number of segments should be enough to
# span the 'shared_buffers' size.
# We set the default to (shared_buffers / 16 + 1).
#checkpoint_timeout = 5min # range 30s-1h
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_warning = 30s # 0 disables
# - Archiving -
archive_mode = off # allows archiving to be done
# (change requires restart)
# (controlled by Trove)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
# (controlled by Trove)
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# (controlled by Trove)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Server(s) -
# Set these on the master and on any standby that will send replication data.
#max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#max_replication_slots = 0 # max number of replication slots
# (change requires restart)
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#hot_standby = off # "on" allows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the amount of available RAM
# less the minimum required for other processes or 512MB.
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# (controlled by Trove)
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# (controlled by Trove)
# These are only used if logging_collector is on:
#log_directory = 'pg_log' # directory where log files are written,
# can be absolute or relative to PGDATA
# (controlled by Trove)
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
# (controlled by Trove)
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
# This is only relevant when logging to eventlog (win32):
#event_source = 'PostgreSQL'
# - When to Log -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
#log_line_prefix = '' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
#log_timezone = 'GMT'
#------------------------------------------------------------------------------
# RUNTIME STATISTICS
#------------------------------------------------------------------------------
# - Query/Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
update_process_title = off # (controlled by Trove)
#stats_temp_directory = 'pg_stat_tmp'
# - Statistics Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#search_path = '"$user",public' # schema names
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
# - Locale and Formatting -
#datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
#timezone = 'GMT'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 0 # min -15, max 3
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
#lc_messages = 'C' # locale for system error message
# strings
#lc_monetary = 'C' # locale for monetary formatting
#lc_numeric = 'C' # locale for number formatting
#lc_time = 'C' # locale for time formatting
# default configuration for text search
#default_text_search_config = 'pg_catalog.simple'
# - Other Defaults -
#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
#session_preload_libraries = ''
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
# Note: Each lock table slot uses ~270 bytes of shared memory, and there are
# max_locks_per_transaction * (max_connections + max_prepared_transactions)
# lock table slots.
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#sql_inheritance = on
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf.
#include_dir = 'conf.d' # include files ending in '.conf' from
# directory 'conf.d'
#include_if_exists = 'exists.conf' # include file only if it exists
#include = 'special.conf' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@ -0,0 +1,3 @@
{#- Render each override as a "name = value" line for postgresql.conf.
    Use items() rather than the Python-2-only iteritems() so the template
    also renders under Python 3 Jinja environments; behavior is identical
    on Python 2. -#}
{% for key, value in overrides.items() -%}
{{key}} = {{value}}
{% endfor %}

View File

@ -0,0 +1,903 @@
{
"configuration-parameters": [
{
"name": "max_connections",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "superuser_reserved_connections",
"restart_required": true,
"min": 1,
"type": "integer"
},
{
"name": "bonjour",
"restart_required": true,
"type": "boolean"
},
{
"name": "bonjour_name",
"restart_required": true,
"type": "string"
},
{
"name": "authentication_timeout",
"restart_required": false,
"type": "string"
},
{
"name": "password_encryption",
"restart_required": false,
"type": "boolean"
},
{
"name": "db_user_namespace",
"restart_required": false,
"type": "boolean"
},
{
"name": "tcp_keepalives_idle",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "tcp_keepalives_interval",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "tcp_keepalives_count",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "shared_buffers",
"restart_required": true,
"type": "string"
},
{
"name": "huge_pages",
"restart_required": true,
"type": "string"
},
{
"name": "temp_buffers",
"restart_required": false,
"type": "string"
},
{
"name": "max_prepared_transactions",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "work_mem",
"restart_required": false,
"type": "string"
},
{
"name": "maintenance_work_mem",
"restart_required": false,
"type": "string"
},
{
"name": "autovacuum_work_mem",
"restart_required": false,
"min": -1,
"type": "integer"
},
{
"name": "max_stack_depth",
"restart_required": false,
"type": "string"
},
{
"name": "dynamic_shared_memory_type",
"restart_required": false,
"type": "string"
},
{
"name": "temp_file_limit",
"restart_required": false,
"min": -1,
"type": "integer"
},
{
"name": "max_files_per_process",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "vacuum_cost_delay",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "vacuum_cost_page_hit",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "vacuum_cost_page_miss",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "vacuum_cost_page_dirty",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "vacuum_cost_limit",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "bgwriter_delay",
"restart_required": false,
"type": "string"
},
{
"name": "bgwriter_lru_maxpages",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "bgwriter_lru_multiplier",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "effective_io_concurrency",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "max_worker_processes",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "fsync",
"restart_required": false,
"type": "boolean"
},
{
"name": "synchronous_commit",
"restart_required": false,
"type": "boolean"
},
{
"name": "wal_sync_method",
"restart_required": false,
"type": "string"
},
{
"name": "full_page_writes",
"restart_required": false,
"type": "boolean"
},
{
"name": "wal_log_hints",
"restart_required": true,
"type": "boolean"
},
{
"name": "wal_buffers",
"restart_required": true,
"min": -1,
"type": "integer"
},
{
"name": "wal_writer_delay",
"restart_required": false,
"type": "string"
},
{
"name": "commit_delay",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "commit_siblings",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "checkpoint_segments",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "checkpoint_timeout",
"restart_required": false,
"type": "string"
},
{
"name": "checkpoint_completion_target",
"restart_required": false,
"type": "string"
},
{
"name": "checkpoint_warning",
"restart_required": false,
"type": "string"
},
{
"name": "wal_keep_segments",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "wal_sender_timeout",
"restart_required": false,
"type": "string"
},
{
"name": "synchronous_standby_names",
"restart_required": false,
"type": "string"
},
{
"name": "vacuum_defer_cleanup_age",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "hot_standby",
"restart_required": true,
"type": "boolean"
},
{
"name": "max_standby_archive_delay",
"restart_required": false,
"type": "string"
},
{
"name": "max_standby_streaming_delay",
"restart_required": false,
"type": "string"
},
{
"name": "wal_receiver_status_interval",
"restart_required": false,
"type": "string"
},
{
"name": "hot_standby_feedback",
"restart_required": false,
"type": "boolean"
},
{
"name": "wal_receiver_timeout",
"restart_required": false,
"type": "string"
},
{
"name": "enable_bitmapscan",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_hashagg",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_hashjoin",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_indexscan",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_indexonlyscan",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_material",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_mergejoin",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_nestloop",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_seqscan",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_sort",
"restart_required": false,
"type": "boolean"
},
{
"name": "enable_tidscan",
"restart_required": false,
"type": "boolean"
},
{
"name": "seq_page_cost",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "random_page_cost",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "cpu_tuple_cost",
"restart_required": false,
"type": "string"
},
{
"name": "cpu_index_tuple_cost",
"restart_required": false,
"type": "string"
},
{
"name": "cpu_operator_cost",
"restart_required": false,
"type": "string"
},
{
"name": "effective_cache_size",
"restart_required": false,
"type": "string"
},
{
"name": "geqo",
"restart_required": false,
"type": "boolean"
},
{
"name": "geqo_threshold",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "geqo_effort",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "geqo_pool_size",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "geqo_generations",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "geqo_selection_bias",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "geqo_seed",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "default_statistics_target",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "constraint_exclusion",
"restart_required": false,
"type": "string"
},
{
"name": "cursor_tuple_fraction",
"restart_required": false,
"type": "string"
},
{
"name": "from_collapse_limit",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "join_collapse_limit",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "log_truncate_on_rotation",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_rotation_age",
"restart_required": false,
"type": "string"
},
{
"name": "log_rotation_size",
"restart_required": false,
"type": "string"
},
{
"name": "client_min_messages",
"restart_required": false,
"type": "string"
},
{
"name": "log_min_messages",
"restart_required": false,
"type": "string"
},
{
"name": "log_min_error_statement",
"restart_required": false,
"type": "string"
},
{
"name": "log_min_duration_statement",
"restart_required": false,
"min": -1,
"type": "integer"
},
{
"name": "debug_print_parse",
"restart_required": false,
"type": "boolean"
},
{
"name": "debug_print_rewritten",
"restart_required": false,
"type": "boolean"
},
{
"name": "debug_print_plan",
"restart_required": false,
"type": "boolean"
},
{
"name": "debug_pretty_print",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_checkpoints",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_connections",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_disconnections",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_duration",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_error_verbosity",
"restart_required": false,
"type": "string"
},
{
"name": "log_hostname",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_line_prefix",
"restart_required": false,
"type": "string"
},
{
"name": "log_lock_waits",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_statement",
"restart_required": false,
"type": "string"
},
{
"name": "log_temp_files",
"restart_required": false,
"min": -1,
"type": "integer"
},
{
"name": "log_timezone",
"restart_required": false,
"type": "string"
},
{
"name": "track_activities",
"restart_required": false,
"type": "boolean"
},
{
"name": "track_counts",
"restart_required": false,
"type": "boolean"
},
{
"name": "track_io_timing",
"restart_required": false,
"type": "boolean"
},
{
"name": "track_functions",
"restart_required": false,
"type": "string"
},
{
"name": "track_activity_query_size",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "log_parser_stats",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_planner_stats",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_executor_stats",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_statement_stats",
"restart_required": false,
"type": "boolean"
},
{
"name": "autovacuum",
"restart_required": false,
"type": "boolean"
},
{
"name": "log_autovacuum_min_duration",
"restart_required": false,
"min": -1,
"type": "integer"
},
{
"name": "autovacuum_max_workers",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "autovacuum_naptime",
"restart_required": false,
"type": "string"
},
{
"name": "autovacuum_vacuum_threshold",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "autovacuum_analyze_threshold",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "autovacuum_vacuum_scale_factor",
"restart_required": false,
"type": "string"
},
{
"name": "autovacuum_analyze_scale_factor",
"restart_required": false,
"type": "string"
},
{
"name": "autovacuum_freeze_max_age",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "autovacuum_multixact_freeze_max_age",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "autovacuum_vacuum_cost_delay",
"restart_required": false,
"type": "string"
},
{
"name": "autovacuum_vacuum_cost_limit",
"restart_required": false,
"min": -1,
"type": "integer"
},
{
"name": "search_path",
"restart_required": false,
"type": "string"
},
{
"name": "default_tablespace",
"restart_required": false,
"type": "string"
},
{
"name": "temp_tablespaces",
"restart_required": false,
"type": "string"
},
{
"name": "check_function_bodies",
"restart_required": false,
"type": "boolean"
},
{
"name": "default_transaction_isolation",
"restart_required": false,
"type": "string"
},
{
"name": "default_transaction_read_only",
"restart_required": false,
"type": "boolean"
},
{
"name": "default_transaction_deferrable",
"restart_required": false,
"type": "boolean"
},
{
"name": "session_replication_role",
"restart_required": false,
"type": "string"
},
{
"name": "statement_timeout",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "lock_timeout",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "vacuum_freeze_min_age",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "vacuum_freeze_table_age",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "vacuum_multixact_freeze_min_age",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "vacuum_multixact_freeze_table_age",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "bytea_output",
"restart_required": false,
"type": "string"
},
{
"name": "xmlbinary",
"restart_required": false,
"type": "string"
},
{
"name": "xmloption",
"restart_required": false,
"type": "string"
},
{
"name": "datestyle",
"restart_required": false,
"type": "string"
},
{
"name": "intervalstyle",
"restart_required": false,
"type": "string"
},
{
"name": "timezone",
"restart_required": false,
"type": "string"
},
{
"name": "timezone_abbreviations",
"restart_required": false,
"type": "string"
},
{
"name": "extra_float_digits",
"restart_required": false,
"min": 0,
"type": "integer"
},
{
"name": "client_encoding",
"restart_required": false,
"type": "string"
},
{
"name": "lc_messages",
"restart_required": false,
"type": "string"
},
{
"name": "lc_monetary",
"restart_required": false,
"type": "string"
},
{
"name": "lc_numeric",
"restart_required": false,
"type": "string"
},
{
"name": "lc_time",
"restart_required": false,
"type": "string"
},
{
"name": "default_text_search_config",
"restart_required": false,
"type": "string"
},
{
"name": "deadlock_timeout",
"restart_required": false,
"type": "string"
},
{
"name": "max_locks_per_transaction",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "max_pred_locks_per_transaction",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "array_nulls",
"restart_required": false,
"type": "boolean"
},
{
"name": "backslash_quote",
"restart_required": false,
"type": "string"
},
{
"name": "default_with_oids",
"restart_required": false,
"type": "boolean"
},
{
"name": "escape_string_warning",
"restart_required": false,
"type": "boolean"
},
{
"name": "lo_compat_privileges",
"restart_required": false,
"type": "boolean"
},
{
"name": "quote_all_identifiers",
"restart_required": false,
"type": "boolean"
},
{
"name": "sql_inheritance",
"restart_required": false,
"type": "boolean"
},
{
"name": "standard_conforming_strings",
"restart_required": false,
"type": "boolean"
},
{
"name": "synchronize_seqscans",
"restart_required": false,
"type": "boolean"
},
{
"name": "transform_null_equals",
"restart_required": false,
"type": "boolean"
},
{
"name": "exit_on_error",
"restart_required": false,
"type": "boolean"
},
{
"name": "restart_after_crash",
"restart_required": false,
"type": "boolean"
}
]
}

View File

@ -264,7 +264,7 @@ class BackupAgentTest(trove_testtools.TestCase):
self.assertIsNotNone(mongodump.manifest)
self.assertIn('gz.enc', mongodump.manifest)
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
Mock(return_value={'dir': '/var/lib/redis',
'dbfilename': 'dump.rdb'}))

View File

@ -360,7 +360,7 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
self.assertEqual(restr.restore_cmd,
DECRYPT + PIPE + UNZIP + PIPE + MONGODUMP_RESTORE)
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
mock.Mock(return_value={'dir': '/var/lib/redis',
'dbfilename': 'dump.rdb'}))
@ -373,7 +373,7 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
REDISBACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
self.assertIn("gz.enc", bkp.manifest)
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
mock.Mock(return_value={'dir': '/var/lib/redis',
'dbfilename': 'dump.rdb'}))
@ -416,7 +416,8 @@ class CouchbaseBackupTests(trove_testtools.TestCase):
def setUp(self):
super(CouchbaseBackupTests, self).setUp()
self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout')
self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout',
return_value=('0', ''))
self.exec_timeout_patch.start()
self.backup_runner = utils.import_class(BACKUP_CBBACKUP_CLS)
self.backup_runner_patch = patch.multiple(
@ -496,8 +497,8 @@ class MongodbBackupTests(trove_testtools.TestCase):
def setUp(self):
super(MongodbBackupTests, self).setUp()
self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout')
self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout',
return_value=('0', ''))
self.exec_timeout_mock = self.exec_timeout_patch.start()
self.addCleanup(self.exec_timeout_patch.stop)
@ -586,7 +587,8 @@ class RedisBackupTests(trove_testtools.TestCase):
def setUp(self):
super(RedisBackupTests, self).setUp()
self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout')
self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout',
return_value=('0', ''))
self.exec_timeout_patch.start()
self.addCleanup(self.exec_timeout_patch.stop)
self.conf_man_patch = patch.object(

View File

@ -52,7 +52,8 @@ class TestConfigurationManager(trove_testtools.TestCase):
override_strategy=sample_strategy)
manager.parse_configuration()
read_file.assert_called_with(sample_path, codec=sample_codec)
read_file.assert_called_with(sample_path, codec=sample_codec,
as_root=sample_requires_root)
with patch.object(manager, 'parse_configuration',
return_value={'key1': 'v1', 'key2': 'v2'}):

View File

@ -141,7 +141,8 @@ class GuestAgentCouchbaseManagerTest(testtools.TestCase):
if self.tempname:
os.unlink(self.tempname)
@mock.patch.object(utils, 'execute_with_timeout', Mock(return_value=0))
@mock.patch.object(utils, 'execute_with_timeout',
Mock(return_value=('0', '')))
def test_write_password_to_file1(self):
self.original_mkstemp = tempfile.mkstemp
self.tempname = None
@ -157,7 +158,8 @@ class GuestAgentCouchbaseManagerTest(testtools.TestCase):
filepermissions = os.stat(self.tempname).st_mode
self.assertEqual(stat.S_IRUSR, filepermissions & 0o777)
@mock.patch.object(utils, 'execute_with_timeout', Mock(return_value=0))
@mock.patch.object(utils, 'execute_with_timeout',
Mock(return_value=('0', '')))
@mock.patch(
'trove.guestagent.datastore.experimental.couchbase.service.LOG')
def test_write_password_to_file2(self, mock_logging):

View File

@ -63,7 +63,7 @@ from trove.guestagent.datastore.experimental.mongodb import (
from trove.guestagent.datastore.experimental.postgresql import (
manager as pg_manager)
from trove.guestagent.datastore.experimental.postgresql.service import (
process as pg_process)
config as pg_config)
from trove.guestagent.datastore.experimental.postgresql.service import (
status as pg_status)
from trove.guestagent.datastore.experimental.pxc import (
@ -967,7 +967,7 @@ class MySqlAppTest(testtools.TestCase):
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
def test_stop_mysql_do_not_start_on_reboot(self, mock_execute):
self.appStatus.set_next_status(
@ -995,7 +995,7 @@ class MySqlAppTest(testtools.TestCase):
@patch('trove.guestagent.datastore.mysql_common.service.LOG')
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
def test_stop_mysql_key_error(self, mock_execute, mock_service,
mock_logging):
with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
@ -1147,7 +1147,7 @@ class MySqlAppTest(testtools.TestCase):
'password': auth_pwd_mock.return_value}})
wipe_ib_mock.assert_called_once_with()
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
def test__enable_mysql_on_boot(self, mock_execute):
mysql_service = \
dbaas_base.operating_system.service_discovery(["mysql"])
@ -1159,14 +1159,14 @@ class MySqlAppTest(testtools.TestCase):
@patch('trove.guestagent.datastore.mysql_common.service.LOG')
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
def test_fail__enable_mysql_on_boot(self, mock_execute, mock_service,
mock_logging):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp._enable_mysql_on_boot)
self.assertEqual(0, mock_execute.call_count)
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
def test__disable_mysql_on_boot(self, mock_execute):
mysql_service = \
dbaas_base.operating_system.service_discovery(["mysql"])
@ -1178,7 +1178,7 @@ class MySqlAppTest(testtools.TestCase):
@patch('trove.guestagent.datastore.mysql_common.service.LOG')
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
def test_fail__disable_mysql_on_boot(self, mock_execute, mock_service,
mock_logging):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
@ -2384,7 +2384,8 @@ class TestRedisApp(BaseAppTest.AppTestCase):
RedisApp._install_redis.assert_any_call('asdf')
def test_install_redis(self):
with patch.object(utils, 'execute_with_timeout'):
with patch.object(utils, 'execute_with_timeout',
return_value=('0', '')):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
with patch.object(RedisApp, 'start_db', return_value=None):
self.app._install_redis('redis')
@ -2392,7 +2393,7 @@ class TestRedisApp(BaseAppTest.AppTestCase):
RedisApp.start_db.assert_any_call()
self.assertTrue(utils.execute_with_timeout.called)
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
def test_service_cleanup(self, exec_mock):
rservice.RedisAppStatus(Mock()).cleanup_stalled_db_services()
exec_mock.assert_called_once_with('pkill', '-9', 'redis-server',
@ -3591,7 +3592,7 @@ class PXCAppTest(testtools.TestCase):
self.assertEqual(expected, args[0].text,
"Sql statements are not the same")
@patch.object(utils, 'execute_with_timeout')
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
def test__bootstrap_cluster(self, mock_execute):
pxc_service_cmds = pxc_system.service_discovery(['mysql'])
self.PXCApp._bootstrap_cluster(timeout=20)
@ -3657,7 +3658,8 @@ class PostgresAppTest(BaseAppTest.AppTestCase):
def stop_db(self):
super(PostgresAppTest.FakePostgresApp, self).stop_db(Mock())
def setUp(self):
@patch.object(pg_config.PgSqlConfig, '_find_config_file', return_value='')
def setUp(self, _):
super(PostgresAppTest, self).setUp(str(uuid4()))
self.orig_time_sleep = time.sleep
self.orig_time_time = time.time
@ -3685,7 +3687,7 @@ class PostgresAppTest(BaseAppTest.AppTestCase):
@property
def expected_service_candidates(self):
return pg_process.SERVICE_CANDIDATES
return self.postgres.SERVICE_CANDIDATES
def tearDown(self):
time.sleep = self.orig_time_sleep

View File

@ -35,7 +35,7 @@ class GuestAgentMongoDBManagerTest(trove_testtools.TestCase):
self.manager = manager.Manager()
self.execute_with_timeout_patch = mock.patch.object(
utils, 'execute_with_timeout'
utils, 'execute_with_timeout', return_value=('0', '')
)
self.addCleanup(self.execute_with_timeout_patch.stop)
self.execute_with_timeout_patch.start()

View File

@ -97,7 +97,8 @@ class TestOperatingSystem(trove_testtools.TestCase):
'key7': 0,
'key8': None,
'key9': [['str1', 'str2'], ['str3', 'str4']],
'key10': [['str1', 'str2', 'str3'], ['str3', 'str4'], 'str5']
'key10': [['str1', 'str2', 'str3'], ['str3', 'str4'], 'str5'],
'key11': True
}
self._test_file_codec(data, PropertiesCodec())
@ -713,6 +714,31 @@ class TestOperatingSystem(trove_testtools.TestCase):
"Got unknown keyword args: {'_unknown_kw': 0}"),
'path', _unknown_kw=0)
def test_exists(self):
self.assertFalse(
operating_system.exists(tempfile.gettempdir(), is_directory=False))
self.assertTrue(
operating_system.exists(tempfile.gettempdir(), is_directory=True))
with tempfile.NamedTemporaryFile() as test_file:
self.assertTrue(
operating_system.exists(test_file.name, is_directory=False))
self.assertFalse(
operating_system.exists(test_file.name, is_directory=True))
self._assert_execute_call(
[['test -f path && echo 1 || echo 0']],
[{'shell': True, 'check_exit_code': False,
'run_as_root': True, 'root_helper': 'sudo'}],
operating_system.exists, None, 'path', is_directory=False,
as_root=True)
self._assert_execute_call(
[['test -d path && echo 1 || echo 0']],
[{'shell': True, 'check_exit_code': False,
'run_as_root': True, 'root_helper': 'sudo'}],
operating_system.exists, None, 'path', is_directory=True,
as_root=True)
def _assert_execute_call(self, exec_args, exec_kwargs,
fun, return_value, *args, **kwargs):
"""
@ -746,7 +772,8 @@ class TestOperatingSystem(trove_testtools.TestCase):
:type kwargs: dict
"""
with patch.object(utils, 'execute_with_timeout') as exec_call:
with patch.object(utils, 'execute_with_timeout',
return_value=('0', '')) as exec_call:
if isinstance(return_value, ExpectedException):
with return_value:
fun(*args, **kwargs)
@ -832,39 +859,67 @@ class TestOperatingSystem(trove_testtools.TestCase):
root_path, 3, 3, ['txt', 'py', ''], 1, all_paths)
# All files in the top directory.
self._assert_list_files(root_path, False, None, all_paths, 9)
self._assert_list_files(
root_path, False, None, False, all_paths, 9)
# All files & directories in the top directory.
self._assert_list_files(
root_path, False, None, True, all_paths, 10)
# All files recursive.
self._assert_list_files(root_path, True, None, all_paths, 27)
self._assert_list_files(
root_path, True, None, False, all_paths, 27)
# All files & directories recursive.
self._assert_list_files(
root_path, True, None, True, all_paths, 29)
# Only '*.txt' in the top directory.
self._assert_list_files(root_path, False, '.*\.txt$', all_paths, 3)
self._assert_list_files(
root_path, False, '.*\.txt$', False, all_paths, 3)
# Only '*.txt' (including directories) in the top directory.
self._assert_list_files(
root_path, False, '.*\.txt$', True, all_paths, 3)
# Only '*.txt' recursive.
self._assert_list_files(root_path, True, '.*\.txt$', all_paths, 9)
self._assert_list_files(
root_path, True, '.*\.txt$', True, all_paths, 9)
# Only '*.txt' (including directories) recursive.
self._assert_list_files(
root_path, True, '.*\.txt$', False, all_paths, 9)
# Only extension-less files in the top directory.
self._assert_list_files(root_path, False, '[^\.]*$', all_paths, 3)
self._assert_list_files(
root_path, False, '[^\.]*$', False, all_paths, 3)
# Only extension-less files recursive.
self._assert_list_files(root_path, True, '[^\.]*$', all_paths, 9)
self._assert_list_files(
root_path, True, '[^\.]*$', False, all_paths, 9)
# Non-existing extension in the top directory.
self._assert_list_files(root_path, False, '.*\.bak$', all_paths, 0)
self._assert_list_files(
root_path, False, '.*\.bak$', False, all_paths, 0)
# Non-existing extension recursive.
self._assert_list_files(root_path, True, '.*\.bak$', all_paths, 0)
self._assert_list_files(
root_path, True, '.*\.bak$', False, all_paths, 0)
finally:
try:
os.remove(root_path)
except Exception:
pass # Do not fail in the cleanup.
def _assert_list_files(self, root, recursive, pattern, all_paths, count):
def _assert_list_files(self, root, recursive, pattern, include_dirs,
all_paths, count):
found = operating_system.list_files_in_directory(
root, recursive=recursive, pattern=pattern)
root, recursive=recursive, pattern=pattern,
include_dirs=include_dirs)
expected = {
path for path in all_paths if (
path for path in filter(
lambda item: include_dirs or not os.path.isdir(item),
all_paths) if (
(recursive or os.path.dirname(path) == root) and (
not pattern or re.match(
pattern, os.path.basename(path))))}
@ -884,6 +939,7 @@ class TestOperatingSystem(trove_testtools.TestCase):
if level < num_levels:
path = tempfile.mkdtemp(dir=root_path)
created_paths.add(path)
self._create_temp_fs_structure(
path, num_levels, num_files_per_extension,
file_extensions, level + 1, created_paths)

View File

@ -221,7 +221,8 @@ class VolumeMountPointTest(trove_testtools.TestCase):
os.path.exists = MagicMock(return_value=False)
fake_spawn = _setUp_fake_spawn()
with patch.object(utils, 'execute_with_timeout'):
with patch.object(utils, 'execute_with_timeout',
return_value=('0', '')):
self.volumeMountPoint.mount()
self.assertEqual(1, os.path.exists.call_count)