Merge "Add options for osd backfill pressure"

This commit is contained in:
Zuul 2017-11-10 14:09:41 +00:00 committed by Gerrit Code Review
commit 7d20f7e37a
4 changed files with 40 additions and 0 deletions

View File

@ -135,6 +135,24 @@ options:
default, the initial crush weight for the newly added osd is set to its
volume size in TB. Leave this option unset to use the default provided
by Ceph itself. This option only affects NEW OSDs, not existing ones.
osd-max-backfills:
type: int
default:
description: |
The maximum number of backfills allowed to or from a single OSD.
.
Setting this option on a running Ceph OSD node will not affect running
OSD devices, but will add the setting to ceph.conf for the next restart.
osd-recovery-max-active:
type: int
default:
description: |
The number of active recovery requests per OSD at one time. More requests
will accelerate recovery, but the requests place an increased load on the
cluster.
.
Setting this option on a running Ceph OSD node will not affect running
OSD devices, but will add the setting to ceph.conf for the next restart.
ignore-device-errors:
type: boolean
default: False

View File

@ -245,6 +245,8 @@ def get_ceph_context(upgrading=False):
'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
'crush_initial_weight': config('crush-initial-weight'),
'osd_journal_size': config('osd-journal-size'),
'osd_max_backfills': config('osd-max-backfills'),
'osd_recovery_max_active': config('osd-recovery-max-active'),
'use_syslog': str(config('use-syslog')).lower(),
'ceph_public_network': public_network,
'ceph_cluster_network': cluster_network,

View File

@ -74,6 +74,12 @@ journal dio = {{ dio }}
osd max object name len = 256
osd max object namespace len = 64
{% endif %}
{%- if osd_max_backfills %}
osd max backfills = {{ osd_max_backfills }}
{%- endif %}
{%- if osd_recovery_max_active %}
osd recovery max active = {{ osd_recovery_max_active }}
{%- endif %}
{% if osd -%}
# The following are user-provided options provided via the config-flags charm option.
{% for key in osd -%}

View File

@ -28,6 +28,8 @@ CHARM_CONFIG = {'config-flags': '',
'loglevel': 1,
'use-syslog': True,
'osd-journal-size': 1024,
'osd-max-backfills': 1,
'osd-recovery-max-active': 2,
'use-direct-io': True,
'osd-format': 'ext4',
'prefer-ipv6': False,
@ -66,6 +68,8 @@ class CephHooksTestCase(unittest.TestCase):
'old_auth': False,
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -101,6 +105,8 @@ class CephHooksTestCase(unittest.TestCase):
'old_auth': False,
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -136,6 +142,8 @@ class CephHooksTestCase(unittest.TestCase):
'old_auth': False,
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -172,6 +180,8 @@ class CephHooksTestCase(unittest.TestCase):
'old_auth': False,
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -208,6 +218,8 @@ class CephHooksTestCase(unittest.TestCase):
'osd': {'osd max write size': 1024},
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -246,6 +258,8 @@ class CephHooksTestCase(unittest.TestCase):
'osd': {'osd max write size': 1024},
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,