Add option for OSD initial weight

In small clusters, adding OSDs at their full weight causes a massive IO
workload which makes performance unacceptable.  This adds a config
option to change the initial weight; it can be set to 0 or something
small for clusters that would be affected.

Closes-Bug: 1716783
Change-Id: Idadfd565fbda9ffc3952de73c5c58a0dc1dc69c9
This commit is contained in:
Xav Paice 2017-05-03 16:04:01 +12:00 committed by James Page
parent b9cbfeb906
commit ef3c3c7a0d
4 changed files with 24 additions and 1 deletions

View File

@ -123,6 +123,18 @@ options:
.
Specifying this option on a running Ceph OSD node will have no effect
until new disks are added, at which point new disks will be encrypted.
crush-initial-weight:
type: float
default:
description: |
The initial CRUSH weight for newly added OSDs in the CRUSH map. Use this
option only if you wish to set the weight for newly added OSDs in order
to gradually increase the weight over time. Be very aware that setting
this overrides the default setting, which can lead to imbalance in the
cluster, especially if there are OSDs of different sizes in use. By
default, the initial CRUSH weight for a newly added OSD is set to its
volume size in TB. Leave this option unset to use the default provided
by Ceph itself. This option only affects NEW OSDs, not existing ones.
ignore-device-errors:
type: boolean
default: False

View File

@ -242,6 +242,7 @@ def get_ceph_context(upgrading=False):
'mon_hosts': ' '.join(mon_hosts),
'fsid': get_fsid(),
'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
'crush_initial_weight': config('crush-initial-weight'),
'osd_journal_size': config('osd-journal-size'),
'use_syslog': str(config('use-syslog')).lower(),
'ceph_public_network': public_network,

View File

@ -33,6 +33,9 @@ osd crush location = {{crush_location}}
{%- if upgrade_in_progress %}
setuser match path = /var/lib/ceph/$type/$cluster-$id
{%- endif %}
{%- if crush_initial_weight %}
osd crush initial weight = {{ crush_initial_weight }}
{%- endif %}
{% if global -%}
# The following are user-provided options provided via the config-flags charm option.
# User-provided [global] section config

View File

@ -29,7 +29,8 @@ CHARM_CONFIG = {'config-flags': '',
'osd-format': 'ext4',
'prefer-ipv6': False,
'customize-failure-domain': False,
'bluestore': False}
'bluestore': False,
'crush-initial-weight': '0'}
class CephHooksTestCase(unittest.TestCase):
@ -60,6 +61,7 @@ class CephHooksTestCase(unittest.TestCase):
'loglevel': 1,
'mon_hosts': '10.0.0.1 10.0.0.2',
'old_auth': False,
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'public_addr': '10.0.0.1',
'short_object_len': True,
@ -94,6 +96,7 @@ class CephHooksTestCase(unittest.TestCase):
'loglevel': 1,
'mon_hosts': '10.0.0.1 10.0.0.2',
'old_auth': False,
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'public_addr': '10.0.0.1',
'short_object_len': True,
@ -128,6 +131,7 @@ class CephHooksTestCase(unittest.TestCase):
'loglevel': 1,
'mon_hosts': '10.0.0.1 10.0.0.2',
'old_auth': False,
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'public_addr': '10.0.0.1',
'short_object_len': True,
@ -163,6 +167,7 @@ class CephHooksTestCase(unittest.TestCase):
'loglevel': 1,
'mon_hosts': '10.0.0.1 10.0.0.2',
'old_auth': False,
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'public_addr': '10.0.0.1',
'short_object_len': True,
@ -198,6 +203,7 @@ class CephHooksTestCase(unittest.TestCase):
'mon_hosts': '10.0.0.1 10.0.0.2',
'old_auth': False,
'osd': {'osd max write size': 1024},
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'public_addr': '10.0.0.1',
'short_object_len': True,
@ -235,6 +241,7 @@ class CephHooksTestCase(unittest.TestCase):
'mon_hosts': '10.0.0.1 10.0.0.2',
'old_auth': False,
'osd': {'osd max write size': 1024},
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'public_addr': '10.0.0.1',
'short_object_len': True,