NetApp: fix free space reported as zero during 1st vol stats update

NetApp clustered ONTAP drivers report free space as zero
for the first 60 seconds after driver start, which causes
problems for performance-sensitive deployments. This was
due to the asynchronous nature of the NetApp stats
collection job. The job is now run synchronously at driver
start to improve the customer experience.

Change-Id: I7d5cbf590897a0d328ece3a60516c92c0ad0ee7f
Closes-bug: #1253660
(cherry picked from commit 585f34ff77)
Author: Navneet Singh
Date:   2013-11-18 02:38:07 +05:30
Parent: ad05556a2f
Commit: c07d60fe45
4 changed files with 30 additions and 15 deletions
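
The core of the change, as a minimal standalone sketch: the first stats
call blocks until data exists; later calls kick off a background refresh
and serve cached values. Backend, _collect and update_stats are
illustrative names, not the driver's actual API:

    from threading import Timer

    class Backend(object):
        def __init__(self):
            self.vols = None  # no SSC data collected yet

        def _collect(self):
            # stands in for the slow cluster SSC query
            self.vols = {'total_gb': 100, 'free_gb': 40}

        def update_stats(self):
            if self.vols is None:
                # first call after startup: block so we never report zero
                self._collect()
            else:
                # subsequent calls: refresh in the background, serve cache
                Timer(0, self._collect).start()
            return dict(self.vols)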

@@ -545,7 +545,8 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
 
     def _custom_setup(self):
         self.stubs.Set(
-            ssc_utils, 'refresh_cluster_ssc', lambda a, b, c: None)
+            ssc_utils, 'refresh_cluster_ssc',
+            lambda a, b, c, synchronous: None)
         configuration = self._set_config(create_configuration())
         driver = common.NetAppDriver(configuration=configuration)
         self.stubs.Set(httplib, 'HTTPConnection',
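
The stub above grows a fourth parameter to match the new
refresh_cluster_ssc signature. A more permissive stub (an alternative,
not what the commit uses) would absorb future signature changes as well:

    self.stubs.Set(ssc_utils, 'refresh_cluster_ssc',
                   lambda *args, **kwargs: None)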

@@ -781,7 +781,6 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
         self.client.set_api_version(major, minor)
         self.ssc_vols = None
         self.stale_vols = set()
-        ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
 
     def _create_lun_on_eligible_vol(self, name, size, metadata,
                                     extra_specs=None):
@@ -1057,6 +1056,9 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
 
     def _update_cluster_vol_stats(self, data):
         """Updates vol stats with cluster config."""
+        sync = True if self.ssc_vols is None else False
+        ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver,
+                                      synchronous=sync)
         if self.ssc_vols:
             data['netapp_mirrored'] = 'true'\
                 if self.ssc_vols['mirrored'] else 'false'
@@ -1090,7 +1092,6 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
             data['free_capacity_gb'] = 0
         else:
             LOG.warn(_("Cluster ssc is not updated. No volume stats found."))
-        ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
 
     @utils.synchronized('update_stale')
     def _update_stale_vols(self, volume=None, reset=False):
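
In this driver the blocking refresh is gated on self.ssc_vols being
None, so only the very first stats update after startup pays the
synchronous cost. The ternary above is equivalent to the plain boolean
expression:

    sync = self.ssc_vols is None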

@@ -714,7 +714,6 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
             self.ssc_enabled = True
             LOG.info(_("Shares on vserver %s will only"
                        " be used for provisioning.") % (self.vserver))
-            ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
         else:
             self.ssc_enabled = False
             LOG.warn(_("No vserver set in config. SSC will be disabled."))
@@ -881,6 +880,12 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
 
     def _update_cluster_vol_stats(self, data):
         """Updates vol stats with cluster config."""
+        if self.ssc_enabled:
+            sync = True if self.ssc_vols is None else False
+            ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver,
+                                          synchronous=sync)
+        else:
+            LOG.warn(_("No vserver set in config. SSC will be disabled."))
         if self.ssc_vols:
             data['netapp_mirrored'] = 'true'\
                 if self.ssc_vols['mirrored'] else 'false'
@@ -914,10 +919,6 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
         elif self.ssc_enabled:
             LOG.warn(_("No cluster ssc stats found."
                        " Wait for next volume stats update."))
-        if self.ssc_enabled:
-            ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
-        else:
-            LOG.warn(_("No vserver set in config. SSC will be disabled."))
 
     @utils.synchronized('update_stale')
     def _update_stale_vols(self, volume=None, reset=False):

@@ -434,6 +434,9 @@ def refresh_cluster_stale_ssc(*args, **kwargs):
                     vol_set = ssc_vols_copy[k]
                     vol_set.discard(vol)
                 backend.refresh_ssc_vols(ssc_vols_copy)
+                LOG.info(_('Successfully completed stale refresh job for'
+                           ' %(server)s and vserver %(vs)s')
+                         % {'server': na_server, 'vs': vserver})
 
         refresh_stale_ssc()
     finally:
@@ -464,13 +467,16 @@ def get_cluster_latest_ssc(*args, **kwargs):
             ssc_vols = get_cluster_ssc(na_server, vserver)
             backend.refresh_ssc_vols(ssc_vols)
             backend.ssc_run_time = timeutils.utcnow()
+            LOG.info(_('Successfully completed ssc job for %(server)s'
+                       ' and vserver %(vs)s')
+                     % {'server': na_server, 'vs': vserver})
 
         get_latest_ssc()
     finally:
         na_utils.set_safe_attr(backend, 'ssc_job_running', False)
 
 
-def refresh_cluster_ssc(backend, na_server, vserver):
+def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
     """Refresh cluster ssc for backend."""
     if not isinstance(backend, driver.VolumeDriver):
         raise exception.InvalidInput(reason=_("Backend not a VolumeDriver."))
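
Because the new parameter defaults to False, existing call sites keep
the old asynchronous behavior untouched; only callers that opt in with
synchronous=True block. Illustrative call sites:

    refresh_cluster_ssc(backend, na_server, vserver)   # background, as before
    refresh_cluster_ssc(backend, na_server, vserver,
                        synchronous=True)              # blocks until refreshed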
@@ -483,17 +489,23 @@ def refresh_cluster_ssc(backend, na_server, vserver):
     elif (getattr(backend, 'ssc_run_time', None) is None or
           (backend.ssc_run_time and
            timeutils.is_newer_than(backend.ssc_run_time, delta_secs))):
-        t = Timer(0, get_cluster_latest_ssc,
-                  args=[backend, na_server, vserver])
-        t.start()
+        if synchronous:
+            get_cluster_latest_ssc(backend, na_server, vserver)
+        else:
+            t = Timer(0, get_cluster_latest_ssc,
+                      args=[backend, na_server, vserver])
+            t.start()
     elif getattr(backend, 'refresh_stale_running', None):
         LOG.warn(_('refresh stale ssc job in progress. Returning... '))
         return
     else:
         if backend.stale_vols:
-            t = Timer(0, refresh_cluster_stale_ssc,
-                      args=[backend, na_server, vserver])
-            t.start()
+            if synchronous:
+                refresh_cluster_stale_ssc(backend, na_server, vserver)
+            else:
+                t = Timer(0, refresh_cluster_stale_ssc,
+                          args=[backend, na_server, vserver])
+                t.start()
 
 
 def get_volumes_for_specs(ssc_vols, specs):
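
Timer(0, fn, args=[...]).start() is the threading module's one-shot
idiom: fn runs once on a freshly spawned thread with effectively no
delay, and the caller returns immediately; the synchronous branch calls
the same function inline instead. A runnable sketch of the dispatch,
with dispatch and slow_job as illustrative names:

    from threading import Timer

    def slow_job(name):
        print('refreshed ssc for %s' % name)

    def dispatch(fn, args, synchronous=False):
        if synchronous:
            fn(*args)                        # caller blocks until fn returns
        else:
            Timer(0, fn, args=args).start()  # fire-and-forget background thread

    dispatch(slow_job, ['vserver1'], synchronous=True)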