Fix Actions
Adding another directory layer to src/lib/gluster resolves naming conflicts with the gluster cli library. This also fixes many small problems with the actions.py code. Change-Id: I8b2ad6835f5c02852391858baa77c7fa38426da5
This commit is contained in:
parent
9e5239f0ee
commit
28af3e90f1
|
@ -0,0 +1,13 @@
|
|||
# Copyright 2017 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
|
@ -334,208 +334,208 @@ class GlusterOption(object):
|
|||
|
||||
@staticmethod
|
||||
def from_str(s: str, value):
|
||||
if s == "auth.allow":
|
||||
if s == "auth-allow":
|
||||
return GlusterOption(option=GlusterOption.AuthAllow, value=value)
|
||||
elif s == "auth.reject":
|
||||
elif s == "auth-reject":
|
||||
return GlusterOption(option=GlusterOption.AuthReject, value=value)
|
||||
elif s == "auth.ssl-allow":
|
||||
elif s == "auth-ssl-allow":
|
||||
return GlusterOption(option=GlusterOption.SslAllow, value=value)
|
||||
elif s == "client.ssl":
|
||||
elif s == "client-ssl":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.ClientSsl, value=t)
|
||||
elif s == "cluster.favorite-child-policy":
|
||||
elif s == "cluster-favorite-child-policy":
|
||||
policy = SplitBrainPolicy.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.FavoriteChildPolicy,
|
||||
value=policy)
|
||||
elif s == "client.grace-timeout":
|
||||
elif s == "client-grace-timeout":
|
||||
i = int(value)
|
||||
return GlusterOption(option=GlusterOption.ClientGraceTimeout,
|
||||
value=i)
|
||||
elif s == "cluster.self-heal-window-size":
|
||||
elif s == "cluster-self-heal-window-size":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.ClusterSelfHealWindowSize,
|
||||
value=i)
|
||||
elif s == "cluster.data-self-heal-algorithm":
|
||||
elif s == "cluster-data-self-heal-algorithm":
|
||||
s = SelfHealAlgorithm.from_str(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.ClusterDataSelfHealAlgorithm, value=s)
|
||||
elif s == "cluster.min-free-disk":
|
||||
elif s == "cluster-min-free-disk":
|
||||
i = int(value)
|
||||
return GlusterOption(option=GlusterOption.ClusterMinFreeDisk,
|
||||
value=i)
|
||||
elif s == "cluster.stripe-block-size":
|
||||
elif s == "cluster-stripe-block-size":
|
||||
i = int(value)
|
||||
return GlusterOption(option=GlusterOption.ClusterStripeBlockSize,
|
||||
value=i)
|
||||
elif s == "cluster.self-heal-daemon":
|
||||
elif s == "cluster-self-heal-daemon":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.ClusterSelfHealDaemon,
|
||||
value=t)
|
||||
elif s == "cluster.ensure-durability":
|
||||
elif s == "cluster-ensure-durability":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.ClusterEnsureDurability,
|
||||
value=t)
|
||||
elif s == "diagnostics.brick-log-level":
|
||||
elif s == "diagnostics-brick-log-level":
|
||||
return GlusterOption(option=GlusterOption.DiagnosticsBrickLogLevel,
|
||||
value=value)
|
||||
elif s == "diagnostics.client-log-level":
|
||||
elif s == "diagnostics-client-log-level":
|
||||
return GlusterOption(
|
||||
option=GlusterOption.DiagnosticsClientLogLevel,
|
||||
value=value)
|
||||
elif s == "diagnostics.latency-measurement":
|
||||
elif s == "diagnostics-latency-measurement":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.DiagnosticsLatencyMeasurement, value=t)
|
||||
elif s == "diagnostics.count-fop-hits":
|
||||
elif s == "diagnostics-count-fop-hits":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.DiagnosticsCountFopHits,
|
||||
value=t)
|
||||
elif s == "diagnostics.stats-dump-interval":
|
||||
elif s == "diagnostics-stats-dump-interval":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.DiagnosticsStatsDumpInterval, value=i)
|
||||
elif s == "diagnostics.fop-sample-buf-size":
|
||||
elif s == "diagnostics-fop-sample-buf-size":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.DiagnosticsFopSampleBufSize,
|
||||
value=i)
|
||||
elif s == "diagnostics.fop-sample-interval":
|
||||
elif s == "diagnostics-fop-sample-interval":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.DiagnosticsFopSampleInterval, value=i)
|
||||
elif s == "diagnostics.stats-dnscache-ttl-sec":
|
||||
elif s == "diagnostics-stats-dnscache-ttl-sec":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.DiagnosticsStatsDnscacheTtlSec, value=i)
|
||||
elif s == "diagnostics.dump-fd-stats":
|
||||
elif s == "diagnostics-dump-fd-stats":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.DiagnosticsDumpFdStats,
|
||||
value=t)
|
||||
elif s == "features.read-only":
|
||||
elif s == "features-read-only":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.FeaturesReadOnly, value=t)
|
||||
elif s == "features.lock-heal":
|
||||
elif s == "features-lock-heal":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.FeaturesLockHeal, value=t)
|
||||
elif s == "features.quota-timeout":
|
||||
elif s == "features-quota-timeout":
|
||||
i = int(value)
|
||||
return GlusterOption(option=GlusterOption.FeaturesQuotaTimeout,
|
||||
value=i)
|
||||
elif s == "geo-replication.indexing":
|
||||
elif s == "geo-replication-indexing":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.GeoReplicationIndexing,
|
||||
value=t)
|
||||
elif s == "network.frame-timeout":
|
||||
elif s == "network-frame-timeout":
|
||||
i = int(value)
|
||||
return GlusterOption(option=GlusterOption.NetworkFrameTimeout,
|
||||
value=i)
|
||||
elif s == "nfs.enable-ino32":
|
||||
elif s == "nfs-enable-ino32":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.NfsEnableIno32, value=t)
|
||||
elif s == "nfs.volume-access":
|
||||
elif s == "nfs-volume-access":
|
||||
s = AccessMode.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.NfsVolumeAccess, value=s)
|
||||
elif s == "nfs.trusted-write":
|
||||
elif s == "nfs-trusted-write":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.NfsTrustedWrite, value=t)
|
||||
elif s == "nfs.trusted-sync":
|
||||
elif s == "nfs-trusted-sync":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.NfsTrustedSync, value=t)
|
||||
elif s == "nfs.export-dir":
|
||||
elif s == "nfs-export-dir":
|
||||
return GlusterOption(
|
||||
option=GlusterOption.NfsExportDir, value=value)
|
||||
elif s == "nfs.export-volumes":
|
||||
elif s == "nfs-export-volumes":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.NfsExportVolumes, value=t)
|
||||
elif s == "nfs.rpc-auth-unix":
|
||||
elif s == "nfs-rpc-auth-unix":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.NfsRpcAuthUnix, value=t)
|
||||
elif s == "nfs.rpc-auth-null":
|
||||
elif s == "nfs-rpc-auth-null":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.NfsRpcAuthNull, value=t)
|
||||
elif s == "nfs.ports-insecure":
|
||||
elif s == "nfs-ports-insecure":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.NfsPortsInsecure, value=t)
|
||||
elif s == "nfs.addr-namelookup":
|
||||
elif s == "nfs-addr-namelookup":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.NfsAddrNamelookup,
|
||||
value=t)
|
||||
elif s == "nfs.register-with-portmap":
|
||||
elif s == "nfs-register-with-portmap":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.NfsRegisterWithPortmap,
|
||||
value=t)
|
||||
elif s == "nfs.disable":
|
||||
elif s == "nfs-disable":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.NfsDisable, value=t)
|
||||
elif s == "performance.write-behind-window-size":
|
||||
elif s == "performance-write-behind-window-size":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.PerformanceWriteBehindWindowSize, value=i)
|
||||
elif s == "performance.io-thread-count":
|
||||
elif s == "performance-io-thread-count":
|
||||
i = int(value)
|
||||
return GlusterOption(option=GlusterOption.PerformanceIoThreadCount,
|
||||
value=i)
|
||||
elif s == "performance.flush-behind":
|
||||
elif s == "performance-flush-behind":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.PerformanceFlushBehind,
|
||||
value=t)
|
||||
elif s == "performance.cache-max-file-size":
|
||||
elif s == "performance-cache-max-file-size":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.PerformanceCacheMaxFileSize,
|
||||
value=i)
|
||||
elif s == "performance.cache-min-file-size":
|
||||
elif s == "performance-cache-min-file-size":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.PerformanceCacheMinFileSize,
|
||||
value=i)
|
||||
elif s == "performance.cache-refresh-timeout":
|
||||
elif s == "performance-cache-refresh-timeout":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.PerformanceCacheRefreshTimeout, value=i)
|
||||
elif s == "performance.cache-size":
|
||||
elif s == "performance-cache-size":
|
||||
i = int(value)
|
||||
return GlusterOption(option=GlusterOption.PerformanceCacheSize,
|
||||
value=i)
|
||||
elif s == "performance.readdir-ahead":
|
||||
elif s == "performance-readdir-ahead":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.PerformanceReadDirAhead,
|
||||
value=t)
|
||||
elif s == "performance.parallel-readdir":
|
||||
elif s == "performance-parallel-readdir":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.PerformanceReadDirAhead,
|
||||
value=t)
|
||||
elif s == "performance.readdir-cache-limit":
|
||||
elif s == "performance-readdir-cache-limit":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.PerformanceReadDirAheadCacheLimit,
|
||||
value=i)
|
||||
elif s == "server.ssl":
|
||||
elif s == "server-ssl":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.ServerSsl, value=t)
|
||||
elif s == "server.allow-insecure":
|
||||
elif s == "server-allow-insecure":
|
||||
t = Toggle.from_str(value)
|
||||
return GlusterOption(option=GlusterOption.ServerAllowInsecure,
|
||||
value=t)
|
||||
elif s == "server.grace-timeout":
|
||||
elif s == "server-grace-timeout":
|
||||
i = int(value)
|
||||
return GlusterOption(option=GlusterOption.ServerGraceTimeout,
|
||||
value=i)
|
||||
elif s == "server.statedump-path":
|
||||
elif s == "server-statedump-path":
|
||||
return GlusterOption(option=GlusterOption.ServerStatedumpPath,
|
||||
value=value)
|
||||
elif s == "ssl.certificate-depth":
|
||||
elif s == "ssl-certificate-depth":
|
||||
i = int(value)
|
||||
return GlusterOption(option=GlusterOption.SslCertificateDepth,
|
||||
value=i)
|
||||
elif s == "ssl.cipher-list":
|
||||
elif s == "ssl-cipher-list":
|
||||
return GlusterOption(GlusterOption.SslCipherList, value=value)
|
||||
elif s == "storage.health-check-interval":
|
||||
elif s == "storage-health-check-interval":
|
||||
i = int(value)
|
||||
return GlusterOption(
|
||||
option=GlusterOption.StorageHealthCheckInterval,
|
||||
|
@ -972,6 +972,7 @@ def volume_info(vol_name: str) -> List[Volume]:
|
|||
if len(info) > 0:
|
||||
v = info[0]
|
||||
brick_list = []
|
||||
option_dict = {}
|
||||
for brick in v['bricks']:
|
||||
ip_addr = brick['name'].split(':')[0]
|
||||
path = brick['name'].split(':')[1]
|
||||
|
@ -984,6 +985,8 @@ def volume_info(vol_name: str) -> List[Volume]:
|
|||
path=path,
|
||||
# Not enough info to answer this next field
|
||||
is_arbiter=False))
|
||||
for key_value in v['options']:
|
||||
option_dict[key_value['name']] = key_value['value']
|
||||
return [Volume(
|
||||
name=v['name'],
|
||||
vol_id=uuid.UUID(v['uuid']),
|
||||
|
@ -998,7 +1001,7 @@ def volume_info(vol_name: str) -> List[Volume]:
|
|||
redundancy_count=None,
|
||||
transport=Transport.from_str(v['transport']),
|
||||
bricks=brick_list,
|
||||
options=v['options'],
|
||||
options=option_dict,
|
||||
)]
|
||||
else:
|
||||
return []
|
||||
|
@ -1045,7 +1048,7 @@ def volume_enable_bitrot(vol_name: str) -> None:
|
|||
"""
|
||||
Enable bitrot detection and remediation on the volume
|
||||
volume: String. The volume to operate on.
|
||||
:return: 0 on success
|
||||
:return: None on success
|
||||
:raises: GlusterError if the command fails to run
|
||||
"""
|
||||
try:
|
||||
|
@ -1072,20 +1075,25 @@ def volume_set_bitrot_option(vol_name: str, setting: BitrotOption) -> None:
|
|||
Set a bitrot option on the vol_name
|
||||
vol_name: String. The vol_name to operate on.
|
||||
setting: BitrotOption. The option to set on the bitrot daemon
|
||||
:return: 0 on success
|
||||
:return: None on success
|
||||
:raises: GlusterError if the command fails to run
|
||||
"""
|
||||
try:
|
||||
if setting == BitrotOption.ScrubThrottle:
|
||||
bitrot.scrub_throttle(volname=vol_name, throttle_type=str(setting))
|
||||
elif setting == BitrotOption.ScrubFrequency:
|
||||
bitrot.scrub_frequency(volname=vol_name, freq=str(setting))
|
||||
elif setting == BitrotOption.Scrub.Pause:
|
||||
bitrot.scrub_pause(volname=vol_name)
|
||||
elif setting == BitrotOption.Scrub.Resume:
|
||||
bitrot.scrub_resume(volname=vol_name)
|
||||
elif setting == BitrotOption.Scrub.Status:
|
||||
bitrot.scrub_status(volname=vol_name)
|
||||
log("Setting bitrot option {}={} on volume: {}".format(setting,
|
||||
setting.value,
|
||||
vol_name))
|
||||
if type(setting) is BitrotOption.ScrubThrottle:
|
||||
bitrot.scrub_throttle(volname=vol_name,
|
||||
throttle_type=str(setting.value))
|
||||
elif type(setting) is BitrotOption.ScrubFrequency:
|
||||
bitrot.scrub_frequency(volname=vol_name, freq=str(setting.value))
|
||||
elif type(setting) is BitrotOption.Scrub:
|
||||
if setting == BitrotOption.Scrub.Pause:
|
||||
bitrot.scrub_pause(volname=vol_name)
|
||||
elif setting == BitrotOption.Scrub.Resume:
|
||||
bitrot.scrub_resume(volname=vol_name)
|
||||
elif setting == BitrotOption.Scrub.Status:
|
||||
bitrot.scrub_status(volname=vol_name)
|
||||
except GlusterCmdException:
|
||||
raise
|
||||
|
||||
|
@ -1111,10 +1119,11 @@ def volume_quotas_enabled(vol_name: str) -> Result:
|
|||
vol_info = volume_info(vol_name)
|
||||
for vol in vol_info:
|
||||
if vol.name == vol_name:
|
||||
quota = vol.options["features.quota"]
|
||||
if quota is None or quota == "false":
|
||||
if "features.quota" not in vol.options:
|
||||
return Ok(False)
|
||||
elif quota == "on":
|
||||
elif vol.options['features.quota'] == "off":
|
||||
return Ok(False)
|
||||
elif vol.options['features.quota'] == "on":
|
||||
return Ok(True)
|
||||
else:
|
||||
# No idea what this is
|
||||
|
@ -1320,7 +1329,7 @@ def vol_set(vol_name: str, options: Dict[str, str]) -> None:
|
|||
try:
|
||||
volume.optset(volname=vol_name, opts=options)
|
||||
except GlusterCmdException as e:
|
||||
log("volume.optsetfailed: {}".format(e), ERROR)
|
||||
log("volume.optset failed: {}".format(e), ERROR)
|
||||
raise
|
||||
|
||||
|
||||
|
@ -1337,6 +1346,7 @@ def volume_set_options(volume: str, settings: List[GlusterOption]) -> Result:
|
|||
for setting in settings:
|
||||
options[setting.option] = str(setting.value)
|
||||
try:
|
||||
log("vol_set: {} with options: {}".format(volume, options))
|
||||
vol_set(volume, options)
|
||||
except GlusterCmdException as e:
|
||||
error_list.append(e)
|
|
@ -0,0 +1 @@
|
|||
__author__ = 'Chris Holcombe <chris.holcombe@canonical.com>'
|
|
@ -16,16 +16,12 @@
|
|||
import os
|
||||
import sys
|
||||
|
||||
sys.path.append('lib')
|
||||
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers.core.hookenv import action_get, action_fail, action_set
|
||||
from lib.gluster.volume import (quota_list,
|
||||
BitrotOption, ScrubAggression, ScrubSchedule,
|
||||
ScrubControl, GlusterOption,
|
||||
volume_add_quota,
|
||||
volume_disable_bitrot, volume_enable_bitrot,
|
||||
volume_enable_quotas, volume_quotas_enabled,
|
||||
volume_rebalance, volume_remove_quota,
|
||||
volume_set_bitrot_option, volume_set_options)
|
||||
from charmhelpers.core.hookenv import action_get, action_fail, action_set, log
|
||||
from gluster.cli import GlusterCmdException
|
||||
from charm.gluster import volume
|
||||
|
||||
|
||||
def rebalance_volume():
|
||||
|
@ -35,10 +31,10 @@ def rebalance_volume():
|
|||
vol = action_get("volume")
|
||||
if not vol:
|
||||
action_fail("volume not specified")
|
||||
output = volume_rebalance(vol)
|
||||
if output.is_err():
|
||||
action_fail(
|
||||
"volume rebalance failed with error: {}".format(output.value))
|
||||
try:
|
||||
volume.volume_rebalance(vol)
|
||||
except GlusterCmdException as e:
|
||||
action_fail("volume rebalance failed with error: {}".format(e))
|
||||
|
||||
|
||||
def enable_bitrot_scan():
|
||||
|
@ -48,9 +44,10 @@ def enable_bitrot_scan():
|
|||
vol = action_get("volume")
|
||||
if not vol:
|
||||
action_fail("volume not specified")
|
||||
output = volume_enable_bitrot(vol)
|
||||
if output.is_err():
|
||||
action_fail("enable bitrot failed with error: {}".format(output.value))
|
||||
try:
|
||||
volume.volume_enable_bitrot(vol)
|
||||
except GlusterCmdException as e:
|
||||
action_fail("enable bitrot failed with error: {}".format(e))
|
||||
|
||||
|
||||
def disable_bitrot_scan():
|
||||
|
@ -60,10 +57,10 @@ def disable_bitrot_scan():
|
|||
vol = action_get("volume")
|
||||
if not vol:
|
||||
action_fail("volume not specified")
|
||||
output = volume_disable_bitrot(vol)
|
||||
if output.is_err():
|
||||
action_fail("enable disable failed with error: {}".format(
|
||||
output.value))
|
||||
try:
|
||||
volume.volume_disable_bitrot(vol)
|
||||
except GlusterCmdException as e:
|
||||
action_fail("enable disable failed with error: {}".format(e))
|
||||
|
||||
|
||||
def pause_bitrot_scan():
|
||||
|
@ -71,11 +68,11 @@ def pause_bitrot_scan():
|
|||
Pause bitrot scan
|
||||
"""
|
||||
vol = action_get("volume")
|
||||
option = BitrotOption.Scrub(ScrubControl.Pause)
|
||||
output = volume_set_bitrot_option(vol, option)
|
||||
if output.is_err():
|
||||
action_fail(
|
||||
"pause bitrot scan failed with error: {}".format(output.value))
|
||||
option = volume.BitrotOption.Scrub(volume.ScrubControl.Pause)
|
||||
try:
|
||||
volume.volume_set_bitrot_option(vol, option)
|
||||
except GlusterCmdException as e:
|
||||
action_fail("pause bitrot scan failed with error: {}".format(e))
|
||||
|
||||
|
||||
def resume_bitrot_scan():
|
||||
|
@ -83,11 +80,11 @@ def resume_bitrot_scan():
|
|||
Resume bitrot scan
|
||||
"""
|
||||
vol = action_get("volume")
|
||||
option = BitrotOption.Scrub(ScrubControl.Resume)
|
||||
output = volume_set_bitrot_option(vol, option)
|
||||
if output.is_err():
|
||||
action_fail(
|
||||
"resume bitrot scan failed with error: {}".format(option.value))
|
||||
option = volume.BitrotOption.Scrub(volume.ScrubControl.Resume)
|
||||
try:
|
||||
volume.volume_set_bitrot_option(vol, option)
|
||||
except GlusterCmdException as e:
|
||||
action_fail("resume bitrot scan failed with error: {}".format(e))
|
||||
|
||||
|
||||
def set_bitrot_scan_frequency():
|
||||
|
@ -96,11 +93,14 @@ def set_bitrot_scan_frequency():
|
|||
"""
|
||||
vol = action_get("volume")
|
||||
frequency = action_get("frequency")
|
||||
option = ScrubSchedule.from_str(frequency)
|
||||
output = volume_set_bitrot_option(vol, BitrotOption.ScrubFrequency(option))
|
||||
if output.is_err():
|
||||
option = volume.ScrubSchedule.from_str(frequency)
|
||||
try:
|
||||
volume.volume_set_bitrot_option(vol,
|
||||
volume.BitrotOption.ScrubFrequency(
|
||||
option))
|
||||
except GlusterCmdException as e:
|
||||
action_fail("set bitrot scan frequency failed with error: {}".format(
|
||||
output.value))
|
||||
e))
|
||||
|
||||
|
||||
def set_bitrot_throttle():
|
||||
|
@ -109,11 +109,13 @@ def set_bitrot_throttle():
|
|||
"""
|
||||
vol = action_get("volume")
|
||||
throttle = action_get("throttle")
|
||||
option = ScrubAggression.from_str(throttle)
|
||||
output = volume_set_bitrot_option(vol, BitrotOption.ScrubThrottle(option))
|
||||
if output.is_err():
|
||||
option = volume.ScrubAggression.from_str(throttle)
|
||||
try:
|
||||
volume.volume_set_bitrot_option(vol, volume.BitrotOption.ScrubThrottle(
|
||||
option))
|
||||
except GlusterCmdException as e:
|
||||
action_fail(
|
||||
"set bitrot throttle failed with error: {}".format(output.value))
|
||||
"set bitrot throttle failed with error: {}".format(e))
|
||||
|
||||
|
||||
def enable_volume_quota():
|
||||
|
@ -121,52 +123,52 @@ def enable_volume_quota():
|
|||
Enable quotas on the volume
|
||||
"""
|
||||
# Gather our action parameters
|
||||
volume = action_get("volume")
|
||||
vol = action_get("volume")
|
||||
usage_limit = action_get("usage-limit")
|
||||
parsed_usage_limit = int(usage_limit)
|
||||
path = action_get("path")
|
||||
# Turn quotas on if not already enabled
|
||||
quotas_enabled = volume_quotas_enabled(volume)
|
||||
quotas_enabled = volume.volume_quotas_enabled(vol)
|
||||
if quotas_enabled.is_err():
|
||||
action_fail("Enable quota failed: {}".format(quotas_enabled.value))
|
||||
if not quotas_enabled.value:
|
||||
output = volume_enable_quotas(volume)
|
||||
if output.is_err():
|
||||
action_fail("Enable quotas failed: {}".format(output.value))
|
||||
try:
|
||||
volume.volume_enable_quotas(vol)
|
||||
except GlusterCmdException as e:
|
||||
action_fail("Enable quotas failed: {}".format(e))
|
||||
|
||||
output = volume_add_quota(volume, path, parsed_usage_limit)
|
||||
if output.is_err():
|
||||
action_fail("Add quota failed: {}".format(output.value))
|
||||
try:
|
||||
volume.volume_add_quota(vol, path, parsed_usage_limit)
|
||||
except GlusterCmdException as e:
|
||||
action_fail("Add quota failed: {}".format(e))
|
||||
|
||||
|
||||
def disable_volume_quota():
|
||||
"""
|
||||
Disable quotas on the volume
|
||||
"""
|
||||
volume = action_get("volume")
|
||||
vol = action_get("volume")
|
||||
path = action_get("path")
|
||||
quotas_enabled = volume_quotas_enabled(volume)
|
||||
quotas_enabled = volume.volume_quotas_enabled(vol)
|
||||
if quotas_enabled.is_err():
|
||||
action_fail("Disable quota failed: {}".format(quotas_enabled.value))
|
||||
if quotas_enabled.value:
|
||||
output = volume_remove_quota(volume, path)
|
||||
if output.is_err():
|
||||
# Notify the user of the failure and then return the error
|
||||
# up the stack
|
||||
action_fail(
|
||||
"remove quota failed with error: {}".format(output.value))
|
||||
try:
|
||||
volume.volume_remove_quota(vol, path)
|
||||
except GlusterCmdException as e:
|
||||
action_fail("remove quota failed with error: {}".format(e))
|
||||
|
||||
|
||||
def list_volume_quotas():
|
||||
"""
|
||||
List quotas on the volume
|
||||
"""
|
||||
volume = action_get("volume")
|
||||
quotas_enabled = volume_quotas_enabled(volume)
|
||||
vol = action_get("volume")
|
||||
quotas_enabled = volume.volume_quotas_enabled(vol)
|
||||
if quotas_enabled.is_err():
|
||||
action_fail("List quota failed: {}".format(quotas_enabled.value))
|
||||
if quotas_enabled.value:
|
||||
quotas = quota_list(volume)
|
||||
quotas = volume.quota_list(vol)
|
||||
if quotas.is_err():
|
||||
action_fail(
|
||||
"Failed to get volume quotas: {}".format(quotas.value))
|
||||
|
@ -184,19 +186,19 @@ def set_volume_options():
|
|||
"""
|
||||
Set one or more options on the volume at once
|
||||
"""
|
||||
volume = action_get("volume")
|
||||
vol = action_get("volume")
|
||||
|
||||
# Gather all of the action parameters up at once. We don't know what
|
||||
# the user wants to change.
|
||||
options = action_get()
|
||||
settings = []
|
||||
for (key, value) in options:
|
||||
for key in options:
|
||||
if key != "volume":
|
||||
settings.append(GlusterOption(key, value))
|
||||
else:
|
||||
volume = value
|
||||
log("settings.append {} {}".format(key, options[key]))
|
||||
settings.append(
|
||||
volume.GlusterOption.from_str(key, options[key]))
|
||||
|
||||
volume_set_options(volume, settings)
|
||||
volume.volume_set_options(vol, settings)
|
||||
|
||||
|
||||
# Actions to function mapping, to allow for illegal python action names that
|
||||
|
@ -224,7 +226,7 @@ def main(args):
|
|||
return "Action %s undefined" % action_name
|
||||
else:
|
||||
try:
|
||||
action(args)
|
||||
action()
|
||||
except Exception as e:
|
||||
hookenv.action_fail(str(e))
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
# limitations under the License.
|
||||
from charmhelpers.core.hookenv import ERROR, log, relation_set, unit_public_ip
|
||||
|
||||
from lib.gluster.volume import volume_list
|
||||
from charm.gluster.volume import volume_list
|
||||
|
||||
|
||||
def fuse_relation_joined():
|
||||
|
|
|
@ -16,6 +16,23 @@ import os
|
|||
import subprocess
|
||||
from typing import Optional, Dict
|
||||
|
||||
from charm.gluster.lib import (check_for_new_devices, run_command, Status,
|
||||
get_brick_list, wait_for_peers)
|
||||
# from .ctdb import VirtualIp
|
||||
# from .nfs_relation_joined import nfs_relation_joined
|
||||
from charm.gluster.peer import peer_probe, Peer
|
||||
from charm.gluster.volume import (Transport, volume_create_arbiter,
|
||||
get_local_bricks, Volume,
|
||||
GlusterOption, SplitBrainPolicy, Toggle,
|
||||
volume_create_distributed,
|
||||
volume_create_striped,
|
||||
volume_create_replicated,
|
||||
volume_create_striped_replicated,
|
||||
volume_add_brick, volume_create_erasure,
|
||||
VolumeType,
|
||||
volume_enable_bitrot, volume_list,
|
||||
volume_set_options,
|
||||
volume_remove_brick, volume_info)
|
||||
from charmhelpers.contrib.storage.linux.ceph import filesystem_mounted
|
||||
from charmhelpers.core import hookenv, sysctl
|
||||
from charmhelpers.core.hookenv import (application_version_set, relation_id)
|
||||
|
@ -27,23 +44,6 @@ from charms.reactive import when, when_not, set_state, remove_state
|
|||
from gluster.cli import GlusterCmdException
|
||||
from gluster.cli.parsers import GlusterCmdOutputParseError
|
||||
from gluster.cli.volume import start
|
||||
from lib.gluster.lib import (check_for_new_devices, run_command, Status,
|
||||
get_brick_list, wait_for_peers)
|
||||
# from .ctdb import VirtualIp
|
||||
# from .nfs_relation_joined import nfs_relation_joined
|
||||
from lib.gluster.peer import peer_probe, Peer
|
||||
from lib.gluster.volume import (Transport, volume_create_arbiter,
|
||||
get_local_bricks, Volume,
|
||||
GlusterOption, SplitBrainPolicy, Toggle,
|
||||
volume_create_distributed,
|
||||
volume_create_striped,
|
||||
volume_create_replicated,
|
||||
volume_create_striped_replicated,
|
||||
volume_add_brick, volume_create_erasure,
|
||||
VolumeType,
|
||||
volume_enable_bitrot, volume_list,
|
||||
volume_set_options,
|
||||
volume_remove_brick, volume_info)
|
||||
from result import Err, Ok, Result
|
||||
|
||||
# from .brick_detached import brick_detached
|
||||
|
|
|
@ -20,14 +20,13 @@ from typing import Optional
|
|||
|
||||
import apt
|
||||
import apt_pkg
|
||||
from charm.gluster import peer, volume
|
||||
from charm.gluster.apt import get_candidate_package_version
|
||||
from charmhelpers.cli import hookenv
|
||||
from charmhelpers.core.hookenv import config, log, status_set, ERROR
|
||||
from charmhelpers.core.host import service_start, service_stop
|
||||
from charmhelpers.fetch import apt_install, add_source, apt_update
|
||||
from gluster.cli.parsers import GlusterCmdOutputParseError
|
||||
from lib.gluster.apt import get_candidate_package_version
|
||||
from lib.gluster.peer import Peer
|
||||
from lib.gluster.volume import volume_info
|
||||
from result import Err, Ok, Result
|
||||
|
||||
|
||||
|
@ -83,7 +82,7 @@ def roll_cluster(new_version: str) -> Result:
|
|||
|
||||
# volume_name always has a default
|
||||
try:
|
||||
volume_bricks = volume_info(volume_name)
|
||||
volume_bricks = volume.volume_info(volume_name)
|
||||
peer_list = volume_bricks.value.bricks.peers
|
||||
|
||||
log("peer_list: {}".format(peer_list))
|
||||
|
@ -211,10 +210,10 @@ def gluster_key_exists(key: str) -> bool:
|
|||
return os.path.exists(location)
|
||||
|
||||
|
||||
def wait_on_previous_node(previous_node: Peer, version: str) -> Result:
|
||||
def wait_on_previous_node(previous_node: peer.Peer, version: str) -> Result:
|
||||
"""
|
||||
Wait on a previous node to finish upgrading
|
||||
:param previous_node: Peer to wait on
|
||||
:param previous_node: peer.Peer to wait on
|
||||
:param version: str. Version we're upgrading to
|
||||
:return: Result with Ok or Err
|
||||
"""
|
||||
|
|
|
@ -15,3 +15,4 @@
|
|||
import sys
|
||||
|
||||
sys.path.append('src')
|
||||
sys.path.append('src/lib')
|
||||
|
|
|
@ -16,10 +16,9 @@ import sys
|
|||
import unittest
|
||||
|
||||
import mock
|
||||
from result import Ok
|
||||
|
||||
from lib.gluster.volume import Quota
|
||||
from lib.charm.gluster.volume import Quota
|
||||
from reactive import actions
|
||||
from result import Ok
|
||||
|
||||
mock_apt = mock.MagicMock()
|
||||
sys.modules['apt'] = mock_apt
|
||||
|
@ -27,13 +26,12 @@ mock_apt.apt_pkg = mock.MagicMock()
|
|||
|
||||
|
||||
class Test(unittest.TestCase):
|
||||
@mock.patch('reactive.actions.quota_list')
|
||||
@mock.patch('reactive.actions.volume_quotas_enabled')
|
||||
@mock.patch('reactive.actions.volume')
|
||||
@mock.patch('reactive.actions.action_get')
|
||||
@mock.patch('reactive.actions.action_set')
|
||||
def testListVolQuotas(self, _action_set, _action_get,
|
||||
_volume_quotas_enabled, _quota_list):
|
||||
_quota_list.return_value = Ok(
|
||||
_volume):
|
||||
_volume.quota_list.return_value = Ok(
|
||||
[Quota(path="/test1",
|
||||
used=10,
|
||||
avail=90,
|
||||
|
@ -42,7 +40,7 @@ class Test(unittest.TestCase):
|
|||
hard_limit_exceeded=False,
|
||||
soft_limit_exceeded=False,
|
||||
soft_limit_percentage="80%")])
|
||||
_volume_quotas_enabled.return_value = Ok(True)
|
||||
_volume.volume_quotas_enabled.return_value = Ok(True)
|
||||
_action_get.return_value = "test"
|
||||
actions.list_volume_quotas()
|
||||
_action_set.assert_called_with(
|
||||
|
|
|
@ -17,17 +17,17 @@ import unittest
|
|||
import mock
|
||||
from result import Ok
|
||||
|
||||
from lib.gluster import block
|
||||
from lib.charm.gluster import block
|
||||
|
||||
|
||||
class Test(unittest.TestCase):
|
||||
def testGetDeviceInfo(self):
|
||||
pass
|
||||
|
||||
@mock.patch('lib.gluster.block.scan_devices')
|
||||
@mock.patch('lib.gluster.block.storage_get')
|
||||
@mock.patch('lib.gluster.block.storage_list')
|
||||
@mock.patch('lib.gluster.block.log')
|
||||
@mock.patch('lib.charm.gluster.block.scan_devices')
|
||||
@mock.patch('lib.charm.gluster.block.storage_get')
|
||||
@mock.patch('lib.charm.gluster.block.storage_list')
|
||||
@mock.patch('lib.charm.gluster.block.log')
|
||||
def testGetJujuBricks(self, _log, _storage_list, _storage_get,
|
||||
_scan_devices):
|
||||
_storage_list.return_value = ['data/0', 'data/1', 'data/2']
|
||||
|
@ -38,9 +38,9 @@ class Test(unittest.TestCase):
|
|||
self.assertTrue(bricks.is_ok())
|
||||
self.assertListEqual(["/dev/0", "/dev/1", "/dev/2"], bricks.value)
|
||||
|
||||
@mock.patch('lib.gluster.block.scan_devices')
|
||||
@mock.patch('lib.gluster.block.config')
|
||||
@mock.patch('lib.gluster.block.log')
|
||||
@mock.patch('lib.charm.gluster.block.scan_devices')
|
||||
@mock.patch('lib.charm.gluster.block.config')
|
||||
@mock.patch('lib.charm.gluster.block.log')
|
||||
def testGetManualBricks(self, _log, _config, _scan_devices):
|
||||
_config.return_value = "/dev/sda /dev/sdb /dev/sdc"
|
||||
_scan_devices.return_value = Ok(["/dev/sda", "/dev/sdb", "/dev/sdc"])
|
||||
|
@ -52,9 +52,9 @@ class Test(unittest.TestCase):
|
|||
def testSetElevator(self):
|
||||
pass
|
||||
|
||||
@mock.patch('lib.gluster.block.is_block_device')
|
||||
@mock.patch('lib.gluster.block.device_initialized')
|
||||
@mock.patch('lib.gluster.block.log')
|
||||
@mock.patch('lib.charm.gluster.block.is_block_device')
|
||||
@mock.patch('lib.charm.gluster.block.device_initialized')
|
||||
@mock.patch('lib.charm.gluster.block.log')
|
||||
def testScanDevices(self, _log, _is_block_device, _device_initialized):
|
||||
expected = [
|
||||
block.BrickDevice(is_block_device=True, initialized=True,
|
||||
|
@ -70,7 +70,7 @@ class Test(unittest.TestCase):
|
|||
self.assertTrue(result.is_ok())
|
||||
self.assertListEqual(expected, result.value)
|
||||
|
||||
# @mock.patch('lib.gluster.block.log')
|
||||
# @mock.patch('lib.charm.gluster.block.log')
|
||||
# def testWeeklyDefrag(self, _log):
|
||||
# block.weekly_defrag(mount="/mnt/sda",
|
||||
# fs_type=block.FilesystemType.Xfs,
|
||||
|
|
|
@ -15,16 +15,11 @@
|
|||
import os
|
||||
import unittest
|
||||
|
||||
from lib.gluster.fstab import FsEntry, FsTab
|
||||
from lib.charm.gluster.fstab import FsEntry, FsTab
|
||||
from mock import patch
|
||||
from result import Ok
|
||||
|
||||
|
||||
# mock_apt = mock.MagicMock()
|
||||
# sys.modules['apt'] = mock_apt
|
||||
# mock_apt.apt_pkg = mock.MagicMock()
|
||||
|
||||
|
||||
class Test(unittest.TestCase):
|
||||
@patch.object(FsTab, 'save_fstab')
|
||||
def testAddEntry(self, _save_fstab):
|
||||
|
|
|
@ -16,12 +16,7 @@ import unittest
|
|||
from unittest.mock import MagicMock
|
||||
|
||||
import mock
|
||||
from lib.gluster import heal
|
||||
|
||||
|
||||
# mock_apt = MagicMock()
|
||||
# sys.modules['apt'] = mock_apt
|
||||
# mock_apt.apt_pkg = MagicMock()
|
||||
from lib.charm.gluster import heal
|
||||
|
||||
|
||||
class Test(unittest.TestCase):
|
||||
|
|
|
@ -16,13 +16,13 @@ import unittest
|
|||
import uuid
|
||||
|
||||
import mock
|
||||
from lib.gluster import lib
|
||||
from lib.gluster.peer import Peer, State
|
||||
from lib.gluster.volume import Brick, Volume, VolumeType, Transport
|
||||
from lib.charm.gluster import lib
|
||||
from lib.charm.gluster.peer import Peer, State
|
||||
from lib.charm.gluster.volume import Brick, Volume, VolumeType, Transport
|
||||
|
||||
|
||||
class Test(unittest.TestCase):
|
||||
@mock.patch('lib.gluster.lib.log')
|
||||
@mock.patch('lib.charm.gluster.lib.log')
|
||||
def testPeersAreNotReady(self, _log):
|
||||
peer_list = [
|
||||
Peer(uuid=uuid.UUID('3da2c343-7c67-499d-a6bb-68591cc72bc1'),
|
||||
|
@ -37,7 +37,7 @@ class Test(unittest.TestCase):
|
|||
result = lib.peers_are_ready(peer_list)
|
||||
self.assertFalse(result)
|
||||
|
||||
@mock.patch('lib.gluster.lib.log')
|
||||
@mock.patch('lib.charm.gluster.lib.log')
|
||||
def testPeersAreReady(self, _log):
|
||||
peer_list = [
|
||||
Peer(uuid=uuid.UUID('3da2c343-7c67-499d-a6bb-68591cc72bc1'),
|
||||
|
|
|
@ -17,12 +17,12 @@ import uuid
|
|||
from ipaddress import ip_address
|
||||
|
||||
import mock
|
||||
from lib.gluster import peer
|
||||
from lib.gluster.peer import Peer, State
|
||||
from lib.charm.gluster import peer
|
||||
from lib.charm.gluster.peer import Peer, State
|
||||
|
||||
|
||||
class Test(unittest.TestCase):
|
||||
@mock.patch('lib.gluster.peer.peer_list')
|
||||
@mock.patch('lib.charm.gluster.peer.peer_list')
|
||||
def testGetPeer(self, _peer_list):
|
||||
existing_peers = [
|
||||
peer.Peer(
|
||||
|
@ -38,7 +38,7 @@ class Test(unittest.TestCase):
|
|||
result = peer.get_peer(hostname=ip_address('172.31.21.242'))
|
||||
self.assertIs(result, existing_peers[1])
|
||||
|
||||
@mock.patch('lib.gluster.peer.gpeer.pool')
|
||||
@mock.patch('lib.charm.gluster.peer.gpeer.pool')
|
||||
def testPeerList(self, _peer_pool):
|
||||
# Ignore parse_peer_list. We test that above
|
||||
peer.peer_list()
|
||||
|
@ -46,8 +46,8 @@ class Test(unittest.TestCase):
|
|||
# arg_list=["pool", "list", "--xml"],
|
||||
# script_mode=False)
|
||||
|
||||
@mock.patch('lib.gluster.peer.peer_list')
|
||||
@mock.patch('lib.gluster.peer.gpeer.probe')
|
||||
@mock.patch('lib.charm.gluster.peer.peer_list')
|
||||
@mock.patch('lib.charm.gluster.peer.gpeer.probe')
|
||||
def testPeerProbe(self, _peer_probe, _peer_list):
|
||||
_peer_list.return_value = [
|
||||
Peer(hostname="172.31.18.192",
|
||||
|
|
|
@ -17,7 +17,7 @@ import uuid
|
|||
from ipaddress import ip_address
|
||||
|
||||
import mock
|
||||
from lib.gluster import peer, volume
|
||||
from lib.charm.gluster import peer, volume
|
||||
|
||||
# mock_apt = mock.MagicMock()
|
||||
# sys.modules['apt'] = mock_apt
|
||||
|
@ -58,8 +58,8 @@ class Test(unittest.TestCase):
|
|||
def testOkToRemove(self):
|
||||
pass
|
||||
|
||||
@mock.patch("lib.gluster.volume.unit_get")
|
||||
@mock.patch("lib.gluster.volume.get_host_ip")
|
||||
@mock.patch("lib.charm.gluster.volume.unit_get")
|
||||
@mock.patch("lib.charm.gluster.volume.get_host_ip")
|
||||
def testGetLocalIp(self, _get_host_ip, _unit_get):
|
||||
_unit_get.return_value = "192.168.1.6"
|
||||
_get_host_ip.return_value = "192.168.1.6"
|
||||
|
@ -87,7 +87,7 @@ class Test(unittest.TestCase):
|
|||
def testVolumeAddBrick(self):
|
||||
pass
|
||||
|
||||
@mock.patch('lib.gluster.volume.volume.create')
|
||||
@mock.patch('lib.charm.gluster.volume.volume.create')
|
||||
def testVolumeCreateArbiter(self, _volume_create):
|
||||
volume.volume_create_arbiter(vol="test", replica_count=3,
|
||||
arbiter_count=1,
|
||||
|
@ -97,7 +97,7 @@ class Test(unittest.TestCase):
|
|||
volname='test', replica=3, arbiter=1, transport='tcp',
|
||||
volbricks=[str(b) for b in brick_list], force=False)
|
||||
|
||||
@mock.patch('lib.gluster.volume.volume.create')
|
||||
@mock.patch('lib.charm.gluster.volume.volume.create')
|
||||
def testVolumeCreateDistributed(self, _volume_create):
|
||||
volume.volume_create_distributed(vol="test",
|
||||
transport=volume.Transport.Tcp,
|
||||
|
@ -106,7 +106,7 @@ class Test(unittest.TestCase):
|
|||
volbricks=[str(b) for b in
|
||||
brick_list], force=False)
|
||||
|
||||
@mock.patch('lib.gluster.volume.volume.create')
|
||||
@mock.patch('lib.charm.gluster.volume.volume.create')
|
||||
def testVolumeCreateErasure(self, _volume_create):
|
||||
volume.volume_create_erasure(vol="test", disperse_count=1,
|
||||
redundancy_count=3,
|
||||
|
@ -116,7 +116,7 @@ class Test(unittest.TestCase):
|
|||
volname='test', disperse=1, redundancy=3, transport='tcp',
|
||||
volbricks=[str(b) for b in brick_list], force=False)
|
||||
|
||||
@mock.patch('lib.gluster.volume.volume.create')
|
||||
@mock.patch('lib.charm.gluster.volume.volume.create')
|
||||
def testVolumeCreateReplicated(self, _volume_create):
|
||||
volume.volume_create_replicated(vol="test", replica_count=3,
|
||||
transport=volume.Transport.Tcp,
|
||||
|
@ -125,7 +125,7 @@ class Test(unittest.TestCase):
|
|||
volname='test', replica=3, transport='tcp',
|
||||
volbricks=[str(b) for b in brick_list], force=False)
|
||||
|
||||
@mock.patch('lib.gluster.volume.volume.create')
|
||||
@mock.patch('lib.charm.gluster.volume.volume.create')
|
||||
def testVolumeCreateStriped(self, _volume_create):
|
||||
volume.volume_create_striped(vol="test", stripe_count=3,
|
||||
transport=volume.Transport.Tcp,
|
||||
|
@ -135,7 +135,7 @@ class Test(unittest.TestCase):
|
|||
volname='test', stripe=3, transport='tcp',
|
||||
volbricks=[str(b) for b in brick_list], force=False)
|
||||
|
||||
@mock.patch('lib.gluster.volume.volume.create')
|
||||
@mock.patch('lib.charm.gluster.volume.volume.create')
|
||||
def testVolumeCreateStripedReplicated(self, _volume_create):
|
||||
volume.volume_create_striped_replicated(vol="test", stripe_count=1,
|
||||
replica_count=3,
|
||||
|
|
Loading…
Reference in New Issue