Add storage volume
It adds the storage volume resource of the Redfish standard schema and also provides the attribute ``max_size_bytes``, which gives the largest logical disk size available in bytes among all disk volumes. Co-Authored-By: Nisha Agarwal <agarwalnisha1980@gmail.com> Partial-Bug: 1751143 Change-Id: I38724275967cb01a0e905ef2a10c5b99dde482f2
This commit is contained in:
parent
4e9e629fa1
commit
ecaa3c8062
|
@ -0,0 +1,57 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# This is referred from Redfish standard schema.
|
||||
# http://redfish.dmtf.org/schemas/v1/Volume.v1_0_3.json
|
||||
|
||||
import logging
|
||||
|
||||
from sushy.resources import base
|
||||
from sushy import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Volume(base.ResourceBase):
    """This class adds the Storage Volume resource.

    Maps the Redfish ``Volume.v1_0_3`` schema; each attribute below is a
    JSON field of the Volume resource document.
    """

    identity = base.Field('Id', required=True)
    """The Volume identity string"""

    name = base.Field('Name')
    """The name of the resource"""

    capacity_bytes = base.Field('CapacityBytes', adapter=utils.int_or_none)
    """The size in bytes of this Volume."""
|
||||
|
||||
|
||||
class VolumeCollection(base.ResourceCollectionBase):
    """This class represents the Storage Volume collection."""

    # Cached result of max_size_bytes; ``None`` means "not computed yet".
    _max_size_bytes = None

    @property
    def _resource_type(self):
        return Volume

    @property
    def max_size_bytes(self):
        """Max size available in bytes among all Volumes of this collection."""
        if self._max_size_bytes is None:
            sizes = (member.capacity_bytes
                     for member in self.get_members())
            self._max_size_bytes = utils.max_safe(list(sizes))
        return self._max_size_bytes

    def _do_refresh(self, force=False):
        # Drop the cached value so the next access recomputes it.
        self._max_size_bytes = None
|
|
@ -0,0 +1,44 @@
|
|||
{
|
||||
"@odata.type": "#Volume.v1_0_3.Volume",
|
||||
"Id": "1",
|
||||
"Name": "Virtual Disk 1",
|
||||
"Status": {
|
||||
"@odata.type": "#Resource.Status",
|
||||
"State": "Enabled",
|
||||
"Health": "OK"
|
||||
},
|
||||
"Encrypted": false,
|
||||
"VolumeType": "Mirrored",
|
||||
"CapacityBytes": 899527000000,
|
||||
"Identifiers": [
|
||||
{
|
||||
"@odata.type": "#Resource.v1_1_0.Identifier",
|
||||
"DurableNameFormat": "UUID",
|
||||
"DurableName": "38f1818b-111e-463a-aa19-fa54f792e468"
|
||||
}
|
||||
],
|
||||
"Links": {
|
||||
"@odata.type": "#Volume.v1_0_0.Links",
|
||||
"Drives": [
|
||||
{
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/3F5A8C54207B7233"
|
||||
},
|
||||
{
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/35D38F11ACEF7BD3"
|
||||
}
|
||||
]
|
||||
},
|
||||
"Actions": {
|
||||
"@odata.type": "#Volume.v1_0_0.Actions",
|
||||
"#Volume.Initialize": {
|
||||
"target": "/redfish/v1/Systems/3/Storage/RAIDIntegrated/Volumes/1/Actions/Volume.Initialize",
|
||||
"InitializeType@Redfish.AllowableValues": [
|
||||
"Fast",
|
||||
"Slow"
|
||||
]
|
||||
}
|
||||
},
|
||||
"@odata.context": "/redfish/v1/$metadata#Volume.Volume",
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/1",
|
||||
"@Redfish.Copyright": "Copyright 2014-2017 Distributed Management Task Force, Inc. (DMTF). For the full DMTF copyright policy, see http://www.dmtf.org/about/policies/copyright."
|
||||
}
|
|
@ -0,0 +1,41 @@
|
|||
{
|
||||
"@odata.type": "#Volume.v1_0_3.Volume",
|
||||
"Id": "2",
|
||||
"Name": "Virtual Disk 2",
|
||||
"Status": {
|
||||
"@odata.type": "#Resource.Status",
|
||||
"State": "Enabled",
|
||||
"Health": "OK"
|
||||
},
|
||||
"Encrypted": false,
|
||||
"VolumeType": "NonRedundant",
|
||||
"CapacityBytes": 107374182400,
|
||||
"Identifiers": [
|
||||
{
|
||||
"@odata.type": "#Resource.v1_1_0.Identifier",
|
||||
"DurableNameFormat": "UUID",
|
||||
"DurableName": "0324c96c-8031-4f5e-886c-50cd90aca854"
|
||||
}
|
||||
],
|
||||
"Links": {
|
||||
"@odata.type": "#Volume.v1_0_0.Links",
|
||||
"Drives": [
|
||||
{
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/3D58ECBC375FD9F2"
|
||||
}
|
||||
]
|
||||
},
|
||||
"Actions": {
|
||||
"@odata.type": "#Volume.v1_0_0.Actions",
|
||||
"#Volume.Initialize": {
|
||||
"target": "/redfish/v1/Systems/3/Storage/RAIDIntegrated/Volumes/1/Actions/Volume.Initialize",
|
||||
"InitializeType@Redfish.AllowableValues": [
|
||||
"Fast",
|
||||
"Slow"
|
||||
]
|
||||
}
|
||||
},
|
||||
"@odata.context": "/redfish/v1/$metadata#Volume.Volume",
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/2",
|
||||
"@Redfish.Copyright": "Copyright 2014-2017 Distributed Management Task Force, Inc. (DMTF). For the full DMTF copyright policy, see http://www.dmtf.org/about/policies/copyright."
|
||||
}
|
|
@ -0,0 +1,41 @@
|
|||
{
|
||||
"@odata.type": "#Volume.v1_0_3.Volume",
|
||||
"Id": "3",
|
||||
"Name": "Virtual Disk 3",
|
||||
"Status": {
|
||||
"@odata.type": "#Resource.Status",
|
||||
"State": "Enabled",
|
||||
"Health": "OK"
|
||||
},
|
||||
"Encrypted": false,
|
||||
"VolumeType": "NonRedundant",
|
||||
"CapacityBytes": 1073741824000,
|
||||
"Identifiers": [
|
||||
{
|
||||
"@odata.type": "#Resource.v1_1_0.Identifier",
|
||||
"DurableNameFormat": "UUID",
|
||||
"DurableName": "eb179a30-6f87-4fdb-8f92-639eb7aaabcb"
|
||||
}
|
||||
],
|
||||
"Links": {
|
||||
"@odata.type": "#Volume.v1_0_0.Links",
|
||||
"Drives": [
|
||||
{
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/3D58ECBC375FD9F2"
|
||||
}
|
||||
]
|
||||
},
|
||||
"Actions": {
|
||||
"@odata.type": "#Volume.v1_0_0.Actions",
|
||||
"#Volume.Initialize": {
|
||||
"target": "/redfish/v1/Systems/3/Storage/RAIDIntegrated/Volumes/1/Actions/Volume.Initialize",
|
||||
"InitializeType@Redfish.AllowableValues": [
|
||||
"Fast",
|
||||
"Slow"
|
||||
]
|
||||
}
|
||||
},
|
||||
"@odata.context": "/redfish/v1/$metadata#Volume.Volume",
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/3",
|
||||
"@Redfish.Copyright": "Copyright 2014-2017 Distributed Management Task Force, Inc. (DMTF). For the full DMTF copyright policy, see http://www.dmtf.org/about/policies/copyright."
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
{
|
||||
"@odata.type": "#VolumeCollection.VolumeCollection",
|
||||
"Name": "Storage Volume Collection",
|
||||
"Description": "Storage Volume Collection",
|
||||
"Members@odata.count": 3,
|
||||
"Members": [
|
||||
{
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/1"
|
||||
},
|
||||
{
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/2"
|
||||
},
|
||||
{
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/3"
|
||||
}
|
||||
],
|
||||
"Oem": {},
|
||||
"@odata.context": "/redfish/v1/$metadata#VolumeCollection.VolumeCollection",
|
||||
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes",
|
||||
"@Redfish.Copyright": "Copyright 2014-2017 Distributed Management Task Force, Inc. (DMTF). For the full DMTF copyright policy, see http://www.dmtf.org/about/policies/copyright."
|
||||
}
|
|
@ -0,0 +1,123 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
import mock
|
||||
|
||||
from sushy.resources.system.storage import volume
|
||||
from sushy.tests.unit import base
|
||||
|
||||
|
||||
class VolumeTestCase(base.TestCase):
    """Tests for the Volume resource parsed from the sample payload."""

    def setUp(self):
        super(VolumeTestCase, self).setUp()
        self.conn = mock.Mock()
        sample_path = 'sushy/tests/unit/json_samples/volume.json'
        with open(sample_path, 'r') as f:
            self.conn.get.return_value.json.return_value = json.load(f)

        self.stor_volume = volume.Volume(
            self.conn, '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/1',
            redfish_version='1.0.2')

    def test__parse_attributes(self):
        self.stor_volume._parse_attributes()
        self.assertEqual('1.0.2', self.stor_volume.redfish_version)
        self.assertEqual('1', self.stor_volume.identity)
        self.assertEqual('Virtual Disk 1', self.stor_volume.name)
        self.assertEqual(899527000000, self.stor_volume.capacity_bytes)
|
||||
|
||||
|
||||
class VolumeCollectionTestCase(base.TestCase):
    """Tests for VolumeCollection, including max_size_bytes caching."""

    def setUp(self):
        super(VolumeCollectionTestCase, self).setUp()
        self.conn = mock.Mock()
        with open('sushy/tests/unit/json_samples/'
                  'volume_collection.json', 'r') as f:
            self.conn.get.return_value.json.return_value = json.loads(f.read())
        self.stor_vol_col = volume.VolumeCollection(
            self.conn, '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes',
            redfish_version='1.0.2')

    def _queue_volume_payloads(self):
        # Queue the three volume sample payloads as successive GET
        # results, one per member of the collection.  Extracted here
        # because two max_size_bytes tests need the identical setup.
        successive_return_values = []
        for sample in ('volume.json', 'volume2.json', 'volume3.json'):
            with open('sushy/tests/unit/json_samples/' + sample, 'r') as f:
                successive_return_values.append(json.loads(f.read()))
        self.conn.get.return_value.json.side_effect = successive_return_values

    def test__parse_attributes(self):
        self.stor_vol_col._parse_attributes()
        self.assertEqual((
            '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/1',
            '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/2',
            '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/3'),
            self.stor_vol_col.members_identities)

    @mock.patch.object(volume, 'Volume', autospec=True)
    def test_get_member(self, Volume_mock):
        self.stor_vol_col.get_member(
            '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/1')
        Volume_mock.assert_called_once_with(
            self.stor_vol_col._conn,
            '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/1',
            redfish_version=self.stor_vol_col.redfish_version)

    @mock.patch.object(volume, 'Volume', autospec=True)
    def test_get_members(self, Volume_mock):
        members = self.stor_vol_col.get_members()
        calls = [
            mock.call(self.stor_vol_col._conn,
                      '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/1',
                      redfish_version=self.stor_vol_col.redfish_version),
            mock.call(self.stor_vol_col._conn,
                      '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/2',
                      redfish_version=self.stor_vol_col.redfish_version),
            mock.call(self.stor_vol_col._conn,
                      '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/3',
                      redfish_version=self.stor_vol_col.redfish_version),
        ]
        Volume_mock.assert_has_calls(calls)
        self.assertIsInstance(members, list)
        self.assertEqual(3, len(members))

    def test_max_size_bytes(self):
        self.assertIsNone(self.stor_vol_col._max_size_bytes)
        self.conn.get.return_value.json.reset_mock()
        self._queue_volume_payloads()

        self.assertEqual(1073741824000, self.stor_vol_col.max_size_bytes)

        # for any subsequent fetching it gets it from the cached value
        self.conn.get.return_value.json.reset_mock()
        self.assertEqual(1073741824000, self.stor_vol_col.max_size_bytes)
        self.conn.get.return_value.json.assert_not_called()

    def test_max_size_bytes_after_refresh(self):
        # refresh() must invalidate the cached value so it is recomputed
        self.stor_vol_col.refresh()
        self.assertIsNone(self.stor_vol_col._max_size_bytes)
        self.conn.get.return_value.json.reset_mock()
        self._queue_volume_payloads()

        self.assertEqual(1073741824000, self.stor_vol_col.max_size_bytes)
|
|
@ -90,3 +90,9 @@ class UtilsTestCase(base.TestCase):
|
|||
'"subresource_name" cannot be empty',
|
||||
utils.get_sub_resource_path_by,
|
||||
self.sys_inst, '')
|
||||
|
||||
def test_max_safe(self):
    """Verify max_safe skips None values and honors ``default``."""
    self.assertEqual(10, utils.max_safe([1, 3, 2, 8, 5, 10, 6]))
    self.assertEqual(821, utils.max_safe([15, 300, 270, None, 821, None]))
    self.assertEqual(0, utils.max_safe([]))
    self.assertIsNone(utils.max_safe([], default=None))
|
||||
|
|
|
@ -89,3 +89,20 @@ def get_sub_resource_path_by(resource, subresource_name):
|
|||
resource=resource.path)
|
||||
|
||||
return body['@odata.id']
|
||||
|
||||
|
||||
def max_safe(iterable, default=0):
    """Helper wrapper over builtin max() function.

    This function is just a wrapper over builtin max() w/o ``key`` argument.
    The ``default`` argument specifies an object to return if the provided
    ``iterable`` is empty. Also it filters out the None type values.

    :param iterable: an iterable
    :param default: object to return when no non-None value exists,
        0 by default
    :returns: the largest non-None element of ``iterable``, or ``default``
    """
    try:
        # Generator expression avoids materializing the filtered values
        # into an intermediate list.
        return max(x for x in iterable if x is not None)
    except ValueError:
        # max() raises ValueError when the (filtered) sequence is empty.
        # TypeError is not caught here as that should be thrown.
        return default
|
||||
|
|
Loading…
Reference in New Issue