Fix PyScripts processing

The PyScripts processing in CloudKitty has been broken for a very long
time. This patch introduces the changes required to make it work again.

Change-Id: I143ee6aa4352903921d2ab7b8d8468aedbdd6911
This commit is contained in:
Rafael Weingärtner 2022-11-15 15:27:18 -03:00
parent 0c1eabc364
commit ee99f7ef0d
6 changed files with 331 additions and 124 deletions

View File

@ -37,8 +37,8 @@ DATAPOINT_SCHEMA = voluptuous.Schema({
voluptuous.Required('price', default=0):
voluptuous.Coerce(str),
},
voluptuous.Required('groupby'): vutils.DictTypeValidator(str, str),
voluptuous.Required('metadata'): vutils.DictTypeValidator(str, str),
voluptuous.Required('groupby'): voluptuous.Coerce(dict),
voluptuous.Required('metadata'): voluptuous.Coerce(dict),
})

View File

@ -13,10 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
#
from cloudkitty import dataframe
from cloudkitty import rating
from cloudkitty.rating.pyscripts.controllers import root as root_api
from cloudkitty.rating.pyscripts.db import api as pyscripts_db_api
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class PyScripts(rating.RatingProcessorBase):
"""PyScripts rating module.
@ -33,20 +38,17 @@ class PyScripts(rating.RatingProcessorBase):
db_api = pyscripts_db_api.get_instance()
def __init__(self, tenant_id=None):
# current scripts loaded to memory
self._scripts = {}
self.load_scripts_in_memory()
super(PyScripts, self).__init__(tenant_id)
def load_scripts_in_memory(self):
db = pyscripts_db_api.get_instance()
scripts_uuid_list = db.list_scripts()
# Purge old entries
scripts_to_purge = []
for script_uuid in self._scripts.keys():
if script_uuid not in scripts_uuid_list:
scripts_to_purge.append(script_uuid)
for script_uuid in scripts_to_purge:
del self._scripts[script_uuid]
self.purge_removed_scripts(scripts_uuid_list)
# Load or update script
for script_uuid in scripts_uuid_list:
script_db = db.get_script(uuid=script_uuid)
@ -67,11 +69,31 @@ class PyScripts(rating.RatingProcessorBase):
'code': code,
'checksum': checksum})
def purge_removed_scripts(self, scripts_uuid_list):
    """Evict cached scripts whose UUIDs are absent from the given list.

    :param scripts_uuid_list: UUIDs of the scripts currently stored in
        the database; anything cached but not listed here is removed.
    """
    stale_uuids = self.get_all_script_to_remove(scripts_uuid_list)
    self.remove_purged_scripts(stale_uuids)
def get_all_script_to_remove(self, new_scripts_uuid_list):
    """Return the UUIDs of cached scripts missing from the new UUID list.

    :param new_scripts_uuid_list: the authoritative list of script UUIDs;
        cached entries not present in it are candidates for purging.
    :return: list of UUIDs to remove, in cache iteration order.
    """
    return [script_uuid for script_uuid in self._scripts
            if script_uuid not in new_scripts_uuid_list]
def remove_purged_scripts(self, scripts_to_purge):
    """Delete each given script from the in-memory cache, logging removals.

    :param scripts_to_purge: iterable of script UUIDs to drop from the
        cache of scripts scheduled for execution.
    """
    for purged_uuid in scripts_to_purge:
        LOG.info("Removing script [%s] from the script list to execute.",
                 self._scripts[purged_uuid])
        del self._scripts[purged_uuid]
def reload_config(self):
    """Reload the module's configuration.

    Refreshes the in-memory script cache from the database so that newly
    added, updated, or removed scripts take effect on the next processing
    cycle.
    """
    LOG.debug("Executing the reload of configurations.")
    self.load_scripts_in_memory()
    LOG.debug("Configurations reloaded.")
def start_script(self, code, data):
context = {'data': data}
@ -80,5 +102,14 @@ class PyScripts(rating.RatingProcessorBase):
def process(self, data):
for script in self._scripts.values():
data = self.start_script(script['code'], data)
data_dict = data.as_dict(mutable=True)
LOG.debug("Executing pyscript [%s] with data [%s].",
script, data_dict)
data_output = self.start_script(script['code'], data_dict)
LOG.debug("Result [%s] for processing with pyscript [%s] with "
"data [%s].", data_output, script, data_dict)
data = dataframe.DataFrame.from_dict(data_output)
return data

View File

@ -75,8 +75,8 @@ tests:
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "1.337"
$.dataframes[0].resources[0].service: "cpu"
$.dataframes[0].resources[0].desc.dummy: 'True'
$.dataframes[0].resources[0].desc.fake_meta: '1.0'
$.dataframes[0].resources[0].desc.dummy: True
$.dataframes[0].resources[0].desc.fake_meta: 1.0
$.dataframes[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.dataframes[1].begin: "2015-01-04T13:00:00"
$.dataframes[1].end: "2015-01-04T14:00:00"
@ -84,8 +84,8 @@ tests:
$.dataframes[1].resources[0].volume: "1"
$.dataframes[1].resources[0].rating: "1.337"
$.dataframes[1].resources[0].service: "cpu"
$.dataframes[1].resources[0].desc.dummy: 'True'
$.dataframes[1].resources[0].desc.fake_meta: '1.0'
$.dataframes[1].resources[0].desc.dummy: True
$.dataframes[1].resources[0].desc.fake_meta: 1.0
$.dataframes[2].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.dataframes[2].begin: "2015-01-04T13:00:00"
$.dataframes[2].end: "2015-01-04T14:00:00"
@ -93,8 +93,8 @@ tests:
$.dataframes[2].resources[0].volume: "1"
$.dataframes[2].resources[0].rating: "0.121"
$.dataframes[2].resources[0].service: "image.size"
$.dataframes[2].resources[0].desc.dummy: 'True'
$.dataframes[2].resources[0].desc.fake_meta: '1.0'
$.dataframes[2].resources[0].desc.dummy: True
$.dataframes[2].resources[0].desc.fake_meta: 1.0
$.dataframes[3].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.dataframes[3].begin: "2015-01-04T13:00:00"
$.dataframes[3].end: "2015-01-04T14:00:00"
@ -102,8 +102,8 @@ tests:
$.dataframes[3].resources[0].volume: "1"
$.dataframes[3].resources[0].rating: "0.121"
$.dataframes[3].resources[0].service: "image.size"
$.dataframes[3].resources[0].desc.dummy: 'True'
$.dataframes[3].resources[0].desc.fake_meta: '1.0'
$.dataframes[3].resources[0].desc.dummy: True
$.dataframes[3].resources[0].desc.fake_meta: 1.0
- name: fetch data for the second tenant
url: /v1/storage/dataframes
@ -121,8 +121,8 @@ tests:
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "1.337"
$.dataframes[0].resources[0].service: "cpu"
$.dataframes[0].resources[0].desc.dummy: 'True'
$.dataframes[0].resources[0].desc.fake_meta: '1.0'
$.dataframes[0].resources[0].desc.dummy: True
$.dataframes[0].resources[0].desc.fake_meta: 1.0
$.dataframes[1].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.dataframes[1].begin: "2015-01-04T13:00:00"
$.dataframes[1].end: "2015-01-04T14:00:00"
@ -130,8 +130,8 @@ tests:
$.dataframes[1].resources[0].volume: "1"
$.dataframes[1].resources[0].rating: "1.337"
$.dataframes[1].resources[0].service: "cpu"
$.dataframes[1].resources[0].desc.dummy: 'True'
$.dataframes[1].resources[0].desc.fake_meta: '1.0'
$.dataframes[1].resources[0].desc.dummy: True
$.dataframes[1].resources[0].desc.fake_meta: 1.0
$.dataframes[2].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.dataframes[2].begin: "2015-01-04T13:00:00"
$.dataframes[2].end: "2015-01-04T14:00:00"
@ -139,8 +139,8 @@ tests:
$.dataframes[2].resources[0].volume: "1"
$.dataframes[2].resources[0].rating: "0.121"
$.dataframes[2].resources[0].service: "image.size"
$.dataframes[2].resources[0].desc.dummy: 'True'
$.dataframes[2].resources[0].desc.fake_meta: '1.0'
$.dataframes[2].resources[0].desc.dummy: True
$.dataframes[2].resources[0].desc.fake_meta: 1.0
$.dataframes[3].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.dataframes[3].begin: "2015-01-04T13:00:00"
$.dataframes[3].end: "2015-01-04T14:00:00"
@ -148,8 +148,8 @@ tests:
$.dataframes[3].resources[0].volume: "1"
$.dataframes[3].resources[0].rating: "0.121"
$.dataframes[3].resources[0].service: "image.size"
$.dataframes[3].resources[0].desc.dummy: 'True'
$.dataframes[3].resources[0].desc.fake_meta: '1.0'
$.dataframes[3].resources[0].desc.dummy: True
$.dataframes[3].resources[0].desc.fake_meta: 1.0
- name: fetch data for multiple tenants
@ -167,8 +167,8 @@ tests:
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "1.337"
$.dataframes[0].resources[0].service: "cpu"
$.dataframes[0].resources[0].desc.dummy: 'True'
$.dataframes[0].resources[0].desc.fake_meta: '1.0'
$.dataframes[0].resources[0].desc.dummy: True
$.dataframes[0].resources[0].desc.fake_meta: 1.0
$.dataframes[1].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.dataframes[1].begin: "2015-01-04T13:00:00"
$.dataframes[1].end: "2015-01-04T14:00:00"
@ -176,8 +176,8 @@ tests:
$.dataframes[1].resources[0].volume: "1"
$.dataframes[1].resources[0].rating: "1.337"
$.dataframes[1].resources[0].service: "cpu"
$.dataframes[1].resources[0].desc.dummy: 'True'
$.dataframes[1].resources[0].desc.fake_meta: '1.0'
$.dataframes[1].resources[0].desc.dummy: True
$.dataframes[1].resources[0].desc.fake_meta: 1.0
$.dataframes[2].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.dataframes[2].begin: "2015-01-04T13:00:00"
$.dataframes[2].end: "2015-01-04T14:00:00"
@ -185,8 +185,8 @@ tests:
$.dataframes[2].resources[0].volume: "1"
$.dataframes[2].resources[0].rating: "0.121"
$.dataframes[2].resources[0].service: "image.size"
$.dataframes[2].resources[0].desc.dummy: 'True'
$.dataframes[2].resources[0].desc.fake_meta: '1.0'
$.dataframes[2].resources[0].desc.dummy: True
$.dataframes[2].resources[0].desc.fake_meta: 1.0
$.dataframes[3].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.dataframes[3].begin: "2015-01-04T13:00:00"
$.dataframes[3].end: "2015-01-04T14:00:00"
@ -194,8 +194,8 @@ tests:
$.dataframes[3].resources[0].volume: "1"
$.dataframes[3].resources[0].rating: "0.121"
$.dataframes[3].resources[0].service: "image.size"
$.dataframes[3].resources[0].desc.dummy: 'True'
$.dataframes[3].resources[0].desc.fake_meta: '1.0'
$.dataframes[3].resources[0].desc.dummy: True
$.dataframes[3].resources[0].desc.fake_meta: 1.0
$.dataframes[0].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.dataframes[0].begin: "2015-01-04T13:00:00"
$.dataframes[0].end: "2015-01-04T14:00:00"
@ -203,8 +203,8 @@ tests:
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "1.337"
$.dataframes[0].resources[0].service: "cpu"
$.dataframes[0].resources[0].desc.dummy: 'True'
$.dataframes[0].resources[0].desc.fake_meta: '1.0'
$.dataframes[0].resources[0].desc.dummy: True
$.dataframes[0].resources[0].desc.fake_meta: 1.0
$.dataframes[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.dataframes[1].begin: "2015-01-04T13:00:00"
$.dataframes[1].end: "2015-01-04T14:00:00"
@ -212,8 +212,8 @@ tests:
$.dataframes[1].resources[0].volume: "1"
$.dataframes[1].resources[0].rating: "1.337"
$.dataframes[1].resources[0].service: "cpu"
$.dataframes[1].resources[0].desc.dummy: 'True'
$.dataframes[1].resources[0].desc.fake_meta: '1.0'
$.dataframes[1].resources[0].desc.dummy: True
$.dataframes[1].resources[0].desc.fake_meta: 1.0
$.dataframes[2].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.dataframes[2].begin: "2015-01-04T13:00:00"
$.dataframes[2].end: "2015-01-04T14:00:00"
@ -221,8 +221,8 @@ tests:
$.dataframes[2].resources[0].volume: "1"
$.dataframes[2].resources[0].rating: "0.121"
$.dataframes[2].resources[0].service: "image.size"
$.dataframes[2].resources[0].desc.dummy: 'True'
$.dataframes[2].resources[0].desc.fake_meta: '1.0'
$.dataframes[2].resources[0].desc.dummy: True
$.dataframes[2].resources[0].desc.fake_meta: 1.0
$.dataframes[3].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.dataframes[3].begin: "2015-01-04T13:00:00"
$.dataframes[3].end: "2015-01-04T14:00:00"
@ -230,8 +230,8 @@ tests:
$.dataframes[3].resources[0].volume: "1"
$.dataframes[3].resources[0].rating: "0.121"
$.dataframes[3].resources[0].service: "image.size"
$.dataframes[3].resources[0].desc.dummy: 'True'
$.dataframes[3].resources[0].desc.fake_meta: '1.0'
$.dataframes[3].resources[0].desc.dummy: True
$.dataframes[3].resources[0].desc.fake_meta: 1.0
- name: fetch data filtering on cpu service and tenant
url: /v1/storage/dataframes
@ -250,8 +250,8 @@ tests:
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "1.337"
$.dataframes[0].resources[0].service: "cpu"
$.dataframes[0].resources[0].desc.dummy: 'True'
$.dataframes[0].resources[0].desc.fake_meta: '1.0'
$.dataframes[0].resources[0].desc.dummy: True
$.dataframes[0].resources[0].desc.fake_meta: 1.0
$.dataframes[1].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.dataframes[1].begin: "2015-01-04T13:00:00"
$.dataframes[1].end: "2015-01-04T14:00:00"
@ -259,8 +259,8 @@ tests:
$.dataframes[1].resources[0].volume: "1"
$.dataframes[1].resources[0].rating: "1.337"
$.dataframes[1].resources[0].service: "cpu"
$.dataframes[1].resources[0].desc.dummy: 'True'
$.dataframes[1].resources[0].desc.fake_meta: '1.0'
$.dataframes[1].resources[0].desc.dummy: True
$.dataframes[1].resources[0].desc.fake_meta: 1.0
- name: fetch data filtering on image service and tenant
url: /v1/storage/dataframes
@ -279,8 +279,8 @@ tests:
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "0.121"
$.dataframes[0].resources[0].service: "image.size"
$.dataframes[0].resources[0].desc.dummy: 'True'
$.dataframes[0].resources[0].desc.fake_meta: '1.0'
$.dataframes[0].resources[0].desc.dummy: True
$.dataframes[0].resources[0].desc.fake_meta: 1.0
$.dataframes[1].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.dataframes[1].begin: "2015-01-04T13:00:00"
$.dataframes[1].end: "2015-01-04T14:00:00"
@ -288,8 +288,8 @@ tests:
$.dataframes[1].resources[0].volume: "1"
$.dataframes[1].resources[0].rating: "0.121"
$.dataframes[1].resources[0].service: "image.size"
$.dataframes[1].resources[0].desc.dummy: 'True'
$.dataframes[1].resources[0].desc.fake_meta: '1.0'
$.dataframes[1].resources[0].desc.dummy: True
$.dataframes[1].resources[0].desc.fake_meta: 1.0
- name: fetch data filtering on service with no data and tenant
url: /v1/storage/dataframes

View File

@ -21,64 +21,85 @@ import zlib
from oslo_utils import uuidutils
from cloudkitty import dataframe
from cloudkitty.rating import pyscripts
from cloudkitty.rating.pyscripts.db import api
from cloudkitty import tests
from dateutil import parser
FAKE_UUID = '6c1b8a30-797f-4b7e-ad66-9879b79059fb'
CK_RESOURCES_DATA = [{
CK_RESOURCES_DATA = {
"period": {
"begin": "2014-10-01T00:00:00",
"end": "2014-10-01T01:00:00"},
"usage": {
"instance_status": [
dataframe.DataPoint(
"instance", 1, 0,
{"availability_zone": "nova",
"flavor": "m1.ultra",
"image_id": "f5600101-8fa2-4864-899e-ebcb7ed6b568",
"memory": "64",
"name": "prod1",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"
},
{"farm": "prod"}),
dataframe.DataPoint(
"instance", 1, 0,
{"availability_zone": "nova",
"flavor": "m1.not_so_ultra",
"image_id": "f5600101-8fa2-4864-899e-ebcb7ed6b568",
"memory": "64",
"name": "prod1",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"
},
{"farm": "prod"})],
"compute": [
{
"desc": {
"availability_zone": "nova",
"flavor": "m1.nano",
"image_id": "f5600101-8fa2-4864-899e-ebcb7ed6b568",
"memory": "64",
"metadata": {
"farm": "prod"},
"name": "prod1",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"},
"vol": {
"qty": 1,
"unit": "instance"}
},
{
"desc": {
"availability_zone": "nova",
"flavor": "m1.tiny",
"image_id": "a41fba37-2429-4f15-aa00-b5bc4bf557bf",
"memory": "512",
"metadata": {
"farm": "dev"},
"name": "dev1",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"},
"vol": {
"qty": 2,
"unit": "instance"}},
{
"desc": {
"availability_zone": "nova",
"flavor": "m1.nano",
"image_id": "a41fba37-2429-4f15-aa00-b5bc4bf557bf",
"memory": "64",
"metadata": {
"farm": "dev"},
"name": "dev2",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"},
"vol": {
"qty": 1,
"unit": "instance"}}]}}]
dataframe.DataPoint(
"instance", 1, 0,
{"availability_zone": "nova",
"flavor": "m1.nano",
"image_id": "f5600101-8fa2-4864-899e-ebcb7ed6b568",
"memory": "64",
"name": "prod1",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"
},
{"farm": "prod"}),
dataframe.DataPoint(
"instance", 2, 0,
{"availability_zone": "nova",
"flavor": "m1.tiny",
"image_id": "a41fba37-2429-4f15-aa00-b5bc4bf557bf",
"memory": "512",
"name": "dev1",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"
},
{"farm": "dev"}),
dataframe.DataPoint(
"instance", 1, 0,
{"availability_zone": "nova",
"flavor": "m1.nano",
"image_id": "a41fba37-2429-4f15-aa00-b5bc4bf557bf",
"memory": "64",
"name": "dev2",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"
},
{"farm": "dev"}),
]
}
}
TEST_CODE1 = 'a = 1'.encode('utf-8')
TEST_CODE1_CHECKSUM = hashlib.sha512(TEST_CODE1).hexdigest()
@ -90,14 +111,89 @@ TEST_CODE3_CHECKSUM = hashlib.sha512(TEST_CODE3).hexdigest()
COMPLEX_POLICY1 = """
import decimal
usage_data = data['usage']
for service in usage_data.keys():
if service == 'compute':
all_points = usage_data.get(service, [])
for resource in all_points:
if resource['groupby'].get('flavor') == 'm1.nano':
resource['rating'] = {
'price': decimal.Decimal(2.0)}
if service == 'instance_status':
all_points = usage_data.get(service, [])
for resource in all_points:
if resource['groupby'].get('flavor') == 'm1.ultra':
resource['rating'] = {
'price': decimal.Decimal(
resource['groupby'].get(
'memory')) * decimal.Decimal(1.5)}
""".encode('utf-8')
for period in data:
for service, resources in period['usage'].items():
if service == 'compute':
for resource in resources:
if resource['desc'].get('flavor') == 'm1.nano':
resource['rating'] = {
'price': decimal.Decimal(1.0)}
DOCUMENTATION_RATING_POLICY = """
import decimal
# Price for each flavor. These are equivalent to hashmap field mappings.
flavors = {
'm1.micro': decimal.Decimal(0.65),
'm1.nano': decimal.Decimal(0.35),
'm1.large': decimal.Decimal(2.67)
}
# Price per MB / GB for images and volumes. These are equivalent to
# hashmap service mappings.
image_mb_price = decimal.Decimal(0.002)
volume_gb_price = decimal.Decimal(0.35)
# These functions return the price of a service usage on a collect period.
# The price is always equivalent to the price per unit multiplied by
# the quantity.
def get_compute_price(item):
flavor_name = item['groupby']['flavor']
if not flavor_name in flavors:
return 0
else:
return (decimal.Decimal(item['vol']['qty']) * flavors[flavor_name])
def get_image_price(item):
if not item['vol']['qty']:
return 0
else:
return decimal.Decimal(item['vol']['qty']) * image_mb_price
def get_volume_price(item):
if not item['vol']['qty']:
return 0
else:
return decimal.Decimal(item['vol']['qty']) * volume_gb_price
# Mapping each service to its price calculation function
services = {
'compute': get_compute_price,
'volume': get_volume_price,
'image': get_image_price
}
def process(data):
# The 'data' is a dictionary with the usage entries for each service for
# each given period.
usage_data = data['usage']
for service_name, service_data in usage_data.items():
# Do not calculate the price if the service has no
# price calculation function
if service_name in services.keys():
# A service can have several items. For example,
# each running instance is an item of the compute service
for item in service_data:
item['rating'] = {'price': services[service_name](item)}
return data
# 'data' is passed as a global variable. The script is supposed to set the
# 'rating' element of each item in each service
data = process(data)
""".encode('utf-8')
@ -109,6 +205,11 @@ class PyScriptsRatingTest(tests.TestCase):
self._db_api.get_migration().upgrade('head')
self._pyscripts = pyscripts.PyScripts(self._tenant_id)
self.dataframe_for_tests = dataframe.DataFrame(
parser.parse(CK_RESOURCES_DATA['period']['begin']),
parser.parse(CK_RESOURCES_DATA['period']['end']),
CK_RESOURCES_DATA['usage'])
# Scripts tests
@mock.patch.object(uuidutils, 'generate_uuid',
return_value=FAKE_UUID)
@ -295,16 +396,87 @@ class PyScriptsRatingTest(tests.TestCase):
self._db_api.create_script('policy1', TEST_CODE1)
self._db_api.create_script('policy2', TEST_CODE3)
self._pyscripts.reload_config()
self.assertRaises(NameError, self._pyscripts.process, {})
self.assertEqual(2, len(self._pyscripts._scripts))
self.assertRaises(NameError, self._pyscripts.process,
self.dataframe_for_tests)
# Processing
def test_process_rating(self):
self._db_api.create_script('policy1', COMPLEX_POLICY1)
self._pyscripts.reload_config()
actual_data = copy.deepcopy(CK_RESOURCES_DATA)
expected_data = copy.deepcopy(CK_RESOURCES_DATA)
compute_list = expected_data[0]['usage']['compute']
compute_list[0]['rating'] = {'price': decimal.Decimal('1')}
compute_list[2]['rating'] = {'price': decimal.Decimal('1')}
self._pyscripts.process(actual_data)
self.assertEqual(expected_data, actual_data)
data_output = self._pyscripts.process(self.dataframe_for_tests)
self.assertIsInstance(data_output, dataframe.DataFrame)
dict_output = data_output.as_dict()
for point in dict_output['usage']['compute']:
if point['groupby'].get('flavor') == 'm1.nano':
self.assertEqual(
decimal.Decimal('2'), point['rating']['price'])
else:
self.assertEqual(
decimal.Decimal('0'), point['rating']['price'])
for point in dict_output['usage']['instance_status']:
if point['groupby'].get('flavor') == 'm1.ultra':
self.assertEqual(
decimal.Decimal('96'), point['rating']['price'])
else:
self.assertEqual(
decimal.Decimal('0'), point['rating']['price'])
# Processing
def test_process_rating_with_documentation_rules(self):
self._db_api.create_script('policy1', DOCUMENTATION_RATING_POLICY)
self._pyscripts.reload_config()
dataframe_for_tests = copy.deepcopy(self.dataframe_for_tests)
dataframe_for_tests.add_point(
dataframe.DataPoint("GB", 5, 0, {"tag": "A"}, {}), "image")
dataframe_for_tests.add_point(
dataframe.DataPoint("GB", 15, 0, {"tag": "B"}, {}), "image")
dataframe_for_tests.add_point(
dataframe.DataPoint("GB", 500, 0, {"tag": "D"}, {}), "volume")
dataframe_for_tests.add_point(
dataframe.DataPoint("GB", 80, 0, {"tag": "E"}, {}), "volume")
data_output = self._pyscripts.process(dataframe_for_tests)
self.assertIsInstance(data_output, dataframe.DataFrame)
dict_output = data_output.as_dict()
for point in dict_output['usage']['compute']:
if point['groupby'].get('flavor') == 'm1.nano':
self.assertEqual(
decimal.Decimal('0.3499999999999999777955395075'),
point['rating']['price'])
else:
self.assertEqual(
decimal.Decimal('0'), point['rating']['price'])
for point in dict_output['usage']['instance_status']:
if point['groupby'].get('flavor') == 'm1.ultra':
self.assertEqual(
decimal.Decimal('0'), point['rating']['price'])
else:
self.assertEqual(
decimal.Decimal('0'), point['rating']['price'])
for point in dict_output['usage']['image']:
if point['groupby'].get('tag') == 'A':
self.assertEqual(
decimal.Decimal('0.01000000000000000020816681712'),
point['rating']['price'])
else:
self.assertEqual(
decimal.Decimal('0.03000000000000000062450045135'),
point['rating']['price'])
for point in dict_output['usage']['volume']:
if point['groupby'].get('tag') == 'D':
self.assertEqual(
decimal.Decimal('174.9999999999999888977697537'),
point['rating']['price'])
else:
self.assertEqual(
decimal.Decimal('27.99999999999999822364316060'),
point['rating']['price'])

View File

@ -74,18 +74,18 @@ Processing the data
.. code-block:: python
def process(data):
# The 'data' parameter is a list of dictionaries containing a
# "usage" and a "period" field
for d in data:
usage = d['usage']
for service_name, service_data in usage.items():
# Do not calculate the price if the service has no
# price calculation function
if service_name in services.keys():
# A service can have several items. For example,
# each running instance is an item of the compute service
for item in service_data:
item['rating'] = {'price': services[service_name](item)}
# The 'data' is a dictionary with the usage entries for each service
# in a given period.
usage_data = data['usage']
for service_name, service_data in usage_data.items():
# Do not calculate the price if the service has no
# price calculation function
if service_name in services.keys():
# A service can have several items. For example,
# each running instance is an item of the compute service
for item in service_data:
item['rating'] = {'price': services[service_name](item)}
return data

View File

@ -0,0 +1,4 @@
---
fixes:
- |
Fix failure to process rating using the PyScripts rating module.