V3 json schema validation: workers

This patch adds jsonschema validation for the following workers API:
* POST v3/{project_id}/workers/cleanup

Change-Id: I4eab4d0aa254d9ce7dcb9e15077779fa508cad41
Partial-Implements: bp json-schema-validation
Neha Alhat 2017-11-23 17:16:19 +05:30
parent e6028249cc
commit 9ae5811625
4 changed files with 109 additions and 53 deletions


@@ -0,0 +1,36 @@
+# Copyright (C) 2017 NTT DATA
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Schema for V3 Workers API.
+"""
+
+from cinder.api.validation import parameter_types
+
+
+cleanup = {
+    'type': 'object',
+    'properties': {
+        'cluster_name': parameter_types.hostname,
+        'disabled': parameter_types.boolean,
+        'host': parameter_types.hostname,
+        'is_up': parameter_types.boolean,
+        'binary': {'enum': ['cinder-volume', 'cinder-scheduler']},
+        'resource_id': parameter_types.optional_uuid,
+        'resource_type': parameter_types.resource_type,
+        'service_id': parameter_types.service_id,
+    },
+    'additionalProperties': False,
+}
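As a quick illustration of what this schema enforces, the sketch below validates sample request bodies with plain jsonschema, outside the Cinder tree. The hostname_like and cleanup_like dicts are simplified stand-ins for the real parameter_types helpers, not Cinder code:

import jsonschema

# Simplified stand-in for parameter_types.hostname (illustrative only).
hostname_like = {'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255,
                 'pattern': '^[a-zA-Z0-9-._#@]*$'}

# Cut-down analogue of the cleanup schema above.
cleanup_like = {
    'type': 'object',
    'properties': {
        'cluster_name': hostname_like,
        'host': hostname_like,
        'binary': {'enum': ['cinder-volume', 'cinder-scheduler']},
    },
    'additionalProperties': False,
}

# Accepted: pool-style names match the hostname pattern.
jsonschema.validate({'host': 'host@backend#pool'}, cleanup_like)

# Rejected: unknown keys trip 'additionalProperties': False, producing the
# same "Additional properties are not allowed" text the new unit test asserts.
try:
    jsonschema.validate({'fake_key': 'value'}, cleanup_like)
except jsonschema.ValidationError as exc:
    print(exc.message)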


@@ -13,13 +13,14 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from oslo_utils import strutils
 from oslo_utils import timeutils
-from oslo_utils import uuidutils
 
 from cinder.api import microversions as mv
 from cinder.api.openstack import wsgi
+from cinder.api.schemas import workers
 from cinder.api.v3.views import workers as workers_view
-from cinder.common import constants
+from cinder.api import validation
 from cinder import db
 from cinder import exception
 from cinder.i18n import _
@@ -31,29 +32,25 @@ from cinder import utils
 class WorkerController(wsgi.Controller):
-    allowed_clean_keys = {'service_id', 'cluster_name', 'host', 'binary',
-                          'is_up', 'disabled', 'resource_id', 'resource_type'}
-
     def __init__(self, *args, **kwargs):
         self.sch_api = sch_rpc.SchedulerAPI()
 
-    def _prepare_params(self, ctxt, params, allowed):
-        if not allowed.issuperset(params):
-            invalid_keys = set(params).difference(allowed)
-            msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys)
-            raise exception.InvalidInput(reason=msg)
-
-        if params.get('binary') not in (None, constants.VOLUME_BINARY,
-                                        'cinder-scheduler'):
-            msg = _('binary must be empty or set to cinder-volume or '
-                    'cinder-scheduler')
-            raise exception.InvalidInput(reason=msg)
+    @wsgi.Controller.api_version(mv.WORKERS_CLEANUP)
+    @wsgi.response(202)
+    @validation.schema(workers.cleanup)
+    def cleanup(self, req, body=None):
+        """Do the cleanup on resources from a specific service/host/node."""
+        # Let the wsgi middleware convert NotAuthorized exceptions
+        ctxt = req.environ['cinder.context']
+        ctxt.authorize(policy.CLEAN_POLICY)
+        body = body or {}
 
         for boolean in ('disabled', 'is_up'):
-            if params.get(boolean) is not None:
-                params[boolean] = utils.get_bool_param(boolean, params)
+            if body.get(boolean) is not None:
+                body[boolean] = strutils.bool_from_string(body[boolean])
 
-        resource_type = params.get('resource_type')
+        resource_type = body.get('resource_type')
         if resource_type:
             resource_type = resource_type.title()
@@ -65,23 +62,19 @@ class WorkerController(wsgi.Controller):
                 msg = msg % {"resource_type": resource_type,
                              "valid_types": valid_types}
                 raise exception.InvalidInput(reason=msg)
-            params['resource_type'] = resource_type
+            body['resource_type'] = resource_type
 
-        resource_id = params.get('resource_id')
+        resource_id = body.get('resource_id')
         if resource_id:
-            if not uuidutils.is_uuid_like(resource_id):
-                msg = (_('Resource ID must be a UUID, and %s is not.') %
-                       resource_id)
-                raise exception.InvalidInput(reason=msg)
-
             # If we have the resource type but we don't have where it is
             # located, we get it from the DB to limit the distribution of the
             # request by the scheduler, otherwise it will be distributed to all
             # the services.
             location_keys = {'service_id', 'cluster_name', 'host'}
-            if not location_keys.intersection(params):
+            if not location_keys.intersection(body):
                 workers = db.worker_get_all(ctxt, resource_id=resource_id,
-                                            binary=params.get('binary'),
+                                            binary=body.get('binary'),
                                             resource_type=resource_type)
 
                 if len(workers) == 0:
@@ -95,26 +88,14 @@ class WorkerController(wsgi.Controller):
                     raise exception.InvalidInput(reason=msg)
 
                 worker = workers[0]
-                params.update(service_id=worker.service_id,
-                              resource_type=worker.resource_type)
+                body.update(service_id=worker.service_id,
+                            resource_type=worker.resource_type)
 
-        return params
-
-    @wsgi.Controller.api_version(mv.WORKERS_CLEANUP)
-    @wsgi.response(202)
-    def cleanup(self, req, body=None):
-        """Do the cleanup on resources from a specific service/host/node."""
-        # Let the wsgi middleware convert NotAuthorized exceptions
-        ctxt = req.environ['cinder.context']
-        ctxt.authorize(policy.CLEAN_POLICY)
-        body = body or {}
-        params = self._prepare_params(ctxt, body, self.allowed_clean_keys)
-        params['until'] = timeutils.utcnow()
+        body['until'] = timeutils.utcnow()
 
         # NOTE(geguileo): If is_up is not specified in the request
         # CleanupRequest's default will be used (False)
-        cleanup_request = objects.CleanupRequest(**params)
+        cleanup_request = objects.CleanupRequest(**body)
         cleaning, unavailable = self.sch_api.work_cleanup(ctxt,
                                                           cleanup_request)
         return {
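With the @validation.schema decorator in place, malformed bodies are rejected with a 400 before the controller body runs, which is what lets the hand-rolled _prepare_params checks above be deleted. A minimal sketch of how such a decorator can work (simplified; not Cinder's actual implementation, which integrates with its own ValidationError type and microversion handling):

import functools

import jsonschema
import webob.exc


def schema(request_body_schema):
    """Register a jsonschema to validate request bodies against (sketch)."""
    def decorator(func):
        validator = jsonschema.Draft4Validator(request_body_schema)

        @functools.wraps(func)
        def wrapper(self, req, body=None, **kwargs):
            try:
                # Validate the deserialized body before the handler runs.
                validator.validate(body or {})
            except jsonschema.ValidationError as exc:
                # Surfaces as a 400 carrying the validator's message, e.g.
                # "Additional properties are not allowed".
                raise webob.exc.HTTPBadRequest(explanation=exc.message)
            return func(self, req, body=body, **kwargs)
        return wrapper
    return decorator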


@@ -208,3 +208,27 @@ volume_size = {
     'pattern': '^[0-9]+$',
     'minimum': 1
 }
+
+
+hostname = {
+    'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255,
+    # NOTE: 'host' is defined in the "services" table, and that
+    # means a hostname. The hostname grammar in RFC 952 does
+    # not allow underscores in hostnames. However, this
+    # schema allows them, because they sometimes occur in
+    # real systems.
+    'pattern': '^[a-zA-Z0-9-._#@]*$'
+}
+
+
+resource_type = {'type': ['string', 'null'], 'minLength': 0, 'maxLength': 40}
+
+
+service_id = {
+    'type': ['integer', 'string', 'null'],
+    'pattern': '^[0-9]*$', 'maxLength': 11
+}
+
+
+optional_uuid = {'oneOf': [{'type': 'null'},
+                           {'type': 'string', 'format': 'uuid'}]}
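One subtlety in optional_uuid: 'format': 'uuid' only takes effect when validation runs with a format checker that understands that format, which Cinder's validation layer registers. A standalone approximation (the checker wiring below is illustrative, not Cinder's code):

import jsonschema
from oslo_utils import uuidutils

checker = jsonschema.FormatChecker()


@checker.checks('uuid')
def _check_uuid(instance):
    # Mirrors the semantics of the old manual uuidutils.is_uuid_like() check.
    return uuidutils.is_uuid_like(instance)


optional_uuid = {'oneOf': [{'type': 'null'},
                           {'type': 'string', 'format': 'uuid'}]}

jsonschema.validate(None, optional_uuid, format_checker=checker)  # accepted
try:
    jsonschema.validate('non UUID', optional_uuid, format_checker=checker)
except jsonschema.ValidationError:
    print('rejected, as in test_cleanup_wrong_param')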


@@ -91,17 +91,25 @@ class WorkersTestCase(test.TestCase):
         self.assertEqual(http_client.FORBIDDEN, res.status_code)
         rpc_mock.assert_not_called()
 
-    @ddt.data({'fake_key': 'value'}, {'binary': 'nova-scheduler'},
+    @ddt.data({'binary': 'nova-scheduler'},
               {'disabled': 'sure'}, {'is_up': 'nop'},
-              {'resource_type': 'service'}, {'resource_id': 'non UUID'})
+              {'resource_type': 'service'}, {'resource_id': 'non UUID'},
+              {'is_up': 11}, {'disabled': 11},
+              {'is_up': ' true '}, {'disabled': ' false '})
     @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
     def test_cleanup_wrong_param(self, body, rpc_mock):
         res = self._get_resp_post(body)
         self.assertEqual(http_client.BAD_REQUEST, res.status_code)
-        if 'disabled' in body or 'is_up' in body:
-            expected = 'is not a boolean'
-        else:
-            expected = 'Invalid input'
+        expected = 'Invalid input'
         self.assertIn(expected, res.json['badRequest']['message'])
         rpc_mock.assert_not_called()
 
+    @ddt.data({'fake_key': 'value'})
+    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
+    def test_cleanup_with_additional_properties(self, body, rpc_mock):
+        res = self._get_resp_post(body)
+        self.assertEqual(http_client.BAD_REQUEST, res.status_code)
+        expected = 'Additional properties are not allowed'
+        self.assertIn(expected, res.json['badRequest']['message'])
+        rpc_mock.assert_not_called()
@@ -113,11 +121,17 @@ class WorkersTestCase(test.TestCase):
         return {'cleaning': [service_view(s) for s in cleaning],
                 'unavailable': [service_view(s) for s in unavailable]}
 
-    @ddt.data({'service_id': 10}, {'cluster_name': 'cluster_name'},
-              {'host': 'hostname'}, {'binary': 'cinder-volume'},
-              {'binary': 'cinder-scheduler'}, {'disabled': 'true'},
+    @ddt.data({'service_id': 10}, {'binary': 'cinder-volume'},
+              {'binary': 'cinder-scheduler'}, {'disabled': 'false'},
               {'is_up': 'no'}, {'resource_type': 'Volume'},
-              {'resource_id': fake.VOLUME_ID, 'host': 'hostname'})
+              {'resource_id': fake.VOLUME_ID, 'host': 'host@backend'},
+              {'host': 'host@backend#pool'},
+              {'cluster_name': 'cluster@backend'},
+              {'cluster_name': 'cluster@backend#pool'},
+              {'service_id': None},
+              {'cluster_name': None}, {'host': None},
+              {'resource_type': ''}, {'resource_type': None},
+              {'resource_id': None})
     @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup',
                 return_value=SERVICES)
     def test_cleanup_params(self, body, rpc_mock):
@@ -127,7 +141,8 @@ class WorkersTestCase(test.TestCase):
         cleanup_request = rpc_mock.call_args[0][1]
         for key, value in body.items():
             if key in ('disabled', 'is_up'):
-                value = value == 'true'
+                if value is not None:
+                    value = value == 'true'
             self.assertEqual(value, getattr(cleanup_request, key))
         self.assertEqual(self._expected_services(*SERVICES), res.json)
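For readers unfamiliar with the test pattern above: ddt expands each @ddt.data entry into a separate test case (the test class carries @ddt.ddt, outside this hunk). A self-contained analogue of the boolean coercion the controller now relies on (class and method names here are illustrative):

import unittest

import ddt
from oslo_utils import strutils


@ddt.ddt
class BoolFromStringTestCase(unittest.TestCase):

    @ddt.data(('true', True), ('false', False), ('no', False))
    @ddt.unpack
    def test_bool_from_string(self, raw, expected):
        # The controller coerces 'disabled'/'is_up' the same way.
        self.assertEqual(expected, strutils.bool_from_string(raw))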