From 46f0e16d11f5e0d008419e799e78bf72edec23c8 Mon Sep 17 00:00:00 2001
From: ricolin
Date: Mon, 18 Sep 2017 21:36:43 +0800
Subject: [PATCH] [policy in code] part3 (resource types)

Allow resource type rules to be defined as policy-in-code. Also add a test
for overriding an in-code resource type rule from the policy JSON file.
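
A rough usage sketch, not part of the change itself (dummy_context() comes
from heat.tests.utils and the 'admin' role simply mirrors the updated unit
tests):

    from heat.common import policy
    from heat.tests import utils

    enforcer = policy.ResourceEnforcer()
    ctx = utils.dummy_context(roles=['admin'])
    # No policy.json entry is needed any more: with is_registered_policy=True
    # the registered default "resource_types:OS::Nova::Flavor":
    # "rule:project_admin" is evaluated instead.
    enforcer.enforce(ctx, 'OS::Nova::Flavor', is_registered_policy=True)
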
Partially-Implements: bp policy-in-code
Change-Id: Id6c21732e66de6c421427ded98de52f5da0a4db2
---
 etc/heat/policy.json             | 18 +--------
 heat/common/policy.py            | 25 ++++++++----
 heat/engine/environment.py       |  2 +-
 heat/engine/service.py           | 25 +++++++-----
 heat/policies/__init__.py        |  2 +
 heat/policies/resource_types.py  | 69 ++++++++++++++++++++++++++++++++
 heat/tests/policy/resources.json |  2 +-
 heat/tests/test_common_policy.py | 68 ++++++++++++++++++------------
 8 files changed, 148 insertions(+), 63 deletions(-)
 create mode 100644 heat/policies/resource_types.py

diff --git a/etc/heat/policy.json b/etc/heat/policy.json
index 3c85e1df28..9fbf21a804 100644
--- a/etc/heat/policy.json
+++ b/etc/heat/policy.json
@@ -47,21 +47,5 @@
     "software_deployments:delete": "rule:deny_stack_user",
     "software_deployments:metadata": "",
 
-    "service:index": "rule:context_is_admin",
-
-    "resource_types:OS::Nova::Flavor": "rule:project_admin",
-    "resource_types:OS::Cinder::EncryptedVolumeType": "rule:project_admin",
-    "resource_types:OS::Cinder::VolumeType": "rule:project_admin",
-    "resource_types:OS::Cinder::Quota": "rule:project_admin",
-    "resource_types:OS::Neutron::Quota": "rule:project_admin",
-    "resource_types:OS::Nova::Quota": "rule:project_admin",
-    "resource_types:OS::Manila::ShareType": "rule:project_admin",
-    "resource_types:OS::Neutron::ProviderNet": "rule:project_admin",
-    "resource_types:OS::Neutron::QoSPolicy": "rule:project_admin",
-    "resource_types:OS::Neutron::QoSBandwidthLimitRule": "rule:project_admin",
-    "resource_types:OS::Neutron::Segment": "rule:project_admin",
-    "resource_types:OS::Nova::HostAggregate": "rule:project_admin",
-    "resource_types:OS::Cinder::QoSSpecs": "rule:project_admin",
-    "resource_types:OS::Cinder::QoSAssociation": "rule:project_admin",
-    "resource_types:OS::Keystone::*": "rule:project_admin"
+    "service:index": "rule:context_is_admin"
 }
diff --git a/heat/common/policy.py b/heat/common/policy.py
index b504e0289d..4a7be9e0bd 100644
--- a/heat/common/policy.py
+++ b/heat/common/policy.py
@@ -125,12 +125,15 @@ class ResourceEnforcer(Enforcer):
         super(ResourceEnforcer, self).__init__(
             default_rule=default_rule, **kwargs)
 
-    def _enforce(self, context, res_type, scope=None, target=None):
+    def _enforce(self, context, res_type, scope=None, target=None,
+                 is_registered_policy=False):
         try:
             result = super(ResourceEnforcer, self).enforce(
                 context, res_type,
                 scope=scope or 'resource_types',
-                target=target)
+                target=target, is_registered_policy=is_registered_policy)
+        except policy.PolicyNotRegistered:
+            result = True
         except self.exc as ex:
             LOG.info(six.text_type(ex))
             raise
@@ -139,19 +142,27 @@ class ResourceEnforcer(Enforcer):
             raise self.exc(action=res_type)
         return result
 
-    def enforce(self, context, res_type, scope=None, target=None):
+    def enforce(self, context, res_type, scope=None, target=None,
+                is_registered_policy=False):
         # NOTE(pas-ha): try/except just to log the exception
-        result = self._enforce(context, res_type, scope, target)
+        result = self._enforce(context, res_type, scope, target,
+                               is_registered_policy=is_registered_policy)
 
         if result:
             # check for wildcard resource types
             subparts = res_type.split("::")[:-1]
             subparts.append('*')
             res_type_wc = "::".join(subparts)
-            return self._enforce(context, res_type_wc, scope, target)
+            try:
+                return self._enforce(context, res_type_wc, scope, target,
+                                     is_registered_policy=is_registered_policy)
+            except self.exc:
+                raise self.exc(action=res_type)
         return result
 
-    def enforce_stack(self, stack, scope=None, target=None):
+    def enforce_stack(self, stack, scope=None, target=None,
+                      is_registered_policy=False):
         for res in stack.resources.values():
-            self.enforce(stack.context, res.type(), scope=scope, target=target)
+            self.enforce(stack.context, res.type(), scope=scope, target=target,
+                         is_registered_policy=is_registered_policy)
diff --git a/heat/engine/environment.py b/heat/engine/environment.py
index b97875bd60..74bfe7f115 100644
--- a/heat/engine/environment.py
+++ b/heat/engine/environment.py
@@ -618,7 +618,7 @@ class ResourceRegistry(object):
         if cnxt is None:
             return True
         try:
-            enforcer.enforce(cnxt, name)
+            enforcer.enforce(cnxt, name, is_registered_policy=True)
         except enforcer.exc:
             return False
         else:
diff --git a/heat/engine/service.py b/heat/engine/service.py
index fb94cb6b62..654ece2f07 100644
--- a/heat/engine/service.py
+++ b/heat/engine/service.py
@@ -730,7 +730,7 @@ class EngineService(service.ServiceBase):
                              parent_resource=parent_resource_name,
                              **common_params)
 
-        self.resource_enforcer.enforce_stack(stack)
+        self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
         self._validate_deferred_auth_context(cnxt, stack)
         is_root = stack.nested_depth == 0
         stack.validate()
@@ -964,7 +964,8 @@ class EngineService(service.ServiceBase):
             if invalid_params:
                 raise exception.ImmutableParameterModified(*invalid_params)
 
-        self.resource_enforcer.enforce_stack(updated_stack)
+        self.resource_enforcer.enforce_stack(updated_stack,
+                                             is_registered_policy=True)
         updated_stack.parameters.set_stack_id(current_stack.identifier())
 
         self._validate_deferred_auth_context(cnxt, updated_stack)
@@ -999,7 +1000,8 @@ class EngineService(service.ServiceBase):
                 cnxt, stack=db_stack, use_stored_context=True)
         else:
             current_stack = parser.Stack.load(cnxt, stack=db_stack)
-        self.resource_enforcer.enforce_stack(current_stack)
+        self.resource_enforcer.enforce_stack(current_stack,
+                                             is_registered_policy=True)
 
         if current_stack.action == current_stack.SUSPEND:
             msg = _('Updating a stack when it is suspended')
@@ -1417,7 +1419,7 @@ class EngineService(service.ServiceBase):
 
         LOG.info('Deleting stack %s', st.name)
         stack = parser.Stack.load(cnxt, stack=st)
-        self.resource_enforcer.enforce_stack(stack)
+        self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
 
         if stack.convergence and cfg.CONF.convergence_engine:
             def convergence_delete():
@@ -1465,7 +1467,8 @@ class EngineService(service.ServiceBase):
         def reload():
             st = self._get_stack(cnxt, stack_identity)
             stack = parser.Stack.load(cnxt, stack=st)
-            self.resource_enforcer.enforce_stack(stack)
+            self.resource_enforcer.enforce_stack(stack,
+                                                 is_registered_policy=True)
             return stack
 
         def wait_then_delete(stack):
@@ -1642,7 +1645,8 @@ class EngineService(service.ServiceBase):
         :param type_name: Name of the resource type to obtain the schema of.
         :param with_description: Return result with description or not.
         """
-        self.resource_enforcer.enforce(cnxt, type_name)
+        self.resource_enforcer.enforce(cnxt, type_name,
+                                       is_registered_policy=True)
         try:
             resource_class = resources.global_env().get_class(type_name)
         except exception.NotFound:
@@ -1703,7 +1707,8 @@ class EngineService(service.ServiceBase):
 
         :param type_name: Name of the resource type to generate a template for.
         :param template_type: the template type to generate, cfn or hot.
         """
-        self.resource_enforcer.enforce(cnxt, type_name)
+        self.resource_enforcer.enforce(cnxt, type_name,
+                                       is_registered_policy=True)
         try:
             resource_class = resources.global_env().get_class(type_name)
         except exception.NotFound:
@@ -2047,7 +2052,7 @@ class EngineService(service.ServiceBase):
 
         s = self._get_stack(cnxt, stack_identity)
 
         stack = parser.Stack.load(cnxt, stack=s)
-        self.resource_enforcer.enforce_stack(stack)
+        self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
         self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
                                               _stack_suspend, stack)
@@ -2061,7 +2066,7 @@ class EngineService(service.ServiceBase):
 
         s = self._get_stack(cnxt, stack_identity)
 
         stack = parser.Stack.load(cnxt, stack=s)
-        self.resource_enforcer.enforce_stack(stack)
+        self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
         self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
                                               _stack_resume, stack)
@@ -2146,7 +2151,7 @@ class EngineService(service.ServiceBase):
         s = self._get_stack(cnxt, stack_identity)
 
         stack = parser.Stack.load(cnxt, stack=s)
-        self.resource_enforcer.enforce_stack(stack)
+        self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
         snapshot = snapshot_object.Snapshot.get_snapshot_by_stack(
             cnxt, snapshot_id, s)
         # FIXME(pas-ha) has to be amended to deny restoring stacks
diff --git a/heat/policies/__init__.py b/heat/policies/__init__.py
index 0a2c9c6bf5..2707432bbd 100644
--- a/heat/policies/__init__.py
+++ b/heat/policies/__init__.py
@@ -14,6 +14,7 @@
 import itertools
 
 from heat.policies import base
+from heat.policies import resource_types
 from heat.policies import stacks
 
 
@@ -21,4 +22,5 @@ def list_rules():
     return itertools.chain(
         base.list_rules(),
         stacks.list_rules(),
+        resource_types.list_rules(),
     )
diff --git a/heat/policies/resource_types.py b/heat/policies/resource_types.py
new file mode 100644
index 0000000000..a706aea0f4
--- /dev/null
+++ b/heat/policies/resource_types.py
@@ -0,0 +1,69 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'resource_types:%s'
+
+resource_types_policies = [
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Nova::Flavor',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Cinder::EncryptedVolumeType',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Cinder::VolumeType',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Cinder::Quota',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Neutron::Quota',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Nova::Quota',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Manila::ShareType',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Neutron::ProviderNet',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Neutron::QoSPolicy',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Neutron::QoSBandwidthLimitRule',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Neutron::Segment',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Nova::HostAggregate',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Cinder::QoSSpecs',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Cinder::QoSAssociation',
+        check_str=base.RULE_PROJECT_ADMIN),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'OS::Keystone::*',
+        check_str=base.RULE_PROJECT_ADMIN)
+]
+
+
+def list_rules():
+    return resource_types_policies
diff --git a/heat/tests/policy/resources.json b/heat/tests/policy/resources.json
index 566dac3469..163fdb66ee 100644
--- a/heat/tests/policy/resources.json
+++ b/heat/tests/policy/resources.json
@@ -1,7 +1,7 @@
 {
     "context_is_admin": "role:admin",
 
-    "resource_types:OS::Test::AdminOnly": "rule:context_is_admin",
+    "resource_types:OS::Cinder::Quota": "!",
 
     "resource_types:OS::Keystone::*": "rule:context_is_admin"
 }
diff --git a/heat/tests/test_common_policy.py b/heat/tests/test_common_policy.py
index 7c256dd15d..1f149d4571 100644
--- a/heat/tests/test_common_policy.py
+++ b/heat/tests/test_common_policy.py
@@ -17,7 +17,6 @@
 import os.path
 
 from oslo_config import fixture as config_fixture
-from oslo_policy import policy as base_policy
 
 from heat.common import exception
 from heat.common import policy
@@ -177,55 +176,70 @@ class TestPolicyEnforcer(common.HeatTestCase):
 
     def test_resource_default_rule(self):
         context = utils.dummy_context(roles=['non-admin'])
-        enforcer = policy.ResourceEnforcer(
-            policy_file=self.get_policy_file('resources.json'))
+        enforcer = policy.ResourceEnforcer()
         res_type = "OS::Test::NotInPolicy"
-        self.assertTrue(enforcer.enforce(context, res_type))
+        self.assertTrue(enforcer.enforce(context, res_type,
+                                         is_registered_policy=True))
 
     def test_resource_enforce_success(self):
         context = utils.dummy_context(roles=['admin'])
-        enforcer = policy.ResourceEnforcer(
-            policy_file=self.get_policy_file('resources.json'))
-        res_type = "OS::Test::AdminOnly"
-        self.assertTrue(enforcer.enforce(context, res_type))
+        enforcer = policy.ResourceEnforcer()
+        res_type = "OS::Keystone::User"
+        self.assertTrue(enforcer.enforce(context, res_type,
+                                         is_registered_policy=True))
 
     def test_resource_enforce_fail(self):
         context = utils.dummy_context(roles=['non-admin'])
-        enforcer = policy.ResourceEnforcer(
-            policy_file=self.get_policy_file('resources.json'))
-        res_type = "OS::Test::AdminOnly"
+        enforcer = policy.ResourceEnforcer()
+        res_type = "OS::Nova::Quota"
         ex = self.assertRaises(exception.Forbidden,
                                enforcer.enforce,
-                               context, res_type)
+                               context, res_type,
+                               None, None,
+                               True)
         self.assertIn(res_type, ex.message)
 
     def test_resource_wildcard_enforce_fail(self):
         context = utils.dummy_context(roles=['non-admin'])
-        enforcer = policy.ResourceEnforcer(
-            policy_file=self.get_policy_file('resources.json'))
+        enforcer = policy.ResourceEnforcer()
         res_type = "OS::Keystone::User"
         ex = self.assertRaises(exception.Forbidden,
                                enforcer.enforce,
-                               context, res_type)
+                               context, res_type,
+                               None, None,
+                               True)
+
         self.assertIn(res_type.split("::", 1)[0], ex.message)
 
     def test_resource_enforce_returns_false(self):
         context = utils.dummy_context(roles=['non-admin'])
-        enforcer = policy.ResourceEnforcer(
-            policy_file=self.get_policy_file('resources.json'),
-            exc=None)
-        res_type = "OS::Test::AdminOnly"
-        self.assertFalse(enforcer.enforce(context, res_type))
-        self.assertIsNotNone(enforcer.enforce(context, res_type))
+        enforcer = policy.ResourceEnforcer(exc=None)
+        res_type = "OS::Keystone::User"
+        self.assertFalse(enforcer.enforce(context, res_type,
+                                          is_registered_policy=True))
+        self.assertIsNotNone(enforcer.enforce(context, res_type,
+                                              is_registered_policy=True))
 
     def test_resource_enforce_exc_on_false(self):
         context = utils.dummy_context(roles=['non-admin'])
-        enforcer = policy.ResourceEnforcer(
-            policy_file=self.get_policy_file('resources.json'))
-        res_type = "OS::Test::AdminOnly"
-        self.patchobject(base_policy.Enforcer, 'enforce',
-                         return_value=False)
+        enforcer = policy.ResourceEnforcer()
+        res_type = "OS::Keystone::User"
         ex = self.assertRaises(exception.Forbidden,
                                enforcer.enforce,
-                               context, res_type)
+                               context, res_type,
+                               None, None,
+                               True)
+
+        self.assertIn(res_type, ex.message)
+
+    def test_resource_enforce_override_deny_admin(self):
+        context = utils.dummy_context(roles=['admin'])
+        enforcer = policy.ResourceEnforcer(
+            policy_file=self.get_policy_file('resources.json'))
+        res_type = "OS::Cinder::Quota"
+        ex = self.assertRaises(exception.Forbidden,
+                               enforcer.enforce,
+                               context, res_type,
+                               None, None,
+                               True)
         self.assertIn(res_type, ex.message)
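
Note, not part of the patch: the new test_resource_enforce_override_deny_admin
case relies on oslo.policy letting a rule loaded from a policy file override
the registered in-code default. A rough sketch of that behaviour outside the
test suite, assuming a policy file whose contents include
"resource_types:OS::Cinder::Quota": "!" (the file path below is hypothetical,
and dummy_context() is reused purely for illustration):

    from heat.common import exception
    from heat.common import policy
    from heat.tests import utils

    # Hypothetical file containing {"resource_types:OS::Cinder::Quota": "!"}
    enforcer = policy.ResourceEnforcer(policy_file='policy.json')
    ctx = utils.dummy_context(roles=['admin'])
    try:
        enforcer.enforce(ctx, 'OS::Cinder::Quota', is_registered_policy=True)
    except exception.Forbidden:
        # The "!" override denies the action even for admins, despite the
        # in-code default of rule:project_admin.
        pass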