Revert "Request class-read object_prefix rbd_children perm"

This was landed in error; it still needs a +2 from another core dev.

This reverts commit d9b4bf4923.

Change-Id: I5af9758da7f05900cbc6e590990004a940e0d60c
Liam Young 2017-12-15 10:41:15 +00:00
parent d9b4bf4923
commit dbbfd7338d
8 changed files with 22 additions and 124 deletions

.gitignore

@@ -6,4 +6,3 @@ tags
 *.sw[nop]
 *.pyc
 .idea
-.stestr


@@ -101,8 +101,6 @@ from charmhelpers.contrib.openstack.utils import (
     git_determine_python_path,
     enable_memcache,
     snap_install_requested,
-    CompareOpenStackReleases,
-    os_release,
 )
 from charmhelpers.core.unitdata import kv
@@ -1568,18 +1566,8 @@ class InternalEndpointContext(OSContextGenerator):
     endpoints by default so this allows admins to optionally use internal
     endpoints.
     """
-    def __init__(self, ost_rel_check_pkg_name):
-        self.ost_rel_check_pkg_name = ost_rel_check_pkg_name
     def __call__(self):
-        ctxt = {'use_internal_endpoints': config('use-internal-endpoints')}
-        rel = os_release(self.ost_rel_check_pkg_name, base='icehouse')
-        if CompareOpenStackReleases(rel) >= 'pike':
-            ctxt['volume_api_version'] = '3'
-        else:
-            ctxt['volume_api_version'] = '2'
-        return ctxt
+        return {'use_internal_endpoints': config('use-internal-endpoints')}
 class AppArmorContext(OSContextGenerator):

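For reference, a minimal standalone sketch of the behavioural difference this hunk reverts (not the charm code: the release comparison is simplified to a plain string test and 'pike' is only an illustrative release name):

def reverted_call(use_internal_endpoints):
    # Post-revert: only the endpoint preference is exposed to templates.
    return {'use_internal_endpoints': use_internal_endpoints}


def pre_revert_call(use_internal_endpoints, release):
    # Pre-revert: additionally selects the Cinder volume API version from the
    # detected OpenStack release (os_release/CompareOpenStackReleases in the
    # real code; a bare string comparison here purely for illustration).
    ctxt = {'use_internal_endpoints': use_internal_endpoints}
    ctxt['volume_api_version'] = '3' if release >= 'pike' else '2'
    return ctxt


print(reverted_call(True))            # {'use_internal_endpoints': True}
print(pre_revert_call(True, 'pike'))  # adds 'volume_api_version': '3'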

@@ -17,22 +17,22 @@ defaults
 {%- if haproxy_queue_timeout %}
     timeout queue {{ haproxy_queue_timeout }}
 {%- else %}
-    timeout queue 9000
+    timeout queue 5000
 {%- endif %}
 {%- if haproxy_connect_timeout %}
     timeout connect {{ haproxy_connect_timeout }}
 {%- else %}
-    timeout connect 9000
+    timeout connect 5000
 {%- endif %}
 {%- if haproxy_client_timeout %}
     timeout client {{ haproxy_client_timeout }}
 {%- else %}
-    timeout client 90000
+    timeout client 30000
 {%- endif %}
 {%- if haproxy_server_timeout %}
     timeout server {{ haproxy_server_timeout }}
 {%- else %}
-    timeout server 90000
+    timeout server 30000
 {%- endif %}
 listen stats

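The hunk above only changes the fallback values; the template logic is untouched. A small sketch of how one of these blocks renders, using jinja2 directly (an assumption for illustration; the charm renders the template through charmhelpers' own templating):

from jinja2 import Template

snippet = (
    "{%- if haproxy_queue_timeout %}\n"
    "    timeout queue {{ haproxy_queue_timeout }}\n"
    "{%- else %}\n"
    "    timeout queue 5000\n"
    "{%- endif %}\n"
)

# No override in the context: the restored default of 5000 is emitted.
print(Template(snippet).render())
# An override supplied via the charm's haproxy context wins over the default.
print(Template(snippet).render(haproxy_queue_timeout=9000))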

@@ -2045,25 +2045,14 @@ def token_cache_pkgs(source=None, release=None):
 def update_json_file(filename, items):
     """Updates the json `filename` with a given dict.
-    :param filename: path to json file (e.g. /etc/glance/policy.json)
+    :param filename: json filename (i.e.: /etc/glance/policy.json)
     :param items: dict of items to update
     """
-    if not items:
-        return
     with open(filename) as fd:
         policy = json.load(fd)
-    # Compare before and after and if nothing has changed don't write the file
-    # since that could cause unnecessary service restarts.
-    before = json.dumps(policy, indent=4, sort_keys=True)
     policy.update(items)
-    after = json.dumps(policy, indent=4, sort_keys=True)
-    if before == after:
-        return
     with open(filename, "w") as fd:
-        fd.write(after)
+        fd.write(json.dumps(policy, indent=4))
 @cached

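To make the behavioural difference concrete, a small usage sketch (assumes charmhelpers is importable; the file path is illustrative rather than taken from this charm):

import json
from charmhelpers.contrib.openstack.utils import update_json_file

policy_file = '/tmp/policy.json'  # stand-in for e.g. /etc/glance/policy.json
with open(policy_file, 'w') as fd:
    json.dump({'context_is_admin': 'role:admin'}, fd)

# Pre-revert code: items that are already present leave the file untouched
# (before == after), so no needless service restart is triggered.
# Post-revert code: the file is rewritten on every call with non-empty items.
update_json_file(policy_file, {'context_is_admin': 'role:admin'})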

@@ -1064,24 +1064,14 @@ class CephBrokerRq(object):
         self.ops = []
     def add_op_request_access_to_group(self, name, namespace=None,
-                                       permission=None, key_name=None,
-                                       object_prefix_permissions=None):
+                                       permission=None, key_name=None):
         """
         Adds the requested permissions to the current service's Ceph key,
-        allowing the key to access only the specified pools or
-        object prefixes. object_prefix_permissions should be a dictionary
-        keyed on the permission with the corresponding value being a list
-        of prefixes to apply that permission to.
-            {
-                'rwx': ['prefix1', 'prefix2'],
-                'class-read': ['prefix3']}
+        allowing the key to access only the specified pools
         """
-        self.ops.append({
-            'op': 'add-permissions-to-key', 'group': name,
-            'namespace': namespace,
-            'name': key_name or service_name(),
-            'group-permission': permission,
-            'object-prefix-permissions': object_prefix_permissions})
+        self.ops.append({'op': 'add-permissions-to-key', 'group': name,
+                         'namespace': namespace, 'name': key_name or service_name(),
+                         'group-permission': permission})
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                            weight=None, group=None, namespace=None):
@@ -1117,10 +1107,7 @@ class CephBrokerRq(object):
     def _ops_equal(self, other):
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in [
-                        'replicas', 'name', 'op', 'pg_num', 'weight',
-                        'group', 'group-namespace', 'group-permission',
-                        'object-prefix-permissions']:
+                for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:

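For clarity, the two request shapes touched by this revert, as a consumer of CephBrokerRq would build them (assumes charmhelpers is importable; key_name is passed explicitly so the sketch runs outside a hook context, and the pool name mirrors the cinder hooks below):

from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()

# Post-revert form: grant rwx on the named pool group only.
rq.add_op_request_access_to_group(name="volumes", permission='rwx',
                                  key_name='cinder-ceph')

# Pre-revert form (removed by this revert): additionally grants class-read
# on the rbd_children object prefix.
# rq.add_op_request_access_to_group(
#     name="volumes",
#     object_prefix_permissions={'class-read': ['rbd_children']},
#     permission='rwx',
#     key_name='cinder-ceph')

print(rq.ops)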

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import functools
 from subprocess import (
     CalledProcessError,
     check_call,
@@ -102,52 +101,3 @@ def create_lvm_volume_group(volume_group, block_device):
     :block_device: str: Full path of PV-initialized block device.
     '''
     check_call(['vgcreate', volume_group, block_device])
-def list_logical_volumes(select_criteria=None, path_mode=False):
-    '''
-    List logical volumes
-    :param select_criteria: str: Limit list to those volumes matching this
-        criteria (see 'lvs -S help' for more details)
-    :param path_mode: bool: return logical volume name in 'vg/lv' format, this
-        format is required for some commands like lvextend
-    :returns: [str]: List of logical volumes
-    '''
-    lv_diplay_attr = 'lv_name'
-    if path_mode:
-        # Parsing output logic relies on the column order
-        lv_diplay_attr = 'vg_name,' + lv_diplay_attr
-    cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings']
-    if select_criteria:
-        cmd.extend(['--select', select_criteria])
-    lvs = []
-    for lv in check_output(cmd).decode('UTF-8').splitlines():
-        if not lv:
-            continue
-        if path_mode:
-            lvs.append('/'.join(lv.strip().split()))
-        else:
-            lvs.append(lv.strip())
-    return lvs
-list_thin_logical_volume_pools = functools.partial(
-    list_logical_volumes,
-    select_criteria='lv_attr =~ ^t')
-list_thin_logical_volumes = functools.partial(
-    list_logical_volumes,
-    select_criteria='lv_attr =~ ^V')
-def extend_logical_volume_by_device(lv_name, block_device):
-    '''
-    Extends the size of logical volume lv_name by the amount of free space on
-    physical volume block_device.
-    :param lv_name: str: name of logical volume to be extended (vg/lv format)
-    :param block_device: str: name of block_device to be allocated to lv_name
-    '''
-    cmd = ['lvextend', lv_name, block_device]
-    check_call(cmd)

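For reference, how the helpers removed in the hunk above were used before this revert (volume group, LV and device names are illustrative; the underlying lvs/lvextend commands need LVM tools and root privileges):

from charmhelpers.contrib.storage.linux.lvm import (
    list_logical_volumes,
    list_thin_logical_volume_pools,
    extend_logical_volume_by_device,
)

# path_mode=True returns 'vg/lv' names, the form lvextend expects.
for lv in list_logical_volumes(path_mode=True):
    print(lv)                     # e.g. cinder-volumes/volume-1234

print(list_thin_logical_volume_pools())

# Grow a logical volume by the free space on an already-initialised PV.
extend_logical_volume_by_device('cinder-volumes/volume-1234', '/dev/vdb')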

@@ -91,18 +91,12 @@ def get_ceph_request():
                           weight=weight,
                           group="volumes")
     if config('restrict-ceph-pools'):
-        rq.add_op_request_access_to_group(
-            name="volumes",
-            object_prefix_permissions={'class-read': ['rbd_children']},
-            permission='rwx')
-        rq.add_op_request_access_to_group(
-            name="images",
-            object_prefix_permissions={'class-read': ['rbd_children']},
-            permission='rwx')
-        rq.add_op_request_access_to_group(
-            name="vms",
-            object_prefix_permissions={'class-read': ['rbd_children']},
-            permission='rwx')
+        rq.add_op_request_access_to_group(name="volumes",
+                                          permission='rwx')
+        rq.add_op_request_access_to_group(name="images",
+                                          permission='rwx')
+        rq.add_op_request_access_to_group(name="vms",
+                                          permission='rwx')
     return rq


@@ -134,18 +134,9 @@ class TestCinderHooks(CharmTestCase):
         mock_create_pool.assert_called_with(name='cinder', replica_count=4,
                                             weight=20, group='volumes')
         mock_request_access.assert_has_calls([
-            call(
-                name='volumes',
-                object_prefix_permissions={'class-read': ['rbd_children']},
-                permission='rwx'),
-            call(
-                name='images',
-                object_prefix_permissions={'class-read': ['rbd_children']},
-                permission='rwx'),
-            call(
-                name='vms',
-                object_prefix_permissions={'class-read': ['rbd_children']},
-                permission='rwx'),
+            call(name='volumes', permission='rwx'),
+            call(name='images', permission='rwx'),
+            call(name='vms', permission='rwx'),
         ])
     @patch('charmhelpers.core.hookenv.config')